提交 965fd225 编写于 作者: X Xinghai Sun

Merge branch 'develop' into cos_sim_vector

...@@ -4,7 +4,6 @@ cache: ...@@ -4,7 +4,6 @@ cache:
- $HOME/.ccache - $HOME/.ccache
- $HOME/.cache/pip - $HOME/.cache/pip
- $TRAVIS_BUILD_DIR/build/third_party - $TRAVIS_BUILD_DIR/build/third_party
- $TRAVIS_BUILD_DIR/build_android/third_party
sudo: required sudo: required
dist: trusty dist: trusty
os: os:
...@@ -12,7 +11,6 @@ os: ...@@ -12,7 +11,6 @@ os:
env: env:
- JOB=build_doc - JOB=build_doc
- JOB=check_style - JOB=check_style
- JOB=build_android
addons: addons:
apt: apt:
packages: packages:
...@@ -23,7 +21,6 @@ addons: ...@@ -23,7 +21,6 @@ addons:
- python - python
- python-pip - python-pip
- python2.7-dev - python2.7-dev
- python-numpy
- python-wheel - python-wheel
- libboost-dev - libboost-dev
- curl - curl
...@@ -37,8 +34,8 @@ before_install: ...@@ -37,8 +34,8 @@ before_install:
- if [[ "$JOB" == "check_style" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi - if [[ "$JOB" == "check_style" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi
# Paddle is using protobuf 3.1 currently. Protobuf 3.2 breaks the compatibility. So we specify the python # Paddle is using protobuf 3.1 currently. Protobuf 3.2 breaks the compatibility. So we specify the python
# protobuf version. # protobuf version.
- pip install -r $TRAVIS_BUILD_DIR/python/requirements.txt - sudo pip install -r $TRAVIS_BUILD_DIR/python/requirements.txt
- pip install wheel sphinx==1.5.6 recommonmark sphinx-rtd-theme==0.1.9 virtualenv pre-commit LinkChecker - sudo pip install wheel sphinx==1.5.6 recommonmark sphinx-rtd-theme==0.1.9 virtualenv pre-commit LinkChecker
- curl https://glide.sh/get | bash - curl https://glide.sh/get | bash
- eval "$(GIMME_GO_VERSION=1.8.3 gimme)" - eval "$(GIMME_GO_VERSION=1.8.3 gimme)"
- go get -u github.com/alecthomas/gometalinter - go get -u github.com/alecthomas/gometalinter
......
...@@ -65,8 +65,8 @@ if(NOT CMAKE_BUILD_TYPE) ...@@ -65,8 +65,8 @@ if(NOT CMAKE_BUILD_TYPE)
endif() endif()
if(ANDROID) if(ANDROID)
if(${CMAKE_SYSTEM_VERSION} VERSION_LESS "21") if(${CMAKE_SYSTEM_VERSION} VERSION_LESS "16")
message(FATAL_ERROR "Unsupport standalone toolchains with Android API level lower than 21") message(FATAL_ERROR "Unsupport standalone toolchains with Android API level lower than 16")
endif() endif()
set(WITH_GPU OFF CACHE STRING set(WITH_GPU OFF CACHE STRING
......
...@@ -4,9 +4,15 @@ MAINTAINER PaddlePaddle Authors <paddle-dev@baidu.com> ...@@ -4,9 +4,15 @@ MAINTAINER PaddlePaddle Authors <paddle-dev@baidu.com>
ARG UBUNTU_MIRROR ARG UBUNTU_MIRROR
RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ubuntu.com/ubuntu#${UBUNTU_MIRROR}#g' /etc/apt/sources.list; fi' RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ubuntu.com/ubuntu#${UBUNTU_MIRROR}#g' /etc/apt/sources.list; fi'
# ENV variables
ARG ANDROID_ABI
ENV ANDROID_ABI=${ANDROID_ABI:-"armeabi-v7a"}
ENV HOME=/root \ ENV HOME=/root \
ANDROID_NDK_HOME=/opt/android-ndk-linux \ ANDROID_NDK_HOME=/opt/android-ndk-linux \
ANDROID_STANDALONE_TOOLCHAIN=/opt/android-toolchain-gcc ANDROID_ARM_STANDALONE_TOOLCHAIN=/opt/arm-toolchain \
ANDROID_ARM64_STANDALONE_TOOLCHAIN=/opt/arm64-toolchain
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y \ apt-get install -y \
...@@ -15,12 +21,11 @@ RUN apt-get update && \ ...@@ -15,12 +21,11 @@ RUN apt-get update && \
apt-get clean -y apt-get clean -y
# Install Go and glide # Install Go and glide
RUN wget -O go.tgz https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz && \ RUN wget -qO- go.tgz https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz | \
tar -C /usr/local -xzf go.tgz && \ tar -xz -C /usr/local && \
mkdir /root/gopath && \ mkdir /root/gopath && \
mkdir /root/gopath/bin && \ mkdir /root/gopath/bin && \
mkdir /root/gopath/src && \ mkdir /root/gopath/src
rm go.tgz
ENV GOROOT=/usr/local/go GOPATH=/root/gopath ENV GOROOT=/usr/local/go GOPATH=/root/gopath
# should not be in the same line with GOROOT definition, otherwise docker build could not find GOROOT. # should not be in the same line with GOROOT definition, otherwise docker build could not find GOROOT.
ENV PATH=${PATH}:${GOROOT}/bin:${GOPATH}/bin ENV PATH=${PATH}:${GOROOT}/bin:${GOPATH}/bin
...@@ -42,7 +47,8 @@ RUN mkdir /opt/android-ndk-tmp && \ ...@@ -42,7 +47,8 @@ RUN mkdir /opt/android-ndk-tmp && \
wget -q https://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip && \ wget -q https://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip && \
unzip -q android-ndk-r14b-linux-x86_64.zip && \ unzip -q android-ndk-r14b-linux-x86_64.zip && \
mv android-ndk-r14b ${ANDROID_NDK_HOME} && \ mv android-ndk-r14b ${ANDROID_NDK_HOME} && \
${ANDROID_NDK_HOME}/build/tools/make-standalone-toolchain.sh --arch=arm --platform=android-21 --install-dir=${ANDROID_STANDALONE_TOOLCHAIN} && \ ${ANDROID_NDK_HOME}/build/tools/make-standalone-toolchain.sh --arch=arm --platform=android-23 --install-dir=${ANDROID_ARM_STANDALONE_TOOLCHAIN} && \
${ANDROID_NDK_HOME}/build/tools/make-standalone-toolchain.sh --arch=arm64 --platform=android-23 --install-dir=${ANDROID_ARM64_STANDALONE_TOOLCHAIN} && \
rm -rf /opt/android-ndk-tmp && \ rm -rf /opt/android-ndk-tmp && \
rm -rf ${ANDROID_NDK_HOME} rm -rf ${ANDROID_NDK_HOME}
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
# The supported variables are listed belows: # The supported variables are listed belows:
# #
# ANDROID_STANDALONE_TOOLCHAIN # ANDROID_STANDALONE_TOOLCHAIN
# ANDROID_TOOLCHAIN
# ANDROID_ABI # ANDROID_ABI
# ANDROID_NATIVE_API_LEVEL # ANDROID_NATIVE_API_LEVEL
# ANDROID_ARM_MODE # ANDROID_ARM_MODE
...@@ -57,6 +58,10 @@ IF(NOT DEFINED CMAKE_SYSTEM_VERSION AND ANDROID_NATIVE_API_LEVEL) ...@@ -57,6 +58,10 @@ IF(NOT DEFINED CMAKE_SYSTEM_VERSION AND ANDROID_NATIVE_API_LEVEL)
ENDIF() ENDIF()
ENDIF() ENDIF()
IF(NOT DEFINED ANDROID_TOOLCHAIN)
SET(ANDROID_TOOLCHAIN clang)
ENDIF()
IF(NOT DEFINED ANDROID_ABI) IF(NOT DEFINED ANDROID_ABI)
SET(ANDROID_ABI "armeabi-v7a") SET(ANDROID_ABI "armeabi-v7a")
ENDIF() ENDIF()
...@@ -82,6 +87,7 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0") ...@@ -82,6 +87,7 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0")
"${CMAKE_VERSION}), when cross-compiling for Android.") "${CMAKE_VERSION}), when cross-compiling for Android.")
IF(ANDROID_STANDALONE_TOOLCHAIN) IF(ANDROID_STANDALONE_TOOLCHAIN)
# Use standalone toolchain
SET(CMAKE_SYSROOT "${ANDROID_STANDALONE_TOOLCHAIN}/sysroot") SET(CMAKE_SYSROOT "${ANDROID_STANDALONE_TOOLCHAIN}/sysroot")
IF(NOT CMAKE_SYSTEM_VERSION) IF(NOT CMAKE_SYSTEM_VERSION)
...@@ -96,26 +102,44 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0") ...@@ -96,26 +102,44 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0")
ENDIF() ENDIF()
# Toolchain # Toolchain
SET(ANDROID_TOOLCHAIN "gcc")
SET(ANDROID_TOOLCHAIN_ROOT ${ANDROID_STANDALONE_TOOLCHAIN}) SET(ANDROID_TOOLCHAIN_ROOT ${ANDROID_STANDALONE_TOOLCHAIN})
IF(ANDROID_ABI MATCHES "^armeabi(-v7a)?$") ELSE(ANDROID_NDK)
SET(ANDROID_TOOLCHAIN_NAME arm-linux-androideabi) # TODO: use android ndk
IF(ANDROID_ABI STREQUAL "armeabi") ENDIF()
SET(CMAKE_SYSTEM_PROCESSOR armv5te)
ELSEIF(ANDROID_ABI STREQUAL "armeabi-v7a") IF(ANDROID_ABI MATCHES "^armeabi(-v7a)?$")
SET(CMAKE_SYSTEM_PROCESSOR armv7-a) SET(ANDROID_TOOLCHAIN_NAME arm-linux-androideabi)
ENDIF() IF(ANDROID_ABI STREQUAL "armeabi")
ENDIF() SET(CMAKE_SYSTEM_PROCESSOR armv5te)
IF(ANDROID_ABI STREQUAL "arm64-v8a") SET(ANDROID_CLANG_TRIPLE armv5te-none-linux-androideabi)
SET(ANDROID_TOOLCHAIN_NAME aarch64-linux-android) ELSEIF(ANDROID_ABI STREQUAL "armeabi-v7a")
SET(CMAKE_SYSTEM_PROCESSOR aarch64) SET(CMAKE_SYSTEM_PROCESSOR armv7-a)
SET(ANDROID_CLANG_TRIPLE armv7-none-linux-androideabi)
ENDIF() ENDIF()
SET(ANDROID_TOOLCHAIN_PREFIX "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_NAME}-") ELSEIF(ANDROID_ABI STREQUAL "arm64-v8a")
SET(ANDROID_TOOLCHAIN_NAME aarch64-linux-android)
SET(CMAKE_SYSTEM_PROCESSOR aarch64)
SET(ANDROID_CLANG_TRIPLE aarch64-none-linux-android)
ELSE()
MESSAGE(FATAL_ERROR "Invalid Android ABI: ${ANDROID_ABI}.")
ENDIF()
SET(ANDROID_TOOLCHAIN_PREFIX "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_NAME}-")
IF(ANDROID_TOOLCHAIN STREQUAL clang)
SET(ANDROID_C_COMPILER_NAME clang)
SET(ANDROID_CXX_COMPILER_NAME clang++)
SET(CMAKE_C_COMPILER_TARGET ${ANDROID_CLANG_TRIPLE})
SET(CMAKE_CXX_COMPILER_TARGET ${ANDROID_CLANG_TRIPLE})
ELSEIF(ANDROID_TOOLCHAIN STREQUAL gcc)
SET(ANDROID_C_COMPILER_NAME gcc)
SET(ANDROID_CXX_COMPILER_NAME g++)
ELSE()
MESSAGE(FATAL_ERROR "Invalid Android toolchain: ${ANDROID_TOOLCHAIN}")
ENDIF() ENDIF()
# C compiler # C compiler
IF(NOT CMAKE_C_COMPILER) IF(NOT CMAKE_C_COMPILER)
SET(ANDROID_C_COMPILER "${ANDROID_TOOLCHAIN_PREFIX}gcc") SET(ANDROID_C_COMPILER "${ANDROID_TOOLCHAIN_PREFIX}${ANDROID_C_COMPILER_NAME}")
ELSE() ELSE()
GET_FILENAME_COMPONENT(ANDROID_C_COMPILER ${CMAKE_C_COMPILER} PROGRAM) GET_FILENAME_COMPONENT(ANDROID_C_COMPILER ${CMAKE_C_COMPILER} PROGRAM)
ENDIF() ENDIF()
...@@ -125,7 +149,7 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0") ...@@ -125,7 +149,7 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0")
# CXX compiler # CXX compiler
IF(NOT CMAKE_CXX_COMPILER) IF(NOT CMAKE_CXX_COMPILER)
SET(ANDROID_CXX_COMPILER "${ANDROID_TOOLCHAIN_PREFIX}g++") SET(ANDROID_CXX_COMPILER "${ANDROID_TOOLCHAIN_PREFIX}${ANDROID_CXX_COMPILER_NAME}")
ELSE() ELSE()
GET_FILENAME_COMPONENT(ANDROID_CXX_COMPILER ${CMAKE_CXX_COMPILER} PROGRAM) GET_FILENAME_COMPONENT(ANDROID_CXX_COMPILER ${CMAKE_CXX_COMPILER} PROGRAM)
ENDIF() ENDIF()
...@@ -137,7 +161,7 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0") ...@@ -137,7 +161,7 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0")
SET(CMAKE_CXX_COMPILER ${ANDROID_CXX_COMPILER} CACHE PATH "CXX compiler" FORCE) SET(CMAKE_CXX_COMPILER ${ANDROID_CXX_COMPILER} CACHE PATH "CXX compiler" FORCE)
# Toolchain and ABI specific flags. # Toolchain and ABI specific flags.
SET(ANDROID_COMPILER_FLAGS "-ffunction-sections -fdata-sections -finline-limit=64") SET(ANDROID_COMPILER_FLAGS "-ffunction-sections -fdata-sections")
SET(ANDROID_LINKER_FLAGS "-Wl,--gc-sections") SET(ANDROID_LINKER_FLAGS "-Wl,--gc-sections")
IF(ANDROID_ABI STREQUAL "armeabi") IF(ANDROID_ABI STREQUAL "armeabi")
...@@ -145,8 +169,7 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0") ...@@ -145,8 +169,7 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0")
-march=armv5te -march=armv5te
-mtune=xscale -mtune=xscale
-msoft-float) -msoft-float)
ENDIF() ELSEIF(ANDROID_ABI STREQUAL "armeabi-v7a")
IF(ANDROID_ABI STREQUAL "armeabi-v7a")
LIST(APPEND ANDROID_COMPILER_FLAGS LIST(APPEND ANDROID_COMPILER_FLAGS
-march=armv7-a -march=armv7-a
-mfloat-abi=softfp) -mfloat-abi=softfp)
...@@ -156,6 +179,8 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0") ...@@ -156,6 +179,8 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0")
LIST(APPEND ANDROID_COMPILER_FLAGS -mfpu=vfpv3-d16) LIST(APPEND ANDROID_COMPILER_FLAGS -mfpu=vfpv3-d16)
ENDIF() ENDIF()
LIST(APPEND ANDROID_LINKER_FLAGS -Wl,--fix-cortex-a8) LIST(APPEND ANDROID_LINKER_FLAGS -Wl,--fix-cortex-a8)
ELSEIF(ANDROID_ABI STREQUAL "arm64-v8a")
LIST(APPEND ANDROID_COMPILER_FLAGS -march=armv8-a)
ENDIF() ENDIF()
IF(ANDROID_ABI MATCHES "^armeabi(-v7a)?$") IF(ANDROID_ABI MATCHES "^armeabi(-v7a)?$")
...@@ -164,10 +189,18 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0") ...@@ -164,10 +189,18 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0")
ELSE() ELSE()
LIST(APPEND ANDROID_COMPILER_FLAGS -mthumb) LIST(APPEND ANDROID_COMPILER_FLAGS -mthumb)
ENDIF() ENDIF()
IF(ANDROID_TOOLCHAIN STREQUAL clang)
# Disable integrated-as for better compatibility.
LIST(APPEND ANDROID_COMPILER_FLAGS -fno-integrated-as)
ENDIF()
ENDIF() ENDIF()
IF(ANDROID_ABI STREQUAL "arm64-v8a") IF(ANDROID_TOOLCHAIN STREQUAL clang)
LIST(APPEND ANDROID_COMPILER_FLAGS -march=armv8-a) # CMake automatically forwards all compiler flags to the linker,
# and clang doesn't like having -Wa flags being used for linking.
# To prevent CMake from doing this would require meddling with
# the CMAKE_<LANG>_COMPILE_OBJECT rules, which would get quite messy.
LIST(APPEND ANDROID_LINKER_FLAGS -Qunused-arguments)
ENDIF() ENDIF()
STRING(REPLACE ";" " " ANDROID_COMPILER_FLAGS "${ANDROID_COMPILER_FLAGS}") STRING(REPLACE ";" " " ANDROID_COMPILER_FLAGS "${ANDROID_COMPILER_FLAGS}")
......
...@@ -12,6 +12,10 @@ ...@@ -12,6 +12,10 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
IF(USE_EIGEN_FOR_BLAS)
return()
ENDIF(USE_EIGEN_FOR_BLAS)
INCLUDE(cblas) INCLUDE(cblas)
IF(NOT ${CBLAS_FOUND}) IF(NOT ${CBLAS_FOUND})
......
...@@ -86,12 +86,13 @@ def layer.fc(X):

We'd like to have Python bindings to operators in package `paddle.operator`, and Python compositions of operators in package `paddle.layer`. So we have the following concepts in above illustrative example:

```
| C++ functions/functors | mul          | add          |             |          |
|------------------------|--------------|--------------|-------------|----------|
| C++ operator class     | mulOp        | addOp        | FCOp        |          |
| Python binding         | operator.mul | operator.add | operator.fc |          |
| Python function        |              |              |             | layer.fc |
```

This is how we differentiate layer and operators in PaddlePaddle:
......
# Design Doc: Computations as a Graph

A primary goal of the refactorization of PaddlePaddle is a more flexible representation of deep learning computation, in particular, a graph of operators and variables, instead of sequences of layers as before.

...@@ -8,6 +8,8 @@ This document explains the construction of a graph as three steps:

- construct the backward part
- construct the optimization part

## The Construction of a Graph

Let us take the problem of image classification as a simple example. The application program that trains the model looks like:

```python
...@@ -25,7 +27,9 @@ The first four lines of above program build the forward part of the graph.

![](images/graph_construction_example_forward_only.png)

In particular, the first line `x = layer.data("images")` creates variable x and a Feed operator that copies a column from the minibatch to x. `y = layer.fc(x)` creates not only the FC operator and output variable y, but also two parameters, W and b, and the initialization operators.

Initialization operators are a kind of "run-once" operator: the `Run` method increments a class data member counter so that the initialization runs at most once. This way, a parameter is not initialized repeatedly, say, in every minibatch.
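To make this run-once behavior concrete, below is a minimal Python sketch of the idea. The class and member names (`InitOperator`, `run_count`) are hypothetical illustrations; the actual initialization operators are C++ operators described by `OpDesc`.

```python
class InitOperator(object):
    """Hypothetical sketch of a run-once initialization operator."""

    def __init__(self, param, initializer):
        self.param = param              # parameter variable to fill, e.g. W or b
        self.initializer = initializer  # callable that fills the parameter
        self.run_count = 0              # data member acting as the counter

    def Run(self):
        # Increment the counter; only the very first call initializes the parameter.
        self.run_count += 1
        if self.run_count == 1:
            self.initializer(self.param)
```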
In this example, all operators are created as `OpDesc` protobuf messages, and all variables are `VarDesc`. These protobuf messages are saved in a `BlockDesc` protobuf message.

...@@ -49,3 +53,18 @@ According to the chain rule of gradient computation, `ConstructBackwardGraph` wo

For each parameter, like W and b created by `layer.fc`, marked as double circles in above graphs, `ConstructOptimizationGraph` creates an optimization operator to apply its gradient. This results in the complete graph:

![](images/graph_construction_example_all.png)

## Block and Graph

The words block and graph are interchangeable in the design of PaddlePaddle. A [Block](https://github.com/PaddlePaddle/Paddle/pull/3708) is a metaphor of the code and local variables in a pair of curly braces in programming languages, where operators are like statements or instructions. A graph of operators and variables is a representation of the block.
A Block keeps operators in an array `BlockDesc::ops`
```protobuf
message BlockDesc {
repeated OpDesc ops = 1;
repeated VarDesc vars = 2;
}
```
in the order in which they appear in user programs, like the Python program at the beginning of this article. We can imagine that in `ops`, we have some forward operators, followed by some gradient operators, and then some optimization operators.
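For instance, for the image-classification example above, the `ops` array of such a block can be pictured as the list below; the op names are illustrative only, not the names actually generated by the framework.

```python
# Conceptual ordering of BlockDesc.ops after the three construction steps.
block_ops = [
    "feed_x", "feed_label", "init_W", "init_b", "fc", "mse",  # forward part (+ init)
    "mse_grad", "fc_grad",                                    # backward part
    "sgd_W", "sgd_b",                                         # optimization part
]
```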
...@@ -2,6 +2,8 @@ digraph ImageClassificationGraph { ...@@ -2,6 +2,8 @@ digraph ImageClassificationGraph {
///////// The forward part ///////// ///////// The forward part /////////
FeedX [label="Feed", color=blue, shape=box]; FeedX [label="Feed", color=blue, shape=box];
FeedY [label="Feed", color=blue, shape=box]; FeedY [label="Feed", color=blue, shape=box];
InitW [label="Init", color=blue, shape=diamond];
Initb [label="Init", color=blue, shape=diamond];
FC [label="FC", color=blue, shape=box]; FC [label="FC", color=blue, shape=box];
MSE [label="MSE", color=blue, shape=box]; MSE [label="MSE", color=blue, shape=box];
...@@ -14,6 +16,8 @@ digraph ImageClassificationGraph { ...@@ -14,6 +16,8 @@ digraph ImageClassificationGraph {
FeedX -> x -> FC -> y -> MSE -> cost [color=blue]; FeedX -> x -> FC -> y -> MSE -> cost [color=blue];
FeedY -> l [color=blue]; FeedY -> l [color=blue];
InitW -> W [color=blue];
Initb -> b [color=blue];
W -> FC [color=blue]; W -> FC [color=blue];
b -> FC [color=blue]; b -> FC [color=blue];
l -> MSE [color=blue]; l -> MSE [color=blue];
......
# Design Doc: Operation Graph Based Parameter Server
## Abstract
We propose an approach to implement the parameter server. In this
approach, there is no fundamental difference between the trainer and
the parameter server: they both run subgraphs, but subgraphs of
different purposes.
## Background
The previous implementations of the parameter server do not run a
subgraph. Parameter initialization, optimizer computation, network
communication and checkpointing are implemented twice, on both the
trainer and the parameter server.

It would be great if we could write code once and use it on both the
trainer and the parameter server: it reduces code duplication and
improves extensibility. Given that after the current refactor we
represent everything as a computing graph on the trainer, representing
everything as a computing graph on the parameter server becomes a
natural extension.
## Design
### Graph Converter
The *graph converter* converts the user-defined operation (OP) graph
into subgraphs to be scheduled on different nodes with the following
steps:
1. OP placement: the OPs will be placed on different nodes according
   to a heuristic that minimizes the estimated total computation
   time. Currently we use a simple heuristic that puts parameter
   variables on the parameter server workers and everything else on
   the trainer workers.

1. Add communication OPs to enable the communication between nodes.
   We will need these OPs: *Send*, *Recv*, *Enqueue*, *Dequeue*.
Below is an example of converting the user defined graph to the
subgraphs for the trainer and the parameter server:
<img src="src/local-graph.png" width="300"/>
After converting:
<img src="src/dist-graph.png" width="700"/>
1. The parameter variable W and its optimizer subgraph are placed on the parameter server.
1. Operators are added to the subgraphs.
   - *Send* sends data to the connected *Recv* operator. The
     scheduler on the receiving node will only schedule the *Recv* operator
     to run when the *Send* operator has run (the *Send* OP will mark
     the *Recv* OP runnable automatically).
   - *Enqueue* enqueues the input variable; it can block until space
     becomes available in the queue.
   - *Dequeue* outputs a configurable number of tensors from the
     queue. It will block until the queue has the required number of
     tensors.
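As a rough illustration of what the graph converter does (not an actual PaddlePaddle API; `split_graph`, the op dictionaries, and the placement callback below are all hypothetical), the following Python sketch assigns each OP to a worker and inserts a *Send*/*Recv* pair whenever a variable produced on one worker is consumed on the other:

```python
def split_graph(ops, placement):
    """Assign each OP to a worker; insert Send/Recv when a variable crosses workers."""
    subgraphs = {"trainer": [], "pserver": []}
    produced_on = {}                          # variable name -> producing worker
    for op in ops:
        worker = placement(op)                # "trainer" or "pserver"
        for var in op["inputs"]:
            src = produced_on.get(var, worker)
            if src != worker:                 # variable crosses the boundary
                # A real converter would also deduplicate repeated transfers.
                subgraphs[src].append({"type": "Send", "inputs": [var], "outputs": []})
                subgraphs[worker].append({"type": "Recv", "inputs": [], "outputs": [var]})
        subgraphs[worker].append(op)
        for var in op["outputs"]:
            produced_on[var] = worker
    return subgraphs

# Toy graph: W lives on the parameter server, the FC forward/backward on the trainer.
ops = [
    {"type": "Init",    "inputs": [],                   "outputs": ["W"]},
    {"type": "FC",      "inputs": ["x", "W"],           "outputs": ["y"]},
    {"type": "FC_GRAD", "inputs": ["x", "W", "y@GRAD"], "outputs": ["W@GRAD"]},
    {"type": "SGD",     "inputs": ["W", "W@GRAD"],      "outputs": ["W"]},
]
placement = lambda op: "pserver" if op["type"] in ("Init", "SGD") else "trainer"
subgraphs = split_graph(ops, placement)
```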
### Benefits
- Model parallelism becomes easier to implement: it is an extension of
  the trainer - parameter server approach. We already have the
  communication OPs, but need to extend the graph converter's
  placement functionality.
- User-defined optimizers are easier to add - the user can now express one as
  a subgraph.
- No more duplicated logic inside the trainer and the parameter
  server, as mentioned in the background section.
### Challenges
- It might be hard for the graph converter to cut a general graph
  (without any hint for which subgraph is the optimizer). We may need
  to label which subgraph inside the OP graph is the optimizer.
- It's important to balance the parameter shards across multiple
  parameter servers. If a single parameter is very big (e.g. some
  word-embedding, fully connected, or softmax layer), we need to
  automatically partition the single parameter onto different
  parameter servers when possible (only element-wise optimizers depend
  on the parameter variable).
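As an illustration of the partitioning idea (a hypothetical helper for this doc, not an actual PaddlePaddle API), a large parameter can be split element-wise into roughly equal contiguous shards, one per parameter server:

```python
def shard_parameter(num_elements, num_pservers):
    """Split a flat parameter of `num_elements` elements into contiguous
    [start, end) ranges, one per parameter server, as evenly as possible."""
    base, remainder = divmod(num_elements, num_pservers)
    shards, start = [], 0
    for i in range(num_pservers):
        size = base + (1 if i < remainder else 0)
        shards.append((start, start + size))
        start += size
    return shards

# A 10M-element embedding table split across 4 parameter servers.
print(shard_parameter(10000000, 4))
```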
### Discussion
- In the "Aync SGD" figure, the "W" variable on the parameter server
could be read and wrote concurrently, what is our locking strategy?
E.g., each variable have a lock cpp method to be invoked by every
OP, or, have a lock OP.
- Can the Enqueue OP be implemented under our current tensor design
(puts the input tensor into the queue tensor)?
- *Dequeue* OP will have variable numbers of output (depends on the
`min_count` attribute), does our current design support it? (similar
question for the *Add* OP)
### References:
[1] [TensorFlow: Large-Scale Machine Learning on Heterogeneous Distributed Systems](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/45166.pdf)
...@@ -147,7 +147,7 @@ class CosineOp {

struct CosineOpProtoMaker : public OpProtoMaker {
  CosineOpProtoMaker(OpProto* proto) : OpProtoMaker(proto) {
    AddInput("input", "input of cosine op");
    AddAttr("scale", "scale of cosine op", float).Default(1.0).GreaterThan(0.0);
    AddType("cos");
    AddComment("This is cos op");
  }
......
## Background
PaddlePaddle divides the description of neural network computation graph into two stages: compile time and runtime.
PaddlePaddle uses protobuf messages to describe the compile time graph because

1. The computation graph should be able to be saved to a file.
1. In distributed training, the graph will be serialized and sent to multiple workers.

The computation graph is constructed from Data Nodes and Operation Nodes. The concepts that represent them are listed in the table below.
| |compile time|runtime|
|---|---|---|
|Data|VarDesc(proto)|Variable(cpp)|
|Operation|OpDesc(proto)|Operator(cpp)|
## Definition of VarDesc
A VarDesc should have a name and a value; in PaddlePaddle, the value will always be a tensor. Since we use LoDTensor most of the time, we add a LoDTensorDesc to represent it.
```proto
message VarDesc {
required string name = 1;
optional LoDTensorDesc lod_tensor = 2;
}
```
## Definition of LoDTensorDesc
```proto
enum DataType {
BOOL = 0;
INT16 = 1;
INT32 = 2;
INT64 = 3;
FP16 = 4;
FP32 = 5;
FP64 = 6;
}
message LoDTensorDesc {
required DataType data_type = 1;
repeated int32 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480]
optional int32 lod_level = 3 [default=0];
}
```
## Definition of Variable in Python
In the Python API, a layer takes Variables as input and returns Variables as output. There should be a class `Variable` in Python to help create and manage Variables.
```python
image = Variable(dims=[-1, 640, 480])
# fc1 and fc2 are both Variable
fc1 = layer.fc(input=image, output_size=10)
fc2 = layer.fc(input=fc1, output_size=20)
```
### What should class `Variable` have

1. `name`. A name of string type is used to mark the value of the Variable.
1. `initializer`. Since our Tensor does not have a value, we will always use some Operator to fill it at run time. So we should have an initialize method to help add the init operator.
1. `operator`. A Variable should record which operator produces it. The reason is:
   - We use `pd.eval(targets=[var1, var2])` to run the related ops to get the values of var1 and var2. `var.op` is used to trace the dependencies of the current variable (see the sketch below).
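Below is a minimal, self-contained Python sketch of how `var.op` can be used to trace the dependencies of a target variable. The `Var`/`Op` classes here are simplified stand-ins for this illustration, not the actual framework classes:

```python
class Op(object):
    def __init__(self, type, inputs, outputs):
        self.type, self.inputs, self.outputs = type, inputs, outputs
        for var in outputs:
            var.op = self          # each output records its producing operator

class Var(object):
    def __init__(self, name):
        self.name, self.op = name, None

def trace_ops(targets):
    """Collect, in topological order, every op needed to compute `targets`."""
    ordered, visited = [], set()
    def visit(var):
        if var.op is None or var.op in visited:
            return
        visited.add(var.op)
        for dep in var.op.inputs:
            visit(dep)
        ordered.append(var.op)
    for t in targets:
        visit(t)
    return ordered

# x -> fc1 -> fc2; evaluating fc2 needs both fc ops, in order.
x, fc1, fc2 = Var("x"), Var("fc1"), Var("fc2")
Op("fc", [x], [fc1])
Op("fc", [fc1], [fc2])
print([op.type for op in trace_ops([fc2])])   # ['fc', 'fc']
```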
In PaddlePaddle, we use Block to describe Computation Graph, so in the code we will use Block but not Graph.
```python
import VarDesc
import LoDTensorDesc
import framework

def AddInitialOperator(variable, initializer):
    # add an initialize Operator to block to init this Variable
    pass

class Variable(object):
    def __init__(self, name, dims, type, initializer):
        self._block = get_default_block()
        self._name = name
        self.op = None

        tensor_desc = LoDTensorDesc(data_type=type, dims=dims)
        _var_desc = VarDesc(name=name, lod_tensor=tensor_desc)
        self._var = framework.CreateVar(_var_desc)
        self._block.add_var(self)

        # add initial op according to initializer
        if initializer is not None:
            AddInitialOperator(self, initializer)

    def dims(self):
        return self._var.dims()

    def data_type(self):
        return self._var.data_type()

    def to_proto(self):
        pass
```
Then we can use this Variable to create a fc layer in Python.
```python
import paddle as pd

def flatten_size(X, num_flatten_dims):
    prod = 1 # of last num_flatten_dims
    for i in xrange(num_flatten_dims):
        prod = prod * X.dims[-i-1]
    return prod

def layer.fc(X, output_size, num_flatten_dims):
    W = Variable(pd.random_uniform(), type=FP32, dims=[flatten_size(X, num_flatten_dims), output_size])
    b = Variable(pd.random_uniform(), type=FP32, dims=[output_size])
    out = Variable(type=FP32)
    y = operator.fc(X, W, b, output=out) # fc will put fc op input into out
    pd.InferShape(y)
    return out

x = Variable(dims=[-1, 640, 480])
y = layer.fc(x, output_size=100)
z = layer.fc(y, output_size=200)
paddle.eval(targets=[z], ...)
print(z)
```
...@@ -23,17 +23,20 @@ ...@@ -23,17 +23,20 @@
- `framework::OperatorWithKernel`:继承自OperatorBase,Op有计算函数,称作有Kernel。 - `framework::OperatorWithKernel`:继承自OperatorBase,Op有计算函数,称作有Kernel。
- `class OpProtoAndCheckerMaker`:描述该Op的输入、输出、属性、注释,主要用于Python API接口生成 - `class OpProtoAndCheckerMaker`:描述该Op的输入、输出、属性、注释,主要用于Python API接口生成
依据是否包含kernel,将Op分为两种:包含Kernel的Op和不包含kernel的Op,前者Op的定义继承自`OperatorBase`,后者继承自`OperatorWithKernel`。本教程主要介绍带Kernel的Op如何写,简单总结Op需要包含的内容如下: 依据是否包含kernel,可以将Op分为两种:包含Kernel的Op和不包含kernel的Op,前者Op的定义继承自`OperatorBase`,后者继承自`OperatorWithKernel`。本教程主要介绍带Kernel的Op如何写,简单总结Op需要包含的内容如下:
内容 | 定义位置 内容 | 定义位置
-------------- | :---------------------- -------------- | :----------------------
OpProtoMake定义 | `.cc`文件,Backward Op不需要定义OpProtoMake OpProtoMake定义 | `.cc`文件,Backward Op不需要定义OpProtoMake
Op定义 | `.cc`文件 Op定义 | `.cc`文件
Kernel实现 | CPU、GPU共享Kernel在`.h`文件,否则,CPU可以在`.cc`文件,GPU可在`.cu`文件。 Kernel实现 | CPU、GPU共享Kernel实现在`.h`文件中,否则,CPU 实现在`.cc`文件中,GPU 实现在`.cu`文件中。
注册Op | Op注册在`.cc`文件;Kernel注册CPU在`.cc`文件,GPU在`.cu`文件 注册Op | Op注册实现在`.cc`文件;Kernel注册CPU实现在`.cc`文件中,GPU实现在`.cu`文件中
实现新的op都添加至目录[paddle/operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators)下,文件命名以`*_op.h`(如有) 、 `*_op.cc``*_op.cu`(如有)结尾。
下面以矩阵乘操作,即[MulOp](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc)为例来介绍如何写带Kernel的Operator。 下面以矩阵乘操作,即[MulOp](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc)为例来介绍如何写带Kernel的Operator。
...@@ -42,9 +45,11 @@ Kernel实现 | CPU、GPU共享Kernel在`.h`文件,否则,CPU可以在` ...@@ -42,9 +45,11 @@ Kernel实现 | CPU、GPU共享Kernel在`.h`文件,否则,CPU可以在`
### 1. 定义ProtoMaker类 ### 1. 定义ProtoMaker类
矩阵乘的公式:$Out = X * Y$, 可见该计算由两个输入,一个输出组成。首先定义`ProtoMaker`来描述该Op的输入、输出及注释: 矩阵乘法的公式:$Out = X * Y$, 可见该计算由两个输入,一个输出组成。
``` 首先定义`ProtoMaker`来描述该Op的输入、输出,并添加注释:
```cpp
class MulOpMaker : public framework::OpProtoAndCheckerMaker { class MulOpMaker : public framework::OpProtoAndCheckerMaker {
public: public:
MulOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) MulOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
...@@ -59,20 +64,20 @@ The equation is: Out = X * Y ...@@ -59,20 +64,20 @@ The equation is: Out = X * Y
} }
}; };
``` ```
[`MulOpMaker`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc#L43)继承自`framework::OpProtoAndCheckerMaker`,构造函数包括2个 [`MulOpMaker`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc#L43)继承自`framework::OpProtoAndCheckerMaker`,构造函数含有2个参数
- `framework::OpProto` : 前者存储Op的输入输出和参数属性,将用于Python API接口的生成。 - `framework::OpProto` : 前者存储Op的输入输出和参数属性,将用于Python API接口的生成。
- `framework::OpAttrChecker` :后者用于检查参数属性的合法性。 - `framework::OpAttrChecker` :后者用于检查参数属性的合法性。
构造函数里通过`AddInput`添加输入参数,通过`AddOutput`添加输出参数,通过`AddComment`添加该Op的注释,这些函数会将对应内容添加到`OpProto`中。
`MulOp`中添加两个输入`X``Y`,添加了一个输出`Out`,并解释了各自含义,该命名尽可能的规范 构造函数里通过`AddInput`添加输入参数,通过`AddOutput`添加输出参数,通过`AddComment`添加Op的注释。这些函数会将对应内容添加到`OpProto`
上面的代码在`MulOp`中添加两个输入`X``Y`,添加了一个输出`Out`,并解释了各自含义,命名请遵守命名规范。
再举个[`ScaleOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/scale_op.cc#L37)的例子:
``` 再以[`ScaleOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/scale_op.cc#L37)为例:
```cpp
template <typename AttrType> template <typename AttrType>
class ScaleOpMaker : public framework::OpProtoAndCheckerMaker { class ScaleOpMaker : public framework::OpProtoAndCheckerMaker {
public: public:
...@@ -87,17 +92,19 @@ The equation is: Out = scale*X ...@@ -87,17 +92,19 @@ The equation is: Out = scale*X
} }
}; };
``` ```
在这个例子里,两处不同: 这个例子有两处不同:
- `AddInput("X","...").NotInGradient()` : 表示`X`这个输入不参与`ScaleOp`对应的梯度Op计算之中。 - `AddInput("X","...").NotInGradient()` : 表示`X`这个输入不参与`ScaleOp`对应的梯度Op计算之中,如果Op的某个输入不参与反向梯度的计算,请显示地调用`.NotInGradient()`进行设置。
- `AddAttr<AttrType>("scale", "...").SetDefault(1.0);` : 增加`scale`系数,作为参数属性,并且设置默认值为1.0。
- `AddAttr<AttrType>("scale", "...").SetDefault(1.0);` : 增加`scale`系数,作为参数属性,并且设置默认值为1.0。
### 2. 定义Operator类 ### 2. 定义Operator类
下面的点实现了MulOp的定义:
```c++ ```cpp
class MulOp : public framework::OperatorWithKernel { class MulOp : public framework::OperatorWithKernel {
public: public:
using framework::OperatorWithKernel::OperatorWithKernel; using framework::OperatorWithKernel::OperatorWithKernel;
...@@ -121,33 +128,46 @@ class MulOp : public framework::OperatorWithKernel { ...@@ -121,33 +128,46 @@ class MulOp : public framework::OperatorWithKernel {
``` ```
[`MulOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc#L22)继承自`OperatorWithKernel``public`成员: [`MulOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc#L22)继承自`OperatorWithKernel``public`成员:
```c++ ```cpp
using framework::OperatorWithKernel::OperatorWithKernel; using framework::OperatorWithKernel::OperatorWithKernel;
``` ```
这句表示使用基类`OperatorWithKernel`的构造函数,也可写成: 这句表示使用基类`OperatorWithKernel`的构造函数,也可写成:
```c++ ```cpp
MulOp(const std::string &type, const framework::VariableNameMap &inputs, MulOp(const std::string &type, const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs, const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs) const framework::AttributeMap &attrs)
: OperatorWithKernel(type, inputs, outputs, attrs) {} : OperatorWithKernel(type, inputs, outputs, attrs) {}
``` ```
还需要重写`InferShape`接口。`InferShape`为const函数,不能修改Op的成员变量,参数为`const framework::InferShapeContext &ctx`,通过该参数可获取到输入输出以及属性。它的功能是: 还需要重写`InferShape`接口。`InferShape`为const函数,不能修改Op的成员变量,参数为`const framework::InferShapeContext &ctx`,通过该参数可获取到输入输出以及属性。它的功能是:
- 1). 做检查, 尽早报错:检查输入数据维度、类型等是否合法。 - 1). 做检查, 尽早报错:检查输入数据维度、类型等是否合法。
- 2). 设置输出Tensor的形状。 - 2). 设置输出Tensor的形状。
通常`OpProtoMaker``Op`类的定义写在`.cc`文件中,和要讲到的注册函数一起放在`.cc` 通常`OpProtoMaker``Op`类的定义写在`.cc`文件中,和下面将要介绍的注册函数一起放在`.cc`
### 3. 定义OpKernel类 ### 3. 定义OpKernel类
```C++ `MulKernel`继承自`framework::OpKernel`,带有下面两个模板参数:
template <typename Place, typename T>
class MulKernel : public framework::OpKernel { - `typename Place`: 表示设备类型,不同设备(CPU、GPU)共享同一个Kernel时,需加该模板参数,不共享则不加,一个不共享的例子是[`OnehotCrossEntropyOpKernel`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43)
public:
- `typename T` : 表示数据类型,如`float`, `double`等。
需要为`MulKernel`类重写`Compute`接口。
- `Compute`接受一个输入参数:`const framework::ExecutionContext& context`
-`InferShapeContext`相比,`ExecutionContext`增加了设备类型,同样可获取到输入输出和属性参数。
- `Compute`函数里实现`OpKernel`的具体计算逻辑。
下面是 `MulKernel` `Compute`的实现:
```cpp
template <typename Place, typename T>
class MulKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto* X = context.Input<Tensor>("X"); auto* X = context.Input<Tensor>("X");
auto* Y = context.Input<Tensor>("Y"); auto* Y = context.Input<Tensor>("Y");
...@@ -157,141 +177,136 @@ class MulKernel : public framework::OpKernel { ...@@ -157,141 +177,136 @@ class MulKernel : public framework::OpKernel {
const_cast<platform::DeviceContext*>(context.device_context_); const_cast<platform::DeviceContext*>(context.device_context_);
math::matmul<Place, T>(*X, false, *Y, false, 1, Z, 0, device_context); math::matmul<Place, T>(*X, false, *Y, false, 1, Z, 0, device_context);
} }
}; };
``` ```
需要注意:**不同设备(CPU、GPU)共享同一个Op定义,是否共享同一个`OpKernel`,取决于`Compute`调用的函数是否支持不同设备。**
`MulKernel`继承自`framework::OpKernel`,带有模板参数: `MulOp`的CPU、GPU实现共享同一个`Kernel``OpKernel`不共享的例子可以参考:[`OnehotCrossEntropyOpKernel`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43)
- `typename Place`: 表示设备类型,不同设备(CPU、GPU)共享同一个Kernel时,需加该模板参数,不共享则不加,一个不共享的例子是[`OnehotCrossEntropyOpKernel`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43) 为了使`OpKernel`的计算过程书写更加简单,并且CPU、GPU的代码可以复用,我们通常借助 Eigen unsupported Tensor模块来实现`Compute`接口。关于在PaddlePaddle中如何使用Eigen库,请参考[使用文档](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/dev/use_eigen_cn.md)
- `typename T` : 表示数据类型,如`float`, `double`等。
到此,前向Op实现完成。接下来,需要在`.cc`文件中注册该op和kernel。
`MulKernel`需要重写`Compute`接口,该接口参数为`const framework::ExecutionContext& context`, `ExecutionContext`相比`InferShapeContext`增加了设备类型,同样可获取到输入输出和属性参数,`Compute`函数里写具体实现时。 反向Op类的定义,反向OpKernel的定义与前向Op类似,这里不再赘述。**但需注意反向Op没有`ProtoMaker`**
注意,不同设备(CPU、GPU)共享一个Op定义,是否则共享同一个`OpKernel`,取决于`Compute`调用的函数是否支持不同设备。`MulOp`的CPU、GPU实现共享同一个`Kernel``OpKernel`不共享的例子可以参考[`OnehotCrossEntropyOpKernel`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43)
为了使得`OpKernel`的计算过程书写较为简单,CPU、GPU的代码可以复用,我们通常借助Eigen unsupported Tensor模块来实现。关于在paddle中如何使用Eigen库,请参考对应的使用[文档](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/dev/use_eigen_cn.md)
到此前向Op实现完成,需要在`.cc`文件中注册该op和kernel。反向Op类的定义和Kernel定义与前向Op类似,这里不再重复。但注意,反向Op没有`ProtoMaker`
### 4. 注册Operator ### 4. 注册Operator
`.cc`文件中注册前向、反向Op类,注册CPU Kernel。 - `.cc`文件中注册前向、反向Op类,注册CPU Kernel。
```c++ ```cpp
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker, mul_grad, ops::MulOpGrad); REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker, mul_grad, ops::MulOpGrad);
REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel<paddle::platform::CPUPlace, float>); REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(mul_grad, REGISTER_OP_CPU_KERNEL(mul_grad,
ops::MulGradKernel<paddle::platform::CPUPlace, float>); ops::MulGradKernel<paddle::platform::CPUPlace, float>);
``` ```
- `REGISTER_OP` : 注册`ops::MulOp`类,类型名为`mul`,该类的`ProtoMaker``ops::MulOpMaker`,注册`ops::MulOpGrad`,类型名为`mul_grad` 在上面的代码中:
- `REGISTER_OP_WITHOUT_GRADIENT` : 用于注册没有反向的Op。
- `REGISTER_OP_CPU_KERNEL` :注册`ops::MulKernel`类,并特化模板参数为`paddle::platform::CPUPlace``float`类型,同理,注册`ops::MulKernel`类。 - `REGISTER_OP` : 注册`ops::MulOp`类,类型名为`mul`,该类的`ProtoMaker`为`ops::MulOpMaker`,注册`ops::MulOpGrad`,类型名为`mul_grad`。
- `REGISTER_OP_WITHOUT_GRADIENT` : 用于注册没有反向的Op。
`.cu`文件中注册GPU Kernel。请注意,如果GPU Kernel的实现是基于Eigen unsupported模块,那么在 `.cu`的最前面请加上宏定义 `#define EIGEN_USE_GPU` - `REGISTER_OP_CPU_KERNEL` :注册`ops::MulKernel`类,并特化模板参数为`paddle::platform::CPUPlace`和`float`类型,同理,注册`ops::MulKernel`类。
```c++
// if use Eigen unsupported module before include head files -`.cu`文件中注册GPU Kernel。
#define EIGEN_USE_GPU - 请注意,如果GPU Kernel的实现基于Eigen unsupported模块,那么在 `.cu`的开始请加上宏定义 `#define EIGEN_USE_GPU`,代码示例如下:
namespace ops = paddle::operators; ```cpp
REGISTER_OP_GPU_KERNEL(mul, ops::MulKernel<paddle::platform::GPUPlace, float>); // if use Eigen unsupported module before include head files
REGISTER_OP_GPU_KERNEL(mul_grad, #define EIGEN_USE_GPU
ops::MulGradKernel<paddle::platform::GPUPlace, float>);
``` namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(mul, ops::MulKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(mul_grad,
ops::MulGradKernel<paddle::platform::GPUPlace, float>);
```
### 5. 编译 ### 5. 编译
[paddle/operators/CMakeLists.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/CMakeLists.txt)文件中添加编译。 - 简单**无特殊依赖**的OP无需修改CMakeList.txt文件。[paddle/operators/CMakeLists.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/CMakeLists.txt) 会自动将 `paddle/operators` 目录下新增的 `*_op.cc` 文件加入编译。
- 较为复杂、**有额外依赖** 的operator仍需要修改[paddle/operators/CMakeLists.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/CMakeLists.txt)。如,`mul_op` 依赖 `math_function`,需要在`CMakeLists.txt`中添加如下内容:
```
op_library(mul_op SRCS mul_op.cc mul_op.cu DEPS math_function) ```
``` op_library(mul_op SRCS mul_op.cc mul_op.cu DEPS math_function) +
```
下面命令可以编译:
- 运行下面命令可以进行编译:
```
make mul_op ```
``` make mul_op
```
## 绑定Python ## 绑定Python
- 绑定Python - 绑定Python
[`paddle/pybind/pybind.cc 在 [`paddle/pybind/pybind.cc
`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/pybind/pybind.cc)文件中添加该类: `](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/pybind/pybind.cc) 使用`USE_OP`告知编译器需要链接的Op,具体解释参考[代码注释](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/op_registry.h#L81)。
``` ```
USE_OP(mul); USE_OP(mul);
``` ```
如果只实现了CPU版本,则使用`USE_CPU_ONLY_OP`: 如果只实现了CPU版本,则使用`USE_CPU_ONLY_OP`:
``` ```
USE_CPU_ONLY_OP(gather); USE_CPU_ONLY_OP(gather);
``` ```
如果OP不带Kernel,则使用`USE_NO_KENREL_OP`: 如果OP不带Kernel,则使用`USE_NO_KENREL_OP`:
``` ```
USE_NO_KENREL_OP(recurrent); USE_NO_KENREL_OP(recurrent);
``` ```
使用`USE_OP`告知编译器需要链接该Op的目标文件,具体解释参考[代码注释](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/op_registry.h#L81)。
- 生成库 - 生成库
[`paddle/pybind/CMakeLists.txt`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/pybind/CMakeLists.txt)文件添加类到`DEPS`中,使得该Op可以链接到生成的lib库中。 `paddle/operators` 目录下新增的 `*_op.cc` 文件会被自动添加链接到生成的lib库中。
```
if(WITH_PYTHON)
cc_library(paddle_pybind SHARED
SRCS pybind.cc
DEPS pybind python backward
mul_op
minus_op)
endif(WITH_PYTHON)
```
## 实现单元测试 ## 实现单元测试
单测包括对比前向Op不同设备(CPU、GPU)的实现、对比反向OP不同设备(CPU、GPU)的实现、反向Op的梯度测试。下面介绍[`MulOp`的单元测试](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/tests/test_mul_op.py)。
### 前向Operator单 ### 前向Operator单元测试
前向Op单测继承自`unittest.TestCase`,并定义元类`__metaclass__ = OpTestMeta`,具体单测流程在`OpTestMeta`里完成。需在`setUp`函数定义输入输出和属性参数,以及Python对比的输出值。 前向Op单元测试继承自`unittest.TestCase`,并定义元类`__metaclass__ = OpTestMeta`。各项更加具体的单元测试在`OpTestMeta`里完成。测试前向Operator,需要:
``` 1. 在`setUp`函数定义输入、输出,以及相关的属性参数。
import unittest 2. 生成随机的输入数据。
import numpy as np 3. 在Python脚本中实现与前向operator相同的计算逻辑,得到输出值,与operator前向计算的输出进行对比。
from gradient_checker import GradientChecker, create_op
from op_test_util import OpTestMeta
class TestMulOp(unittest.TestCase):
__metaclass__ = OpTestMeta
def setUp(self): ```python
self.type = "mul" import unittest
self.inputs = { import numpy as np
'X': np.random.random((32, 84)).astype("float32"), from gradient_checker import GradientChecker, create_op
'Y': np.random.random((84, 100)).astype("float32") from op_test_util import OpTestMeta
}
self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}
```
首先需要`import`必要的包,下面详细解释其他值:
- `self.type = "mul" ` : 定义类型,和注册的类型一致。
- `self.inputs` : 定义输入,类型为Numpy.array,并初始化。
- `self.outputs` : 定义输出,并得到Python结算结果。
class TestMulOp(unittest.TestCase):
### 反向Operator单测 __metaclass__ = OpTestMeta
反向Op单测继承自`GradientChecker`,而`GradientChecker`集成自`unittest.TestCase`,所以反向单测函数需要`test_`开头。 def setUp(self):
self.type = "mul"
self.inputs = {
'X': np.random.random((32, 84)).astype("float32"),
'Y': np.random.random((84, 100)).astype("float32")
}
self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}
```
``` 上面的代码首先导入依赖的包,下面是对`setUp`函数中操作的重要变量的详细解释:
- `self.type = "mul" ` : 定义类型,与operator注册时注册的类型一致。
- `self.inputs` : 定义输入,类型为`numpy.array`,并初始化。
- `self.outputs` : 定义输出,并在Python脚本中完成与operator同样的计算逻辑,返回Python端的计算结果。
### 反向Operator单元测试
反向Op单元测试继承自`GradientChecker`,而`GradientChecker`继承自`unittest.TestCase`,因此,**反向单元测试函数需要以`test_`开头**。
```python
class TestMulGradOp(GradientChecker): class TestMulGradOp(GradientChecker):
def setUp(self): def setUp(self):
self.op = create_op("mul") self.op = create_op("mul")
...@@ -325,33 +340,30 @@ class TestMulGradOp(GradientChecker): ...@@ -325,33 +340,30 @@ class TestMulGradOp(GradientChecker):
no_grad_set={"Y"}) no_grad_set={"Y"})
``` ```
下面解释一些关键的地方: 下面解释代码中一些关键的地方:
- 调用`create_op("mul")`创建反向Op对应的前向Op。 - 调用`create_op("mul")`创建反向Op对应的前向Op。
- 调用`compare_grad`函数对比CPU、GPU计算结果。 - 调用`compare_grad`函数对比CPU、GPU计算结果。
- `test_normal`中调用`check_grad`检查梯度稳定性,这里采用数值法检测梯度正确性。 - `test_normal`中调用`check_grad`使用数值法检测梯度正确性和稳定性。
- 第一个参数`self.op` : 前向Op。 - 第一个参数`self.op` : 前向Op。
- 第二个参数`self.inputs` : 输入词典,词典的Key和`ProtoMaker`定义保持一致。 - 第二个参数`self.inputs` : 输入词典,词典的Key和`ProtoMaker`定义保持一致。
- 第三个参数`["X", "Y"]` : 指定对输入变量`X``Y`做梯度检测。 - 第三个参数`["X", "Y"]` : 指定对输入变量`X`、`Y`做梯度检测。
- 第四个参数`"Out"` : 指定前向网络最终的输出目标变量`Out` - 第四个参数`"Out"` : 指定前向网络最终的输出目标变量`Out`
- `test_ignore_x``test_ignore_y`分支测试只需要计算一个输入梯度的情况。 - `test_ignore_x`和`test_ignore_y`分支用来测试只需要计算一个输入梯度的情况。
### 编译和执行 ### 编译和执行单元测试
单测完成之后,在[`python/paddle/v2/framework/tests/CMakeLists.txt`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/tests/CMakeLists.txt)里添加编译: `python/paddle/v2/framework/tests` 目录下新增的 `test_*.py` 单元测试会被自动加入工程进行编译。
``` 请注意,**不同于Op的编译测试,运行单元测试测时需要编译整个工程**,并且编译时需要打开`WITH_TESTING`, 即`cmake paddle_dir -DWITH_TESTING=ON`。编译成功后,执行下面的命令来运行单元测试:
py_test(test_mul_op SRCS test_mul_op.py)
```
编译时需要打开`WITH_TESTING`, 即 `cmake paddle_dir -DWITH_TESTING=ON`,编译成功之后执行单测命令为: ```bash
```
make test ARGS="-R test_mul_op -V" make test ARGS="-R test_mul_op -V"
``` ```
或者: 或者:
``` ```bash
ctest -R test_mul_op ctest -R test_mul_op
``` ```
...@@ -5,15 +5,13 @@ ...@@ -5,15 +5,13 @@
PaddlePaddle的文档包括英文文档 ``doc`` 和中文文档 ``doc_cn`` 两个部分。文档都是通过 `cmake`_ 驱动 `sphinx`_ 编译生成,生成后的文档分别存储在编译目录的 ``doc`` 和 ``doc_cn`` 两个子目录下。 PaddlePaddle的文档包括英文文档 ``doc`` 和中文文档 ``doc_cn`` 两个部分。文档都是通过 `cmake`_ 驱动 `sphinx`_ 编译生成,生成后的文档分别存储在编译目录的 ``doc`` 和 ``doc_cn`` 两个子目录下。
如何构建PaddlePaddle的文档 如何构建文档
========================== ============
PaddlePaddle的文档构建有直接构建和基于Docker构建两种方式,我们提供了一个构建脚本build_docs.sh来进行构建。 PaddlePaddle的文档构建有两种方式。
PaddlePaddle文档需要准备的环境相对较复杂,所以我们推荐使用基于Docker来构建PaddlePaddle的文档。
使用Docker构建
使用Docker构建PaddlePaddle的文档 --------------
--------------------------------
使用Docker构建PaddlePaddle的文档,需要在系统里先安装好Docker工具包。Docker安装请参考 `Docker的官网 <https://docs.docker.com/>`_ 。安装好Docker之后可以使用源码目录下的脚本构建文档,即 使用Docker构建PaddlePaddle的文档,需要在系统里先安装好Docker工具包。Docker安装请参考 `Docker的官网 <https://docs.docker.com/>`_ 。安装好Docker之后可以使用源码目录下的脚本构建文档,即
...@@ -21,58 +19,46 @@ PaddlePaddle文档需要准备的环境相对较复杂,所以我们推荐使 ...@@ -21,58 +19,46 @@ PaddlePaddle文档需要准备的环境相对较复杂,所以我们推荐使
cd TO_YOUR_PADDLE_CLONE_PATH cd TO_YOUR_PADDLE_CLONE_PATH
cd paddle/scripts/tools/build_docs cd paddle/scripts/tools/build_docs
bash build_docs.sh with_docker sh build_docs.sh
编译完成后,会在当前目录生成两个子目录\:
* doc 英文文档目录
* doc_cn 中文文档目录
编译完成之后,会在当前目录生成两个子目录\: doc(英文文档目录)和 doc_cn(中文文档目录)。
打开浏览器访问对应目录下的index.html即可访问本地文档。 打开浏览器访问对应目录下的index.html即可访问本地文档。
直接构建
--------
直接构建PaddlePaddle的文档
--------------------------
因为PaddlePaddle的v2 api文档生成过程依赖于py_paddle Python包,用户需要首先确认py_paddle包已经安装。
.. code-block:: bash
python -c "import py_paddle"
如果提示错误,那么用户需要在本地编译安装PaddlePaddle,请参考 `源码编译文档 <http://doc.paddlepaddle.org/develop/doc/getstarted/build_and_install/build_from_source_en.html>`_ 。
注意,用户在首次编译安装PaddlePaddle时,请将WITH_DOC选项关闭。在编译安装正确之后,请再次确认py_paddle包已经安装,即可进行下一步操作。
如果提示正确,可以执行以下命令编译生成文档,即 如果提示正确,可以执行以下命令编译生成文档,即
.. code-block:: bash .. code-block:: bash
cd TO_YOUR_PADDLE_CLONE_PATH cd TO_YOUR_PADDLE_CLONE_PATH
cd paddle/scripts/tools/build_docs mkdir -p build
bash build_docs.sh local cd build
cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_MKLDNN=OFF -DWITH_MKLML=OFF -DWITH_DOC=ON
编译完成之后,会在当前目录生成两个子目录\: make gen_proto_py
make paddle_docs paddle_docs_cn
* doc 英文文档目录
* doc_cn 中文文档目录
编译完成之后,会在当前目录生成两个子目录\: doc(英文文档目录)和 doc_cn(中文文档目录)。
打开浏览器访问对应目录下的index.html即可访问本地文档。 打开浏览器访问对应目录下的index.html即可访问本地文档。
如何书写PaddlePaddle的文档 如何书写文档
========================== ============
PaddlePaddle文档使用 `sphinx`_ 自动生成,用户可以参考sphinx教程进行书写。 PaddlePaddle文档使用 `sphinx`_ 自动生成,用户可以参考sphinx教程进行书写。
如何更新www.paddlepaddle.org文档 如何更新文档主题
================================ ================
PaddlePaddle文档主题在 `TO_YOUR_PADDLE_CLONE_PATH/doc_theme` 文件夹下,包含所有和前端网页设计相关的文件。
开发者给PaddlePaddle代码增加的注释以PR的形式提交到github中,提交方式可参见 `贡献文档 <http://doc.paddlepaddle.org/develop/doc_cn/howto/dev/contribute_to_paddle_cn.html>`_ 。 如何更新doc.paddlepaddle.org
============================
更新的文档以PR的形式提交到github中,提交方式参见 `贡献文档 <http://doc.paddlepaddle.org/develop/doc_cn/howto/dev/contribute_to_paddle_cn.html>`_ 。
目前PaddlePaddle的develop分支的文档是自动触发更新的,用户可以分别查看最新的 `中文文档 <http://doc.paddlepaddle.org/develop/doc_cn/>`_ 和 目前PaddlePaddle的develop分支的文档是自动触发更新的,用户可以分别查看最新的 `中文文档 <http://doc.paddlepaddle.org/develop/doc_cn/>`_ 和
`英文文档 <http://doc.paddlepaddle.org/develop/doc/>`_ 。 `英文文档 <http://doc.paddlepaddle.org/develop/doc/>`_ 。
.. _cmake: https://cmake.org/ .. _cmake: https://cmake.org/
.. _sphinx: http://www.sphinx-doc.org/en/1.4.8/ .. _sphinx: http://www.sphinx-doc.org/en/1.4.8/
...@@ -18,14 +18,6 @@ limitations under the License. */ ...@@ -18,14 +18,6 @@ limitations under the License. */
#ifndef __NVCC__ #ifndef __NVCC__
#include "paddle/math/MathFunctions.h"
#ifndef PADDLE_TYPE_DOUBLE
#define CBLAS_GEMM paddle::gemm<float>
#else
#define CBLAS_GEMM paddle::gemm<double>
#endif
template<class OpResetOutput> template<class OpResetOutput>
void hl_naive_gru_forward_reset_output(OpResetOutput opResetOutput, void hl_naive_gru_forward_reset_output(OpResetOutput opResetOutput,
real *gateValue, real *gateValue,
...@@ -210,51 +202,6 @@ inline void forward_final_output(OpFinalOutput opFinalOutput, ...@@ -210,51 +202,6 @@ inline void forward_final_output(OpFinalOutput opFinalOutput,
} }
} }
template<class OpResetOutput, class OpFinalOutput>
void hl_cpu_gru_forward(OpResetOutput opResetOutput,
OpFinalOutput opFinalOutput,
hl_gru_value value,
int frameSize,
int batchSize,
hl_activation_mode_t active_node,
hl_activation_mode_t active_gate) {
if (value.prevOutValue) {
CBLAS_GEMM(CblasNoTrans,
CblasNoTrans,
batchSize,
2 * frameSize,
frameSize,
1,
value.prevOutValue,
frameSize,
value.gateWeight,
frameSize * 2,
1,
value.gateValue,
frameSize * 3);
}
forward_reset_output(opResetOutput, value, frameSize, batchSize, active_gate);
if (value.prevOutValue) {
CBLAS_GEMM(CblasNoTrans,
CblasNoTrans,
batchSize,
frameSize,
frameSize,
1,
value.resetOutputValue,
frameSize,
value.stateWeight,
frameSize,
1,
value.gateValue + frameSize * 2,
frameSize * 3);
}
forward_final_output(opFinalOutput, value, frameSize, batchSize, active_node);
}
template<class OpStateGrad> template<class OpStateGrad>
void hl_naive_gru_backward_state_grad(OpStateGrad opStateGrad, void hl_naive_gru_backward_state_grad(OpStateGrad opStateGrad,
real *gateValue, real *gateValue,
...@@ -525,86 +472,6 @@ inline void backward_reset_grad(OpResetGrad opResetGrad, ...@@ -525,86 +472,6 @@ inline void backward_reset_grad(OpResetGrad opResetGrad,
} }
} }
template<class OpStateGrad, class OpResetGrad>
void hl_cpu_gru_backward(OpStateGrad opStateGrad,
OpResetGrad opResetGrad,
hl_gru_value value,
hl_gru_grad grad,
int frameSize,
int batchSize,
hl_activation_mode_t active_node,
hl_activation_mode_t active_gate) {
backward_state_grad(opStateGrad, value, grad,
frameSize, batchSize, active_node);
if (value.prevOutValue && grad.prevOutGrad) {
CBLAS_GEMM(CblasNoTrans,
CblasTrans,
batchSize,
frameSize,
frameSize,
1,
grad.gateGrad + frameSize * 2,
frameSize * 3,
value.stateWeight,
frameSize,
0,
grad.resetOutputGrad,
frameSize);
if (grad.stateWeightGrad) {
CBLAS_GEMM(CblasTrans,
CblasNoTrans,
frameSize,
frameSize,
batchSize,
1,
value.resetOutputValue,
frameSize,
grad.gateGrad + frameSize * 2,
frameSize * 3,
1,
grad.stateWeightGrad,
frameSize);
}
}
backward_reset_grad(opResetGrad, value, grad,
frameSize, batchSize, active_gate);
if (grad.prevOutGrad && value.prevOutValue) {
CBLAS_GEMM(CblasNoTrans,
CblasTrans,
batchSize,
frameSize,
frameSize * 2,
1,
grad.gateGrad,
frameSize * 3,
value.gateWeight,
frameSize * 2,
1,
grad.prevOutGrad,
frameSize);
if (grad.gateWeightGrad) {
CBLAS_GEMM(CblasTrans,
CblasNoTrans,
frameSize,
frameSize * 2,
batchSize,
1,
value.prevOutValue,
frameSize,
grad.gateGrad,
frameSize * 3,
1,
grad.gateWeightGrad,
frameSize * 2);
}
}
}
#endif #endif
#endif // HL_CPU_GRU_CUH_ #endif // HL_CPU_GRU_CUH_
...@@ -9,6 +9,7 @@ cc_test(eigen_test SRCS eigen_test.cc DEPS tensor) ...@@ -9,6 +9,7 @@ cc_test(eigen_test SRCS eigen_test.cc DEPS tensor)
cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor) cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor)
cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor) cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor)
nv_test(lod_tensor_gpu_test SRCS lod_tensor_test.cu DEPS lod_tensor)
cc_test(variable_test SRCS variable_test.cc) cc_test(variable_test SRCS variable_test.cc)
......
...@@ -41,11 +41,23 @@ Attribute GetAttrValue(const OpDesc::Attr& attr_desc); ...@@ -41,11 +41,23 @@ Attribute GetAttrValue(const OpDesc::Attr& attr_desc);
// check whether a value(attribute) fit a certain limit // check whether a value(attribute) fit a certain limit
template <typename T> template <typename T>
class LargerThanChecker { class GreaterThanChecker {
public: public:
explicit LargerThanChecker(T lower_bound) : lower_bound_(lower_bound) {} explicit GreaterThanChecker(T lower_bound) : lower_bound_(lower_bound) {}
void operator()(T& value) const { void operator()(T& value) const {
PADDLE_ENFORCE(value > lower_bound_, "larger_than check fail"); PADDLE_ENFORCE(value > lower_bound_, "larger_than check fails.");
}
private:
T lower_bound_;
};
template <typename T>
class EqualGreaterThanChecker {
public:
explicit EqualGreaterThanChecker(T lower_bound) : lower_bound_(lower_bound) {}
void operator()(T& value) const {
PADDLE_ENFORCE_GE(value, lower_bound_, "equal_larger_than check fails.");
} }
private: private:
...@@ -110,8 +122,13 @@ class TypedAttrChecker { ...@@ -110,8 +122,13 @@ class TypedAttrChecker {
return *this; return *this;
} }
TypedAttrChecker& LargerThan(const T& lower_bound) { TypedAttrChecker& GreaterThan(const T& lower_bound) {
value_checkers_.push_back(LargerThanChecker<T>(lower_bound)); value_checkers_.push_back(GreaterThanChecker<T>(lower_bound));
return *this;
}
TypedAttrChecker& EqualGreaterThan(const T& lower_bound) {
value_checkers_.push_back(EqualGreaterThanChecker<T>(lower_bound));
return *this; return *this;
} }
......
...@@ -2,20 +2,31 @@

## Motivation

At present, many models in neural networks are solved by the backpropagation algorithm (known as BP). Technically, it calculates the gradient of the loss function, which is then propagated back through the network. Following the chain rule, we need a module that chains the gradient operators/expressions together to construct the backward pass. Every forward network needs a backward network to construct the full computation graph; the operator/expression's backward pass will be generated with respect to the forward pass.

## Implementation

In this design doc, we export only one API for generating the backward pass.

```c++
std::unique_ptr<OperatorBase> Backward(const OperatorBase& forwardOp,
                                       const std::unordered_set<std::string>& no_grad_vars);
```

The implementation behind it can be divided into two parts, **Backward Operator Creating** and **Backward Operator Building**.

### Backward Operator Registry

A backward network is built up with several backward operators. Backward operators take the forward operators' inputs, outputs, and output gradients and then calculate their input gradients.

|                        | forward operator | backward operator                |
| ---------------------- | ---------------- | -------------------------------- |
| **Operator::inputs_**  | Inputs           | Inputs, Outputs, OutputGradients |
| **Operator::outputs_** | Outputs          | InputGradients                   |
In most cases, there is a one-to-one correspondence between forward and backward operators. These correspondences are recorded by a global hash map(`OpInfoMap`). To follow the philosophy of minimum core and make operators pluggable, the registry mechanism is introduced. In most cases, there is a one-to-one correspondence between the forward and backward operators. These correspondences are recorded by a global hash map(`OpInfoMap`). To follow the philosophy of minimum core and make operators pluggable, the registry mechanism is introduced.
For example, we have got a `mul_op`, and we can register it's information and corresponding backward operator by the following macro: For example, we have got a `mul_op`, and we can register its information and corresponding backward operator by the following macro:
```cpp ```cpp
REGISTER_OP(mul, MulOp, MulOpMaker, mul_grad, MulOpGrad); REGISTER_OP(mul, MulOp, MulOpMaker, mul_grad, MulOpGrad);
...@@ -25,58 +36,65 @@ REGISTER_OP(mul, MulOp, MulOpMaker, mul_grad, MulOpGrad); ...@@ -25,58 +36,65 @@ REGISTER_OP(mul, MulOp, MulOpMaker, mul_grad, MulOpGrad);
`mul_grad` is the type of backward operator, and `MulOpGrad` is its class name. `mul_grad` is the type of backward operator, and `MulOpGrad` is its class name.
## Backward Opeartor Creating ### Backward Opeartor Creating
Given a certain forward operator, we can get its corresponding backward opeartor by calling: Given a certain forward operator, we can get its corresponding backward operator by calling:
```cpp ```cpp
OperatorBase* bwd_op = BuildGradOp(const OperatorBase* fwd_op); OperatorBase* bwd_op = BuildGradOp(const OperatorBase* fwd_op);
``` ```
The function `BuildGradOp` will sequentially execute following processes: The function `BuildGradOp` will sequentially execute following processes:
1. Get the `type_` of given forward operator, and then get the corresponding backward operator's type by looking up the `OpInfoMap`. 1. Get the `type_` of given forward operator, and then get the corresponding backward operator's type by looking up the `OpInfoMap`.
2. Build two maps named `inputs` and `outputs` to temporarily store the backward operator's inputs and outputs. Copy the forward operator's `inputs_` and `outputs_` into map `inputs`, except for those that are not necessary for gradient computing.
3. Add the forward inputs' gradient variables into map `outputs`, and add the forward outputs' gradient variables into map `inputs`.
4. Build the backward operator with `inputs`, `outputs`, and the forward operator's attributes.
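The following C++ sketch shows how these four steps could fit together. It is illustrative only; the helper names used here (`OpInfoMap::Instance`, `grad_op_type_`, `GradVarName`, `ToGradNames`, `CreateOperator`, `Attrs`) are assumptions made for the example, not the exact framework API:

```cpp
// Illustrative sketch of BuildGradOp; helper and member names are hypothetical.
OperatorBase* BuildGradOp(const OperatorBase* fwd_op) {
  // 1. Look up the backward operator's type registered for this forward type.
  const std::string& grad_type =
      OpInfoMap::Instance().Get(fwd_op->Type()).grad_op_type_;

  // 2. The backward op consumes the forward inputs and outputs.
  VariableNameMap inputs = fwd_op->Inputs();
  for (const auto& out : fwd_op->Outputs()) {
    inputs[out.first] = out.second;
  }

  // 3. Forward outputs' gradients become extra inputs; forward inputs'
  //    gradients become the backward op's outputs.
  VariableNameMap outputs;
  for (const auto& out : fwd_op->Outputs()) {
    inputs[GradVarName(out.first)] = ToGradNames(out.second);
  }
  for (const auto& in : fwd_op->Inputs()) {
    outputs[GradVarName(in.first)] = ToGradNames(in.second);
  }

  // 4. Create the backward operator with the forward operator's attributes.
  return CreateOperator(grad_type, inputs, outputs, fwd_op->Attrs());
}
```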
### Backward Network Building
A backward network is a series of backward operators. The main idea of building a backward network is to create the backward operators in inverted sequence and append them one by one. A few corner cases need special handling.
Given a forward network, it generates the backward network. We only care about the gradients—`OutputGradients` and `InputGradients`.
1. Op
   When the input forward network is an Op, return its gradient operator immediately. If all of its outputs are in the no-gradient set, return a special `NOP` instead.
2. NetOp
   In our design, the network itself is also a kind of operator (**NetOp**), so the operators contained in a big network may themselves be small networks. When the input forward network is a NetOp, it needs to call the backward functions of its sub NetOps/Operators recursively. During the process, we need to collect the `OutputGradients` names according to the forward NetOp (see the sketch after this list).
3. RnnOp
   RnnOp is a nested stepnet operator. The backward module needs to recursively call `Backward` for every stepnet.
4. Sharing Variables
   **Sharing variables**. As illustrated in the pictures below, two operators share the same variable name `W@GRAD`, which will overwrite their shared input variable.

   <p align="center">
   <img src="./images/duplicate_op.png" width="50%" ><br/>
   pic 1. Sharing variables in operators.
   </p>

   Sharing a variable between operators, or using the same input variable in multiple operators, leads to a duplicate gradient variable. As the demo above shows, we need to rename the gradient names recursively and add a generic `Add` operator to replace the overwriting links.

   <p align="center">
   <img src="images/duplicate_op2.png" width="40%" ><br/>
   pic 2. Replace the shared variable's gradient with an `Add` operator.
   </p>

   Because our framework finds variables according to their names, we need to rename the output links. We add a numeric suffix to represent each link's position, in clockwise order.

5. Part of the Gradient is Zero.
   In the whole graph, there are cases where an operator's gradient is not needed, but its input's gradient is a dependency link of another operator; we then need to fill a gradient matrix of the same shape at that position. In our implementation, we insert a special `fillZeroLike` operator.
Following the rules above, collect the sub-graph's `OutputGradients`/`InputGradients` as the NetOp's own and return it.
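The pseudocode below sketches this recursive procedure. It is only an illustration of the rules above; the helper names (`IsNetOp`, `AllOutputsInSet`, `MakeNOP`, `MakeNetOp`, `GetSubOperators`, `AppendOp`) are hypothetical:

```cpp
// Illustrative sketch of recursive backward-network building.
std::unique_ptr<OperatorBase> BackwardRecursive(
    const OperatorBase& forward_op,
    const std::unordered_set<std::string>& no_grad_vars) {
  // Case 1: a plain Op. If every output is in the no-gradient set, emit a NOP.
  if (!IsNetOp(forward_op)) {
    if (AllOutputsInSet(forward_op, no_grad_vars)) {
      return MakeNOP();
    }
    return std::unique_ptr<OperatorBase>(BuildGradOp(&forward_op));
  }

  // Case 2: a NetOp. Visit the contained operators in reverse order, build
  // their backward passes recursively, rename duplicated gradient outputs,
  // and insert Add / fillZeroLike operators where the rules above require.
  auto net = MakeNetOp();
  const auto& sub_ops = GetSubOperators(forward_op);
  for (auto it = sub_ops.rbegin(); it != sub_ops.rend(); ++it) {
    net->AppendOp(BackwardRecursive(**it, no_grad_vars));
  }
  return net;
}
```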
...@@ -283,5 +283,14 @@ std::ostream& operator<<(std::ostream& os, const DDim& ddim) { ...@@ -283,5 +283,14 @@ std::ostream& operator<<(std::ostream& os, const DDim& ddim) {
DDim::DDim(std::initializer_list<int64_t> init_list) { DDim::DDim(std::initializer_list<int64_t> init_list) {
*this = make_ddim(init_list); *this = make_ddim(init_list);
} }
DDim flatten_to_2d(const DDim& src, int num_col_dims) {
int rank = src.size();
return make_ddim({product(slice_ddim(src, 0, num_col_dims)),
product(slice_ddim(src, num_col_dims, rank))});
}
DDim flatten_to_1d(const DDim& src) { return make_ddim({product(src)}); }
} // namespace framework } // namespace framework
} // namespace paddle } // namespace paddle
...@@ -115,6 +115,12 @@ int arity(const DDim& ddim); ...@@ -115,6 +115,12 @@ int arity(const DDim& ddim);
std::ostream& operator<<(std::ostream&, const DDim&); std::ostream& operator<<(std::ostream&, const DDim&);
// Reshape a tensor to a matrix. The matrix's first dimension(column length)
// will be the product of tensor's first `num_col_dims` dimensions.
DDim flatten_to_2d(const DDim& src, int num_col_dims);
DDim flatten_to_1d(const DDim& src);
} // namespace framework } // namespace framework
} // namespace paddle } // namespace paddle
......
...@@ -63,20 +63,35 @@ struct EigenTensor { ...@@ -63,20 +63,35 @@ struct EigenTensor {
template <typename T, int MajorType = Eigen::RowMajor, template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex> typename IndexType = Eigen::DenseIndex>
struct EigenMatrix : public EigenTensor<T, 2, MajorType, IndexType> {}; struct EigenMatrix : public EigenTensor<T, 2, MajorType, IndexType> {
static typename EigenMatrix::Type Reshape(Tensor& tensor, int num_col_dims) {
int rank = tensor.dims_.size();
PADDLE_ENFORCE(num_col_dims > 0 && num_col_dims < rank,
"`num_col_dims` must be between (0, rank_of_tensor).");
return EigenMatrix::From(tensor,
flatten_to_2d(tensor.dims(), num_col_dims));
}
static typename EigenMatrix::ConstType Reshape(const Tensor& tensor,
int num_col_dims) {
int rank = tensor.dims_.size();
PADDLE_ENFORCE(num_col_dims > 0 && num_col_dims < rank,
"`num_col_dims` must be between (0, rank_of_tensor).");
return EigenMatrix::From(tensor,
flatten_to_2d(tensor.dims(), num_col_dims));
}
};
template <typename T, int MajorType = Eigen::RowMajor, template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex> typename IndexType = Eigen::DenseIndex>
struct EigenVector : public EigenTensor<T, 1, MajorType, IndexType> { struct EigenVector : public EigenTensor<T, 1, MajorType, IndexType> {
// Flatten reshapes a Tensor into an EigenVector. // Flatten reshapes a Tensor into an EigenVector.
static typename EigenVector::Type Flatten(Tensor& tensor) { static typename EigenVector::Type Flatten(Tensor& tensor) {
return EigenVector::From( return EigenVector::From(tensor, {product(tensor.dims_)});
tensor, make_ddim({static_cast<int>(product(tensor.dims_))}));
} }
static typename EigenVector::ConstType Flatten(const Tensor& tensor) { static typename EigenVector::ConstType Flatten(const Tensor& tensor) {
return EigenVector::From( return EigenVector::From(tensor, {product(tensor.dims_)});
tensor, make_ddim({static_cast<int>(product(tensor.dims_))}));
} }
}; };
......
...@@ -108,5 +108,24 @@ TEST(Eigen, Matrix) { ...@@ -108,5 +108,24 @@ TEST(Eigen, Matrix) {
} }
} }
TEST(Eigen, MatrixReshape) {
Tensor t;
float* p = t.mutable_data<float>({2, 3, 6, 4}, platform::CPUPlace());
for (int i = 0; i < 2 * 3 * 6 * 4; ++i) {
p[i] = static_cast<float>(i);
}
EigenMatrix<float>::Type em = EigenMatrix<float>::Reshape(t, 2);
ASSERT_EQ(2 * 3, em.dimension(0));
ASSERT_EQ(6 * 4, em.dimension(1));
for (int i = 0; i < 2 * 3; i++) {
for (int j = 0; j < 6 * 4; j++) {
ASSERT_NEAR(i * 6 * 4 + j, em(i, j), 1e-6f);
}
}
}
} // namespace framework } // namespace framework
} // namespace paddle } // namespace paddle
...@@ -87,3 +87,24 @@ message OpProto { ...@@ -87,3 +87,24 @@ message OpProto {
repeated Attr attrs = 4; repeated Attr attrs = 4;
required string comment = 5; required string comment = 5;
} }
enum DataType {
BOOL = 0;
INT16 = 1;
INT32 = 2;
INT64 = 3;
FP16 = 4;
FP32 = 5;
FP64 = 6;
}
message LoDTensorDesc {
required DataType data_type = 1;
repeated int32 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480]
optional int32 lod_level = 3 [ default = 0 ];
}
message VarDesc {
required string name = 1;
optional LoDTensorDesc lod_tensor = 2;
}
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
#include "paddle/framework/op_registry.h" #include "paddle/framework/op_registry.h"
#include "paddle/framework/operator.h" #include "paddle/framework/operator.h"
USE_OP(add_two); USE_OP(add);
namespace paddle { namespace paddle {
namespace framework { namespace framework {
...@@ -41,7 +41,7 @@ namespace f = paddle::framework; ...@@ -41,7 +41,7 @@ namespace f = paddle::framework;
TEST(GradOpBuilder, AddTwo) { TEST(GradOpBuilder, AddTwo) {
std::shared_ptr<f::OperatorBase> add_op(f::OpRegistry::CreateOp( std::shared_ptr<f::OperatorBase> add_op(f::OpRegistry::CreateOp(
"add_two", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {})); "add", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {}));
std::shared_ptr<f::OperatorBase> grad_add_op = std::shared_ptr<f::OperatorBase> grad_add_op =
f::OpRegistry::CreateGradOp(*add_op); f::OpRegistry::CreateGradOp(*add_op);
EXPECT_EQ(grad_add_op->Inputs().size(), 4UL); EXPECT_EQ(grad_add_op->Inputs().size(), 4UL);
......
...@@ -19,8 +19,8 @@ ...@@ -19,8 +19,8 @@
namespace paddle { namespace paddle {
namespace framework { namespace framework {
LOD SliceLevels(const LOD& in, size_t level_begin, size_t level_end) { LoD SliceLevels(const LoD& in, size_t level_begin, size_t level_end) {
LOD new_lod; LoD new_lod;
new_lod.reserve(level_end - level_begin); new_lod.reserve(level_end - level_begin);
for (size_t i = level_begin; i < level_end; i++) { for (size_t i = level_begin; i < level_end; i++) {
new_lod.emplace_back(in.at(i)); new_lod.emplace_back(in.at(i));
...@@ -28,10 +28,10 @@ LOD SliceLevels(const LOD& in, size_t level_begin, size_t level_end) { ...@@ -28,10 +28,10 @@ LOD SliceLevels(const LOD& in, size_t level_begin, size_t level_end) {
return new_lod; return new_lod;
} }
LOD SliceInLevel(const LOD& in, size_t level, size_t elem_begin, LoD SliceInLevel(const LoD& in, size_t level, size_t elem_begin,
size_t elem_end) { size_t elem_end) {
// slice the lod. // slice the lod.
LOD new_lod; LoD new_lod;
new_lod.reserve(in.size() - level); new_lod.reserve(in.size() - level);
auto start = in.at(level)[elem_begin]; auto start = in.at(level)[elem_begin];
auto end = in.at(level)[elem_end]; auto end = in.at(level)[elem_end];
...@@ -46,13 +46,13 @@ LOD SliceInLevel(const LOD& in, size_t level, size_t elem_begin, ...@@ -46,13 +46,13 @@ LOD SliceInLevel(const LOD& in, size_t level, size_t elem_begin,
std::transform(new_lod.back().begin(), new_lod.back().end(), std::transform(new_lod.back().begin(), new_lod.back().end(),
new_lod.back().begin(), new_lod.back().begin(),
[start](int v) { return v - start; }); [start](int v) { return v - start; });
PADDLE_ENFORCE_EQ(new_lod.back().front(), 0, "error in slice LOD"); PADDLE_ENFORCE_EQ(new_lod.back().front(), 0, "error in slice LoD");
} }
PADDLE_ENFORCE_LE(new_lod.size(), in.size()); PADDLE_ENFORCE_LE(new_lod.size(), in.size());
return new_lod; return new_lod;
} }
bool operator==(const LOD& a, const LOD& b) { bool operator==(const LoD& a, const LoD& b) {
if (a.size() != b.size()) { if (a.size() != b.size()) {
return false; return false;
} }
...@@ -72,12 +72,12 @@ bool operator==(const LOD& a, const LOD& b) { ...@@ -72,12 +72,12 @@ bool operator==(const LOD& a, const LOD& b) {
return true; return true;
} }
void LODTensor::SliceLevels(size_t level_begin, size_t level_end) { void LoDTensor::SliceLevels(size_t level_begin, size_t level_end) {
auto new_lod = framework::SliceLevels(lod_, level_begin, level_end); auto new_lod = framework::SliceLevels(lod_, level_begin, level_end);
lod_ = new_lod; lod_ = new_lod;
} }
void LODTensor::SliceInLevel(size_t level, size_t elem_begin, size_t elem_end) { void LoDTensor::SliceInLevel(size_t level, size_t elem_begin, size_t elem_end) {
PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level, PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
NumLevels()); NumLevels());
PADDLE_ENFORCE(elem_begin < NumElements(level), PADDLE_ENFORCE(elem_begin < NumElements(level),
......
...@@ -18,8 +18,10 @@ ...@@ -18,8 +18,10 @@
#ifndef PADDLE_ONLY_CPU #ifndef PADDLE_ONLY_CPU
#include <thrust/device_vector.h> #include <thrust/device_vector.h>
#include <thrust/host_vector.h> #include <thrust/host_vector.h>
#include <thrust/system/cuda/experimental/pinned_allocator.h>
#endif #endif
#include <glog/logging.h>
#include "paddle/framework/ddim.h" #include "paddle/framework/ddim.h"
#include "paddle/framework/tensor.h" #include "paddle/framework/tensor.h"
#include "paddle/platform/enforce.h" #include "paddle/platform/enforce.h"
...@@ -32,37 +34,38 @@ template <typename T> ...@@ -32,37 +34,38 @@ template <typename T>
using Vector = std::vector<T>; using Vector = std::vector<T>;
#else #else
template <typename T> template <typename T>
using Vector = thrust::host_vector<T>; using Vector = thrust::host_vector<
T, thrust::system::cuda::experimental::pinned_allocator<T>>;
#endif #endif
using LOD = std::vector<Vector<size_t>>; using LoD = std::vector<Vector<size_t>>;
LOD SliceLevels(const LOD& in, size_t level_begin, size_t level_end); LoD SliceLevels(const LoD& in, size_t level_begin, size_t level_end);
LOD SliceInLevel(const LOD& in, size_t level, size_t elem_begin, LoD SliceInLevel(const LoD& in, size_t level, size_t elem_begin,
size_t elem_end); size_t elem_end);
bool operator==(const LOD& a, const LOD& b); bool operator==(const LoD& a, const LoD& b);
/* /*
* LODTensor (Level of details Tensor) * LoDTensor (Level of details Tensor)
* see https://en.wikipedia.org/wiki/Level_of_details for reference. * see https://en.wikipedia.org/wiki/Level_of_details for reference.
*/ */
class LODTensor { class LoDTensor {
public: public:
LODTensor() {} LoDTensor() {}
LODTensor(const LOD& lod, Tensor* t) : lod_(lod), tensor_(t) {} LoDTensor(const LoD& lod, Tensor* t) : lod_(lod), tensor_(t) {}
void set_lod(const LOD& lod) { lod_ = lod; } void set_lod(const LoD& lod) { lod_ = lod; }
void set_tensor(Tensor* tensor) { tensor_ = tensor; } void set_tensor(Tensor* tensor) { tensor_ = tensor; }
Tensor& tensor() { return *tensor_; } Tensor& tensor() { return *tensor_; }
LOD lod() { return lod_; } LoD lod() { return lod_; }
/* /*
* Get a element from LOD. * Get a element from LoD.
*/ */
size_t lod_element(size_t level, size_t elem) const { size_t lod_element(size_t level, size_t elem) const {
PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level, PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
...@@ -74,7 +77,7 @@ class LODTensor { ...@@ -74,7 +77,7 @@ class LODTensor {
} }
/* /*
* Number of LODTensor's levels, each level has units of data, for example, * Number of LoDTensor's levels, each level has units of data, for example,
* in the sentence's view, article, paragraph, sentence are 3 levels. * in the sentence's view, article, paragraph, sentence are 3 levels.
*/ */
size_t NumLevels() const { return lod_.size(); } size_t NumLevels() const { return lod_.size(); }
...@@ -100,7 +103,7 @@ class LODTensor { ...@@ -100,7 +103,7 @@ class LODTensor {
void SliceInLevel(size_t level, size_t elem_begin, size_t elem_end); void SliceInLevel(size_t level, size_t elem_begin, size_t elem_end);
private: private:
LOD lod_; LoD lod_;
Tensor* tensor_; // not owned Tensor* tensor_; // not owned
}; };
} // namespace framework } // namespace framework
......
...@@ -94,7 +94,7 @@ Let's go on slicing this slice. Its <1,1>-slice is ...@@ -94,7 +94,7 @@ Let's go on slicing this slice. Its <1,1>-slice is
||| |||
``` ```
### The Slicing Algorithm
The algorithm, with over-simplified data structure, is defined as
...@@ -106,17 +106,41 @@ struct LoDTensor { ...@@ -106,17 +106,41 @@ struct LoDTensor {
  float* tensor_;
};

LoDTensor Slice(const LoDTensor& lodt, int level, int sequence);
```

Let us revisit the example above

```
3
3 1 2
3 2 4 1 2 3
||| || |||| | || |||
```

Suppose that we want to retrieve the <1,2>-slice

```
2
2 3
|| |||
```
we will need to find out the starting position of this slice by summing over all leaf nodes in `LoD` to the left of the slice, i.e., 3 + 2 + 4 + 1 = 10.

To avoid traversing the LoD tree at slicing time, we can do it at construction time -- instead of saving the lengths of the next level in the LoD tree, we can save the starting offsets of the next level. For example, the above LoD Tensor can be transformed into
```
0
0 9 10
0 3 5 9 10 12
||| || |||| | || |||
```
We don't really need the 0 on top, so the LoD Tensor could be
```
0 9 10
0 3 5 9 10 12
||| || |||| | || |||
```
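As a small, stand-alone illustration of this transformation (not part of the framework code), converting one length-based level such as `3 2 4 1 2 3` into the starting-offset form `0 3 5 9 10 12` is just a running sum:

```c++
#include <cstddef>
#include <vector>

// Illustrative helper: turn per-sequence lengths into starting offsets.
std::vector<size_t> StartingOffsets(const std::vector<size_t>& lengths) {
  std::vector<size_t> offsets(lengths.size(), 0);
  for (size_t i = 1; i < lengths.size(); ++i) {
    offsets[i] = offsets[i - 1] + lengths[i - 1];
  }
  return offsets;
}

// StartingOffsets({3, 2, 4, 1, 2, 3}) == {0, 3, 5, 9, 10, 12}
```

With offsets stored, the <1,2>-slice starts directly at offset 10 without any tree traversal.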
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
namespace paddle { namespace paddle {
namespace framework { namespace framework {
class LODTensorTester : public ::testing::Test { class LoDTensorTester : public ::testing::Test {
public: public:
virtual void SetUp() override { virtual void SetUp() override {
// tensor's batch_size: 30 // tensor's batch_size: 30
...@@ -29,7 +29,7 @@ class LODTensorTester : public ::testing::Test { ...@@ -29,7 +29,7 @@ class LODTensorTester : public ::testing::Test {
// 0 10 20 // 0 10 20
// 0 5 10 15 20 // 0 5 10 15 20
// 0 2 5 7 10 12 15 20 // 0 2 5 7 10 12 15 20
LOD lod; LoD lod;
lod.push_back(std::vector<size_t>{0, 10, 20}); lod.push_back(std::vector<size_t>{0, 10, 20});
lod.push_back(std::vector<size_t>{0, 5, 10, 15, 20}); lod.push_back(std::vector<size_t>{0, 5, 10, 15, 20});
lod.push_back(std::vector<size_t>{0, 2, 5, 7, 10, 12, 15, 17, 20}); lod.push_back(std::vector<size_t>{0, 2, 5, 7, 10, 12, 15, 17, 20});
...@@ -47,21 +47,21 @@ class LODTensorTester : public ::testing::Test { ...@@ -47,21 +47,21 @@ class LODTensorTester : public ::testing::Test {
protected: protected:
platform::CPUPlace place; platform::CPUPlace place;
Tensor tensor; Tensor tensor;
LODTensor lod_tensor; LoDTensor lod_tensor;
}; };
TEST_F(LODTensorTester, NumLevels) { ASSERT_EQ(lod_tensor.NumLevels(), 3UL); } TEST_F(LoDTensorTester, NumLevels) { ASSERT_EQ(lod_tensor.NumLevels(), 3UL); }
TEST_F(LODTensorTester, NumElements) { TEST_F(LoDTensorTester, NumElements) {
ASSERT_EQ(lod_tensor.NumElements(0), 2UL); ASSERT_EQ(lod_tensor.NumElements(0), 2UL);
ASSERT_EQ(lod_tensor.NumElements(1), 4UL); ASSERT_EQ(lod_tensor.NumElements(1), 4UL);
ASSERT_EQ(lod_tensor.NumElements(2), 8UL); ASSERT_EQ(lod_tensor.NumElements(2), 8UL);
} }
TEST_F(LODTensorTester, SliceLevels) { TEST_F(LoDTensorTester, SliceLevels) {
// slice 1 level // slice 1 level
for (size_t level = 0; level < 3UL; ++level) { for (size_t level = 0; level < 3UL; ++level) {
LODTensor new_lod_tensor = lod_tensor; LoDTensor new_lod_tensor = lod_tensor;
new_lod_tensor.SliceLevels(level, level + 1); new_lod_tensor.SliceLevels(level, level + 1);
ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL); ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL);
ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor.NumElements(level)); ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor.NumElements(level));
...@@ -70,7 +70,7 @@ TEST_F(LODTensorTester, SliceLevels) { ...@@ -70,7 +70,7 @@ TEST_F(LODTensorTester, SliceLevels) {
} }
// slice 2 level // slice 2 level
for (size_t level = 0; level < 2UL; ++level) { for (size_t level = 0; level < 2UL; ++level) {
LODTensor new_lod_tensor = lod_tensor; LoDTensor new_lod_tensor = lod_tensor;
new_lod_tensor.SliceLevels(level, level + 2); new_lod_tensor.SliceLevels(level, level + 2);
ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL); ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor.NumElements(level)); ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor.NumElements(level));
...@@ -80,9 +80,9 @@ TEST_F(LODTensorTester, SliceLevels) { ...@@ -80,9 +80,9 @@ TEST_F(LODTensorTester, SliceLevels) {
} }
} }
TEST_F(LODTensorTester, SliceInLevel) { TEST_F(LoDTensorTester, SliceInLevel) {
size_t level = 0; size_t level = 0;
LODTensor new_lod_tensor = lod_tensor; LoDTensor new_lod_tensor = lod_tensor;
new_lod_tensor.SliceInLevel(level, 0, 2); new_lod_tensor.SliceInLevel(level, 0, 2);
EXPECT_EQ(new_lod_tensor.NumLevels(), 3UL); EXPECT_EQ(new_lod_tensor.NumLevels(), 3UL);
EXPECT_EQ(new_lod_tensor.NumElements(0), 2UL); EXPECT_EQ(new_lod_tensor.NumElements(0), 2UL);
......
/*
Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include "paddle/framework/lod_tensor.h"
#include "paddle/platform/assert.h"
#include <gtest/gtest.h>
__global__ void test(size_t* a, int size) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
i += blockDim.x * gridDim.x) {
a[i] *= 2;
}
}
TEST(LoDTensor, LoDInGPU) {
paddle::framework::Tensor tensor;
paddle::framework::LoDTensor lod_tensor;
paddle::platform::GPUPlace place(0);
paddle::framework::LoD src_lod;
src_lod.push_back(std::vector<size_t>{0, 2, 4, 6, 8, 10, 12, 14});
tensor.Resize({14, 16});
tensor.mutable_data<float>(place);
lod_tensor.set_lod(src_lod);
lod_tensor.set_tensor(&tensor);
CHECK_EQ(lod_tensor.lod_element(0, 2), 4);
CHECK_EQ(lod_tensor.lod_element(0, 4), 8);
auto lod = lod_tensor.lod();
test<<<1, 8>>>(lod[0].data(), lod[0].size());
cudaDeviceSynchronize();
for (size_t i = 0; i < src_lod[0].size(); ++i) {
CHECK_EQ(lod[0].data()[i], src_lod[0].data()[i] * 2);
}
}
...@@ -21,7 +21,7 @@ class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { ...@@ -21,7 +21,7 @@ class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
AddOutput("output", "output of cosine op"); AddOutput("output", "output of cosine op");
AddAttr<float>("scale", "scale of cosine op") AddAttr<float>("scale", "scale of cosine op")
.SetDefault(1.0) .SetDefault(1.0)
.LargerThan(0.0); .GreaterThan(0.0);
AddComment("This is cos op"); AddComment("This is cos op");
} }
}; };
...@@ -80,7 +80,7 @@ TEST(OpRegistry, CreateOp) { ...@@ -80,7 +80,7 @@ TEST(OpRegistry, CreateOp) {
paddle::framework::Scope scope; paddle::framework::Scope scope;
paddle::platform::CPUDeviceContext dev_ctx; paddle::platform::CPUDeviceContext dev_ctx;
op->Run(scope, dev_ctx); op->Run(scope, dev_ctx);
float scale_get = op->GetAttr<float>("scale"); float scale_get = op->Attr<float>("scale");
ASSERT_EQ(scale_get, scale); ASSERT_EQ(scale_get, scale);
} }
...@@ -121,7 +121,7 @@ TEST(OpRegistry, DefaultValue) { ...@@ -121,7 +121,7 @@ TEST(OpRegistry, DefaultValue) {
paddle::framework::Scope scope; paddle::framework::Scope scope;
paddle::platform::CPUDeviceContext dev_ctx; paddle::platform::CPUDeviceContext dev_ctx;
op->Run(scope, dev_ctx); op->Run(scope, dev_ctx);
ASSERT_EQ(op->GetAttr<float>("scale"), 1.0); ASSERT_EQ(op->Attr<float>("scale"), 1.0);
} }
TEST(OpRegistry, CustomChecker) { TEST(OpRegistry, CustomChecker) {
...@@ -172,6 +172,6 @@ TEST(OpRegistry, CustomChecker) { ...@@ -172,6 +172,6 @@ TEST(OpRegistry, CustomChecker) {
paddle::platform::CPUDeviceContext dev_ctx; paddle::platform::CPUDeviceContext dev_ctx;
paddle::framework::Scope scope; paddle::framework::Scope scope;
op->Run(scope, dev_ctx); op->Run(scope, dev_ctx);
int test_attr = op->GetAttr<int>("test_attr"); int test_attr = op->Attr<int>("test_attr");
ASSERT_EQ(test_attr, 4); ASSERT_EQ(test_attr, 4);
} }
\ No newline at end of file
...@@ -123,6 +123,15 @@ OperatorBase::OperatorBase(const std::string& type, ...@@ -123,6 +123,15 @@ OperatorBase::OperatorBase(const std::string& type,
CheckAllInputOutputSet(); CheckAllInputOutputSet();
} }
std::vector<std::string> OperatorBase::InputVars() const {
std::vector<std::string> ret_val;
for (auto& o : outputs_) {
ret_val.reserve(ret_val.size() + o.second.size());
ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
}
return ret_val;
}
std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const { std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
std::vector<std::string> ret_val; std::vector<std::string> ret_val;
if (has_intermediate) { if (has_intermediate) {
......
...@@ -69,7 +69,7 @@ class OperatorBase { ...@@ -69,7 +69,7 @@ class OperatorBase {
virtual ~OperatorBase() {} virtual ~OperatorBase() {}
template <typename T> template <typename T>
inline const T& GetAttr(const std::string& name) const { inline const T& Attr(const std::string& name) const {
PADDLE_ENFORCE(attrs_.count(name) != 0, "%s should be in AttributeMap", PADDLE_ENFORCE(attrs_.count(name) != 0, "%s should be in AttributeMap",
name); name);
return boost::get<T>(attrs_.at(name)); return boost::get<T>(attrs_.at(name));
...@@ -94,11 +94,14 @@ class OperatorBase { ...@@ -94,11 +94,14 @@ class OperatorBase {
const VariableNameMap& Inputs() const { return inputs_; } const VariableNameMap& Inputs() const { return inputs_; }
const VariableNameMap& Outputs() const { return outputs_; } const VariableNameMap& Outputs() const { return outputs_; }
//! Get a input with argument's name described in `op_proto` //! Get a input with argument's name described in `op_proto`
std::string Input(const std::string& name) const; std::string Input(const std::string& name) const;
//! Get a input which has multiple variables. //! Get a input which has multiple variables.
const std::vector<std::string>& Inputs(const std::string& name) const; const std::vector<std::string>& Inputs(const std::string& name) const;
std::vector<std::string> InputVars() const;
//! Get a output with argument's name described in `op_proto` //! Get a output with argument's name described in `op_proto`
std::string Output(const std::string& name) const; std::string Output(const std::string& name) const;
//! Get an output which has multiple variables. //! Get an output which has multiple variables.
...@@ -238,8 +241,8 @@ class InferShapeContext { ...@@ -238,8 +241,8 @@ class InferShapeContext {
const Scope& scope() const { return scope_; } const Scope& scope() const { return scope_; }
template <typename T> template <typename T>
inline const T& GetAttr(const std::string& name) const { inline const T& Attr(const std::string& name) const {
return op_.GetAttr<T>(name); return op_.Attr<T>(name);
} }
size_t InputSize(const std::string& name) const { size_t InputSize(const std::string& name) const {
...@@ -311,9 +314,9 @@ class InferShapeContext { ...@@ -311,9 +314,9 @@ class InferShapeContext {
} }
template <typename T> template <typename T>
std::vector<const T*> MultiOutput(const std::string& name) const { std::vector<T*> MultiOutput(const std::string& name) const {
auto names = op_.Outputs(name); auto names = op_.Outputs(name);
std::vector<const T*> res; std::vector<T*> res;
res.reserve(names.size()); res.reserve(names.size());
std::transform(names.begin(), names.end(), std::back_inserter(res), std::transform(names.begin(), names.end(), std::back_inserter(res),
[&](const std::string& sub_name) { [&](const std::string& sub_name) {
......
...@@ -102,7 +102,7 @@ class OpKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker { ...@@ -102,7 +102,7 @@ class OpKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
AddOutput("y", "output of test op"); AddOutput("y", "output of test op");
AddAttr<float>("scale", "scale of cosine op") AddAttr<float>("scale", "scale of cosine op")
.SetDefault(1.0) .SetDefault(1.0)
.LargerThan(0.0); .GreaterThan(0.0);
AddComment("This is test op"); AddComment("This is test op");
} }
}; };
...@@ -140,7 +140,7 @@ class OpKernelTestMultiInputsProtoAndCheckerMaker ...@@ -140,7 +140,7 @@ class OpKernelTestMultiInputsProtoAndCheckerMaker
AddOutput("ys", "outputs of test op").AsDuplicable(); AddOutput("ys", "outputs of test op").AsDuplicable();
AddAttr<float>("scale", "scale of cosine op") AddAttr<float>("scale", "scale of cosine op")
.SetDefault(1.0) .SetDefault(1.0)
.LargerThan(0.0); .GreaterThan(0.0);
AddComment("This is test op"); AddComment("This is test op");
} }
}; };
......
...@@ -43,6 +43,9 @@ class Tensor { ...@@ -43,6 +43,9 @@ class Tensor {
template <typename T, size_t D, int MajorType, typename IndexType> template <typename T, size_t D, int MajorType, typename IndexType>
friend struct EigenTensor; friend struct EigenTensor;
template <typename T, int MajorType, typename IndexType>
friend struct EigenMatrix;
template <typename T, int MajorType, typename IndexType> template <typename T, int MajorType, typename IndexType>
friend struct EigenVector; friend struct EigenVector;
...@@ -78,6 +81,9 @@ class Tensor { ...@@ -78,6 +81,9 @@ class Tensor {
/*! Return the dimensions of the memory block. */ /*! Return the dimensions of the memory block. */
inline const DDim& dims() const; inline const DDim& dims() const;
/*! Return the numel of the memory block. */
inline int64_t numel() const;
/*! Resize the dimensions of the memory block. */ /*! Resize the dimensions of the memory block. */
inline Tensor& Resize(const DDim& dims); inline Tensor& Resize(const DDim& dims);
...@@ -159,6 +165,12 @@ class Tensor { ...@@ -159,6 +165,12 @@ class Tensor {
/*! points to dimensions of memory block. */ /*! points to dimensions of memory block. */
DDim dims_; DDim dims_;
/**
* A cache of the number of elements in a tensor.
* Would be 0 for an uninitialized tensor.
*/
int64_t numel_;
/** /**
* @brief A PlaceHolder may be shared by more than one tensor. * @brief A PlaceHolder may be shared by more than one tensor.
* *
......
...@@ -24,7 +24,7 @@ inline void Tensor::check_memory_size() const { ...@@ -24,7 +24,7 @@ inline void Tensor::check_memory_size() const {
PADDLE_ENFORCE_NOT_NULL( PADDLE_ENFORCE_NOT_NULL(
holder_, "Tenosr holds no memory. Call Tensor::mutable_data first."); holder_, "Tenosr holds no memory. Call Tensor::mutable_data first.");
PADDLE_ENFORCE_GE( PADDLE_ENFORCE_GE(
holder_->size(), product(dims_) * sizeof(T) + offset_, holder_->size(), numel() * sizeof(T) + offset_,
"Tensor's dims_ is out of bound. Call Tensor::mutable_data " "Tensor's dims_ is out of bound. Call Tensor::mutable_data "
"first to re-allocate memory.\n" "first to re-allocate memory.\n"
"or maybe the required data-type mismatches the data already stored."); "or maybe the required data-type mismatches the data already stored.");
...@@ -54,11 +54,11 @@ inline T* Tensor::mutable_data(DDim dims, platform::Place place) { ...@@ -54,11 +54,11 @@ inline T* Tensor::mutable_data(DDim dims, platform::Place place) {
template <typename T> template <typename T>
inline T* Tensor::mutable_data(platform::Place place) { inline T* Tensor::mutable_data(platform::Place place) {
static_assert(std::is_pod<T>::value, "T must be POD"); static_assert(std::is_pod<T>::value, "T must be POD");
PADDLE_ENFORCE_GT(product(dims_), 0, PADDLE_ENFORCE_GT(numel(), 0,
"Tensor's numel must be larger than zero to call " "Tensor's numel must be larger than zero to call "
"Tensor::mutable_data. Call Tensor::set_dim first."); "Tensor::mutable_data. Call Tensor::set_dim first.");
/* some versions of boost::variant don't have operator!= */ /* some versions of boost::variant don't have operator!= */
int64_t size = product(dims_) * sizeof(T); int64_t size = numel() * sizeof(T);
if (holder_ == nullptr || !(holder_->place() == place) || if (holder_ == nullptr || !(holder_->place() == place) ||
holder_->size() < size + offset_) { holder_->size() < size + offset_) {
if (platform::is_cpu_place(place)) { if (platform::is_cpu_place(place)) {
...@@ -97,7 +97,7 @@ inline void Tensor::CopyFrom(const Tensor& src, ...@@ -97,7 +97,7 @@ inline void Tensor::CopyFrom(const Tensor& src,
auto dst_ptr = static_cast<void*>(mutable_data<T>(dst_place)); auto dst_ptr = static_cast<void*>(mutable_data<T>(dst_place));
auto size = product(src.dims_) * sizeof(T); auto size = src.numel() * sizeof(T);
if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) { if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) {
memory::Copy(boost::get<platform::CPUPlace>(dst_place), dst_ptr, memory::Copy(boost::get<platform::CPUPlace>(dst_place), dst_ptr,
...@@ -131,7 +131,7 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const { ...@@ -131,7 +131,7 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const {
PADDLE_ENFORCE_LT(begin_idx, end_idx, PADDLE_ENFORCE_LT(begin_idx, end_idx,
"Begin index must be less than end index."); "Begin index must be less than end index.");
PADDLE_ENFORCE_NE(dims_[0], 1, "Can not slice a tensor with dims_[0] = 1."); PADDLE_ENFORCE_NE(dims_[0], 1, "Can not slice a tensor with dims_[0] = 1.");
size_t base = product(dims_) / dims_[0]; size_t base = numel() / dims_[0];
Tensor dst; Tensor dst;
dst.holder_ = holder_; dst.holder_ = holder_;
DDim dst_dims = dims_; DDim dst_dims = dims_;
...@@ -143,10 +143,21 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const { ...@@ -143,10 +143,21 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const {
inline Tensor& Tensor::Resize(const DDim& dims) { inline Tensor& Tensor::Resize(const DDim& dims) {
dims_ = dims; dims_ = dims;
numel_ = product(dims_);
return *this; return *this;
} }
inline const DDim& Tensor::dims() const { return dims_; } inline const DDim& Tensor::dims() const { return dims_; }
inline int64_t Tensor::numel() const { return numel_; }
template <typename T>
inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) {
Tensor res;
res.ShareDataWith<T>(src);
res.Resize(flatten_to_2d(src.dims(), num_col_dims));
return res;
}
} // namespace framework } // namespace framework
} // namespace paddle } // namespace paddle
...@@ -262,3 +262,16 @@ TEST(Tensor, CopyFrom) { ...@@ -262,3 +262,16 @@ TEST(Tensor, CopyFrom) {
} }
#endif #endif
} }
TEST(Tensor, ReshapeToMatrix) {
using namespace paddle::framework;
using namespace paddle::platform;
Tensor src;
int* src_ptr = src.mutable_data<int>({2, 3, 4, 9}, CPUPlace());
for (int i = 0; i < 2 * 3 * 4 * 9; ++i) {
src_ptr[i] = i;
}
Tensor res = ReshapeToMatrix<int>(src, 2);
ASSERT_EQ(res.dims()[0], 2 * 3);
ASSERT_EQ(res.dims()[1], 4 * 9);
}
\ No newline at end of file
...@@ -44,6 +44,7 @@ if(WITH_GPU) ...@@ -44,6 +44,7 @@ if(WITH_GPU)
add_simple_unittest(RowConvOpTest) add_simple_unittest(RowConvOpTest)
add_simple_unittest(BlockExpandOpTest) add_simple_unittest(BlockExpandOpTest)
add_simple_unittest(CropOpTest) add_simple_unittest(CropOpTest)
add_simple_unittest(SwitchOpTest)
endif() endif()
add_simple_unittest(Im2ColTest) add_simple_unittest(Im2ColTest)
......
...@@ -83,9 +83,9 @@ struct EigenBlasGemm { ...@@ -83,9 +83,9 @@ struct EigenBlasGemm {
}; };
#ifdef PADDLE_TYPE_DOUBLE #ifdef PADDLE_TYPE_DOUBLE
template class EigenBlasGemm<double>; template struct EigenBlasGemm<double>;
#else #else
template class EigenBlasGemm<float>; template struct EigenBlasGemm<float>;
#endif #endif
} // namespace paddle } // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "GemmFunctor.h"
#include "hl_cpu_gru.cuh"
namespace paddle {
template <DeviceType Device, class T>
struct GruFunctor {
template <class OpResetOutput, class OpFinalOutput>
static void compute(OpResetOutput opResetOutput,
OpFinalOutput opFinalOutput,
hl_gru_value value,
int frameSize,
int batchSize,
hl_activation_mode_t active_node,
hl_activation_mode_t active_gate) {
#ifndef __NVCC__
if (value.prevOutValue) {
BlasGemm<Device, T>::compute(false,
false,
batchSize,
2 * frameSize,
frameSize,
1,
value.prevOutValue,
frameSize,
value.gateWeight,
frameSize * 2,
1,
value.gateValue,
frameSize * 3);
}
forward_reset_output(
opResetOutput, value, frameSize, batchSize, active_gate);
if (value.prevOutValue) {
BlasGemm<Device, T>::compute(false,
false,
batchSize,
frameSize,
frameSize,
1,
value.resetOutputValue,
frameSize,
value.stateWeight,
frameSize,
1,
value.gateValue + frameSize * 2,
frameSize * 3);
}
forward_final_output(
opFinalOutput, value, frameSize, batchSize, active_node);
#endif
}
};
template <DeviceType Device, class T>
struct GruGradFunctor {
template <class OpStateGrad, class OpResetGrad>
static void compute(OpStateGrad opStateGrad,
OpResetGrad opResetGrad,
hl_gru_value value,
hl_gru_grad grad,
int frameSize,
int batchSize,
hl_activation_mode_t active_node,
hl_activation_mode_t active_gate) {
#ifndef __NVCC__
backward_state_grad(
opStateGrad, value, grad, frameSize, batchSize, active_node);
if (value.prevOutValue && grad.prevOutGrad) {
BlasGemm<Device, T>::compute(false,
true,
batchSize,
frameSize,
frameSize,
1,
grad.gateGrad + frameSize * 2,
frameSize * 3,
value.stateWeight,
frameSize,
0,
grad.resetOutputGrad,
frameSize);
if (grad.stateWeightGrad) {
BlasGemm<Device, T>::compute(true,
false,
frameSize,
frameSize,
batchSize,
1,
value.resetOutputValue,
frameSize,
grad.gateGrad + frameSize * 2,
frameSize * 3,
1,
grad.stateWeightGrad,
frameSize);
}
}
backward_reset_grad(
opResetGrad, value, grad, frameSize, batchSize, active_gate);
if (grad.prevOutGrad && value.prevOutValue) {
BlasGemm<Device, T>::compute(false,
true,
batchSize,
frameSize,
frameSize * 2,
1,
grad.gateGrad,
frameSize * 3,
value.gateWeight,
frameSize * 2,
1,
grad.prevOutGrad,
frameSize);
if (grad.gateWeightGrad) {
BlasGemm<Device, T>::compute(true,
false,
frameSize,
frameSize * 2,
batchSize,
1,
value.prevOutValue,
frameSize,
grad.gateGrad,
frameSize * 3,
1,
grad.gateWeightGrad,
frameSize * 2);
}
}
#endif
}
};
} // namespace paddle
...@@ -94,95 +94,4 @@ public: ...@@ -94,95 +94,4 @@ public:
int paddingWidth); int paddingWidth);
}; };
template <class T>
struct Padding {
static void run(const T* src,
T* dest,
int channels,
int inputHeight,
int inputWidth,
int paddingHeight,
int paddingWidth) {
const int destWidth = inputWidth + 2 * paddingWidth;
for (int c = 0; c < channels; c++) {
if (paddingHeight > 0) {
memset(dest, 0, destWidth * paddingHeight * sizeof(T));
dest += destWidth * paddingHeight;
}
for (int i = 0; i < inputHeight; i++) {
// padding head
for (int j = 0; j < paddingWidth; j++) {
*dest++ = T(0);
}
memcpy(dest, src, inputWidth * sizeof(T));
dest += inputWidth;
src += inputWidth;
// padding tail
for (int j = 0; j < paddingWidth; j++) {
*dest++ = T(0);
}
}
if (paddingHeight > 0) {
memset(dest, 0, destWidth * paddingHeight * sizeof(T));
dest += destWidth * paddingHeight;
}
}
}
};
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
template <>
struct Padding<float> {
static void run(const float* src,
float* dest,
int channels,
int inputHeight,
int inputWidth,
int paddingHeight,
int paddingWidth) {
const int destWidth = inputWidth + 2 * paddingWidth;
for (int c = 0; c < channels; c++) {
if (paddingHeight > 0) {
memset(dest, 0, destWidth * paddingHeight * sizeof(float));
dest += destWidth * paddingHeight;
}
for (int i = 0; i < inputHeight; i++) {
// padding head
for (int j = 0; j < paddingWidth; j++) {
*dest++ = float(0);
}
int step = inputWidth >> 2;
int remain = inputWidth & 3;
for (int s = 0; s < step; s++) {
float32x4_t s0 = vld1q_f32(src);
vst1q_f32(dest, s0);
src += 4;
dest += 4;
}
for (int r = 0; r < remain; r++) {
*dest++ = *src++;
}
// padding tail
for (int j = 0; j < paddingWidth; j++) {
*dest++ = float(0);
}
}
if (paddingHeight > 0) {
memset(dest, 0, destWidth * paddingHeight * sizeof(float));
dest += destWidth * paddingHeight;
}
}
}
};
#endif
} // namespace paddle } // namespace paddle
...@@ -13,18 +13,10 @@ See the License for the specific language governing permissions and ...@@ -13,18 +13,10 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "MulOp.h" #include "MulOp.h"
/// todo(tianbing), delete it #include "GemmFunctor.h"
#include <iostream>
#include "paddle/math/MathFunctions.h"
#include "paddle/math/SIMDFunctions.h" #include "paddle/math/SIMDFunctions.h"
#include "paddle/utils/ThreadLocal.h" #include "paddle/utils/ThreadLocal.h"
#ifndef PADDLE_TYPE_DOUBLE
#define GEMM paddle::gemm<float>
#else
#define GEMM paddle::gemm<double>
#endif
namespace { namespace {
inline void vecAddTo(real* a, const real* b, real scaleB, size_t len) { inline void vecAddTo(real* a, const real* b, real scaleB, size_t len) {
for (unsigned int i = 0; i < len; ++i) { for (unsigned int i = 0; i < len; ++i) {
...@@ -114,19 +106,20 @@ void MulOp<DEVICE_TYPE_CPU>(CpuMatrix& out, ...@@ -114,19 +106,20 @@ void MulOp<DEVICE_TYPE_CPU>(CpuMatrix& out,
real scaleT, real scaleT,
bool aTrans, bool aTrans,
bool bTrans) { bool bTrans) {
GEMM(aTrans ? CblasTrans : CblasNoTrans, BlasGemm<DEVICE_TYPE_CPU, real>::compute(
bTrans ? CblasTrans : CblasNoTrans, aTrans,
out.getHeight(), bTrans,
out.getWidth(), out.getHeight(),
!aTrans ? a.getWidth() : a.getHeight(), out.getWidth(),
scaleAB, !aTrans ? a.getWidth() : a.getHeight(),
a.getData(), scaleAB,
a.getStride(), a.getData(),
b.getData(), a.getStride(),
b.getStride(), b.getData(),
scaleT, b.getStride(),
out.getData(), scaleT,
out.getStride()); out.getData(),
out.getStride());
} }
/// dense matrix (+)= sparse matrix * dense matrix /// dense matrix (+)= sparse matrix * dense matrix
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "SwitchOp.h"
#include "paddle/math/Vector.h"
namespace paddle {
template <>
void NCHW2NHWC<DEVICE_TYPE_CPU>(real* outputs,
const real* inputs,
const int num,
const int inC,
const int inH,
const int inW,
const int argType) {
for (int n = 0; n < num; ++n) {
for (int c = 0; c < inC; ++c) {
for (int h = 0; h < inH; ++h) {
for (int w = 0; w < inW; ++w) {
if (argType == ADD_TO) {
outputs[((n * inH + h) * inW + w) * inC + c] += *(inputs++);
} else {
outputs[((n * inH + h) * inW + w) * inC + c] = *(inputs++);
}
}
}
}
}
}
template <>
void NHWC2NCHW<DEVICE_TYPE_CPU>(real* outputs,
const real* inputs,
const int num,
const int inH,
const int inW,
const int inC,
const int argType) {
for (int n = 0; n < num; ++n) {
for (int h = 0; h < inH; ++h) {
for (int w = 0; w < inW; ++w) {
for (int c = 0; c < inC; ++c) {
if (argType == ADD_TO) {
outputs[((n * inC + c) * inH + h) * inW + w] += *(inputs++);
} else {
outputs[((n * inC + c) * inH + h) * inW + w] = *(inputs++);
}
}
}
}
}
}
/**
* \brief Switch dimension order of image input.
* The input and output is a 4D tensor. Switch order
* 'batch_size,channels, height, width' to
* order 'batch_size, height, width, channels'.
*
* Argument in this Function:
* \param inputs input data with order 'batch_size,channels, height, width'.
* \param outputs output data with order 'batch_size, height, width, channels'.
*/
template <DeviceType Device>
class NCHW2NHWCFunc : public FunctionBase {
public:
void init(const FuncConfig& config) override {}
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
CHECK_EQ(1UL, inputs.size());
CHECK_EQ(1UL, outputs.size());
size_t num = inputs[0].shape()[0];
size_t inC = inputs[0].shape()[1];
size_t inH = inputs[0].shape()[2];
size_t inW = inputs[0].shape()[3];
NCHW2NHWC<Device>(outputs[0].data<real>(),
inputs[0].data<real>(),
num,
inC,
inH,
inW,
outputs[0].getArgType());
}
};
/**
* \brief Switch dimension order of image input.
* The input and output is a 4D tensor. Switch order
* 'batch_size, height, width, channels' to
* order 'batch_size, channels, height, width'.
*
* Argument in this Function:
* \param inputs input data with order 'batch_size, height, width, channels'.
* \param outputs output data with order 'batch_size, channels, height, width'.
*/
template <DeviceType Device>
class NHWC2NCHWFunc : public FunctionBase {
public:
void init(const FuncConfig& config) override {}
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
CHECK_EQ(1UL, inputs.size());
CHECK_EQ(1UL, outputs.size());
size_t num = inputs[0].shape()[0];
size_t inH = inputs[0].shape()[1];
size_t inW = inputs[0].shape()[2];
size_t inC = inputs[0].shape()[3];
NHWC2NCHW<Device>(outputs[0].data<real>(),
inputs[0].data<real>(),
num,
inH,
inW,
inC,
outputs[0].getArgType());
}
};
REGISTER_TYPED_FUNC(NCHW2NHWC, CPU, NCHW2NHWCFunc);
REGISTER_TYPED_FUNC(NHWC2NCHW, CPU, NHWC2NCHWFunc);
#ifndef PADDLE_ONLY_CPU
REGISTER_TYPED_FUNC(NCHW2NHWC, GPU, NCHW2NHWCFunc);
REGISTER_TYPED_FUNC(NHWC2NCHW, GPU, NHWC2NCHWFunc);
#endif
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "Function.h"
namespace paddle {
/**
* \brief This function switches the dimension order of image input.
* The input and output is a 4D tensor. Switch order 'batch_size,
*channels, height, width' to
* order 'batch_size, height, width, channels'.
*
* \param[out] outputs save results.
* \param[in] inputs input data.
* \param[in] num batch size of input data.
* \param[in] inC channel number of input data.
* \param[in] inH height of input data.
* \param[in] inW width of input data.
* \param[in] argType type of output argument.
*/
template <DeviceType Device>
void NCHW2NHWC(real* outputs,
const real* inputs,
const int num,
const int inC,
const int inH,
const int inW,
const int argtype);
/**
* \brief This function switches the dimension order of image input.
* The input and output is a 4D tensor. Switch order 'batch_size,
*height, width, channels' to
* order 'batch_size, channels, height, width'.
*
* \param[out] inGrad gradients of previous layer.
* \param[in] outGrad output gradients.
* \param[in] num batch size of input data.
* \param[in] inH height of input data.
* \param[in] inW width of input data.
* \param[in] inC channel number of input data.
* \param[in] argType type of output argument.
*/
template <DeviceType Device>
void NHWC2NCHW(real* inGrad,
const real* outGrad,
const int num,
const int inH,
const int inW,
const int inC,
const int argType);
} // namespace paddle
/* Copyright (c) 2016 Paddle
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "SwitchOp.h"
#include "hl_base.h"
namespace paddle {
__global__ void KeNCHW2NHWC(real* outputs,
const real* inputs,
int inC,
int inH,
int inW,
int nthreads,
int argType) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < nthreads) {
const int w = idx % inW;
const int h = (idx / inW) % inH;
const int c = (idx / inW / inH) % inC;
const int n = idx / inW / inH / inC;
const int off = ((n * inH + h) * inW + w) * inC + c;
if (argType == ADD_TO) {
outputs[off] += inputs[idx];
} else {
outputs[off] = inputs[idx];
}
}
}
template <>
void NCHW2NHWC<DEVICE_TYPE_GPU>(real* outputs,
const real* inputs,
const int num,
const int inC,
const int inH,
const int inW,
const int argType) {
size_t nth = num * inC * inH * inW;
int blockSize = 1024;
int gridSize = (nth + 1024 - 1) / 1024;
KeNCHW2NHWC<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>(
outputs, inputs, inC, inH, inW, nth, argType);
CHECK_SYNC("NCHW2NHWC");
}
__global__ void KeNHWC2NCHW(real* outputs,
const real* inputs,
int inH,
int inW,
int inC,
int nthreads,
int argType) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < nthreads) {
const int c = idx % inC;
const int w = (idx / inC) % inW;
const int h = (idx / inC / inW) % inH;
const int n = idx / inW / inH / inC;
const int off = ((n * inC + c) * inH + h) * inW + w;
if (argType == ADD_TO) {
outputs[off] += inputs[idx];
} else {
outputs[off] = inputs[idx];
}
}
}
template <>
void NHWC2NCHW<DEVICE_TYPE_GPU>(real* outputs,
const real* inputs,
const int num,
const int inH,
const int inW,
const int inC,
const int argType) {
int nth = num * inC * inH * inW;
int blockSize = 1024;
int gridSize = (nth + 1024 - 1) / 1024;
KeNHWC2NCHW<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>(
outputs, inputs, inH, inW, inC, nth, argType);
CHECK_SYNC("NHWC2NCHW");
}
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include "FunctionTest.h"
namespace paddle {
TEST(Pad, real) {
for (size_t numSamples : {1, 4, 8, 16}) {
for (size_t channels : {1, 4, 8, 16}) {
for (size_t imgSizeH : {1, 4, 8, 16}) {
for (size_t imgSizeW : {1, 4, 8, 16}) {
VLOG(3) << " numSamples=" << numSamples << " channels=" << channels
<< " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW;
for (bool test_grad : {true, false}) {
CpuGpuFuncCompare compare(test_grad ? "NHWC2NCHW" : "NCHW2NHWC",
FuncConfig());
TensorShape inDims{numSamples, channels, imgSizeH, imgSizeW};
TensorShape outDims{numSamples, imgSizeH, imgSizeW, channels};
compare.addInputs(
BufferArg(VALUE_TYPE_FLOAT, test_grad ? outDims : inDims));
compare.addOutputs(BufferArg(
VALUE_TYPE_FLOAT, test_grad ? inDims : outDims, ASSIGN_TO));
compare.run();
}
}
}
}
}
}
} // namespace paddle
...@@ -12,468 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,468 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "neon_util.h" #include "NeonDepthwiseConv.h"
#include "paddle/function/ConvOp.h" #include "paddle/function/ConvOp.h"
#include "paddle/function/Im2Col.h"
namespace paddle { namespace paddle {
namespace neon {
#if defined(__ARM_NEON__) || defined(__ARM_NEON) #if defined(__ARM_NEON__) || defined(__ARM_NEON)
template <int filterSize, int stride>
struct DepthwiseConvKernel {};
inline float32_t conv3x3(float32x4_t r0,
float32x4_t r1,
float32x4_t r2,
float32x4_t k0,
float32x4_t k1,
float32x4_t k2) {
float32x4_t tmp;
tmp = vmulq_f32(r0, k0);
tmp = vmlaq_f32(tmp, r1, k1);
tmp = vmlaq_f32(tmp, r2, k2);
return vaddvq_f32(tmp);
}
inline float32_t conv4x4(float32x4_t r0,
float32x4_t r1,
float32x4_t r2,
float32x4_t r3,
float32x4_t k0,
float32x4_t k1,
float32x4_t k2,
float32x4_t k3) {
float32x4_t tmp;
tmp = vmulq_f32(r0, k0);
tmp = vmlaq_f32(tmp, r1, k1);
tmp = vmlaq_f32(tmp, r2, k2);
tmp = vmlaq_f32(tmp, r3, k3);
return vaddvq_f32(tmp);
}
/**
* Each step calculates four elements of the output.
* First step:
* R0[0, 1, 2, 3...] * K[0][0]
* R0[1, 2, 3, 4...] * K[0][1]
* R0[2, 3, 4, 5...] * K[0][2]
* R1[0, 1, 2, 3...] * K[1][0]
* R1[1, 2, 3, 4...] * K[1][1]
* R1[2, 3, 4, 5...] * K[1][2]
* R2[0, 1, 2, 3...] * K[2][0]
* R2[1, 2, 3, 4...] * K[2][1]
* + R2[2, 3, 4, 5...] * K[2][2]
* ------------------------------
* Output[0, 1, 2, 3]
*/
template <>
struct DepthwiseConvKernel<3, 1> {
static void run(const float* inputData,
const float* filterData,
int inputHeight,
int inputWidth,
int outputChannels,
int outputHeight,
int outputWidth,
int filterMultiplier,
float* outputData) {
const int steps = outputWidth >> 2;
const int remain = outputWidth & 3;
for (int c = 0; c < outputChannels; c++, filterData += 9) {
// Load the filters
float32x4_t k[3];
k[0] = vld1q_f32(filterData);
k[1] = vld1q_f32(filterData + 3);
k[2] = vld1q_f32(filterData + 6);
k[0] = vsetq_lane_f32(0.f, k[0], 3);
k[1] = vsetq_lane_f32(0.f, k[1], 3);
k[2] = vsetq_lane_f32(0.f, k[2], 3);
const float* r0 =
inputData + (c / filterMultiplier) * (inputHeight * inputWidth);
const float* r1 = r0 + inputWidth;
const float* r2 = r0 + inputWidth * 2;
float32x4_t input[3][3];
for (int h = 0; h < outputHeight; h++) {
for (int s = 0; s < steps; s++) {
// Load the inputs
float32x4_t tmp;
input[0][0] = vld1q_f32(r0);
tmp = vld1q_f32(r0 + 4);
input[0][1] = vextq_f32(input[0][0], tmp, 1);
input[0][2] = vextq_f32(input[0][0], tmp, 2);
input[1][0] = vld1q_f32(r1);
tmp = vld1q_f32(r1 + 4);
input[1][1] = vextq_f32(input[1][0], tmp, 1);
input[1][2] = vextq_f32(input[1][0], tmp, 2);
input[2][0] = vld1q_f32(r2);
tmp = vld1q_f32(r2 + 4);
input[2][1] = vextq_f32(input[2][0], tmp, 1);
input[2][2] = vextq_f32(input[2][0], tmp, 2);
float32x4_t tmp1 = vdupq_n_f32(0.f);
float32x4_t tmp2 = vdupq_n_f32(0.f);
tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2);
tmp2 = vmlaq_laneq_f32(tmp2, input[1][0], k[1], 0);
tmp1 = vmlaq_laneq_f32(tmp1, input[1][1], k[1], 1);
tmp2 = vmlaq_laneq_f32(tmp2, input[1][2], k[1], 2);
tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2);
tmp1 = vaddq_f32(tmp1, tmp2);
vst1q_f32(outputData, tmp1);
r0 += 4;
r1 += 4;
r2 += 4;
outputData += 4;
}
for (int r = 0; r < remain; r++) {
float32x4_t i0 = vld1q_f32(r0);
float32x4_t i1 = vld1q_f32(r1);
float32x4_t i2 = vld1q_f32(r2);
*outputData = conv3x3(i0, i1, i2, k[0], k[1], k[2]);
r0++;
r1++;
r2++;
outputData++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
};
/**
* Each step calculates four elements of the output.
* First step:
* R0[0, 2, 4, 6...] * K[0][0]
* R0[1, 3, 5, 7...] * K[0][1]
* R0[2, 4, 6, 8...] * K[0][2]
* R1[0, 2, 4, 6...] * K[1][0]
* R1[1, 3, 5, 7...] * K[1][1]
* R1[2, 4, 6, 8...] * K[1][2]
* R2[0, 2, 4, 6...] * K[2][0]
* R2[1, 3, 5, 7...] * K[2][1]
* R2[2, 4, 6, 8...] * K[2][2]
* ------------------------------
* Output[0, 1, 2, 3]
*/
template <>
struct DepthwiseConvKernel<3, 2> {
static void run(const float* inputData,
const float* filterData,
int inputHeight,
int inputWidth,
int outputChannels,
int outputHeight,
int outputWidth,
int filterMultiplier,
float* outputData) {
const int steps = outputWidth >> 2;
const int remain = outputWidth & 3;
for (int c = 0; c < outputChannels; c++, filterData += 9) {
// Load the filters
float32x4_t k[3];
k[0] = vld1q_f32(filterData);
k[1] = vld1q_f32(filterData + 3);
k[2] = vld1q_f32(filterData + 6);
k[0] = vsetq_lane_f32(0.f, k[0], 3);
k[1] = vsetq_lane_f32(0.f, k[1], 3);
k[2] = vsetq_lane_f32(0.f, k[2], 3);
const float* start =
inputData + (c / filterMultiplier) * (inputHeight * inputWidth);
float32x4_t input[3][3];
for (int h = 0; h < outputHeight; h++) {
const float* r0 = start + 2 * h * inputWidth;
const float* r1 = start + (2 * h + 1) * inputWidth;
const float* r2 = start + (2 * h + 2) * inputWidth;
for (int s = 0; s < steps; s++) {
// Load the inputs
float32x4_t data1;
float32x4x2_t data2;
data2 = vld2q_f32(r0);
input[0][0] = data2.val[0];
input[0][1] = data2.val[1];
data1 = vld1q_f32(r0 + 8);
input[0][2] = vextq_f32(data2.val[0], data1, 1);
data2 = vld2q_f32(r1);
input[1][0] = data2.val[0];
input[1][1] = data2.val[1];
data1 = vld1q_f32(r1 + 8);
input[1][2] = vextq_f32(data2.val[0], data1, 1);
data2 = vld2q_f32(r2);
input[2][0] = data2.val[0];
input[2][1] = data2.val[1];
data1 = vld1q_f32(r2 + 8);
input[2][2] = vextq_f32(data2.val[0], data1, 1);
float32x4_t tmp1 = vdupq_n_f32(0.f);
float32x4_t tmp2 = vdupq_n_f32(0.f);
tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2);
tmp2 = vmlaq_laneq_f32(tmp2, input[1][0], k[1], 0);
tmp1 = vmlaq_laneq_f32(tmp1, input[1][1], k[1], 1);
tmp2 = vmlaq_laneq_f32(tmp2, input[1][2], k[1], 2);
tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2);
tmp1 = vaddq_f32(tmp1, tmp2);
vst1q_f32(outputData, tmp1);
r0 += 8;
r1 += 8;
r2 += 8;
outputData += 4;
}
for (int r = 0; r < remain; r++) {
float32x4_t i0 = vld1q_f32(r0);
float32x4_t i1 = vld1q_f32(r1);
float32x4_t i2 = vld1q_f32(r2);
*outputData = conv3x3(i0, i1, i2, k[0], k[1], k[2]);
r0 += 2;
r1 += 2;
r2 += 2;
outputData++;
}
}
}
}
};
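// [Editor's note] The stride-2 kernel above relies on vld2q_f32, which loads
// eight consecutive floats and de-interleaves them into even- and odd-indexed
// lanes. The tiny helper below is an editor-added illustration (not part of
// the original sources) of why data2.val[0] holds columns 0,2,4,6 and
// data2.val[1] holds columns 1,3,5,7 of an input row, which is exactly the
// column pattern a stride-2 convolution consumes.
inline void demoVld2qDeinterleave() {
  const float row[8] = {0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f};
  float even[4];
  float odd[4];
  float32x4x2_t v = vld2q_f32(row);
  vst1q_f32(even, v.val[0]);  // even == {0, 2, 4, 6}
  vst1q_f32(odd, v.val[1]);   // odd  == {1, 3, 5, 7}
}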
/**
* Each step calculates four elements of the output.
*/
template <>
struct DepthwiseConvKernel<4, 1> {
static void run(const float* inputData,
const float* filterData,
int inputHeight,
int inputWidth,
int outputChannels,
int outputHeight,
int outputWidth,
int filterMultiplier,
float* outputData) {
const int steps = outputWidth >> 2;
const int remain = outputWidth & 3;
for (int c = 0; c < outputChannels; c++, filterData += 16) {
// Load the filters
float32x4_t k[4];
k[0] = vld1q_f32(filterData);
k[1] = vld1q_f32(filterData + 4);
k[2] = vld1q_f32(filterData + 8);
k[3] = vld1q_f32(filterData + 12);
const float* r0 =
inputData + (c / filterMultiplier) * (inputHeight * inputWidth);
const float* r1 = r0 + inputWidth;
const float* r2 = r0 + inputWidth * 2;
const float* r3 = r0 + inputWidth * 3;
float32x4_t input[4][4];
for (int h = 0; h < outputHeight; h++) {
for (int s = 0; s < steps; s++) {
// Load the inputs
float32x4_t tmp;
input[0][0] = vld1q_f32(r0);
tmp = vld1q_f32(r0 + 4);
input[0][1] = vextq_f32(input[0][0], tmp, 1);
input[0][2] = vextq_f32(input[0][0], tmp, 2);
input[0][3] = vextq_f32(input[0][0], tmp, 3);
input[1][0] = vld1q_f32(r1);
tmp = vld1q_f32(r1 + 4);
input[1][1] = vextq_f32(input[1][0], tmp, 1);
input[1][2] = vextq_f32(input[1][0], tmp, 2);
input[1][3] = vextq_f32(input[1][0], tmp, 3);
input[2][0] = vld1q_f32(r2);
tmp = vld1q_f32(r2 + 4);
input[2][1] = vextq_f32(input[2][0], tmp, 1);
input[2][2] = vextq_f32(input[2][0], tmp, 2);
input[2][3] = vextq_f32(input[2][0], tmp, 3);
input[3][0] = vld1q_f32(r3);
tmp = vld1q_f32(r3 + 4);
input[3][1] = vextq_f32(input[3][0], tmp, 1);
input[3][2] = vextq_f32(input[3][0], tmp, 2);
input[3][3] = vextq_f32(input[3][0], tmp, 3);
float32x4_t tmp1 = vdupq_n_f32(0.f);
float32x4_t tmp2 = vdupq_n_f32(0.f);
tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2);
tmp2 = vmlaq_laneq_f32(tmp2, input[0][3], k[0], 3);
tmp1 = vmlaq_laneq_f32(tmp1, input[1][0], k[1], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[1][1], k[1], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[1][2], k[1], 2);
tmp2 = vmlaq_laneq_f32(tmp2, input[1][3], k[1], 3);
tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2);
tmp2 = vmlaq_laneq_f32(tmp2, input[2][3], k[2], 3);
tmp1 = vmlaq_laneq_f32(tmp1, input[3][0], k[3], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[3][1], k[3], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[3][2], k[3], 2);
tmp2 = vmlaq_laneq_f32(tmp2, input[3][3], k[3], 3);
tmp1 = vaddq_f32(tmp1, tmp2);
vst1q_f32(outputData, tmp1);
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
outputData += 4;
}
for (int r = 0; r < remain; r++) {
float32x4_t i0 = vld1q_f32(r0);
float32x4_t i1 = vld1q_f32(r1);
float32x4_t i2 = vld1q_f32(r2);
float32x4_t i3 = vld1q_f32(r3);
*outputData = conv4x4(i0, i1, i2, i3, k[0], k[1], k[2], k[3]);
r0++;
r1++;
r2++;
r3++;
outputData++;
}
r0 += 3;
r1 += 3;
r2 += 3;
r3 += 3;
}
}
}
};
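// [Editor's note] Unlike the 3x3 kernels, the 4x4 kernels above load the
// filter rows without masking: a 4-wide filter fills all four lanes of each
// float32x4_t, so the vsetq_lane_f32 zeroing step is unnecessary and every
// lane of the multiply-accumulate contributes a real tap.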
/**
* Each step calculates four elements of the output.
*/
template <>
struct DepthwiseConvKernel<4, 2> {
static void run(const float* inputData,
const float* filterData,
int inputHeight,
int inputWidth,
int outputChannels,
int outputHeight,
int outputWidth,
int filterMultiplier,
float* outputData) {
const int steps = outputWidth >> 2;
const int remain = outputWidth & 3;
for (int c = 0; c < outputChannels; c++, filterData += 16) {
// Load the filters
float32x4_t k[4];
k[0] = vld1q_f32(filterData);
k[1] = vld1q_f32(filterData + 4);
k[2] = vld1q_f32(filterData + 8);
k[3] = vld1q_f32(filterData + 12);
const float* start =
inputData + (c / filterMultiplier) * (inputHeight * inputWidth);
float32x4_t input[4][4];
for (int h = 0; h < outputHeight; h++) {
const float* r0 = start + 2 * h * inputWidth;
const float* r1 = start + (2 * h + 1) * inputWidth;
const float* r2 = start + (2 * h + 2) * inputWidth;
const float* r3 = start + (2 * h + 3) * inputWidth;
for (int s = 0; s < steps; s++) {
// Load the inputs
float32x4x2_t data1;
float32x4x2_t data2;
data1 = vld2q_f32(r0);
data2 = vld2q_f32(r0 + 8);
input[0][0] = data1.val[0];
input[0][1] = data1.val[1];
input[0][2] = vextq_f32(data1.val[0], data2.val[0], 1);
input[0][3] = vextq_f32(data1.val[1], data2.val[1], 1);
data1 = vld2q_f32(r1);
data2 = vld2q_f32(r1 + 8);
input[1][0] = data1.val[0];
input[1][1] = data1.val[1];
input[1][2] = vextq_f32(data1.val[0], data2.val[0], 1);
input[1][3] = vextq_f32(data1.val[1], data2.val[1], 1);
data1 = vld2q_f32(r2);
data2 = vld2q_f32(r2 + 8);
input[2][0] = data1.val[0];
input[2][1] = data1.val[1];
input[2][2] = vextq_f32(data1.val[0], data2.val[0], 1);
input[2][3] = vextq_f32(data1.val[1], data2.val[1], 1);
data1 = vld2q_f32(r3);
data2 = vld2q_f32(r3 + 8);
input[3][0] = data1.val[0];
input[3][1] = data1.val[1];
input[3][2] = vextq_f32(data1.val[0], data2.val[0], 1);
input[3][3] = vextq_f32(data1.val[1], data2.val[1], 1);
float32x4_t tmp1 = vdupq_n_f32(0.f);
float32x4_t tmp2 = vdupq_n_f32(0.f);
tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2);
tmp2 = vmlaq_laneq_f32(tmp2, input[0][3], k[0], 3);
tmp1 = vmlaq_laneq_f32(tmp1, input[1][0], k[1], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[1][1], k[1], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[1][2], k[1], 2);
tmp2 = vmlaq_laneq_f32(tmp2, input[1][3], k[1], 3);
tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2);
tmp2 = vmlaq_laneq_f32(tmp2, input[2][3], k[2], 3);
tmp1 = vmlaq_laneq_f32(tmp1, input[3][0], k[3], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[3][1], k[3], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[3][2], k[3], 2);
tmp2 = vmlaq_laneq_f32(tmp2, input[3][3], k[3], 3);
tmp1 = vaddq_f32(tmp1, tmp2);
vst1q_f32(outputData, tmp1);
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
outputData += 4;
}
for (int r = 0; r < remain; r++) {
float32x4_t i0 = vld1q_f32(r0);
float32x4_t i1 = vld1q_f32(r1);
float32x4_t i2 = vld1q_f32(r2);
float32x4_t i3 = vld1q_f32(r3);
*outputData = conv4x4(i0, i1, i2, i3, k[0], k[1], k[2], k[3]);
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
outputData++;
}
}
}
}
};
template <DeviceType Device> template <DeviceType Device>
class NeonDepthwiseConvFunction : public ConvFunctionBase { class NeonDepthwiseConvFunction : public ConvFunctionBase {
public: public:
...@@ -497,16 +42,16 @@ public: ...@@ -497,16 +42,16 @@ public:
const TensorShape& filter = inputs[1].shape(); const TensorShape& filter = inputs[1].shape();
const TensorShape& output = outputs[0].shape(); const TensorShape& output = outputs[0].shape();
size_t batchSize = input[0]; int batchSize = input[0];
size_t inputChannels = input[1]; int inputChannels = input[1];
size_t inputHeight = input[2]; int inputHeight = input[2];
size_t inputWidth = input[3]; int inputWidth = input[3];
size_t filterHeight = getFilterHeight(filter); int filterHeight = getFilterHeight(filter);
size_t filterWidth = getFilterWidth(filter); int filterWidth = getFilterWidth(filter);
size_t outputChannels = output[1]; int outputChannels = output[1];
size_t outputHeight = output[2]; int outputHeight = output[2];
size_t outputWidth = output[3]; int outputWidth = output[3];
size_t filterMultiplier = outputChannels / groups_; int filterMultiplier = outputChannels / groups_;
CHECK_EQ(inputChannels, groups_); CHECK_EQ(inputChannels, groups_);
// only support strideH() == strideW() and filterHeight == filterWidth. // only support strideH() == strideW() and filterHeight == filterWidth.
...@@ -519,22 +64,19 @@ public: ...@@ -519,22 +64,19 @@ public:
// padding the input // padding the input
float* inputPadding = inputData; float* inputPadding = inputData;
int padInputHeight = inputHeight + 2 * paddingH();
int padInputWidth = inputWidth + 2 * paddingW();
if (paddingH() > 0 || paddingW() > 0) { if (paddingH() > 0 || paddingW() > 0) {
int newSize = batchSize * inputChannels * (inputHeight + 2 * paddingH()) * int newSize = batchSize * inputChannels * padInputHeight * padInputWidth;
(inputWidth + 2 * paddingW());
resizeBuffer<Device>(newSize); resizeBuffer<Device>(newSize);
inputPadding = reinterpret_cast<float*>(memory_->getBuf()); inputPadding = reinterpret_cast<float*>(memory_->getBuf());
Padding<float>::run(inputData, neon::Padding<float>::run(inputData,
inputPadding, inputPadding,
batchSize * inputChannels, batchSize * inputChannels,
inputHeight, inputHeight,
inputWidth, inputWidth,
paddingH(), padInputHeight,
paddingW()); padInputWidth);
// height and width of padding data
inputHeight += 2 * paddingH();
inputWidth += 2 * paddingW();
} }
std::function<void( std::function<void(
...@@ -542,36 +84,37 @@ public: ...@@ -542,36 +84,37 @@ public:
DepthWiseConv; DepthWiseConv;
if (filterWidth == 3 && strideW() == 1) { if (filterWidth == 3 && strideW() == 1) {
DepthWiseConv = DepthwiseConvKernel<3, 1>::run; DepthWiseConv = neon::DepthwiseConvKernel<3, 1>::run;
} else if (filterWidth == 3 && strideW() == 2) { } else if (filterWidth == 3 && strideW() == 2) {
DepthWiseConv = DepthwiseConvKernel<3, 2>::run; DepthWiseConv = neon::DepthwiseConvKernel<3, 2>::run;
} else if (filterWidth == 4 && strideW() == 1) { } else if (filterWidth == 4 && strideW() == 1) {
DepthWiseConv = DepthwiseConvKernel<4, 1>::run; DepthWiseConv = neon::DepthwiseConvKernel<4, 1>::run;
} else if (filterWidth == 4 && strideW() == 2) { } else if (filterWidth == 4 && strideW() == 2) {
DepthWiseConv = DepthwiseConvKernel<4, 2>::run; DepthWiseConv = neon::DepthwiseConvKernel<4, 2>::run;
} else { } else {
LOG(FATAL) << "Not supported"; LOG(FATAL) << "Not supported";
} }
for (size_t i = 0; i < batchSize; i++) { for (int i = 0; i < batchSize; i++) {
DepthWiseConv(inputPadding, DepthWiseConv(inputPadding,
filterData, filterData,
inputHeight, padInputHeight,
inputWidth, padInputWidth,
outputChannels, outputChannels,
outputHeight, outputHeight,
outputWidth, outputWidth,
filterMultiplier, filterMultiplier,
outputData); outputData);
inputPadding += inputChannels * inputHeight * inputWidth; inputPadding += inputChannels * padInputHeight * padInputWidth;
outputData += outputChannels * outputHeight * outputWidth; outputData += outputChannels * outputHeight * outputWidth;
} }
} }
}; };
#ifndef PADDLE_TYPE_DOUBLE
REGISTER_TYPED_FUNC(NeonDepthwiseConv, CPU, NeonDepthwiseConvFunction); REGISTER_TYPED_FUNC(NeonDepthwiseConv, CPU, NeonDepthwiseConvFunction);
#endif
#endif #endif
} // namespace neon
} // namespace paddle } // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string.h>
#include "neon_util.h"
namespace paddle {
namespace neon {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
template <int filterSize, int stride>
struct DepthwiseConvKernel {};
inline float32_t conv3x3(float32x4_t r0,
float32x4_t r1,
float32x4_t r2,
float32x4_t k0,
float32x4_t k1,
float32x4_t k2) {
float32x4_t tmp;
tmp = vmulq_f32(r0, k0);
tmp = vmlaq_f32(tmp, r1, k1);
tmp = vmlaq_f32(tmp, r2, k2);
return vaddvq_f32(tmp);
}
inline float32_t conv4x4(float32x4_t r0,
float32x4_t r1,
float32x4_t r2,
float32x4_t r3,
float32x4_t k0,
float32x4_t k1,
float32x4_t k2,
float32x4_t k3) {
float32x4_t tmp;
tmp = vmulq_f32(r0, k0);
tmp = vmlaq_f32(tmp, r1, k1);
tmp = vmlaq_f32(tmp, r2, k2);
tmp = vmlaq_f32(tmp, r3, k3);
return vaddvq_f32(tmp);
}
/**
* Each step calculates four elements of the output.
* First step:
* R0[0, 1, 2, 3...] * K[0][0]
* R0[1, 2, 3, 4...] * K[0][1]
* R0[2, 3, 4, 5...] * K[0][2]
* R1[0, 1, 2, 3...] * K[1][0]
* R1[1, 2, 3, 4...] * K[1][1]
* R1[2, 3, 4, 5...] * K[1][2]
* R2[0, 1, 2, 3...] * K[2][0]
* R2[1, 2, 3, 4...] * K[2][1]
* + R2[2, 3, 4, 5...] * K[2][2]
* ------------------------------
* Output[0, 1, 2, 3]
*/
template <>
struct DepthwiseConvKernel<3, 1> {
static void run(const float* inputData,
const float* filterData,
int inputHeight,
int inputWidth,
int outputChannels,
int outputHeight,
int outputWidth,
int filterMultiplier,
float* outputData) {
const int steps = outputWidth >> 2;
const int remain = outputWidth & 3;
for (int c = 0; c < outputChannels; c++, filterData += 9) {
// Load the filters
float32x4_t k[3];
k[0] = vld1q_f32(filterData);
k[1] = vld1q_f32(filterData + 3);
k[2] = vld1q_f32(filterData + 6);
k[0] = vsetq_lane_f32(0.f, k[0], 3);
k[1] = vsetq_lane_f32(0.f, k[1], 3);
k[2] = vsetq_lane_f32(0.f, k[2], 3);
const float* r0 =
inputData + (c / filterMultiplier) * (inputHeight * inputWidth);
const float* r1 = r0 + inputWidth;
const float* r2 = r0 + inputWidth * 2;
float32x4_t input[3][3];
for (int h = 0; h < outputHeight; h++) {
for (int s = 0; s < steps; s++) {
// Load the inputs
float32x4_t tmp;
input[0][0] = vld1q_f32(r0);
tmp = vld1q_f32(r0 + 4);
input[0][1] = vextq_f32(input[0][0], tmp, 1);
input[0][2] = vextq_f32(input[0][0], tmp, 2);
input[1][0] = vld1q_f32(r1);
tmp = vld1q_f32(r1 + 4);
input[1][1] = vextq_f32(input[1][0], tmp, 1);
input[1][2] = vextq_f32(input[1][0], tmp, 2);
input[2][0] = vld1q_f32(r2);
tmp = vld1q_f32(r2 + 4);
input[2][1] = vextq_f32(input[2][0], tmp, 1);
input[2][2] = vextq_f32(input[2][0], tmp, 2);
float32x4_t tmp1 = vdupq_n_f32(0.f);
float32x4_t tmp2 = vdupq_n_f32(0.f);
tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2);
tmp2 = vmlaq_laneq_f32(tmp2, input[1][0], k[1], 0);
tmp1 = vmlaq_laneq_f32(tmp1, input[1][1], k[1], 1);
tmp2 = vmlaq_laneq_f32(tmp2, input[1][2], k[1], 2);
tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2);
tmp1 = vaddq_f32(tmp1, tmp2);
vst1q_f32(outputData, tmp1);
r0 += 4;
r1 += 4;
r2 += 4;
outputData += 4;
}
for (int r = 0; r < remain; r++) {
float32x4_t i0 = vld1q_f32(r0);
float32x4_t i1 = vld1q_f32(r1);
float32x4_t i2 = vld1q_f32(r2);
*outputData = conv3x3(i0, i1, i2, k[0], k[1], k[2]);
r0++;
r1++;
r2++;
outputData++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
};
/**
* Each step calculates four elements of the output.
* First step:
* R0[0, 2, 4, 6...] * K[0][0]
* R0[1, 3, 5, 7...] * K[0][1]
* R0[2, 4, 6, 8...] * K[0][2]
* R1[0, 2, 4, 6...] * K[1][0]
* R1[1, 3, 5, 7...] * K[1][1]
* R1[2, 4, 6, 8...] * K[1][2]
* R2[0, 2, 4, 6...] * K[2][0]
* R2[1, 3, 5, 7...] * K[2][1]
* R2[2, 4, 6, 8...] * K[2][2]
* ------------------------------
* Output[0, 1, 2, 3]
*/
template <>
struct DepthwiseConvKernel<3, 2> {
static void run(const float* inputData,
const float* filterData,
int inputHeight,
int inputWidth,
int outputChannels,
int outputHeight,
int outputWidth,
int filterMultiplier,
float* outputData) {
const int steps = outputWidth >> 2;
const int remain = outputWidth & 3;
for (int c = 0; c < outputChannels; c++, filterData += 9) {
// Load the filters
float32x4_t k[3];
k[0] = vld1q_f32(filterData);
k[1] = vld1q_f32(filterData + 3);
k[2] = vld1q_f32(filterData + 6);
k[0] = vsetq_lane_f32(0.f, k[0], 3);
k[1] = vsetq_lane_f32(0.f, k[1], 3);
k[2] = vsetq_lane_f32(0.f, k[2], 3);
const float* start =
inputData + (c / filterMultiplier) * (inputHeight * inputWidth);
float32x4_t input[3][3];
for (int h = 0; h < outputHeight; h++) {
const float* r0 = start + 2 * h * inputWidth;
const float* r1 = start + (2 * h + 1) * inputWidth;
const float* r2 = start + (2 * h + 2) * inputWidth;
for (int s = 0; s < steps; s++) {
// Load the inputs
float32x4_t data1;
float32x4x2_t data2;
data2 = vld2q_f32(r0);
input[0][0] = data2.val[0];
input[0][1] = data2.val[1];
data1 = vld1q_f32(r0 + 8);
input[0][2] = vextq_f32(data2.val[0], data1, 1);
data2 = vld2q_f32(r1);
input[1][0] = data2.val[0];
input[1][1] = data2.val[1];
data1 = vld1q_f32(r1 + 8);
input[1][2] = vextq_f32(data2.val[0], data1, 1);
data2 = vld2q_f32(r2);
input[2][0] = data2.val[0];
input[2][1] = data2.val[1];
data1 = vld1q_f32(r2 + 8);
input[2][2] = vextq_f32(data2.val[0], data1, 1);
float32x4_t tmp1 = vdupq_n_f32(0.f);
float32x4_t tmp2 = vdupq_n_f32(0.f);
tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2);
tmp2 = vmlaq_laneq_f32(tmp2, input[1][0], k[1], 0);
tmp1 = vmlaq_laneq_f32(tmp1, input[1][1], k[1], 1);
tmp2 = vmlaq_laneq_f32(tmp2, input[1][2], k[1], 2);
tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2);
tmp1 = vaddq_f32(tmp1, tmp2);
vst1q_f32(outputData, tmp1);
r0 += 8;
r1 += 8;
r2 += 8;
outputData += 4;
}
for (int r = 0; r < remain; r++) {
float32x4_t i0 = vld1q_f32(r0);
float32x4_t i1 = vld1q_f32(r1);
float32x4_t i2 = vld1q_f32(r2);
*outputData = conv3x3(i0, i1, i2, k[0], k[1], k[2]);
r0 += 2;
r1 += 2;
r2 += 2;
outputData++;
}
}
}
}
};
/**
* Each step calculates four elements of the output.
*/
template <>
struct DepthwiseConvKernel<4, 1> {
static void run(const float* inputData,
const float* filterData,
int inputHeight,
int inputWidth,
int outputChannels,
int outputHeight,
int outputWidth,
int filterMultiplier,
float* outputData) {
const int steps = outputWidth >> 2;
const int remain = outputWidth & 3;
for (int c = 0; c < outputChannels; c++, filterData += 16) {
// Load the filters
float32x4_t k[4];
k[0] = vld1q_f32(filterData);
k[1] = vld1q_f32(filterData + 4);
k[2] = vld1q_f32(filterData + 8);
k[3] = vld1q_f32(filterData + 12);
const float* r0 =
inputData + (c / filterMultiplier) * (inputHeight * inputWidth);
const float* r1 = r0 + inputWidth;
const float* r2 = r0 + inputWidth * 2;
const float* r3 = r0 + inputWidth * 3;
float32x4_t input[4][4];
for (int h = 0; h < outputHeight; h++) {
for (int s = 0; s < steps; s++) {
// Load the inputs
float32x4_t tmp;
input[0][0] = vld1q_f32(r0);
tmp = vld1q_f32(r0 + 4);
input[0][1] = vextq_f32(input[0][0], tmp, 1);
input[0][2] = vextq_f32(input[0][0], tmp, 2);
input[0][3] = vextq_f32(input[0][0], tmp, 3);
input[1][0] = vld1q_f32(r1);
tmp = vld1q_f32(r1 + 4);
input[1][1] = vextq_f32(input[1][0], tmp, 1);
input[1][2] = vextq_f32(input[1][0], tmp, 2);
input[1][3] = vextq_f32(input[1][0], tmp, 3);
input[2][0] = vld1q_f32(r2);
tmp = vld1q_f32(r2 + 4);
input[2][1] = vextq_f32(input[2][0], tmp, 1);
input[2][2] = vextq_f32(input[2][0], tmp, 2);
input[2][3] = vextq_f32(input[2][0], tmp, 3);
input[3][0] = vld1q_f32(r3);
tmp = vld1q_f32(r3 + 4);
input[3][1] = vextq_f32(input[3][0], tmp, 1);
input[3][2] = vextq_f32(input[3][0], tmp, 2);
input[3][3] = vextq_f32(input[3][0], tmp, 3);
float32x4_t tmp1 = vdupq_n_f32(0.f);
float32x4_t tmp2 = vdupq_n_f32(0.f);
tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2);
tmp2 = vmlaq_laneq_f32(tmp2, input[0][3], k[0], 3);
tmp1 = vmlaq_laneq_f32(tmp1, input[1][0], k[1], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[1][1], k[1], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[1][2], k[1], 2);
tmp2 = vmlaq_laneq_f32(tmp2, input[1][3], k[1], 3);
tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2);
tmp2 = vmlaq_laneq_f32(tmp2, input[2][3], k[2], 3);
tmp1 = vmlaq_laneq_f32(tmp1, input[3][0], k[3], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[3][1], k[3], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[3][2], k[3], 2);
tmp2 = vmlaq_laneq_f32(tmp2, input[3][3], k[3], 3);
tmp1 = vaddq_f32(tmp1, tmp2);
vst1q_f32(outputData, tmp1);
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
outputData += 4;
}
for (int r = 0; r < remain; r++) {
float32x4_t i0 = vld1q_f32(r0);
float32x4_t i1 = vld1q_f32(r1);
float32x4_t i2 = vld1q_f32(r2);
float32x4_t i3 = vld1q_f32(r3);
*outputData = conv4x4(i0, i1, i2, i3, k[0], k[1], k[2], k[3]);
r0++;
r1++;
r2++;
r3++;
outputData++;
}
r0 += 3;
r1 += 3;
r2 += 3;
r3 += 3;
}
}
}
};
/**
* Each step calculates four elements of the output.
*/
template <>
struct DepthwiseConvKernel<4, 2> {
static void run(const float* inputData,
const float* filterData,
int inputHeight,
int inputWidth,
int outputChannels,
int outputHeight,
int outputWidth,
int filterMultiplier,
float* outputData) {
const int steps = outputWidth >> 2;
const int remain = outputWidth & 3;
for (int c = 0; c < outputChannels; c++, filterData += 16) {
// Load the filters
float32x4_t k[4];
k[0] = vld1q_f32(filterData);
k[1] = vld1q_f32(filterData + 4);
k[2] = vld1q_f32(filterData + 8);
k[3] = vld1q_f32(filterData + 12);
const float* start =
inputData + (c / filterMultiplier) * (inputHeight * inputWidth);
float32x4_t input[4][4];
for (int h = 0; h < outputHeight; h++) {
const float* r0 = start + 2 * h * inputWidth;
const float* r1 = start + (2 * h + 1) * inputWidth;
const float* r2 = start + (2 * h + 2) * inputWidth;
const float* r3 = start + (2 * h + 3) * inputWidth;
for (int s = 0; s < steps; s++) {
// Load the inputs
float32x4x2_t data1;
float32x4x2_t data2;
data1 = vld2q_f32(r0);
data2 = vld2q_f32(r0 + 8);
input[0][0] = data1.val[0];
input[0][1] = data1.val[1];
input[0][2] = vextq_f32(data1.val[0], data2.val[0], 1);
input[0][3] = vextq_f32(data1.val[1], data2.val[1], 1);
data1 = vld2q_f32(r1);
data2 = vld2q_f32(r1 + 8);
input[1][0] = data1.val[0];
input[1][1] = data1.val[1];
input[1][2] = vextq_f32(data1.val[0], data2.val[0], 1);
input[1][3] = vextq_f32(data1.val[1], data2.val[1], 1);
data1 = vld2q_f32(r2);
data2 = vld2q_f32(r2 + 8);
input[2][0] = data1.val[0];
input[2][1] = data1.val[1];
input[2][2] = vextq_f32(data1.val[0], data2.val[0], 1);
input[2][3] = vextq_f32(data1.val[1], data2.val[1], 1);
data1 = vld2q_f32(r3);
data2 = vld2q_f32(r3 + 8);
input[3][0] = data1.val[0];
input[3][1] = data1.val[1];
input[3][2] = vextq_f32(data1.val[0], data2.val[0], 1);
input[3][3] = vextq_f32(data1.val[1], data2.val[1], 1);
float32x4_t tmp1 = vdupq_n_f32(0.f);
float32x4_t tmp2 = vdupq_n_f32(0.f);
tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2);
tmp2 = vmlaq_laneq_f32(tmp2, input[0][3], k[0], 3);
tmp1 = vmlaq_laneq_f32(tmp1, input[1][0], k[1], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[1][1], k[1], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[1][2], k[1], 2);
tmp2 = vmlaq_laneq_f32(tmp2, input[1][3], k[1], 3);
tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2);
tmp2 = vmlaq_laneq_f32(tmp2, input[2][3], k[2], 3);
tmp1 = vmlaq_laneq_f32(tmp1, input[3][0], k[3], 0);
tmp2 = vmlaq_laneq_f32(tmp2, input[3][1], k[3], 1);
tmp1 = vmlaq_laneq_f32(tmp1, input[3][2], k[3], 2);
tmp2 = vmlaq_laneq_f32(tmp2, input[3][3], k[3], 3);
tmp1 = vaddq_f32(tmp1, tmp2);
vst1q_f32(outputData, tmp1);
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
outputData += 4;
}
for (int r = 0; r < remain; r++) {
float32x4_t i0 = vld1q_f32(r0);
float32x4_t i1 = vld1q_f32(r1);
float32x4_t i2 = vld1q_f32(r2);
float32x4_t i3 = vld1q_f32(r3);
*outputData = conv4x4(i0, i1, i2, i3, k[0], k[1], k[2], k[3]);
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
outputData++;
}
}
}
}
};
template <class T>
struct Padding {
static void run(const T* input,
T* inputPadding,
int channels,
int inputHeight,
int inputWidth,
int padInputHeight,
int padInputWidth) {
const int paddingHeight = (padInputHeight - inputHeight) / 2;
const int paddingWidth = (padInputWidth - inputWidth) / 2;
for (int c = 0; c < channels; c++) {
if (paddingHeight > 0) {
memset(inputPadding, 0, padInputWidth * paddingHeight * sizeof(T));
inputPadding += padInputWidth * paddingHeight;
}
for (int i = 0; i < inputHeight; i++) {
// padding head
for (int j = 0; j < paddingWidth; j++) {
*inputPadding++ = T(0);
}
memcpy(inputPadding, input, inputWidth * sizeof(T));
inputPadding += inputWidth;
input += inputWidth;
// padding tail
for (int j = 0; j < paddingWidth; j++) {
*inputPadding++ = T(0);
}
}
if (paddingHeight > 0) {
memset(inputPadding, 0, padInputWidth * paddingHeight * sizeof(T));
inputPadding += padInputWidth * paddingHeight;
}
}
}
};
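// [Editor's note] An illustrative usage sketch for Padding<T>::run, added by
// the editor. The caller owns the destination buffer and must size it with
// the padded geometry; the concrete numbers below are made-up example values,
// not defaults taken from the layer configuration.
inline void paddingUsageExample(const float* input) {
  const int channels = 3;
  const int inputHeight = 32, inputWidth = 32;
  const int padInputHeight = inputHeight + 2 * 1;  // one zero row top/bottom
  const int padInputWidth = inputWidth + 2 * 1;    // one zero column per side
  static float padded[3 * 34 * 34];                // channels * padH * padW
  Padding<float>::run(input,
                      padded,
                      channels,
                      inputHeight,
                      inputWidth,
                      padInputHeight,
                      padInputWidth);
}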
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
template <>
struct Padding<float> {
static void run(const float* input,
float* inputPadding,
int channels,
int inputHeight,
int inputWidth,
int padInputHeight,
int padInputWidth) {
const int paddingHeight = (padInputHeight - inputHeight) / 2;
const int paddingWidth = (padInputWidth - inputWidth) / 2;
for (int c = 0; c < channels; c++) {
if (paddingHeight > 0) {
memset(inputPadding, 0, padInputWidth * paddingHeight * sizeof(float));
inputPadding += padInputWidth * paddingHeight;
}
for (int i = 0; i < inputHeight; i++) {
// padding head
for (int j = 0; j < paddingWidth; j++) {
*inputPadding++ = float(0);
}
int step = inputWidth >> 2;
int remain = inputWidth & 3;
for (int s = 0; s < step; s++) {
float32x4_t s0 = vld1q_f32(input);
vst1q_f32(inputPadding, s0);
input += 4;
inputPadding += 4;
}
for (int r = 0; r < remain; r++) {
*inputPadding++ = *input++;
}
// padding tail
for (int j = 0; j < paddingWidth; j++) {
*inputPadding++ = float(0);
}
}
if (paddingHeight > 0) {
memset(inputPadding, 0, padInputWidth * paddingHeight * sizeof(float));
inputPadding += padInputWidth * paddingHeight;
}
}
}
};
// Padding for stride == 2: interleaves a zero between neighbouring input
// elements (used by the transposed convolution).
struct StridePadding {
static void run(const float* input,
float* inputPadding,
int channels,
int inputHeight,
int inputWidth,
int padInputHeight,
int padInputWidth) {
const int paddingHeight = (padInputHeight - (inputHeight * 2 - 1)) / 2;
const int paddingWidth = (padInputWidth - (inputWidth * 2 - 1)) / 2;
for (int c = 0; c < channels; c++) {
if (paddingHeight > 0) {
memset(inputPadding, 0, padInputWidth * paddingHeight * sizeof(float));
inputPadding += padInputWidth * paddingHeight;
}
for (int i = 0; i < inputHeight; i++) {
// padding head
for (int j = 0; j < paddingWidth; j++) {
*inputPadding++ = float(0);
}
int step = inputWidth >> 2;
int remain = inputWidth & 3;
float32x4_t s1 = vdupq_n_f32(0.f);
for (int s = 0; s < step; s++) {
float32x4_t s0 = vld1q_f32(input);
float32x4x2_t v = {s0, s1};
vst2q_f32(inputPadding, v);
input += 4;
inputPadding += 8;
}
for (int r = 0; r < remain; r++) {
*inputPadding++ = *input++;
*inputPadding++ = float(0);
}
inputPadding--;
// padding tail
for (int j = 0; j < paddingWidth; j++) {
*inputPadding++ = float(0);
}
if (i != inputHeight - 1) {
memset(inputPadding, 0, padInputWidth * sizeof(float));
inputPadding += padInputWidth;
}
}
if (paddingHeight > 0) {
memset(inputPadding, 0, padInputWidth * paddingHeight * sizeof(float));
inputPadding += padInputWidth * paddingHeight;
}
}
}
};
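// [Editor's note] A worked example of the StridePadding layout above, added
// for clarity; all numbers are illustrative. Take one channel of a 2 x 3
// input feeding a 3x3 transposed convolution with stride 2 and no extra
// padding, so padInputHeight = (2 - 1) * 2 + 2 * 3 - 1 = 7 and
// padInputWidth = (3 - 1) * 2 + 2 * 3 - 1 = 9, hence
// paddingHeight = paddingWidth = 2:
//
//   input           padded input
//   a b c           0 0 0 0 0 0 0 0 0
//   d e f           0 0 0 0 0 0 0 0 0
//                   0 0 a 0 b 0 c 0 0
//                   0 0 0 0 0 0 0 0 0
//                   0 0 d 0 e 0 f 0 0
//                   0 0 0 0 0 0 0 0 0
//                   0 0 0 0 0 0 0 0 0
//
// One zero is interleaved between neighbouring input elements in both
// directions and a zero border is added, so running the ordinary stride-1
// depthwise kernel over this buffer realises the stride-2 transposed
// convolution.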
#endif
#endif
} // namespace neon
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "NeonDepthwiseConv.h"
#include "paddle/function/ConvOp.h"
namespace paddle {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
template <DeviceType Device>
class NeonDepthwiseConvTransposeFunction : public ConvFunctionBase {
public:
void init(const FuncConfig& config) override {
ConvFunctionBase::init(config);
}
void check(const BufferArgs& inputs, const BufferArgs& outputs) override {
const TensorShape& input = inputs[0].shape();
const TensorShape& filter = inputs[1].shape();
const TensorShape& output = outputs[0].shape();
checkShape(input, filter, output);
}
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
CHECK_EQ(numInputs_, inputs.size());
CHECK_EQ(numOutputs_, outputs.size());
check(inputs, outputs);
const TensorShape& input = inputs[0].shape();
const TensorShape& filter = inputs[1].shape();
const TensorShape& output = outputs[0].shape();
int batchSize = input[0];
int inputChannels = input[1];
int inputHeight = input[2];
int inputWidth = input[3];
int filterHeight = getFilterHeight(filter);
int filterWidth = getFilterWidth(filter);
int outputChannels = output[1];
int outputHeight = output[2];
int outputWidth = output[3];
int filterMultiplier = outputChannels / groups_;
CHECK_EQ(inputChannels, groups_);
  // Only strideH() == strideW() and filterHeight == filterWidth are supported.
CHECK_EQ(strideH(), strideW());
CHECK_EQ(paddingH(), paddingW());
CHECK_EQ(filterHeight, filterWidth);
float* inputData = inputs[0].data<float>();
float* filterData = inputs[1].data<float>();
float* outputData = outputs[0].data<float>();
// padding the input, input -> inputPadding
float* inputPadding = inputData;
int padInputHeight =
(inputHeight - 1) * strideH() + 2 * filterHeight - 1 - 2 * paddingH();
int padInputWidth =
(inputWidth - 1) * strideW() + 2 * filterWidth - 1 - 2 * paddingW();
if (padInputHeight > inputHeight || padInputWidth > inputWidth) {
int newSize = batchSize * inputChannels * padInputHeight * padInputWidth;
resizeBuffer<Device>(newSize);
inputPadding = reinterpret_cast<float*>(memory_->getBuf());
if (strideH() == 1) {
neon::Padding<float>::run(inputData,
inputPadding,
batchSize * inputChannels,
inputHeight,
inputWidth,
padInputHeight,
padInputWidth);
} else if (strideH() == 2) {
neon::StridePadding::run(inputData,
inputPadding,
batchSize * inputChannels,
inputHeight,
inputWidth,
padInputHeight,
padInputWidth);
} else {
LOG(FATAL) << "Not supported";
}
}
std::function<void(
const float*, const float*, int, int, int, int, int, int, float*)>
DepthWiseConv;
if (filterWidth == 3) {
DepthWiseConv = neon::DepthwiseConvKernel<3, 1>::run;
} else if (filterWidth == 4) {
DepthWiseConv = neon::DepthwiseConvKernel<4, 1>::run;
} else {
LOG(FATAL) << "Not supported";
}
for (int i = 0; i < batchSize; i++) {
DepthWiseConv(inputPadding,
filterData,
padInputHeight,
padInputWidth,
outputChannels,
outputHeight,
outputWidth,
filterMultiplier,
outputData);
inputPadding += inputChannels * padInputHeight * padInputWidth;
outputData += outputChannels * outputHeight * outputWidth;
}
}
};
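// [Editor's note] A small editor-added sanity check of the padded-input
// geometry used in calc() above; the helper names and example values are
// assumptions made for illustration. A transposed convolution produces
// outputHeight = (inputHeight - 1) * strideH() + filterHeight - 2 * paddingH(),
// and the stride-1 kernel run over the padded buffer needs
// padInputHeight = outputHeight + filterHeight - 1, which is exactly the
// expression computed above.
namespace {
constexpr int transposedOutputSize(int in, int stride, int filter, int pad) {
  return (in - 1) * stride + filter - 2 * pad;
}
constexpr int paddedInputSize(int in, int stride, int filter, int pad) {
  return (in - 1) * stride + 2 * filter - 1 - 2 * pad;
}
// e.g. input 5, stride 2, filter 3, padding 1 -> output 9, padded input 11.
static_assert(paddedInputSize(5, 2, 3, 1) ==
                  transposedOutputSize(5, 2, 3, 1) + 3 - 1,
              "padded input must cover output + filter - 1");
}  // namespace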
#ifndef PADDLE_TYPE_DOUBLE
REGISTER_TYPED_FUNC(NeonDepthwiseConvTranspose,
CPU,
NeonDepthwiseConvTransposeFunction);
#endif
#endif
} // namespace paddle
...@@ -33,12 +33,8 @@ inline float32_t vaddvq_f32(float32x4_t a) { ...@@ -33,12 +33,8 @@ inline float32_t vaddvq_f32(float32x4_t a) {
return vget_lane_f32(vpadd_f32(v, v), 0); return vget_lane_f32(vpadd_f32(v, v), 0);
} }
inline float32x4_t vmlaq_laneq_f32(float32x4_t a, #define vmlaq_laneq_f32(a, b, v, lane) \
float32x4_t b, vmlaq_n_f32(a, b, vgetq_lane_f32(v, lane))
float32x4_t v,
const int lane) {
return vmlaq_n_f32(a, b, vgetq_lane_f32(v, lane));
}
#endif #endif
} // namespace neon } // namespace neon
......
...@@ -62,14 +62,18 @@ void BatchNormBaseLayer::calFeatureMapSize() { ...@@ -62,14 +62,18 @@ void BatchNormBaseLayer::calFeatureMapSize() {
const ImageConfig& conf = config_.inputs(0).image_conf(); const ImageConfig& conf = config_.inputs(0).image_conf();
imageH_ = inputLayers_[0]->getOutput().getFrameHeight(); imageH_ = inputLayers_[0]->getOutput().getFrameHeight();
imageW_ = inputLayers_[0]->getOutput().getFrameWidth(); imageW_ = inputLayers_[0]->getOutput().getFrameWidth();
imageD_ = inputLayers_[0]->getOutput().getFrameDepth();
if (0 == imageD_) imageD_ = conf.img_size_z();
if (imageH_ == 0 && imageW_ == 0) { if (imageH_ == 0 && imageW_ == 0) {
imageH_ = conf.has_img_size_y() ? conf.img_size_y() : conf.img_size(); imageH_ = conf.has_img_size_y() ? conf.img_size_y() : conf.img_size();
imageW_ = conf.img_size(); imageW_ = conf.img_size();
} else { } else {
getOutput().setFrameHeight(imageH_); getOutput().setFrameHeight(imageH_);
getOutput().setFrameWidth(imageW_); getOutput().setFrameWidth(imageW_);
getOutput().setFrameDepth(imageD_);
} }
imgPixels_ = imageH_ * imageW_; imgPixels_ = imageH_ * imageW_ * imageD_;
} }
} // namespace paddle } // namespace paddle
...@@ -80,6 +80,7 @@ protected: ...@@ -80,6 +80,7 @@ protected:
/// Height or width of input image feature. /// Height or width of input image feature.
/// Both of them are 1 if the input is fully-connected layer. /// Both of them are 1 if the input is fully-connected layer.
int imageD_;
int imageH_; int imageH_;
int imageW_; int imageW_;
/// Height * Width. /// Height * Width.
......
...@@ -83,8 +83,8 @@ void Conv3DLayer::forward(PassType passType) { ...@@ -83,8 +83,8 @@ void Conv3DLayer::forward(PassType passType) {
int outWidth = getSize(); int outWidth = getSize();
resetOutput(batchSize, outWidth); resetOutput(batchSize, outWidth);
REGISTER_TIMER_INFO("FwdConv3D", getName().c_str());
for (size_t i = 0; i != inputLayers_.size(); ++i) { for (size_t i = 0; i != inputLayers_.size(); ++i) {
REGISTER_TIMER_INFO("FwdConv3D", getName().c_str());
const MatrixPtr &inMat = getInputValue(i); const MatrixPtr &inMat = getInputValue(i);
const MatrixPtr &outMat = getOutputValue(); const MatrixPtr &outMat = getOutputValue();
int M = M_[i]; int M = M_[i];
...@@ -120,7 +120,6 @@ void Conv3DLayer::forward(PassType passType) { ...@@ -120,7 +120,6 @@ void Conv3DLayer::forward(PassType passType) {
} }
} }
if (nullptr != this->biasParameter_) { if (nullptr != this->biasParameter_) {
REGISTER_TIMER_INFO("FwBiasTimer", getName().c_str());
this->addBias(); this->addBias();
} }
forwardActivation(); forwardActivation();
...@@ -134,15 +133,14 @@ void Conv3DLayer::backward(const UpdateCallback &callback) { ...@@ -134,15 +133,14 @@ void Conv3DLayer::backward(const UpdateCallback &callback) {
biases_->getParameterPtr()->incUpdate(callback); biases_->getParameterPtr()->incUpdate(callback);
} }
REGISTER_TIMER_INFO("BwdConv3D", getName().c_str());
for (size_t i = 0; i != inputLayers_.size(); ++i) { for (size_t i = 0; i != inputLayers_.size(); ++i) {
REGISTER_TIMER_INFO("BwdConv3D", getName().c_str());
if (weights_[i]->getWGrad()) { if (weights_[i]->getWGrad()) {
bpropWeights(i); bpropWeights(i);
} }
if (getInputGrad(i)) { if (getInputGrad(i)) {
bpropData(i); bpropData(i);
} }
REGISTER_TIMER_INFO("WeightUpdate", getName().c_str());
weights_[i]->getParameterPtr()->incUpdate(callback); weights_[i]->getParameterPtr()->incUpdate(callback);
} }
} }
......
...@@ -37,7 +37,7 @@ bool CudnnBatchNormLayer::init(const LayerMap& layerMap, ...@@ -37,7 +37,7 @@ bool CudnnBatchNormLayer::init(const LayerMap& layerMap,
} }
void CudnnBatchNormLayer::reshape(int batchSize) { void CudnnBatchNormLayer::reshape(int batchSize) {
hl_tensor_reshape(ioDesc_, batchSize, channels_, imageH_, imageW_); hl_tensor_reshape(ioDesc_, batchSize, channels_, imageH_ * imageD_, imageW_);
} }
void CudnnBatchNormLayer::forward(PassType passType) { void CudnnBatchNormLayer::forward(PassType passType) {
...@@ -104,7 +104,7 @@ void CudnnBatchNormLayer::forward(PassType passType) { ...@@ -104,7 +104,7 @@ void CudnnBatchNormLayer::forward(PassType passType) {
EPS, EPS,
batchSize, batchSize,
channels_, channels_,
imageH_, imageH_ * imageD_,
imageW_); imageW_);
} }
} }
......
...@@ -53,27 +53,27 @@ bool DeConv3DLayer::init(const LayerMap &layerMap, ...@@ -53,27 +53,27 @@ bool DeConv3DLayer::init(const LayerMap &layerMap,
size_t DeConv3DLayer::getSize() { size_t DeConv3DLayer::getSize() {
CHECK_NE(inputLayers_.size(), 0UL); CHECK_NE(inputLayers_.size(), 0UL);
outputH_.clear(); imgSizeW_.clear();
outputW_.clear(); imgSizeH_.clear();
outputD_.clear(); imgSizeD_.clear();
N_.clear(); N_.clear();
NOut_.clear(); NOut_.clear();
size_t layerSize = 0; size_t layerSize = 0;
for (size_t i = 0; i < inputLayers_.size(); ++i) { for (size_t i = 0; i < inputLayers_.size(); ++i) {
outputW_.push_back( imgSizeW_.push_back(
imageSize(imgSizeW_[i], filterSize_[i], padding_[i], stride_[i], true)); imageSize(outputW_[i], filterSize_[i], padding_[i], stride_[i], true));
outputH_.push_back(imageSize( imgSizeH_.push_back(imageSize(
imgSizeH_[i], filterSizeY_[i], paddingY_[i], strideY_[i], true)); outputH_[i], filterSizeY_[i], paddingY_[i], strideY_[i], true));
outputD_.push_back(imageSize( imgSizeD_.push_back(imageSize(
imgSizeD_[i], filterSizeZ_[i], paddingZ_[i], strideZ_[i], true)); outputD_[i], filterSizeZ_[i], paddingZ_[i], strideZ_[i], true));
NOut_.push_back(outputD_[i] * outputH_[i] * outputW_[i]); NOut_.push_back(imgSizeD_[i] * imgSizeH_[i] * imgSizeW_[i]);
N_.push_back(imgSizeD_[i] * imgSizeH_[i] * imgSizeW_[i]); N_.push_back(outputD_[i] * outputH_[i] * outputW_[i]);
CHECK(layerSize == 0 || N_[i] * size_t(numFilters_) == layerSize); CHECK(layerSize == 0 || N_[i] * size_t(numFilters_) == layerSize);
layerSize += NOut_[i] * numFilters_; layerSize += NOut_[i] * numFilters_;
} }
getOutput().setFrameHeight(outputH_[0]); getOutput().setFrameHeight(imgSizeH_[0]);
getOutput().setFrameWidth(outputW_[0]); getOutput().setFrameWidth(imgSizeW_[0]);
getOutput().setFrameDepth(outputD_[0]); getOutput().setFrameDepth(imgSizeD_[0]);
return layerSize; return layerSize;
} }
...@@ -84,8 +84,8 @@ void DeConv3DLayer::forward(PassType passType) { ...@@ -84,8 +84,8 @@ void DeConv3DLayer::forward(PassType passType) {
resetOutput(batchSize, outWidth); resetOutput(batchSize, outWidth);
const MatrixPtr outMat = getOutputValue(); const MatrixPtr outMat = getOutputValue();
REGISTER_TIMER_INFO("FwdDeConv3D", getName().c_str());
for (size_t i = 0; i != inputLayers_.size(); ++i) { for (size_t i = 0; i != inputLayers_.size(); ++i) {
REGISTER_TIMER_INFO("FwdDeConv3D", getName().c_str());
const MatrixPtr &inMat = getInputValue(i); const MatrixPtr &inMat = getInputValue(i);
int M = M_[i]; int M = M_[i];
int N = N_[i]; int N = N_[i];
...@@ -103,9 +103,9 @@ void DeConv3DLayer::forward(PassType passType) { ...@@ -103,9 +103,9 @@ void DeConv3DLayer::forward(PassType passType) {
} }
colBuf_->col2Vol(outMat->getData() + n * outMat->getStride(), colBuf_->col2Vol(outMat->getData() + n * outMat->getStride(),
numFilters_, numFilters_,
outputD_[i], imgSizeD_[i],
outputH_[i], imgSizeH_[i],
outputW_[i], imgSizeW_[i],
filterSizeZ_[i], filterSizeZ_[i],
filterSizeY_[i], filterSizeY_[i],
filterSize_[i], filterSize_[i],
...@@ -120,7 +120,6 @@ void DeConv3DLayer::forward(PassType passType) { ...@@ -120,7 +120,6 @@ void DeConv3DLayer::forward(PassType passType) {
} }
} }
if (nullptr != this->biasParameter_) { if (nullptr != this->biasParameter_) {
REGISTER_TIMER_INFO("FwBiasTimer", getName().c_str());
this->addBias(); this->addBias();
} }
forwardActivation(); forwardActivation();
...@@ -133,21 +132,21 @@ void DeConv3DLayer::backward(const UpdateCallback &callback) { ...@@ -133,21 +132,21 @@ void DeConv3DLayer::backward(const UpdateCallback &callback) {
bpropBiases(); bpropBiases();
biases_->getParameterPtr()->incUpdate(callback); biases_->getParameterPtr()->incUpdate(callback);
} }
REGISTER_TIMER_INFO("BwdDeConv3D", getName().c_str());
for (size_t i = 0; i < inputLayers_.size(); ++i) { for (size_t i = 0; i < inputLayers_.size(); ++i) {
if (weights_[i]->getWGrad() || this->needGradient_) { if (weights_[i]->getWGrad() || this->needGradient_) {
int M = M_[i]; int M = M_[i];
int N = N_[i]; int N = N_[i];
int K = K_[i]; int K = K_[i];
REGISTER_TIMER_INFO("BwdDeConv3D", getName().c_str());
Matrix::resizeOrCreate(colBuf_, K * groups_[i], N, false, useGpu_); Matrix::resizeOrCreate(colBuf_, K * groups_[i], N, false, useGpu_);
const MatrixPtr &inMat = getInputValue(i); const MatrixPtr &inMat = getInputValue(i);
for (int n = 0; n < batchSize; ++n) { for (int n = 0; n < batchSize; ++n) {
colBuf_->vol2Col( colBuf_->vol2Col(
getOutputGrad()->getData() + n * getOutputGrad()->getStride(), getOutputGrad()->getData() + n * getOutputGrad()->getStride(),
numFilters_, numFilters_,
outputD_[i], imgSizeD_[i],
outputH_[i], imgSizeH_[i],
outputW_[i], imgSizeW_[i],
filterSizeZ_[i], filterSizeZ_[i],
filterSizeY_[i], filterSizeY_[i],
filterSize_[i], filterSize_[i],
...@@ -182,7 +181,6 @@ void DeConv3DLayer::backward(const UpdateCallback &callback) { ...@@ -182,7 +181,6 @@ void DeConv3DLayer::backward(const UpdateCallback &callback) {
} }
} }
} }
REGISTER_TIMER_INFO("WeightUpdate", getName().c_str());
weights_[i]->getParameterPtr()->incUpdate(callback); weights_[i]->getParameterPtr()->incUpdate(callback);
} }
} }
......
...@@ -139,7 +139,13 @@ void DetectionOutputLayer::forward(PassType passType) { ...@@ -139,7 +139,13 @@ void DetectionOutputLayer::forward(PassType passType) {
allDecodedBBoxes, allDecodedBBoxes,
&allIndices); &allIndices);
resetOutput(numKept, 7); if (numKept > 0) {
resetOutput(numKept, 7);
} else {
MatrixPtr outV = getOutputValue();
outV = NULL;
return;
}
MatrixPtr outV = getOutputValue(); MatrixPtr outV = getOutputValue();
getDetectionOutput(confBuffer_->getData(), getDetectionOutput(confBuffer_->getData(),
numKept, numKept,
......
...@@ -469,7 +469,7 @@ size_t getDetectionIndices( ...@@ -469,7 +469,7 @@ size_t getDetectionIndices(
const size_t numClasses, const size_t numClasses,
const size_t backgroundId, const size_t backgroundId,
const size_t batchSize, const size_t batchSize,
const size_t confThreshold, const real confThreshold,
const size_t nmsTopK, const size_t nmsTopK,
const real nmsThreshold, const real nmsThreshold,
const size_t keepTopK, const size_t keepTopK,
......
...@@ -275,7 +275,7 @@ size_t getDetectionIndices( ...@@ -275,7 +275,7 @@ size_t getDetectionIndices(
const size_t numClasses, const size_t numClasses,
const size_t backgroundId, const size_t backgroundId,
const size_t batchSize, const size_t batchSize,
const size_t confThreshold, const real confThreshold,
const size_t nmsTopK, const size_t nmsTopK,
const real nmsThreshold, const real nmsThreshold,
const size_t keepTopK, const size_t keepTopK,
......
...@@ -14,6 +14,7 @@ limitations under the License. */ ...@@ -14,6 +14,7 @@ limitations under the License. */
#include "GruCompute.h" #include "GruCompute.h"
#include "hl_recurrent_apply.cuh" #include "hl_recurrent_apply.cuh"
#include "paddle/function/GruFunctor.h"
#include "paddle/utils/Util.h" #include "paddle/utils/Util.h"
namespace paddle { namespace paddle {
...@@ -25,13 +26,13 @@ void GruCompute::init(LayerConfig &config) { ...@@ -25,13 +26,13 @@ void GruCompute::init(LayerConfig &config) {
template <> template <>
void GruCompute::forward<0>(hl_gru_value value, int frameSize, int batchSize) { void GruCompute::forward<0>(hl_gru_value value, int frameSize, int batchSize) {
hl_cpu_gru_forward(hppl::forward::gru_resetOutput(), GruFunctor<DEVICE_TYPE_CPU, real>::compute(hppl::forward::gru_resetOutput(),
hppl::forward::gru_finalOutput(), hppl::forward::gru_finalOutput(),
value, value,
frameSize, frameSize,
batchSize, batchSize,
activeNode_, activeNode_,
activeGate_); activeGate_);
} }
template <> template <>
...@@ -39,14 +40,15 @@ void GruCompute::backward<0>(hl_gru_value value, ...@@ -39,14 +40,15 @@ void GruCompute::backward<0>(hl_gru_value value,
hl_gru_grad grad, hl_gru_grad grad,
int frameSize, int frameSize,
int batchSize) { int batchSize) {
hl_cpu_gru_backward(hppl::backward::gru_stateGrad(), GruGradFunctor<DEVICE_TYPE_CPU, real>::compute(
hppl::backward::gru_resetGrad(), hppl::backward::gru_stateGrad(),
value, hppl::backward::gru_resetGrad(),
grad, value,
frameSize, grad,
batchSize, frameSize,
activeNode_, batchSize,
activeGate_); activeNode_,
activeGate_);
} }
} // namespace paddle } // namespace paddle
...@@ -49,6 +49,12 @@ struct LayerState { ...@@ -49,6 +49,12 @@ struct LayerState {
}; };
typedef std::shared_ptr<LayerState> LayerStatePtr; typedef std::shared_ptr<LayerState> LayerStatePtr;
/// Paddle device ID, MKLDNN is -2, CPU is -1
enum PADDLE_DEVICE_ID {
MKLDNN_DEVICE = -2,
CPU_DEVICE = -1,
};
/** /**
* @brief Base class for layer. * @brief Base class for layer.
* Define necessary variables and functions for every layer. * Define necessary variables and functions for every layer.
...@@ -59,11 +65,6 @@ protected: ...@@ -59,11 +65,6 @@ protected:
LayerConfig config_; LayerConfig config_;
/// whether to use GPU /// whether to use GPU
bool useGpu_; bool useGpu_;
/// Paddle device ID, MKLDNN is -2, CPU is -1
enum PADDLE_DEVICE_ID {
MKLDNN_DEVICE = -2,
CPU_DEVICE = -1,
};
/// Device Id. MKLDNN is -2, CPU is -1, and GPU is 0, 1, 2 ... /// Device Id. MKLDNN is -2, CPU is -1, and GPU is 0, 1, 2 ...
int deviceId_; int deviceId_;
/// Input layers /// Input layers
......
...@@ -14,7 +14,6 @@ limitations under the License. */ ...@@ -14,7 +14,6 @@ limitations under the License. */
#include "MKLDNNFcLayer.h" #include "MKLDNNFcLayer.h"
#include "paddle/utils/Logging.h" #include "paddle/utils/Logging.h"
#include "paddle/utils/Stat.h"
using namespace mkldnn; // NOLINT using namespace mkldnn; // NOLINT
typedef memory::format format; typedef memory::format format;
...@@ -40,6 +39,8 @@ bool MKLDNNFcLayer::init(const LayerMap& layerMap, ...@@ -40,6 +39,8 @@ bool MKLDNNFcLayer::init(const LayerMap& layerMap,
oc_ = getSize(); oc_ = getSize();
oh_ = 1; oh_ = 1;
ow_ = 1; ow_ = 1;
ih_ = 1;
iw_ = 1;
// input size can not change in FC // input size can not change in FC
iLayerSize_ = inputLayers_[0]->getSize(); iLayerSize_ = inputLayers_[0]->getSize();
...@@ -77,111 +78,86 @@ void MKLDNNFcLayer::convertWeightsToPaddle() { ...@@ -77,111 +78,86 @@ void MKLDNNFcLayer::convertWeightsToPaddle() {
wgtVal_->reorderDataTo(wgtVal_, dstFmt, targetDim); wgtVal_->reorderDataTo(wgtVal_, dstFmt, targetDim);
} }
void MKLDNNFcLayer::convertOutputToOtherDevice() { void MKLDNNFcLayer::reshape(
copyOutputInfoToOtherDevice(); int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) {
// find other cpu device and reorder output to cpu device reshapeInput(bs, ih, iw);
int cnt = 0;
for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
if (outputOtherDevice_[i].deviceId == CPU_DEVICE) {
// fc cpu output value do not need convert
// just share point
outputOtherDevice_[i].value = output_.value;
++cnt;
}
}
if (cnt > 1) {
LOG(WARNING) << "should not have more than one CPU devie";
}
}
void MKLDNNFcLayer::reshape() {
const Argument& input = getInput(0, getPrev(0)->getDeviceId());
int batchSize = input.getBatchSize();
if (bs_ == batchSize) {
return;
}
bs_ = batchSize;
ih_ = input.getFrameHeight();
iw_ = input.getFrameWidth();
if (ih_ == 0) {
ih_ = 1;
}
if (iw_ == 0) {
iw_ = 1;
}
CHECK_EQ(iLayerSize_, inputLayers_[0]->getSize()); CHECK_EQ(iLayerSize_, inputLayers_[0]->getSize());
ic_ = iLayerSize_ / (ih_ * iw_); ic = iLayerSize_ / (ih * iw);
CHECK_EQ(size_t(ic_ * ih_ * iw_), iLayerSize_) << "not divisible"; CHECK_EQ(size_t(ic * ih * iw), iLayerSize_) << "not divisible";
CHECK_EQ(size_t(oc_), getSize()); CHECK_EQ(size_t(oc), getSize());
printSizeInfo();
// reset output reshapeOutput(oh, ow);
output_.setFrameHeight(oh_); resizeOutput(bs, oc);
output_.setFrameWidth(ow_);
resetOutput(bs_, oc_);
// reset mkldnn forward printSizeInfo();
resetFwd();
needResetBwd_ = true;
convertWeightsFromPaddle();
} }
void MKLDNNFcLayer::resetFwd() { void MKLDNNFcLayer::resetFwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
pipeline.clear();
bool hasBias = biases_ && biases_->getW(); bool hasBias = biases_ && biases_->getW();
const MatrixPtr& wgt = weight_->getW(); const MatrixPtr& wgtVal = weight_->getW();
const MatrixPtr& bias = hasBias ? biases_->getW() : nullptr; const MatrixPtr& biasVal = hasBias ? biases_->getW() : nullptr;
const MatrixPtr& out = output_.value; const MatrixPtr& outVal = output_.value;
if (inputIsOnlyMKLDNN()) { if (inputIsOnlyMKLDNN()) {
const MatrixPtr& in = getInputValue(0); const MatrixPtr& inVal = getInputValue(0);
inVal_ = std::dynamic_pointer_cast<MKLDNNMatrix>(in); in = std::dynamic_pointer_cast<MKLDNNMatrix>(inVal);
CHECK(inVal_) << "Input should be MKLDNNMatrix"; CHECK(in) << "Input should be MKLDNNMatrix";
} else { } else {
CHECK_EQ(getPrev(0)->getDeviceId(), CPU_DEVICE) << "Only support CPU yet"; CHECK_EQ(getPrev(0)->getDeviceId(), CPU_DEVICE) << "Only support CPU yet";
const MatrixPtr& in = getInputValue(0, CPU_DEVICE); const MatrixPtr& inVal = getInputValue(0, CPU_DEVICE);
inVal_ = MKLDNNMatrix::create( in = MKLDNNMatrix::create(
in, memory::dims{bs_, ic_, ih_, iw_}, format::nchw, engine_); inVal, memory::dims{bs_, ic_, ih_, iw_}, format::nchw, engine_);
} }
inVal_->downSpatial(); in->downSpatial();
wgtVal_ = MKLDNNMatrix::create( wgt = MKLDNNMatrix::create(
wgt, memory::dims{oc_, ic_, ih_, iw_}, format::oihw, engine_); wgtVal, memory::dims{oc_, ic_, ih_, iw_}, format::oihw, engine_);
wgtVal_->downSpatial(); wgt->downSpatial();
biasVal_ = bias = hasBias ? MKLDNNMatrix::create(biasVal, {oc_}, format::x, engine_)
hasBias ? MKLDNNMatrix::create(bias, {oc_}, format::x, engine_) : nullptr; : nullptr;
outVal_ = MKLDNNMatrix::create(out, {bs_, oc_}, format::nc, engine_); out = MKLDNNMatrix::create(outVal, {bs_, oc_}, format::nc, engine_);
// change original output value to mkldnn output value // change original output value to mkldnn output value
output_.value = std::dynamic_pointer_cast<Matrix>(outVal_); output_.value = std::dynamic_pointer_cast<Matrix>(out);
if (!outputIsOnlyMKLDNN()) { if (!outputIsOnlyMKLDNN()) {
convertOutputToOtherDevice(); // fc cpu output value do not need create convert
// just share point
getOutput(CPU_DEVICE).value->setData(output_.value->getData());
} }
// create forward handle // create forward handle
prop_kind pk = prop_kind::forward; prop_kind pk = prop_kind::forward;
fc_fwd::desc fwdDesc = hasBias ? fc_fwd::desc(pk, fc_fwd::desc fwdDesc = hasBias ? fc_fwd::desc(pk,
inVal_->getMemoryDesc(), in->getMemoryDesc(),
wgtVal_->getMemoryDesc(), wgt->getMemoryDesc(),
biasVal_->getMemoryDesc(), bias->getMemoryDesc(),
outVal_->getMemoryDesc()) out->getMemoryDesc())
: fc_fwd::desc(pk, : fc_fwd::desc(pk,
inVal_->getMemoryDesc(), in->getMemoryDesc(),
wgtVal_->getMemoryDesc(), wgt->getMemoryDesc(),
outVal_->getMemoryDesc()); out->getMemoryDesc());
fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_); fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_);
if (hasBias) { if (hasBias) {
fwd_.reset(new fc_fwd(fwdPD, *inVal_, *wgtVal_, *biasVal_, *outVal_)); fwd_.reset(new fc_fwd(fwdPD, *in, *wgt, *bias, *out));
} else { } else {
fwd_.reset(new fc_fwd(fwdPD, *inVal_, *wgtVal_, *outVal_)); fwd_.reset(new fc_fwd(fwdPD, *in, *wgt, *out));
} }
printValueFormatFlow(); printValueFormatFlow();
pipelineFwd_.clear(); pipeline.push_back(*fwd_);
pipelineFwd_.push_back(*fwd_);
} }
void MKLDNNFcLayer::resetBwd() { void MKLDNNFcLayer::resetBwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) {
pipeline.clear();
if (!needResetBwd_) { if (!needResetBwd_) {
return; return;
} }
...@@ -190,8 +166,8 @@ void MKLDNNFcLayer::resetBwd() { ...@@ -190,8 +166,8 @@ void MKLDNNFcLayer::resetBwd() {
/// backward weight /// backward weight
CHECK(inVal_) << "Should have input value"; CHECK(inVal_) << "Should have input value";
const MatrixPtr& wgt = weight_->getWGrad(); const MatrixPtr& wgtGrad = weight_->getWGrad();
const MatrixPtr& bias = hasBias ? biases_->getWGrad() : nullptr; const MatrixPtr& biasGrad = hasBias ? biases_->getWGrad() : nullptr;
// TODO(TJ): merge outgrad // TODO(TJ): merge outgrad
int device = outputIsOnlyMKLDNN() ? MKLDNN_DEVICE : CPU_DEVICE; int device = outputIsOnlyMKLDNN() ? MKLDNN_DEVICE : CPU_DEVICE;
...@@ -202,101 +178,66 @@ void MKLDNNFcLayer::resetBwd() { ...@@ -202,101 +178,66 @@ void MKLDNNFcLayer::resetBwd() {
// for CPU device: // for CPU device:
// fc do not need to convert from cpu device since output is always nc format // fc do not need to convert from cpu device since output is always nc format
// only need create from cpu device // only need create from cpu device
const MatrixPtr& out = getOutput(device).grad; const MatrixPtr& outGrad = getOutput(device).grad;
outGrad_ = MKLDNNMatrix::create(out, outVal_->getPrimitiveDesc()); out = MKLDNNMatrix::create(outGrad, outVal_->getPrimitiveDesc());
wgtGrad_ = MKLDNNMatrix::create(wgt, wgtVal_->getPrimitiveDesc()); wgt = MKLDNNMatrix::create(wgtGrad, wgtVal_->getPrimitiveDesc());
biasGrad_ = hasBias ? MKLDNNMatrix::create(bias, biasVal_->getPrimitiveDesc()) bias = hasBias ? MKLDNNMatrix::create(biasGrad, biasVal_->getPrimitiveDesc())
: nullptr; : nullptr;
// create memory primitive desc // create memory primitive desc
fc_fwd::desc fwdDesc = fc_fwd::desc(prop_kind::forward, fc_fwd::desc fwdDesc = fc_fwd::desc(prop_kind::forward,
inVal_->getMemoryDesc(), inVal_->getMemoryDesc(),
wgtGrad_->getMemoryDesc(), wgt->getMemoryDesc(),
outGrad_->getMemoryDesc()); out->getMemoryDesc());
fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_); fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_);
fc_bwdWgt::desc bwdWgtDesc = hasBias fc_bwdWgt::desc bwdWgtDesc = hasBias
? fc_bwdWgt::desc(inVal_->getMemoryDesc(), ? fc_bwdWgt::desc(inVal_->getMemoryDesc(),
wgtGrad_->getMemoryDesc(), wgt->getMemoryDesc(),
biasGrad_->getMemoryDesc(), bias->getMemoryDesc(),
outGrad_->getMemoryDesc()) out->getMemoryDesc())
: fc_bwdWgt::desc(inVal_->getMemoryDesc(), : fc_bwdWgt::desc(inVal_->getMemoryDesc(),
wgtGrad_->getMemoryDesc(), wgt->getMemoryDesc(),
outGrad_->getMemoryDesc()); out->getMemoryDesc());
fc_bwdWgt::primitive_desc bwdWgtPD = fc_bwdWgt::primitive_desc bwdWgtPD =
fc_bwdWgt::primitive_desc(bwdWgtDesc, engine_, fwdPD); fc_bwdWgt::primitive_desc(bwdWgtDesc, engine_, fwdPD);
if (hasBias) { if (hasBias) {
bwdWgt_.reset( bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt, *bias));
new fc_bwdWgt(bwdWgtPD, *inVal_, *outGrad_, *wgtGrad_, *biasGrad_));
} else { } else {
bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *outGrad_, *wgtGrad_)); bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt));
} }
pipelineBwd_.clear(); pipeline.push_back(*bwdWgt_);
pipelineBwd_.push_back(*bwdWgt_);
/// backward data /// backward data
device = inputIsOnlyMKLDNN() ? MKLDNN_DEVICE : CPU_DEVICE; const MatrixPtr& inGrad = inputLayers_[0]->getOutput().grad;
const MatrixPtr& in = getInputGrad(0, device); if (inGrad == nullptr) {
if (in == nullptr) {
return; return;
} }
if (getInput(0, device).getAllCount() > 1) { if (getInput(0, MKLDNN_DEVICE).getAllCount() > 1) {
// TODO(TJ): use outputMaps_ ways when merge outgrad done // TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done
} else { } else {
inGrad_ = MKLDNNMatrix::create(in, inVal_->getPrimitiveDesc()); in = MKLDNNMatrix::create(inGrad, inVal_->getPrimitiveDesc());
} }
fc_bwdData::desc bwdDataDesc = fc_bwdData::desc(inVal_->getMemoryDesc(), fc_bwdData::desc bwdDataDesc = fc_bwdData::desc(
wgtGrad_->getMemoryDesc(), inVal_->getMemoryDesc(), wgt->getMemoryDesc(), out->getMemoryDesc());
outGrad_->getMemoryDesc());
fc_bwdData::primitive_desc bwdDataPD = fc_bwdData::primitive_desc bwdDataPD =
fc_bwdData::primitive_desc(bwdDataDesc, engine_, fwdPD); fc_bwdData::primitive_desc(bwdDataDesc, engine_, fwdPD);
CHECK(wgtVal_) << "Should have weight memory"; CHECK(wgtVal_) << "Should have weight memory";
bwdData_.reset(new fc_bwdData(bwdDataPD, *outGrad_, *wgtVal_, *inGrad_)); bwdData_.reset(new fc_bwdData(bwdDataPD, *out, *wgtVal_, *in));
printGradFormatFlow(); printGradFormatFlow();
pipelineBwd_.push_back(*bwdData_); pipeline.push_back(*bwdData_);
} }
void MKLDNNFcLayer::forward(PassType passType) { void MKLDNNFcLayer::updateInputData() {
Layer::forward(passType); inVal_->setData(getInputValue(0, CPU_DEVICE)->getData());
reshape();
{
REGISTER_TIMER_INFO("mkldnn_FwdTimer", getName().c_str());
syncInputValue();
// just submit forward pipeline
stream_->submit(pipelineFwd_);
}
/* activation */ {
REGISTER_TIMER_INFO("FwActTimer", getName().c_str());
forwardActivation();
}
} }
void MKLDNNFcLayer::backward(const UpdateCallback& callback) { void MKLDNNFcLayer::updateWeights(const UpdateCallback& callback) {
/* Do derivation */ { weight_->getParameterPtr()->incUpdate(callback);
REGISTER_TIMER_INFO("BpActTimer", getName().c_str()); if (biases_ && biases_->getWGrad()) {
backwardActivation(); biases_->getParameterPtr()->incUpdate(callback);
}
{
REGISTER_TIMER_INFO("mkldnn_bwdTimer", getName().c_str());
resetBwd();
syncOutputGrad();
// just sumbmit backward pipeline
stream_->submit(pipelineBwd_);
}
{
REGISTER_TIMER_INFO("WeightUpdate", getName().c_str());
weight_->getParameterPtr()->incUpdate(callback);
if (biases_ && biases_->getWGrad()) {
biases_->getParameterPtr()->incUpdate(callback);
}
} }
} }
} // namespace paddle } // namespace paddle
...@@ -45,35 +45,28 @@ public: ...@@ -45,35 +45,28 @@ public:
bool init(const LayerMap& layerMap, bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override; const ParameterMap& parameterMap) override;
void convertWeightsFromPaddle() override; void reshape(
int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) override;
void convertWeightsToPaddle() override; void resetFwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) override;
void forward(PassType passType) override; void resetBwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) override;
void backward(const UpdateCallback& callback) override; void updateInputData() override;
protected: void updateWeights(const UpdateCallback& callback) override;
/**
* reshape the input image sizes void convertWeightsFromPaddle() override;
* and reset output buffer size
* and reset mkldnn forward void convertWeightsToPaddle() override;
*/
void reshape();
/**
* reset the forward primitve and memory
* only would be called when input size changes
*/
void resetFwd();
/**
* reset the backward primitve and memory for mkldnn fc
* only would be called when needed
*/
void resetBwd();
void convertOutputToOtherDevice() override;
}; };
} // namespace paddle } // namespace paddle
...@@ -19,6 +19,7 @@ limitations under the License. */ ...@@ -19,6 +19,7 @@ limitations under the License. */
#include "MKLDNNBase.h" #include "MKLDNNBase.h"
#include "mkldnn.hpp" #include "mkldnn.hpp"
#include "paddle/math/MKLDNNMatrix.h" #include "paddle/math/MKLDNNMatrix.h"
#include "paddle/utils/Stat.h"
DECLARE_bool(use_mkldnn); DECLARE_bool(use_mkldnn);
...@@ -33,6 +34,8 @@ typedef std::shared_ptr<MKLDNNLayer> MKLDNNLayerPtr; ...@@ -33,6 +34,8 @@ typedef std::shared_ptr<MKLDNNLayer> MKLDNNLayerPtr;
*/ */
class MKLDNNLayer : public Layer { class MKLDNNLayer : public Layer {
protected: protected:
// input value element count
size_t inputElemenCnt_;
// batch size // batch size
int bs_; int bs_;
// input image channel, height and width // input image channel, height and width
...@@ -52,7 +55,7 @@ protected: ...@@ -52,7 +55,7 @@ protected:
std::vector<mkldnn::primitive> pipelineFwd_; std::vector<mkldnn::primitive> pipelineFwd_;
std::vector<mkldnn::primitive> pipelineBwd_; std::vector<mkldnn::primitive> pipelineBwd_;
// MKLDNNMatrixPtr // MKLDNNMatrixPtr with internal format
MKLDNNMatrixPtr inVal_; MKLDNNMatrixPtr inVal_;
MKLDNNMatrixPtr inGrad_; MKLDNNMatrixPtr inGrad_;
MKLDNNMatrixPtr outVal_; MKLDNNMatrixPtr outVal_;
...@@ -65,6 +68,7 @@ protected: ...@@ -65,6 +68,7 @@ protected:
public: public:
explicit MKLDNNLayer(const LayerConfig& config) explicit MKLDNNLayer(const LayerConfig& config)
: Layer(config), : Layer(config),
inputElemenCnt_(0),
bs_(0), bs_(0),
ic_(0), ic_(0),
ih_(0), ih_(0),
...@@ -95,12 +99,104 @@ public: ...@@ -95,12 +99,104 @@ public:
if (!Layer::init(layerMap, parameterMap)) { if (!Layer::init(layerMap, parameterMap)) {
return false; return false;
} }
checkCPUOutputsNumber();
stream_.reset(new MKLDNNStream()); stream_.reset(new MKLDNNStream());
engine_ = CPUEngine::Instance().getEngine(); engine_ = CPUEngine::Instance().getEngine();
return true; return true;
} }
void forward(PassType passType) override {
passType_ = passType;
{
REGISTER_TIMER_INFO("mkldnn_FwdTimer", getName().c_str());
CHECK(!inputLayers_.empty());
copySeqInfoToOutputs();
size_t elemenCnt = inputLayers_[0]->getOutput().value->getElementCnt();
if (inputElemenCnt_ != elemenCnt) {
// reset when the total input size changes, not only the batch size
inputElemenCnt_ = elemenCnt;
reshape(bs_, ic_, ih_, iw_, oc_, oh_, ow_);
resetFwd(pipelineFwd_, inVal_, wgtVal_, biasVal_, outVal_);
convertWeightsFromPaddle();
needResetBwd_ = true;
}
if (inputLayers_[0]->getType() == "data") {
updateInputData();
}
stream_->submit(pipelineFwd_);
}
/* activation */ {
REGISTER_TIMER_INFO("FwActTimer", getName().c_str());
forwardActivation();
}
}
void backward(const UpdateCallback& callback) override {
/* Do derivation */ {
REGISTER_TIMER_INFO("BpActTimer", getName().c_str());
backwardActivation();
}
{
REGISTER_TIMER_INFO("mkldnn_bwdTimer", getName().c_str());
if (needResetBwd_) {
resetBwd(pipelineBwd_, inGrad_, wgtGrad_, biasGrad_, outGrad_);
needResetBwd_ = false;
}
stream_->submit(pipelineBwd_);
}
{
REGISTER_TIMER_INFO("WeightUpdate", getName().c_str());
updateWeights(callback);
}
}
/**
* reshape the input image sizes
* and reset the output image and buffer sizes
* the output channel count cannot be changed
*/
virtual void reshape(
int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) = 0;
/**
* reset the mkldnn forward primitive and memory
* only called when the input size changes
*/
virtual void resetFwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) = 0;
/**
* reset the mkldnn backward primitive and memory
* only called when needed
*/
virtual void resetBwd(std::vector<mkldnn::primitive>& pipeline,
MKLDNNMatrixPtr& in,
MKLDNNMatrixPtr& wgt,
MKLDNNMatrixPtr& bias,
MKLDNNMatrixPtr& out) = 0;
/**
* Update the input value data when the input layer is of "data" type,
* since the input value data address might have changed.
*/
virtual void updateInputData() {}
/**
* Update weights and biases if necessary.
*/
virtual void updateWeights(const UpdateCallback& callback) {}
/** /**
* convert weight from paddle format to mkldnn format * convert weight from paddle format to mkldnn format
* weight_ will be overridden * weight_ will be overridden
...@@ -114,10 +210,38 @@ public: ...@@ -114,10 +210,38 @@ public:
virtual void convertWeightsToPaddle() {} virtual void convertWeightsToPaddle() {}
/** /**
* convert MKLDNN output to other device. * add this interface as public for unit test
* only support CPU device yet */
void addOutputArgument(int deviceId) { Layer::addOutputArgument(deviceId); }
protected:
/**
* reshape the input image sizes and the input batch size
*/ */
virtual void convertOutputToOtherDevice() {} virtual void reshapeInput(int& batchsize, int& height, int& width) {
const Argument& input = inputLayers_[0]->getOutput();
batchsize = input.getBatchSize();
int h = input.getFrameHeight();
int w = input.getFrameWidth();
if (h != 0) {
height = h;
}
if (w != 0) {
width = w;
}
}
/**
* reshape output image sizes
*/
virtual void reshapeOutput(size_t height, size_t width) {
output_.setFrameHeight(height);
output_.setFrameWidth(width);
for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
outputOtherDevice_[i].setFrameHeight(height);
outputOtherDevice_[i].setFrameWidth(width);
}
}
/** /**
* print info about sizes * print info about sizes
...@@ -133,8 +257,8 @@ public: ...@@ -133,8 +257,8 @@ public:
*/ */
virtual void printValueFormatFlow() { virtual void printValueFormatFlow() {
if (inVal_ && outVal_) { if (inVal_ && outVal_) {
VLOG(MKLDNN_FMTS) << "value format flow --- " << inVal_->getFormat() VLOG(MKLDNN_FMTS) << inVal_->getFormat() << " >>> "
<< " >>> " << outVal_->getFormat(); << outVal_->getFormat();
} }
} }
...@@ -143,29 +267,12 @@ public: ...@@ -143,29 +267,12 @@ public:
*/ */
virtual void printGradFormatFlow() { virtual void printGradFormatFlow() {
if (inGrad_ && outGrad_) { if (inGrad_ && outGrad_) {
VLOG(MKLDNN_FMTS) << "grad format flow --- " << inGrad_->getFormat() VLOG(MKLDNN_FMTS) << inGrad_->getFormat() << " <<< "
<< " <<< " << outGrad_->getFormat(); << outGrad_->getFormat();
} }
} }
protected: protected:
/**
* copy image size and sequence info to other device
* @note: can not directly use Layer::copyOutputToOtherDevice since here only
* copy base info and do not copy data value
*/
void copyOutputInfoToOtherDevice() {
for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
outputOtherDevice_[i].setFrameHeight(output_.getFrameHeight());
outputOtherDevice_[i].setFrameWidth(output_.getFrameWidth());
outputOtherDevice_[i].sequenceStartPositions =
output_.sequenceStartPositions;
outputOtherDevice_[i].subSequenceStartPositions =
output_.subSequenceStartPositions;
outputOtherDevice_[i].cpuSequenceDims = output_.cpuSequenceDims;
}
}
/** /**
* If input only has MKLDNN device. * If input only has MKLDNN device.
* Otherwise, only support the previous layer using CPU device. * Otherwise, only support the previous layer using CPU device.
...@@ -193,37 +300,12 @@ protected: ...@@ -193,37 +300,12 @@ protected:
return outputOtherDevice_.size() == 0; return outputOtherDevice_.size() == 0;
} }
/**
* Sync input value data
*/
void syncInputValue() {
if (inputIsOnlyMKLDNN()) {
return;
}
real* iData = getInputValue(0, CPU_DEVICE)->getData();
// update input data
// since it might be changed if this is after data layer
inVal_->updateData(iData);
}
/**
* Sync output grad data
*/
void syncOutputGrad() {
if (outputIsOnlyMKLDNN()) {
return;
}
// update diff
real* oDiff = getOutput(CPU_DEVICE).grad->getData();
outGrad_->updateData(oDiff);
}
/** /**
* Set deviceId of this layer. * Set deviceId of this layer.
*/ */
void setDevice(int id) { deviceId_ = id; } void setDevice(int id) { deviceId_ = id; }
private:
/** /**
* Set deviceId of the params used in this layer. * Set deviceId of the params used in this layer.
*/ */
...@@ -247,6 +329,42 @@ protected: ...@@ -247,6 +329,42 @@ protected:
parameter->setDevice(id); parameter->setDevice(id);
} }
} }
/**
* Check the number of CPU devices in outputOtherDevice_.
* There should be at most one.
*/
void checkCPUOutputsNumber(int max = 1) {
int cnt = 0;
for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
if (outputOtherDevice_[i].deviceId == CPU_DEVICE) {
++cnt;
}
}
CHECK_LE(cnt, max) << "too many CPU devices";
}
/**
* copy SeqInfo from input layer to this output and other output devices.
* @note: do not use getInput(0) since it uses this deviceId_,
* use "inputLayers_[0]->getOutput()" instead.
*/
void copySeqInfoToOutputs() {
if (inputLayers_.empty() || !needSequenceInfo_) {
return;
}
const Argument& input = inputLayers_[0]->getOutput();
output_.sequenceStartPositions = input.sequenceStartPositions;
output_.subSequenceStartPositions = input.subSequenceStartPositions;
output_.cpuSequenceDims = input.cpuSequenceDims;
for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
outputOtherDevice_[i].sequenceStartPositions =
output_.sequenceStartPositions;
outputOtherDevice_[i].subSequenceStartPositions =
output_.subSequenceStartPositions;
outputOtherDevice_[i].cpuSequenceDims = output_.cpuSequenceDims;
}
}
}; };
} // namespace paddle } // namespace paddle
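A note for readers tracking the refactor above: the MKLDNNLayer base class now drives the whole forward pass and rebuilds primitives only when the total input element count changes. A minimal standalone sketch of that decision logic (the names ResetGuard/needReset are illustrative, not part of the Paddle API):

#include <cstddef>
#include <iostream>

// Sketch: rebuild primitives only when the total input element count changes.
// This also covers batch-size changes, since elemCnt = batch * channel * h * w.
struct ResetGuard {
  size_t cachedElemCnt = 0;
  bool needReset(size_t elemCnt) {
    if (elemCnt == cachedElemCnt) {
      return false;  // reuse existing primitives and memory
    }
    cachedElemCnt = elemCnt;
    return true;  // caller should reshape, resetFwd and mark bwd for reset
  }
};

int main() {
  ResetGuard guard;
  std::cout << guard.needReset(128 * 784) << "\n";  // 1: first pass resets
  std::cout << guard.needReset(128 * 784) << "\n";  // 0: same size, reuse
  std::cout << guard.needReset(64 * 784) << "\n";   // 1: batch size changed
  return 0;
}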
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "SwitchOrderLayer.h"
#include "paddle/utils/Stat.h"
namespace paddle {
REGISTER_LAYER(switch_order, SwitchOrderLayer);
bool SwitchOrderLayer::init(const LayerMap& layerMap,
const ParameterMap& parameterMap) {
/* Initialize the basic parent class */
Layer::init(layerMap, parameterMap);
auto& img_conf = config_.inputs(0).image_conf();
size_t inD = img_conf.img_size_z();
size_t inH =
img_conf.has_img_size_y() ? img_conf.img_size_y() : img_conf.img_size();
size_t inW = img_conf.img_size();
size_t inC = img_conf.channels();
inH = inH * inD;
inDims_ = TensorShape({0, inC, inH, inW});
outDims_ = TensorShape(4);
auto& reshape_conf = config_.reshape_conf();
for (int i = 0; i < reshape_conf.height_axis_size(); i++) {
heightAxis_.push_back(reshape_conf.height_axis(i));
}
for (int i = 0; i < reshape_conf.width_axis_size(); i++) {
widthAxis_.push_back(reshape_conf.width_axis(i));
}
createFunction(nchw2nhwc_, "NCHW2NHWC", FuncConfig());
createFunction(nhwc2nchw_, "NHWC2NCHW", FuncConfig());
return true;
}
void SwitchOrderLayer::setOutDims() {
outDims_.setDim(0, inDims_[0]);
outDims_.setDim(1, inDims_[2]);
outDims_.setDim(2, inDims_[3]);
outDims_.setDim(3, inDims_[1]);
reshapeHeight_ = 1;
for (size_t i = 0; i < heightAxis_.size(); i++) {
reshapeHeight_ *= outDims_[heightAxis_[i]];
}
output_.setFrameHeight(reshapeHeight_);
reshapeWidth_ = 1;
for (size_t i = 0; i < widthAxis_.size(); i++) {
reshapeWidth_ *= outDims_[widthAxis_[i]];
}
output_.setFrameWidth(reshapeWidth_);
}
void SwitchOrderLayer::setInDims() {
MatrixPtr input = inputLayers_[0]->getOutputValue();
size_t batchSize = input->getHeight();
inDims_.setDim(0, batchSize);
int d = inputLayers_[0]->getOutput().getFrameDepth();
d = (d == 0 ? 1 : d);
int h = inputLayers_[0]->getOutput().getFrameHeight();
if (h != 0) inDims_.setDim(2, h * d);
int w = inputLayers_[0]->getOutput().getFrameWidth();
if (w != 0) inDims_.setDim(3, w);
int totalCount = input->getElementCnt();
int channels = totalCount / (inDims_[0] * inDims_[2] * inDims_[3]);
if (channels != 0) inDims_.setDim(1, channels);
}
void SwitchOrderLayer::forward(PassType passType) {
Layer::forward(passType);
setInDims();
setOutDims();
resetOutput(outDims_[0], outDims_[1] * outDims_[2] * outDims_[3]);
if (heightAxis_.size() > 0) {
resetOutput(reshapeHeight_, reshapeWidth_);
}
// switch NCHW to NHWC
BufferArgs inputs;
BufferArgs outputs;
inputs.addArg(*getInputValue(0), inDims_);
outputs.addArg(*getOutputValue(), outDims_);
nchw2nhwc_[0]->calc(inputs, outputs);
forwardActivation();
}
void SwitchOrderLayer::backward(const UpdateCallback& callback) {
(void)callback;
backwardActivation();
// switch NHWC to NCHW
BufferArgs inputs;
BufferArgs outputs;
inputs.addArg(*getOutputGrad(), outDims_);
outputs.addArg(*getInputGrad(0), inDims_, ADD_TO);
nhwc2nchw_[0]->calc(inputs, outputs);
}
} // namespace paddle
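The switch_order layer above is effectively an NCHW-to-NHWC transpose in forward (and the reverse in backward). A self-contained sketch of the forward index mapping, independent of the Paddle Function API:

#include <cstddef>
#include <iostream>
#include <vector>

// Sketch: copy a tensor stored as NCHW into NHWC order.
// dst[n][h][w][c] = src[n][c][h][w]
std::vector<float> nchw2nhwc(const std::vector<float>& src,
                             size_t N, size_t C, size_t H, size_t W) {
  std::vector<float> dst(src.size());
  for (size_t n = 0; n < N; ++n)
    for (size_t c = 0; c < C; ++c)
      for (size_t h = 0; h < H; ++h)
        for (size_t w = 0; w < W; ++w)
          dst[((n * H + h) * W + w) * C + c] = src[((n * C + c) * H + h) * W + w];
  return dst;
}

int main() {
  // 1x2x2x2 NCHW tensor: channel 0 = {1,2,3,4}, channel 1 = {5,6,7,8}
  std::vector<float> src = {1, 2, 3, 4, 5, 6, 7, 8};
  std::vector<float> dst = nchw2nhwc(src, 1, 2, 2, 2);
  for (float v : dst) std::cout << v << " ";  // prints: 1 5 2 6 3 7 4 8
  std::cout << "\n";
  return 0;
}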
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "Layer.h"
namespace paddle {
/**
* \brief This layer calculate softmax in image channel dimension.
*/
class SwitchOrderLayer : public Layer {
public:
explicit SwitchOrderLayer(const LayerConfig& config) : Layer(config) {}
~SwitchOrderLayer() {}
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
void forward(PassType passType) override;
void backward(const UpdateCallback& callback = nullptr) override;
void setInDims();
void setOutDims();
protected:
std::vector<std::shared_ptr<FunctionBase>> nchw2nhwc_;
std::vector<std::shared_ptr<FunctionBase>> nhwc2nchw_;
TensorShape inDims_;
TensorShape outDims_;
std::vector<int> heightAxis_;
std::vector<int> widthAxis_;
size_t reshapeHeight_;
size_t reshapeWidth_;
};
} // namespace paddle
...@@ -63,8 +63,12 @@ void MKLDNNTester::reset(const TestConfig& dnn, ...@@ -63,8 +63,12 @@ void MKLDNNTester::reset(const TestConfig& dnn,
initTestLayer( initTestLayer(
configs_[i], &(layerMaps_[i]), &(parameters_[i]), &(testLayers_[i])); configs_[i], &(layerMaps_[i]), &(parameters_[i]), &(testLayers_[i]));
} }
dnnLayer_ = testLayers_[DNN];
refLayer_ = testLayers_[REF]; refLayer_ = testLayers_[REF];
dnnLayer_ = std::dynamic_pointer_cast<MKLDNNLayer>(testLayers_[DNN]);
CHECK(dnnLayer_);
// for comparison with the Paddle reference results,
// we need to manually add a CPU device output for the test
dnnLayer_->addOutputArgument(CPU_DEVICE);
EXPECT_EQ(dataLayers_[DNN].size(), dataLayers_[REF].size()); EXPECT_EQ(dataLayers_[DNN].size(), dataLayers_[REF].size());
EXPECT_EQ(parameters_[DNN].size(), parameters_[REF].size()); EXPECT_EQ(parameters_[DNN].size(), parameters_[REF].size());
...@@ -109,20 +113,22 @@ void MKLDNNTester::randomBotDatas() { ...@@ -109,20 +113,22 @@ void MKLDNNTester::randomBotDatas() {
void MKLDNNTester::randomTopDiffs() { void MKLDNNTester::randomTopDiffs() {
refLayer_->getOutputGrad()->randomizeUniform(); refLayer_->getOutputGrad()->randomizeUniform();
dnnLayer_->getOutputGrad()->copyFrom(*(refLayer_->getOutputGrad())); dnnLayer_->getOutput(CPU_DEVICE)
VLOG(lvl_) << "Random dom Backward Input, TopDiff: "; .grad->copyFrom(*(refLayer_->getOutputGrad()));
VLOG(lvl_) << "Random Backward Input, TopDiff: ";
printMatrix(refLayer_->getOutputGrad()); printMatrix(refLayer_->getOutputGrad());
} }
void MKLDNNTester::checkForward() { void MKLDNNTester::checkForward() {
printTopDatas();
double delta = compareMatrix(testLayers_[DNN]->getOutputValue(),
testLayers_[REF]->getOutputValue());
VLOG(MKLDNN_ALL) << "Check Forward"; VLOG(MKLDNN_ALL) << "Check Forward";
printTopDatas();
double delta = compareMatrix(dnnLayer_->getOutput(-1).value,
refLayer_->getOutputValue());
EXPECT_LE(fabs(delta), eps_); EXPECT_LE(fabs(delta), eps_);
} }
void MKLDNNTester::checkBackwardData() { void MKLDNNTester::checkBackwardData() {
VLOG(MKLDNN_ALL) << "Check Backward Data";
// TODO(TJ): uncomment me when batch norm ready // TODO(TJ): uncomment me when batch norm ready
// const bool isBN = dnnLayer_->getType() == "mkldnn_batch_norm"; // const bool isBN = dnnLayer_->getType() == "mkldnn_batch_norm";
for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) { for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) {
...@@ -144,14 +150,12 @@ void MKLDNNTester::checkBackwardData() { ...@@ -144,14 +150,12 @@ void MKLDNNTester::checkBackwardData() {
} }
void MKLDNNTester::checkBackwardWgts() { void MKLDNNTester::checkBackwardWgts() {
VLOG(MKLDNN_ALL) << "Check Backward Weight";
CHECK_EQ(parameters_[DNN].size(), parameters_[REF].size()); CHECK_EQ(parameters_[DNN].size(), parameters_[REF].size());
vector<VectorPtr> dnnWgts; // used to temporarily save mkldnn weights vector<VectorPtr> dnnWgts; // used to temporarily save mkldnn weights
saveWgt(parameters_[DNN], dnnWgts); saveWgt(parameters_[DNN], dnnWgts);
const MKLDNNLayerPtr dnnlayer = dnnLayer_->convertWeightsToPaddle();
std::dynamic_pointer_cast<MKLDNNLayer>(dnnLayer_);
CHECK(dnnlayer);
dnnlayer->convertWeightsToPaddle();
for (size_t i = 0; i < parameters_[DNN].size(); ++i) { for (size_t i = 0; i < parameters_[DNN].size(); ++i) {
const VectorPtr& dnn = parameters_[DNN][i]->getBuf(PARAMETER_VALUE); const VectorPtr& dnn = parameters_[DNN][i]->getBuf(PARAMETER_VALUE);
const VectorPtr& ref = parameters_[REF][i]->getBuf(PARAMETER_VALUE); const VectorPtr& ref = parameters_[REF][i]->getBuf(PARAMETER_VALUE);
...@@ -189,38 +193,38 @@ void MKLDNNTester::restoreWgt(const vector<VectorPtr>& from, ...@@ -189,38 +193,38 @@ void MKLDNNTester::restoreWgt(const vector<VectorPtr>& from,
} }
// clear parameters grad // clear parameters grad
void MKLDNNTester::clearWgtDiffs() { void MKLDNNTester::clearWgtDiffs(size_t id) {
CHECK_LE(id, parameters_.size());
for (size_t n = 0; n < parameters_.size(); ++n) { for (size_t n = 0; n < parameters_.size(); ++n) {
for (size_t i = 0; i < parameters_[n].size(); ++i) { if (id == n || id == parameters_.size()) {
const VectorPtr& grad = parameters_[n][i]->getBuf(PARAMETER_GRADIENT); for (size_t i = 0; i < parameters_[n].size(); ++i) {
if (grad) { const VectorPtr& grad = parameters_[n][i]->getBuf(PARAMETER_GRADIENT);
grad->zeroMem(); if (grad) {
grad->zeroMem();
}
} }
} }
} }
} }
void MKLDNNTester::clearBotDiffs() { void MKLDNNTester::clearBotDiffs(size_t id) {
// dnn and ref CHECK_LE(id, dataLayers_.size());
for (size_t n = 0; n < dataLayers_.size(); ++n) { for (size_t n = 0; n < dataLayers_.size(); ++n) {
// all inputs layers if (id == n || id == dataLayers_.size()) {
for (size_t i = 0; i < dataLayers_[n].size(); ++i) { // clear inputs layers of this specific layer
dataLayers_[n][i]->getOutputGrad()->zeroMem(); for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
dataLayers_[n][i]->getOutputGrad()->zeroMem();
}
} }
} }
} }
void MKLDNNTester::clearBotDiffs(int n) { void MKLDNNTester::clearTopDatas(size_t id) {
CHECK_LT(n, NUM); CHECK_LE(id, testLayers_.size());
// all inputs layers
for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
dataLayers_[n][i]->getOutputGrad()->zeroMem();
}
}
void MKLDNNTester::clearTopDatas() {
for (size_t i = 0; i < testLayers_.size(); ++i) { for (size_t i = 0; i < testLayers_.size(); ++i) {
testLayers_[i]->getOutputValue()->zeroMem(); if (id == i || id == testLayers_.size()) {
testLayers_[i]->getOutputValue()->zeroMem();
}
} }
} }
...@@ -300,16 +304,24 @@ void MKLDNNTester::runOnce() { ...@@ -300,16 +304,24 @@ void MKLDNNTester::runOnce() {
checkForward(); checkForward();
// test backward // test backward
// simple updater
UpdateCallback updateCallback = [](Parameter* para) {
auto& grad = para->getBuf(PARAMETER_GRADIENT);
auto& value = para->getBuf(PARAMETER_VALUE);
real lr = 1e-3;
value->add(*grad, lr);
};
randomTopDiffs(); randomTopDiffs();
dnnLayer_->backward(nullptr); dnnLayer_->backward(updateCallback);
refLayer_->backward(nullptr); refLayer_->backward(updateCallback);
checkBackwardData(); checkBackwardData();
checkBackwardWgts(); checkBackwardWgts();
// clear buffers // clear buffers
// ref code will addto the diff, dnn code will writeto it // ref code will addto the diff, dnn code will writeto it
// and clearTopDatas() and clearWgtDiffs() should be coverd by test layers // and clearTopDatas(REF) should be covered by ref layers
clearBotDiffs(REF); clearBotDiffs(REF);
clearWgtDiffs(REF);
} }
void MKLDNNTester::run(const TestConfig& dnn, void MKLDNNTester::run(const TestConfig& dnn,
......
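The simple updater registered in runOnce() applies the gradient to the parameter value so the MKLDNN and reference layers evolve identically between iterations. Assuming value->add(*grad, lr) means value += lr * grad (an assumption about the Vector API, not stated in the diff), the callback reduces to an element-wise loop like this sketch:

#include <cstddef>
#include <vector>

// Sketch of the test updater: value <- value + lr * grad, element-wise.
void simpleUpdate(std::vector<float>& value, const std::vector<float>& grad,
                  float lr = 1e-3f) {
  for (std::size_t i = 0; i < value.size(); ++i) {
    value[i] += lr * grad[i];
  }
}

int main() {
  std::vector<float> value = {1.0f, 2.0f};
  std::vector<float> grad = {10.0f, -10.0f};
  simpleUpdate(value, grad);  // value becomes {1.01, 1.99}
  return 0;
}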
...@@ -18,6 +18,7 @@ limitations under the License. */ ...@@ -18,6 +18,7 @@ limitations under the License. */
#include <vector> #include <vector>
#include "LayerGradUtil.h" #include "LayerGradUtil.h"
#include "paddle/gserver/layers/MKLDNNBase.h" #include "paddle/gserver/layers/MKLDNNBase.h"
#include "paddle/gserver/layers/MKLDNNLayer.h"
namespace paddle { namespace paddle {
...@@ -40,7 +41,8 @@ protected: ...@@ -40,7 +41,8 @@ protected:
vector<LayerMap> layerMaps_; vector<LayerMap> layerMaps_;
vector<vector<ParameterPtr>> parameters_; vector<vector<ParameterPtr>> parameters_;
vector<LayerPtr> testLayers_; vector<LayerPtr> testLayers_;
LayerPtr dnnLayer_, refLayer_; LayerPtr refLayer_;
MKLDNNLayerPtr dnnLayer_;
/// run some iterations, all the result should pass /// run some iterations, all the result should pass
size_t iter_; size_t iter_;
...@@ -88,10 +90,10 @@ private: ...@@ -88,10 +90,10 @@ private:
void checkBackwardData(); void checkBackwardData();
void checkBackwardWgts(); void checkBackwardWgts();
void clearWgtDiffs(); // clear specific layer, clear all when id equals NUM
void clearBotDiffs(); void clearWgtDiffs(size_t id = NUM);
void clearBotDiffs(int n); // clear specific layer void clearBotDiffs(size_t id = NUM);
void clearTopDatas(); void clearTopDatas(size_t id = NUM);
void printTopDatas(); void printTopDatas();
void printMatrix(const MatrixPtr& m); void printMatrix(const MatrixPtr& m);
......
...@@ -1703,6 +1703,55 @@ TEST(Layer, BatchNormalizationLayer) { ...@@ -1703,6 +1703,55 @@ TEST(Layer, BatchNormalizationLayer) {
#endif #endif
} }
void testBatchNorm3DLayer(const string& type, bool trans, bool useGpu) {
TestConfig config;
const int CHANNELS = 10;
const int IMG_SIZE = 16;
const int IMG_SIZE_Y = 8;
const int IMG_SIZE_Z = 8;
size_t size = CHANNELS * IMG_SIZE * IMG_SIZE_Y * IMG_SIZE_Z;
config.layerConfig.set_type(type);
config.layerConfig.set_size(size);
config.layerConfig.set_active_type("sigmoid");
config.biasSize = CHANNELS;
config.inputDefs.push_back({INPUT_DATA,
"layer_0",
/* dim= */ size,
/* paraSize= */ CHANNELS});
config.inputDefs.push_back({INPUT_DATA, "layer_1_running_mean", 1, CHANNELS});
config.inputDefs.back().isStatic = true;
config.inputDefs.push_back({INPUT_DATA, "layer_2_running_var", 1, CHANNELS});
config.inputDefs.back().isStatic = true;
LayerInputConfig* input = config.layerConfig.add_inputs();
config.layerConfig.add_inputs();
config.layerConfig.add_inputs();
ImageConfig* img_conf = input->mutable_image_conf();
img_conf->set_channels(CHANNELS);
img_conf->set_img_size(IMG_SIZE);
img_conf->set_img_size_y(IMG_SIZE_Y);
img_conf->set_img_size_z(IMG_SIZE_Z);
testLayerGrad(config,
"batch_norm",
64,
/* trans= */ trans,
useGpu,
/* useWeight */ true);
}
TEST(Layer, testBatchNorm3DLayer) {
testBatchNorm3DLayer("batch_norm", false, false);
#ifndef PADDLE_ONLY_CPU
testBatchNorm3DLayer("batch_norm", false, true);
if (hl_get_cudnn_lib_version() >= int(4000)) {
testBatchNorm3DLayer("cudnn_batch_norm", false, true);
}
#endif
}
void testConvOperator(bool isDeconv) { void testConvOperator(bool isDeconv) {
TestConfig config; TestConfig config;
const int NUM_FILTERS = 16; const int NUM_FILTERS = 16;
...@@ -2008,6 +2057,31 @@ TEST(Layer, CropLayer) { ...@@ -2008,6 +2057,31 @@ TEST(Layer, CropLayer) {
} }
} }
TEST(Layer, SwitchOrderLayer) {
TestConfig config;
// config input_0
config.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 0});
LayerInputConfig* input = config.layerConfig.add_inputs();
ImageConfig* img = input->mutable_image_conf();
img->set_channels(4);
img->set_img_size(16);
img->set_img_size_y(16);
ReshapeConfig* reshape = config.layerConfig.mutable_reshape_conf();
reshape->add_height_axis(0);
reshape->add_height_axis(1);
reshape->add_height_axis(2);
reshape->add_width_axis(3);
// config softmax layer
config.layerConfig.set_type("switch_order");
config.layerConfig.set_name("switchOrderLayer");
for (auto useGpu : {false, true}) {
testLayerGrad(config, "switch_order", 100, false, useGpu, true);
}
}
vector<real> randSampling(real range, int n) { vector<real> randSampling(real range, int n) {
CHECK_GE(range, n); CHECK_GE(range, n);
vector<real> num(range); vector<real> num(range);
...@@ -2228,26 +2302,27 @@ void test3DDeConvLayer(const string& type, bool trans, bool useGpu) { ...@@ -2228,26 +2302,27 @@ void test3DDeConvLayer(const string& type, bool trans, bool useGpu) {
conv->set_stride(2); conv->set_stride(2);
conv->set_stride_y(2); conv->set_stride_y(2);
conv->set_stride_z(2); conv->set_stride_z(2);
conv->set_img_size(IMAGE_SIZE); conv->set_output_x(IMAGE_SIZE);
conv->set_img_size_y(IMAGE_SIZE_Y); conv->set_output_y(IMAGE_SIZE_Y);
conv->set_img_size_z(IMAGE_SIZE_Z); conv->set_output_z(IMAGE_SIZE_Z);
conv->set_output_x(imageSize(conv->img_size(),
conv->set_img_size(imageSize(conv->output_x(),
conv->filter_size(), conv->filter_size(),
conv->padding(), conv->padding(),
conv->stride(), conv->stride(),
true)); true));
conv->set_output_y(imageSize(conv->img_size_y(), conv->set_img_size_y(imageSize(conv->output_y(),
conv->filter_size_y(), conv->filter_size_y(),
conv->padding_y(), conv->padding_y(),
conv->stride_y(), conv->stride_y(),
true)); true));
conv->set_output_z(imageSize(conv->img_size_z(), conv->set_img_size_z(imageSize(conv->output_z(),
conv->filter_size_z(), conv->filter_size_z(),
conv->padding_z(), conv->padding_z(),
conv->stride_z(), conv->stride_z(),
true)); true));
config.layerConfig.set_size(conv->output_x() * conv->output_y() * config.layerConfig.set_size(conv->img_size() * conv->img_size_y() *
conv->output_z() * NUM_FILTERS); conv->img_size_z() * NUM_FILTERS);
conv->set_groups(1); conv->set_groups(1);
conv->set_filter_channels(conv->channels() / conv->groups()); conv->set_filter_channels(conv->channels() / conv->groups());
config.inputDefs.push_back( config.inputDefs.push_back(
......
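The 3D deconv test now fixes the output extents and derives the image extents from them via imageSize(). Assuming the usual caffeMode inverse relation imageSize = (outputSize - 1) * stride + filterSize - 2 * padding (an assumption about imageSize(), not quoted from the diff), the round trip can be checked by hand with hypothetical numbers:

#include <cassert>

// Assumed caffeMode inverse of the convolution output-size formula.
int imageSizeCaffe(int outputSize, int filterSize, int padding, int stride) {
  return (outputSize - 1) * stride + filterSize - 2 * padding;
}

int main() {
  // Hypothetical numbers: output extent 16, filter 2, stride 2, no padding.
  assert(imageSizeCaffe(16, 2, 0, 2) == 32);
  // Round trip with the forward formula: (32 - 2 + 0) / 2 + 1 == 16.
  assert((32 - 2 + 0) / 2 + 1 == 16);
  return 0;
}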
...@@ -33,14 +33,12 @@ MKLDNNMatrixPtr MKLDNNMatrix::create(MatrixPtr m, memory::primitive_desc pd) { ...@@ -33,14 +33,12 @@ MKLDNNMatrixPtr MKLDNNMatrix::create(MatrixPtr m, memory::primitive_desc pd) {
size_t width = cnts / dims[0]; size_t width = cnts / dims[0];
m = Matrix::create(height, width, false, false); m = Matrix::create(height, width, false, false);
} }
CHECK(m) << " Matrix should not be empty"; CHECK(m) << " Matrix should not be empty";
CpuMatrixPtr cpuMatrix = std::dynamic_pointer_cast<CpuMatrix>(m); CpuMatrixPtr cpuMatrix = std::dynamic_pointer_cast<CpuMatrix>(m);
CHECK(cpuMatrix) << "Only support create from CPU matrix yet"; CHECK(cpuMatrix) << "Only support create from CPU matrix yet";
CHECK_EQ(cpuMatrix->getElementCnt(), cnts) << "Count size does not match";
CHECK_EQ(cnts, m->getElementCnt()) << "Count size does not match"; return std::make_shared<MKLDNNMatrix>(cpuMatrix, pd);
return std::make_shared<MKLDNNMatrix>(
m->getData(), m->getHeight(), m->getWidth(), pd);
} }
MKLDNNMatrixPtr MKLDNNMatrix::create(MatrixPtr m, MKLDNNMatrixPtr MKLDNNMatrix::create(MatrixPtr m,
...@@ -138,7 +136,7 @@ void MKLDNNMatrix::downSpatial() { ...@@ -138,7 +136,7 @@ void MKLDNNMatrix::downSpatial() {
mkldnn_primitive_create(&result, pd.get(), nullptr, nullptr), mkldnn_primitive_create(&result, pd.get(), nullptr, nullptr),
"could not create a memory primitive"); "could not create a memory primitive");
reset(result); reset(result);
set_data_handle(getData()); set_data_handle(data_);
} }
} // namespace paddle } // namespace paddle
...@@ -30,11 +30,10 @@ typedef std::shared_ptr<MKLDNNMatrix> MKLDNNMatrixPtr; ...@@ -30,11 +30,10 @@ typedef std::shared_ptr<MKLDNNMatrix> MKLDNNMatrixPtr;
*/ */
class MKLDNNMatrix : public CpuMatrix, public mkldnn::memory { class MKLDNNMatrix : public CpuMatrix, public mkldnn::memory {
public: public:
MKLDNNMatrix(real* data, MKLDNNMatrix(CpuMatrixPtr m, mkldnn::memory::primitive_desc pd)
size_t height, : CpuMatrix(m->getData(), m->getHeight(), m->getWidth(), false),
size_t width, mkldnn::memory(pd, m->getData()),
mkldnn::memory::primitive_desc pd) m_(m) {}
: CpuMatrix(data, height, width, false), mkldnn::memory(pd, data) {}
~MKLDNNMatrix() {} ~MKLDNNMatrix() {}
...@@ -81,11 +80,29 @@ public: ...@@ -81,11 +80,29 @@ public:
void downSpatial(); void downSpatial();
/** /**
* Update the memory data handle. * set the memory data handle.
* Caution: This will not check the buffer size of the data, * Caution: This will not check the buffer size of the data,
* it should be covered by the user. * it should be covered by the user.
*/ */
void updateData(void* data) { set_data_handle(data); } void setData(real* data) {
set_data_handle(data);
CpuMatrix::setData(data);
m_.reset();
}
/**
* override Matrix::getData
* check data before return
*/
real* getData() override {
CHECK_EQ((void*)data_, get_data_handle());
return data_;
}
const real* getData() const override {
CHECK_EQ((void*)data_, get_data_handle());
return data_;
}
/** /**
* Get primitive descriptor. * Get primitive descriptor.
...@@ -143,6 +160,10 @@ protected: ...@@ -143,6 +160,10 @@ protected:
memory::format srcFmt, memory::format srcFmt,
memory::format dstFmt, memory::format dstFmt,
memory::dims dm); memory::dims dm);
private:
// save the CpuMatrixPtr in case the buffer is released outside
CpuMatrixPtr m_;
}; };
} // namespace paddle } // namespace paddle
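Holding the CpuMatrixPtr m_ inside MKLDNNMatrix pins the source buffer even if the caller drops its own reference, and setData() releases that pin once the handle points at external memory. A small lifetime sketch using plain shared_ptr (illustrative only, not the Paddle classes):

#include <iostream>
#include <memory>
#include <vector>

// Sketch: a wrapper that keeps the source buffer alive via shared ownership.
struct Wrapper {
  std::shared_ptr<std::vector<float>> holder;  // plays the role of m_
  float* data = nullptr;

  explicit Wrapper(std::shared_ptr<std::vector<float>> m)
      : holder(std::move(m)), data(holder->data()) {}

  void setData(float* external) {  // analogous to MKLDNNMatrix::setData
    data = external;
    holder.reset();  // external memory is managed elsewhere, drop the pin
  }
};

int main() {
  auto buf = std::make_shared<std::vector<float>>(4, 1.0f);
  Wrapper w(buf);
  buf.reset();                     // caller releases its reference
  std::cout << w.data[0] << "\n";  // still valid: w.holder keeps it alive
  return 0;
}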
...@@ -84,6 +84,7 @@ LAPACK_ROUTINE_EACH(DYNAMIC_LOAD_LAPACK_WRAP) ...@@ -84,6 +84,7 @@ LAPACK_ROUTINE_EACH(DYNAMIC_LOAD_LAPACK_WRAP)
namespace paddle { namespace paddle {
#ifndef PADDLE_USE_EIGEN_FOR_BLAS
template <> template <>
void gemm<float>(const CBLAS_TRANSPOSE transA, void gemm<float>(const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const CBLAS_TRANSPOSE transB,
...@@ -143,6 +144,7 @@ void gemm<double>(const CBLAS_TRANSPOSE transA, ...@@ -143,6 +144,7 @@ void gemm<double>(const CBLAS_TRANSPOSE transA,
C, C,
ldc); ldc);
} }
#endif
template <> template <>
int getrf<float>(const CBLAS_ORDER order, int getrf<float>(const CBLAS_ORDER order,
...@@ -182,6 +184,7 @@ int getri<double>(const CBLAS_ORDER order, ...@@ -182,6 +184,7 @@ int getri<double>(const CBLAS_ORDER order,
return dynload::PADDLE_DGETRI(order, N, A, lda, ipiv); return dynload::PADDLE_DGETRI(order, N, A, lda, ipiv);
} }
#ifndef PADDLE_USE_EIGEN_FOR_BLAS
template <> template <>
void axpy<float>(const int n, const float alpha, const float* x, float* y) { void axpy<float>(const int n, const float alpha, const float* x, float* y) {
cblas_saxpy(n, alpha, x, 1, y, 1); cblas_saxpy(n, alpha, x, 1, y, 1);
...@@ -201,6 +204,7 @@ template <> ...@@ -201,6 +204,7 @@ template <>
double dotProduct<double>(const int n, const double* x, const double* y) { double dotProduct<double>(const int n, const double* x, const double* y) {
return cblas_ddot(n, x, 1, y, 1); return cblas_ddot(n, x, 1, y, 1);
} }
#endif
#if defined(PADDLE_USE_MKL) || defined(PADDLE_USE_MKLML) #if defined(PADDLE_USE_MKL) || defined(PADDLE_USE_MKLML)
......
...@@ -40,7 +40,14 @@ extern "C" { ...@@ -40,7 +40,14 @@ extern "C" {
#ifndef LAPACK_FOUND #ifndef LAPACK_FOUND
extern "C" { extern "C" {
#ifndef PADDLE_USE_EIGEN_FOR_BLAS
#include <cblas.h> #include <cblas.h>
#else
typedef enum CBLAS_ORDER {
CblasRowMajor = 101,
CblasColMajor = 102
} CBLAS_ORDER;
#endif
int LAPACKE_sgetrf( int LAPACKE_sgetrf(
int matrix_layout, int m, int n, float* a, int lda, int* ipiv); int matrix_layout, int m, int n, float* a, int lda, int* ipiv);
int LAPACKE_dgetrf( int LAPACKE_dgetrf(
...@@ -56,6 +63,7 @@ int LAPACKE_dgetri( ...@@ -56,6 +63,7 @@ int LAPACKE_dgetri(
namespace paddle { namespace paddle {
#ifndef PADDLE_USE_EIGEN_FOR_BLAS
template <class T> template <class T>
void gemm(const CBLAS_TRANSPOSE transA, void gemm(const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const CBLAS_TRANSPOSE transB,
...@@ -70,6 +78,7 @@ void gemm(const CBLAS_TRANSPOSE transA, ...@@ -70,6 +78,7 @@ void gemm(const CBLAS_TRANSPOSE transA,
const T beta, const T beta,
T* C, T* C,
const int ldc); const int ldc);
#endif
template <class T> template <class T>
int getrf(const CBLAS_ORDER Order, int getrf(const CBLAS_ORDER Order,
...@@ -84,10 +93,21 @@ int getri( ...@@ -84,10 +93,21 @@ int getri(
const CBLAS_ORDER Order, const int N, T* A, const int lda, const int* ipiv); const CBLAS_ORDER Order, const int N, T* A, const int lda, const int* ipiv);
template <class T> template <class T>
void axpy(const int n, const T alpha, const T* x, T* y); void axpy(const int n, const T alpha, const T* x, T* y) {
/// y = y + alpha * x
for (int i = 0; i < n; i++) {
y[i] = y[i] + alpha * x[i];
}
}
template <class T> template <class T>
T dotProduct(const int n, const T* x, const T* y); T dotProduct(const int n, const T* x, const T* y) {
T result = static_cast<T>(0);
for (int i = 0; i < n; i++) {
result += x[i] * y[i];
}
return result;
}
template <class T> template <class T>
void vExp(const int n, const T* a, T* r); void vExp(const int n, const T* a, T* r);
......
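When PADDLE_USE_EIGEN_FOR_BLAS is defined, axpy and dotProduct fall back to the plain loops declared above instead of cblas. A tiny standalone check of that math, using local copies of the same templates:

#include <cassert>

template <class T>
void axpy(const int n, const T alpha, const T* x, T* y) {
  for (int i = 0; i < n; i++) y[i] = y[i] + alpha * x[i];  // y += alpha * x
}

template <class T>
T dotProduct(const int n, const T* x, const T* y) {
  T result = static_cast<T>(0);
  for (int i = 0; i < n; i++) result += x[i] * y[i];
  return result;
}

int main() {
  float x[3] = {1.f, 2.f, 3.f};
  float y[3] = {4.f, 5.f, 6.f};
  axpy(3, 2.f, x, y);                                // y = {6, 9, 12}
  assert(dotProduct(3, x, y) == 6.f + 18.f + 36.f);  // 60
  return 0;
}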
...@@ -28,6 +28,7 @@ limitations under the License. */ ...@@ -28,6 +28,7 @@ limitations under the License. */
#include "hl_top_k.h" #include "hl_top_k.h"
#include "paddle/utils/Logging.h" #include "paddle/utils/Logging.h"
#include "paddle/function/GemmFunctor.h"
#include "paddle/utils/ThreadLocal.h" #include "paddle/utils/ThreadLocal.h"
#include "SIMDFunctions.h" #include "SIMDFunctions.h"
...@@ -2773,24 +2774,24 @@ void CpuMatrix::mul(CpuMatrix* a, CpuMatrix* b, real scaleAB, real scaleT) { ...@@ -2773,24 +2774,24 @@ void CpuMatrix::mul(CpuMatrix* a, CpuMatrix* b, real scaleAB, real scaleT) {
CHECK(!isTransposed()) << "Not supported"; CHECK(!isTransposed()) << "Not supported";
size_t a_col, b_col, a_row, b_row; size_t a_col, b_col, a_row, b_row;
CBLAS_TRANSPOSE a_trans, b_trans; bool a_trans, b_trans;
if (!a->isTransposed()) { if (!a->isTransposed()) {
a_col = a->getWidth(); a_col = a->getWidth();
a_row = a->getHeight(); a_row = a->getHeight();
a_trans = CblasNoTrans; a_trans = false;
} else { } else {
a_col = a->getHeight(); a_col = a->getHeight();
a_row = a->getWidth(); a_row = a->getWidth();
a_trans = CblasTrans; a_trans = true;
} }
if (!b->isTransposed()) { if (!b->isTransposed()) {
b_col = b->getWidth(); b_col = b->getWidth();
b_row = b->getHeight(); b_row = b->getHeight();
b_trans = CblasNoTrans; b_trans = false;
} else { } else {
b_col = b->getHeight(); b_col = b->getHeight();
b_row = b->getWidth(); b_row = b->getWidth();
b_trans = CblasTrans; b_trans = true;
} }
CHECK_EQ(a_col, b_row); CHECK_EQ(a_col, b_row);
...@@ -2807,7 +2808,7 @@ void CpuMatrix::mul(CpuMatrix* a, CpuMatrix* b, real scaleAB, real scaleT) { ...@@ -2807,7 +2808,7 @@ void CpuMatrix::mul(CpuMatrix* a, CpuMatrix* b, real scaleAB, real scaleT) {
int lda = a->getStride(); int lda = a->getStride();
int ldb = b->getStride(); int ldb = b->getStride();
int ldc = getStride(); int ldc = getStride();
gemm<real>( BlasGemm<DEVICE_TYPE_CPU, real>::compute(
a_trans, b_trans, M, N, K, scaleAB, A, lda, B, ldb, scaleT, C, ldc); a_trans, b_trans, M, N, K, scaleAB, A, lda, B, ldb, scaleT, C, ldc);
} }
......
...@@ -1616,6 +1616,10 @@ public: ...@@ -1616,6 +1616,10 @@ public:
}; };
class CpuMatrix : public Matrix { class CpuMatrix : public Matrix {
private:
MatrixPtr sftmaxSum_;
MatrixPtr sftmaxDot_;
public: public:
CpuMatrix(size_t height, size_t width, bool trans = false); CpuMatrix(size_t height, size_t width, bool trans = false);
CpuMatrix(real* data, size_t height, size_t width, bool trans = false) CpuMatrix(real* data, size_t height, size_t width, bool trans = false)
......
...@@ -14,27 +14,31 @@ function(op_library TARGET) ...@@ -14,27 +14,31 @@ function(op_library TARGET)
cmake_parse_arguments(op_library "${options}" "${oneValueArgs}" cmake_parse_arguments(op_library "${options}" "${oneValueArgs}"
"${multiValueArgs}" ${ARGN}) "${multiValueArgs}" ${ARGN})
foreach(src ${op_library_SRCS}) list(LENGTH op_library_SRCS op_library_SRCS_len)
if (${src} MATCHES ".*\\.cu$") if (${op_library_SRCS_len} EQUAL 0)
list(APPEND cu_srcs ${src}) if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.cc)
elseif(${src} MATCHES ".*\\.cc$") list(APPEND cc_srcs ${TARGET}.cc)
list(APPEND cc_srcs ${src})
else()
message(FATAL_ERROR "${TARGET} Source file ${src} should only be .cc or .cu")
endif() endif()
endforeach() if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.cu)
list(APPEND cu_srcs ${TARGET}.cu)
endif()
else()
foreach(src ${op_library_SRCS})
if (${src} MATCHES ".*\\.cu$")
list(APPEND cu_srcs ${src})
elseif(${src} MATCHES ".*\\.cc$")
list(APPEND cc_srcs ${src})
else()
message(FATAL_ERROR "${TARGET} Source file ${src} should only be .cc or .cu")
endif()
endforeach()
endif()
list(LENGTH cc_srcs cc_srcs_len) list(LENGTH cc_srcs cc_srcs_len)
if (${cc_srcs_len} EQUAL 0) if (${cc_srcs_len} EQUAL 0)
message(FATAL_ERROR "The op library ${TARGET} should contains at least one .cc file") message(FATAL_ERROR "The op library ${TARGET} should contains at least one .cc file")
endif() endif()
list(LENGTH cu_srcs cu_srcs_len)
list(LENGTH op_library_DEPS dep_len)
if (${cu_srcs_len} EQUAL 0 AND ${dep_len} EQUAL 0)
message(WARNING "The op library ${TARGET} not support GPU!")
endif()
if (WITH_GPU) if (WITH_GPU)
nv_library(${TARGET} SRCS ${cc_srcs} ${cu_srcs} DEPS ${op_library_DEPS} nv_library(${TARGET} SRCS ${cc_srcs} ${cu_srcs} DEPS ${op_library_DEPS}
${op_common_deps}) ${op_common_deps})
...@@ -46,22 +50,22 @@ endfunction() ...@@ -46,22 +50,22 @@ endfunction()
add_subdirectory(math) add_subdirectory(math)
list(REMOVE_ITEM GENERAL_OPS set(DEPS_OPS
net_op identity_op
minus_op minus_op
mul_op mul_op
recurrent_op recurrent_op
scale_op) scale_op)
op_library(identity_op DEPS scale_op)
op_library(net_op SRCS net_op.cc) op_library(minus_op DEPS scale_op)
op_library(minus_op SRCS minus_op.cc minus_op.cu DEPS scale_op) op_library(mul_op DEPS math_function)
op_library(mul_op SRCS mul_op.cc mul_op.cu DEPS math_function)
op_library(recurrent_op SRCS recurrent_op.cc rnn/recurrent_op_utils.cc op_library(recurrent_op SRCS recurrent_op.cc rnn/recurrent_op_utils.cc
DEPS framework_proto tensor operator net_op) DEPS framework_proto tensor operator net_op)
op_library(scale_op SRCS scale_op.cc scale_op.cu DEPS net_op) op_library(scale_op DEPS net_op)
list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS})
foreach(src ${GENERAL_OPS}) foreach(src ${GENERAL_OPS})
op_library(${src} SRCS ${src}.cc ${src}.cu) op_library(${src})
endforeach() endforeach()
set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library") set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library")
......
...@@ -57,7 +57,6 @@ class AddOpGrad : public framework::OperatorWithKernel { ...@@ -57,7 +57,6 @@ class AddOpGrad : public framework::OperatorWithKernel {
} // namespace paddle } // namespace paddle
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP(add_two, ops::AddOp, ops::AddOpMaker, add_two_grad, ops::AddOpGrad); REGISTER_OP(add, ops::AddOp, ops::AddOpMaker, add_grad, ops::AddOpGrad);
REGISTER_OP_CPU_KERNEL(add_two, REGISTER_OP_CPU_KERNEL(add, ops::AddKernel<paddle::platform::CPUPlace, float>);
ops::AddKernel<paddle::platform::CPUPlace, float>);
...@@ -12,10 +12,7 @@ ...@@ -12,10 +12,7 @@
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#define EIGEN_USE_GPU
#include "paddle/framework/op_registry.h"
#include "paddle/operators/add_op.h" #include "paddle/operators/add_op.h"
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(add_two, REGISTER_OP_GPU_KERNEL(add, ops::AddKernel<paddle::platform::GPUPlace, float>);
ops::AddKernel<paddle::platform::GPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/concat_op.h"
#include <vector>
namespace paddle {
namespace operators {
using framework::Tensor;
class ConcatOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
auto ins = ctx.MultiInput<framework::Tensor>("X");
auto *out = ctx.Output<framework::Tensor>("Out");
size_t axis = static_cast<size_t>(ctx.Attr<int>("axis"));
size_t n = ins.size();
PADDLE_ENFORCE_GT(n, 1, "The number of input tensors should be > 1.");
auto out_dims = ins[0]->dims();
size_t in_zero_dims_size = out_dims.size();
for (size_t i = 1; i < n; i++) {
for (size_t j = 0; j < in_zero_dims_size; j++) {
if (j == axis) {
out_dims[axis] += ins[i]->dims()[j];
continue;
}
PADDLE_ENFORCE_EQ(out_dims[j], ins[i]->dims()[j],
"Input tensors should have the same "
"elements except the specify axis.")
}
}
out->Resize(out_dims);
}
};
class ConcatOpMaker : public framework::OpProtoAndCheckerMaker {
public:
ConcatOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "the input tensors of concat operator.").AsDuplicable();
AddOutput("Out", "the output tensor of concat operator.");
AddComment(R"DOC(
Join the input tensors along the given axis.
Examples:
Input[0] = [[1,2],[3,4]]
Input[1] = [[5,6]]
axis = 0
Output = [[1,2],
[3,4],
[5,6]]
)DOC");
AddAttr<int>("axis", "The axis which the inputs will be joined with.")
.SetDefault(0);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(concat, ops::ConcatOp, ops::ConcatOpMaker)
REGISTER_OP_CPU_KERNEL(concat,
ops::ConcatKernel<paddle::platform::CPUPlace, float>)
...@@ -4,7 +4,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); ...@@ -4,7 +4,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, distributed under the License is distributed on an "AS IS" BASIS,
...@@ -13,8 +13,7 @@ See the License for the specific language governing permissions and ...@@ -13,8 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#define EIGEN_USE_GPU #define EIGEN_USE_GPU
#include "paddle/operators/scatter_op.h" #include "paddle/operators/concat_op.h"
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(scatter, // TODO(Yancey1989) Add GPU kernel
ops::ScatterOpKernel<paddle::platform::GPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <vector>
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
template <typename Place, typename T>
class ConcatKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto ins = ctx.MultiInput<framework::Tensor>("X");
auto* out = ctx.Output<framework::Tensor>("Out");
int64_t axis = static_cast<int64_t>(ctx.Attr<int>("axis"));
size_t n = ins.size();
size_t output_axis_dim = 0;
size_t before = 1, after = 1;
for (size_t i = 0; i < n; i++) {
output_axis_dim += ins[i]->dims()[axis];
}
auto& input_zero = ins[0];
for (int64_t i = 0; i < input_zero->dims().size(); i++) {
if (i == axis) {
continue;
}
if (i < axis) {
before *= input_zero->dims()[i];
} else {
after *= input_zero->dims()[i];
}
}
size_t output_offset = 0;
for (size_t i = 0; i < n; i++) {
auto& in = ins[i];
auto axis_dim = in->dims()[axis];
for (size_t j = 0; j < before; j++) {
size_t len = axis_dim * after * sizeof(T);
const T* src = in->data<T>() + axis_dim * after * j;
T* out_data = out->mutable_data<T>(platform::CPUPlace());
T* dest = out_data + output_offset + output_axis_dim * after * j;
memcpy(dest, src, len);
}
output_offset += axis_dim * after;
}
}
};
} // namespace operators
} // namespace paddle
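The CPU concat kernel above copies each input in `before` row blocks of `after` contiguous elements per axis slice, advancing output_offset between inputs. A self-contained sketch of the same offset arithmetic for two hypothetical inputs joined along axis 1:

#include <cstddef>
#include <cstring>
#include <iostream>
#include <vector>

// Sketch: concatenate A (2x2) and B (2x1) along axis 1 into OUT (2x3),
// using the same before/after/offset bookkeeping as the kernel.
int main() {
  std::vector<float> a = {1, 2, 3, 4};  // rows: {1,2}, {3,4}
  std::vector<float> b = {5, 6};        // rows: {5}, {6}
  size_t before = 2, after = 1;         // axis = 1 on 2-D inputs
  size_t out_axis_dim = 2 + 1;
  std::vector<float> out(before * out_axis_dim * after);

  size_t offset = 0;
  const std::vector<float>* ins[2] = {&a, &b};
  size_t axis_dims[2] = {2, 1};
  for (int i = 0; i < 2; ++i) {
    for (size_t j = 0; j < before; ++j) {
      std::memcpy(out.data() + offset + out_axis_dim * after * j,
                  ins[i]->data() + axis_dims[i] * after * j,
                  axis_dims[i] * after * sizeof(float));
    }
    offset += axis_dims[i] * after;
  }
  for (float v : out) std::cout << v << " ";  // prints: 1 2 5 3 4 6
  std::cout << "\n";
  return 0;
}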
...@@ -119,10 +119,10 @@ class CosSimOpGrad : public framework::OperatorWithKernel { ...@@ -119,10 +119,10 @@ class CosSimOpGrad : public framework::OperatorWithKernel {
PADDLE_ENFORCE(x_dims[0] == y_dims[0] || y_dims[0] == 1, PADDLE_ENFORCE(x_dims[0] == y_dims[0] || y_dims[0] == 1,
"The 1st dimension of Input(Y) must be equal to Input(X) or" "The 1st dimension of Input(Y) must be equal to Input(X) or"
" just 1 (which will be broadcasted to match Input(X))."); " just 1 (which will be broadcasted to match Input(X)).");
auto target_xnorm_dims = framework::make_ddim({x_dims[0], 1}), auto target_xnorm_dims = framework::make_ddim({x_dims[0], 1});
auto target_ynorm_dims = framework::make_ddim({y_dims[0], 1}), auto target_ynorm_dims = framework::make_ddim({y_dims[0], 1});
PADDLE_ENFORCE_EQ(xnorm_dims, target_xnorm_dims, PADDLE_ENFORCE_EQ(xnorm_dims, target_xnorm_dims,
"Shape of Input(XNorm) must be [X.Dim(0), 1]."); "Shape of Input(XNorm) must be [X.Dim(0), 1].");
PADDLE_ENFORCE_EQ(ynorm_dims, target_ynorm_dims, PADDLE_ENFORCE_EQ(ynorm_dims, target_ynorm_dims,
"Shape of Input(YNorm) must be [Y.Dim(0), 1]."); "Shape of Input(YNorm) must be [Y.Dim(0), 1].");
PADDLE_ENFORCE_EQ(out_dims, target_xnorm_dims, PADDLE_ENFORCE_EQ(out_dims, target_xnorm_dims,
......
...@@ -23,6 +23,9 @@ using Tensor = framework::Tensor; ...@@ -23,6 +23,9 @@ using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor, template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex> typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>; using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
template <typename Place, typename T> template <typename Place, typename T>
class CosSimKernel : public framework::OpKernel { class CosSimKernel : public framework::OpKernel {
...@@ -41,12 +44,11 @@ class CosSimKernel : public framework::OpKernel { ...@@ -41,12 +44,11 @@ class CosSimKernel : public framework::OpKernel {
// convert Tensor to Eigen Tensor // convert Tensor to Eigen Tensor
int rows_x = in_x->dims()[0]; int rows_x = in_x->dims()[0];
int rows_y = in_y->dims()[0]; int rows_y = in_y->dims()[0];
int cols = framework::product(in_x->dims()) / rows_x;
auto x = EigenMatrix<T>::Reshape(*in_x, 1); auto x = EigenMatrix<T>::Reshape(*in_x, 1);
auto y = EigenMatrix<T>::Reshape(*in_y, 1); auto y = EigenMatrix<T>::Reshape(*in_y, 1);
auto z = EigenMatrix<T>::From(*out_z); auto z = EigenVector<T>::Flatten(*out_z);
auto x_norm = EigenMatrix<T>::From(*out_x_norm); auto x_norm = EigenVector<T>::Flatten(*out_x_norm);
auto y_norm = EigenMatrix<T>::From(*out_y_norm); auto y_norm = EigenVector<T>::Flatten(*out_y_norm);
// compute // compute
auto place = context.GetEigenDevice<Place>(); auto place = context.GetEigenDevice<Place>();
...@@ -81,10 +83,10 @@ class CosSimGradKernel : public framework::OpKernel { ...@@ -81,10 +83,10 @@ class CosSimGradKernel : public framework::OpKernel {
// convert Tensor to Eigen Tensor // convert Tensor to Eigen Tensor
auto x = EigenMatrix<T>::Reshape(*in_x, 1); auto x = EigenMatrix<T>::Reshape(*in_x, 1);
auto y = EigenMatrix<T>::Reshape(*in_y, 1); auto y = EigenMatrix<T>::Reshape(*in_y, 1);
auto z = EigenMatrix<T>::From(*in_z); auto z = EigenMatrix<T>::Reshape(*in_z, 1);
auto x_norm = EigenMatrix<T>::From(*in_x_norm); auto x_norm = EigenMatrix<T>::Reshape(*in_x_norm, 1);
auto y_norm = EigenMatrix<T>::From(*in_y_norm); auto y_norm = EigenMatrix<T>::Reshape(*in_y_norm, 1);
auto dz = EigenMatrix<T>::From(*in_grad_z); auto dz = EigenMatrix<T>::Reshape(*in_grad_z, 1);
// compute gradient // compute gradient
int rows_x = in_x->dims()[0]; int rows_x = in_x->dims()[0];
...@@ -108,16 +110,18 @@ class CosSimGradKernel : public framework::OpKernel { ...@@ -108,16 +110,18 @@ class CosSimGradKernel : public framework::OpKernel {
// compute dy // compute dy
if (out_grad_y) { if (out_grad_y) {
out_grad_y->mutable_data<T>(context.GetPlace()); out_grad_y->mutable_data<T>(context.GetPlace());
auto dy = EigenMatrix<T>::Reshape(*out_grad_y, 1) auto grad = auto dy = EigenMatrix<T>::Reshape(*out_grad_y, 1);
x / norm_prod_bcast - z_bcast * y / y_snorm_bcast; auto grad = x / norm_prod_bcast - z_bcast * y / y_snorm_bcast;
dy.device(place) = dz_bcast * grad; dy.device(place) = dz_bcast * grad;
} }
} else { } else {
Eigen::DSizes<int, 2> bcast_rows(rows_x, 1); Eigen::DSizes<int, 2> bcast_rows(rows_x, 1);
Eigen::DSizes<int, 2> bcast_rows_cols(rows_x, 1); Eigen::DSizes<int, 2> bcast_rows_cols(rows_x, cols);
auto y_bcast = y.broadcast(bcast_rows); auto y_bcast = y.broadcast(bcast_rows);
auto y_snorm_bcast = y_norm.square().eval().broadcast(bcast_rows_cols); auto y_snorm_bcast = y_norm.square().eval().broadcast(bcast_rows_cols);
auto norm_prod_bcast = x_norm * y_norm.broadcast(bcast_rows_cols); auto norm_prod_bcast = (x_norm * y_norm.eval().broadcast(bcast_rows))
.eval()
.broadcast(bcast_cols);
// compute dx // compute dx
if (out_grad_x) { if (out_grad_x) {
out_grad_x->mutable_data<T>(context.GetPlace()); out_grad_x->mutable_data<T>(context.GetPlace());
......
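Per row, CosSimGradKernel implements d cos(x, y)/dx = y / (|x||y|) - cos(x, y) * x / |x|^2 (and symmetrically for y). A short numerical check of that formula against central differences, as a standalone sketch rather than the Paddle kernel:

#include <cassert>
#include <cmath>
#include <vector>

double cosSim(const std::vector<double>& x, const std::vector<double>& y) {
  double xy = 0, xx = 0, yy = 0;
  for (size_t i = 0; i < x.size(); ++i) {
    xy += x[i] * y[i];
    xx += x[i] * x[i];
    yy += y[i] * y[i];
  }
  return xy / (std::sqrt(xx) * std::sqrt(yy));
}

int main() {
  std::vector<double> x = {1.0, 2.0, 3.0}, y = {0.5, -1.0, 2.0};
  double z = cosSim(x, y);
  double xnorm = std::sqrt(1 + 4 + 9), ynorm = std::sqrt(0.25 + 1 + 4);
  for (size_t i = 0; i < x.size(); ++i) {
    // analytic: dz/dx_i = y_i / (|x||y|) - z * x_i / |x|^2
    double analytic = y[i] / (xnorm * ynorm) - z * x[i] / (xnorm * xnorm);
    // numeric: central difference
    std::vector<double> xp = x, xm = x;
    xp[i] += 1e-6;
    xm[i] -= 1e-6;
    double numeric = (cosSim(xp, y) - cosSim(xm, y)) / 2e-6;
    assert(std::fabs(analytic - numeric) < 1e-6);
  }
  return 0;
}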
...@@ -19,19 +19,19 @@ template <typename T> ...@@ -19,19 +19,19 @@ template <typename T>
class CPUGaussianRandomKernel : public framework::OpKernel { class CPUGaussianRandomKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
float mean = context.GetAttr<float>("mean"); float mean = context.Attr<float>("mean");
float std = context.GetAttr<float>("std"); float std = context.Attr<float>("std");
auto* tensor = context.Output<framework::Tensor>("Out"); auto* tensor = context.Output<framework::Tensor>("Out");
T* data = tensor->mutable_data<T>(context.GetPlace()); T* data = tensor->mutable_data<T>(context.GetPlace());
unsigned int seed = static_cast<unsigned int>(context.GetAttr<int>("seed")); unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
std::minstd_rand engine; std::minstd_rand engine;
if (seed == 0) { if (seed == 0) {
seed = std::random_device()(); seed = std::random_device()();
} }
engine.seed(seed); engine.seed(seed);
std::normal_distribution<T> dist(mean, std); std::normal_distribution<T> dist(mean, std);
int64_t size = framework::product(tensor->dims()); int64_t size = tensor->numel();
for (int64_t i = 0; i < size; ++i) { for (int64_t i = 0; i < size; ++i) {
data[i] = dist(engine); data[i] = dist(engine);
} }
...@@ -45,7 +45,7 @@ class GaussianRandomOp : public framework::OperatorWithKernel { ...@@ -45,7 +45,7 @@ class GaussianRandomOp : public framework::OperatorWithKernel {
protected: protected:
void InferShape(const framework::InferShapeContext& context) const override { void InferShape(const framework::InferShapeContext& context) const override {
auto* tensor = context.Output<framework::Tensor>("Out"); auto* tensor = context.Output<framework::Tensor>("Out");
auto dims = GetAttr<std::vector<int>>("dims"); auto dims = Attr<std::vector<int>>("dims");
std::vector<int64_t> temp; std::vector<int64_t> temp;
temp.reserve(dims.size()); temp.reserve(dims.size());
for (auto dim : dims) { for (auto dim : dims) {
......
...@@ -42,16 +42,16 @@ class GPUGaussianRandomKernel : public framework::OpKernel { ...@@ -42,16 +42,16 @@ class GPUGaussianRandomKernel : public framework::OpKernel {
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto* tensor = context.Output<framework::Tensor>("Out"); auto* tensor = context.Output<framework::Tensor>("Out");
T* data = tensor->mutable_data<T>(context.GetPlace()); T* data = tensor->mutable_data<T>(context.GetPlace());
unsigned int seed = static_cast<unsigned int>(context.GetAttr<int>("seed")); unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
if (seed == 0) { if (seed == 0) {
std::random_device rd; std::random_device rd;
seed = rd(); seed = rd();
} }
T mean = static_cast<T>(context.GetAttr<float>("mean")); T mean = static_cast<T>(context.Attr<float>("mean"));
T std = static_cast<T>(context.GetAttr<float>("std")); T std = static_cast<T>(context.Attr<float>("std"));
thrust::counting_iterator<unsigned int> index_sequence_begin(0); thrust::counting_iterator<unsigned int> index_sequence_begin(0);
ssize_t N = framework::product(tensor->dims()); int64_t size = tensor->numel();
thrust::transform(index_sequence_begin, index_sequence_begin + N, thrust::transform(index_sequence_begin, index_sequence_begin + size,
thrust::device_ptr<T>(data), thrust::device_ptr<T>(data),
GaussianGenerator<T>(mean, std, seed)); GaussianGenerator<T>(mean, std, seed));
} }
......
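The two kernels in the hunks above share the same seeding rule: the `seed` attribute is used as-is unless it is 0, in which case a seed is drawn from `std::random_device`. A minimal, standalone C++ sketch of the CPU-side pattern (standard library only; the function name and sizes are illustrative, not part of Paddle):

```c++
#include <cstdint>
#include <iostream>
#include <random>
#include <vector>

// Fill a buffer with N(mean, std) samples, mimicking the seeding rule of the
// CPU kernel above: a zero seed means "pick a non-deterministic seed".
std::vector<float> GaussianFill(int64_t size, float mean, float std,
                                unsigned int seed) {
  if (seed == 0) {
    seed = std::random_device()();  // non-deterministic fallback
  }
  std::minstd_rand engine;
  engine.seed(seed);
  std::normal_distribution<float> dist(mean, std);
  std::vector<float> data(size);
  for (auto& v : data) {
    v = dist(engine);
  }
  return data;
}

int main() {
  auto samples = GaussianFill(/*size=*/4, /*mean=*/0.f, /*std=*/1.f, /*seed=*/1);
  for (float v : samples) std::cout << v << " ";
  std::cout << "\n";
  return 0;
}
```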
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/net_op.h"
#include "paddle/operators/scale_op.h"
namespace paddle {
namespace operators {
// The identity operator is an alias of the scale operator. This is also an
// example for creating an alias for an existing operator.
template <typename AttrType>
class IdentityOpMaker : public framework::OpProtoAndCheckerMaker {
public:
IdentityOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The input tensor of identity operator.");
AddOutput("Out", "The output tensor of identity operator.");
AddComment(R"DOC(
The identity operator is an alias of the scale operator
with the attribute scale fixed to 1.0.
)DOC");
}
};
template <typename AttrType>
class IdentityOp : public NetOp {
public:
IdentityOp(const std::string &type, const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: NetOp(type, inputs, outputs, attrs) {
AppendOp(framework::OpRegistry::CreateOp(
"scale", {{"X", {Input("X")}}}, {{"Out", {Output("Out")}}},
{{"scale", static_cast<AttrType>(1)}}));
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(identity, ops::IdentityOp<float>,
ops::IdentityOpMaker<float>);
...@@ -70,7 +70,7 @@ class LookupTableCUDAKernel : public framework::OpKernel { ...@@ -70,7 +70,7 @@ class LookupTableCUDAKernel : public framework::OpKernel {
size_t N = table_t->dims()[0]; size_t N = table_t->dims()[0];
size_t D = table_t->dims()[1]; size_t D = table_t->dims()[1];
size_t K = product(ids_t->dims()); size_t K = ids_t->numel();
auto ids = ids_t->data<int32_t>(); auto ids = ids_t->data<int32_t>();
auto table = table_t->data<T>(); auto table = table_t->data<T>();
auto output = output_t->mutable_data<T>(context.GetPlace()); auto output = output_t->mutable_data<T>(context.GetPlace());
...@@ -91,7 +91,7 @@ class LookupTableGradCUDAKernel : public framework::OpKernel { ...@@ -91,7 +91,7 @@ class LookupTableGradCUDAKernel : public framework::OpKernel {
int N = d_table_t->dims()[0]; int N = d_table_t->dims()[0];
int D = d_table_t->dims()[1]; int D = d_table_t->dims()[1];
int K = product(ids_t->dims()); int K = ids_t->numel();
const int32_t* ids = ids_t->data<int32_t>(); const int32_t* ids = ids_t->data<int32_t>();
const T* d_output = d_output_t->data<T>(); const T* d_output = d_output_t->data<T>();
T* d_table = d_table_t->mutable_data<T>(context.GetPlace()); T* d_table = d_table_t->mutable_data<T>(context.GetPlace());
......
...@@ -35,7 +35,7 @@ class LookupTableKernel : public framework::OpKernel { ...@@ -35,7 +35,7 @@ class LookupTableKernel : public framework::OpKernel {
auto ids = ids_t->data<int32_t>(); auto ids = ids_t->data<int32_t>();
auto table = table_t->data<T>(); auto table = table_t->data<T>();
auto output = output_t->mutable_data<T>(context.GetPlace()); auto output = output_t->mutable_data<T>(context.GetPlace());
for (ssize_t i = 0; i < product(ids_t->dims()); ++i) { for (int64_t i = 0; i < ids_t->numel(); ++i) {
PADDLE_ENFORCE_LT(ids[i], N); PADDLE_ENFORCE_LT(ids[i], N);
PADDLE_ENFORCE_GE(ids[i], 0); PADDLE_ENFORCE_GE(ids[i], 0);
memcpy(output + i * D, table + ids[i] * D, D * sizeof(T)); memcpy(output + i * D, table + ids[i] * D, D * sizeof(T));
...@@ -61,7 +61,7 @@ class LookupTableGradKernel : public framework::OpKernel { ...@@ -61,7 +61,7 @@ class LookupTableGradKernel : public framework::OpKernel {
t.device(context.GetEigenDevice<platform::CPUPlace>()) = t.device(context.GetEigenDevice<platform::CPUPlace>()) =
t.constant(static_cast<T>(0)); t.constant(static_cast<T>(0));
for (ssize_t i = 0; i < product(ids_t->dims()); ++i) { for (int64_t i = 0; i < ids_t->numel(); ++i) {
PADDLE_ENFORCE_LT(ids[i], N); PADDLE_ENFORCE_LT(ids[i], N);
PADDLE_ENFORCE_GE(ids[i], 0); PADDLE_ENFORCE_GE(ids[i], 0);
for (int j = 0; j < D; ++j) { for (int j = 0; j < D; ++j) {
......
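The lookup-table kernels above perform a row gather/scatter: each id selects one row of D elements in the [N, D] table. A standalone C++ sketch of the forward gather, mirroring the `memcpy` loop in the CPU kernel hunk (function name and data are illustrative only):

```c++
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Gather rows of a row-major [N, D] table according to ids, as in the CPU
// forward kernel above: output[i, :] = table[ids[i], :].
void LookupRows(const std::vector<float>& table, int64_t D,
                const std::vector<int32_t>& ids, std::vector<float>* output) {
  output->resize(ids.size() * D);
  for (size_t i = 0; i < ids.size(); ++i) {
    std::memcpy(output->data() + i * D, table.data() + ids[i] * D,
                D * sizeof(float));
  }
}

int main() {
  // Table with N = 3 rows and D = 2 columns.
  std::vector<float> table = {0, 1, 10, 11, 20, 21};
  std::vector<int32_t> ids = {2, 0, 2};
  std::vector<float> out;
  LookupRows(table, /*D=*/2, ids, &out);
  for (float v : out) std::cout << v << " ";  // 20 21 0 1 20 21
  std::cout << "\n";
  return 0;
}
```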
if(WITH_GPU) if(WITH_GPU)
nv_library(math_function SRCS math_function.cc math_function.cu DEPS cblas device_context) nv_library(math_function SRCS math_function.cc math_function.cu im2col.cc
im2col.cu DEPS cblas device_context)
else() else()
cc_library(math_function SRCS math_function.cc DEPS cblas device_context) cc_library(math_function SRCS math_function.cc im2col.cc DEPS cblas device_context)
endif() endif()
nv_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) nv_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor)
cc_test(im2col_test SRCS im2col_test.cc DEPS math_function tensor)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/im2col.h"
namespace paddle {
namespace operators {
namespace math {
/*
* im = [input_channels, input_height, input_width]
* col =
* [input_channels, filter_height, filter_width, output_height, output_width]
*/
template <class T>
class Im2ColFunctor<paddle::operators::math::ColFormat::kCFO,
platform::CPUPlace, T> {
public:
void operator()(const framework::Tensor& im, framework::Tensor& col,
int stride_height, int stride_width, int padding_height,
int padding_width, platform::DeviceContext* context) {
PADDLE_ENFORCE(im.dims().size() == 3);
PADDLE_ENFORCE(col.dims().size() == 5);
int input_channels = im.dims()[0];
int input_height = im.dims()[1];
int input_width = im.dims()[2];
int filter_height = col.dims()[1];
int filter_width = col.dims()[2];
int output_height = col.dims()[3];
int output_width = col.dims()[4];
int channels_col = input_channels * filter_height * filter_width;
const T* im_data = im.data<T>();
T* col_data = col.data<T>();
for (int c = 0; c < channels_col; ++c) {
int w_offset = c % filter_width;
int h_offset = (c / filter_width) % filter_height;
int c_im = c / filter_width / filter_height;
for (int h = 0; h < output_height; ++h) {
for (int w = 0; w < output_width; ++w) {
int im_row_idx = h * stride_height + h_offset;
int im_col_idx = w * stride_width + w_offset;
if ((im_row_idx - padding_height) < 0 ||
(im_row_idx - padding_height) >= input_height ||
(im_col_idx - padding_width) < 0 ||
(im_col_idx - padding_width) >= input_width) {
col_data[(c * output_height + h) * output_width + w] = T(0);
} else {
im_row_idx += c_im * input_height - padding_height;
im_col_idx -= padding_width;
col_data[(c * output_height + h) * output_width + w] =
im_data[im_row_idx * input_width + im_col_idx];
}
}
}
}
}
};
/*
* im = [input_channels, input_height, input_width]
* col =
* [input_channels, filter_height, filter_width, output_height, output_width]
*/
template <class T>
class Col2ImFunctor<paddle::operators::math::ColFormat::kCFO,
platform::CPUPlace, T> {
public:
void operator()(framework::Tensor& im, const framework::Tensor& col,
int stride_height, int stride_width, int padding_height,
int padding_width, platform::DeviceContext* context) {
PADDLE_ENFORCE(im.dims().size() == 3);
PADDLE_ENFORCE(col.dims().size() == 5);
int input_channels = im.dims()[0];
int input_height = im.dims()[1];
int input_width = im.dims()[2];
int filter_height = col.dims()[1];
int filter_width = col.dims()[2];
int output_height = col.dims()[3];
int output_width = col.dims()[4];
int channels_col = input_channels * filter_height * filter_width;
T* im_data = im.data<T>();
const T* col_data = col.data<T>();
for (int c = 0; c < channels_col; ++c) {
int w_offset = c % filter_width;
int h_offset = (c / filter_width) % filter_height;
int c_im = c / filter_width / filter_height;
for (int h = 0; h < output_height; ++h) {
for (int w = 0; w < output_width; ++w) {
int im_row_idx = h * stride_height + h_offset;
int im_col_idx = w * stride_width + w_offset;
if ((im_row_idx - padding_height) >= 0 &&
(im_row_idx - padding_height) < input_height &&
(im_col_idx - padding_width) >= 0 &&
(im_col_idx - padding_width) < input_width) {
im_row_idx += c_im * input_height - padding_height;
im_col_idx -= padding_width;
im_data[im_row_idx * input_width + im_col_idx] +=
col_data[(c * output_height + h) * output_width + w];
}
}
}
}
}
};
template class Im2ColFunctor<paddle::operators::math::ColFormat::kCFO,
platform::CPUPlace, float>;
template class Im2ColFunctor<paddle::operators::math::ColFormat::kCFO,
platform::CPUPlace, double>;
template class Col2ImFunctor<paddle::operators::math::ColFormat::kCFO,
platform::CPUPlace, float>;
template class Col2ImFunctor<paddle::operators::math::ColFormat::kCFO,
platform::CPUPlace, double>;
/*
* im = [input_channels, input_height, input_width]
* col =
* [output_height, output_width, input_channels, filter_height, filter_width]
*/
template <class T>
class Im2ColFunctor<paddle::operators::math::ColFormat::kOCF,
platform::CPUPlace, T> {
public:
void operator()(const framework::Tensor& im, framework::Tensor& col,
int stride_height, int stride_width, int padding_height,
int padding_width, platform::DeviceContext* context) {
PADDLE_ENFORCE(im.dims().size() == 3);
PADDLE_ENFORCE(col.dims().size() == 5);
int input_channels = im.dims()[0];
int input_height = im.dims()[1];
int input_width = im.dims()[2];
int filter_height = col.dims()[3];
int filter_width = col.dims()[4];
int output_height = col.dims()[0];
int output_width = col.dims()[1];
const T* im_data = im.data<T>();
T* col_data = col.data<T>();
for (int col_row_idx = 0; col_row_idx < output_height; ++col_row_idx) {
for (int col_col_idx = 0; col_col_idx < output_width; ++col_col_idx) {
for (int channel = 0; channel < input_channels; ++channel) {
for (int filter_row_idx = 0; filter_row_idx < filter_height;
++filter_row_idx) {
for (int filter_col_idx = 0; filter_col_idx < filter_width;
++filter_col_idx) {
int im_row_offset =
col_row_idx * stride_height + filter_row_idx - padding_height;
int im_col_offset =
col_col_idx * stride_width + filter_col_idx - padding_width;
int col_offset = (((col_row_idx * output_width + col_col_idx) *
input_channels +
channel) *
filter_height +
filter_row_idx) *
filter_width +
filter_col_idx;
if (im_row_offset < 0 || im_row_offset >= input_height ||
im_col_offset < 0 || im_col_offset >= input_width) {
col_data[col_offset] = T(0);
} else {
int im_offset =
(channel * input_height + im_row_offset) * input_width +
im_col_offset;
col_data[col_offset] = im_data[im_offset];
}
}
}
}
}
}
}
};
/*
* im = [input_channels, input_height, input_width]
* col =
* [output_height, output_width, input_channels, filter_height, filter_width]
*/
template <class T>
class Col2ImFunctor<paddle::operators::math::ColFormat::kOCF,
platform::CPUPlace, T> {
public:
void operator()(framework::Tensor& im, const framework::Tensor& col,
int stride_height, int stride_width, int padding_height,
int padding_width, platform::DeviceContext* context) {
PADDLE_ENFORCE(im.dims().size() == 3);
PADDLE_ENFORCE(col.dims().size() == 5);
int input_channels = im.dims()[0];
int input_height = im.dims()[1];
int input_width = im.dims()[2];
int filter_height = col.dims()[3];
int filter_width = col.dims()[4];
int output_height = col.dims()[0];
int output_width = col.dims()[1];
T* im_data = im.data<T>();
const T* col_data = col.data<T>();
for (int col_row_idx = 0; col_row_idx < output_height; ++col_row_idx) {
for (int col_col_idx = 0; col_col_idx < output_width; ++col_col_idx) {
for (int channel = 0; channel < input_channels; ++channel) {
for (int filter_row_idx = 0; filter_row_idx < filter_height;
++filter_row_idx) {
for (int filter_col_idx = 0; filter_col_idx < filter_width;
++filter_col_idx) {
int im_row_offset =
col_row_idx * stride_height + filter_row_idx - padding_height;
int im_col_offset =
col_col_idx * stride_width + filter_col_idx - padding_width;
int col_offset = (((col_row_idx * output_width + col_col_idx) *
input_channels +
channel) *
filter_height +
filter_row_idx) *
filter_width +
filter_col_idx;
if (im_row_offset >= 0 && im_row_offset < input_height &&
im_col_offset >= 0 && im_col_offset < input_width) {
int im_offset =
(channel * input_height + im_row_offset) * input_width +
im_col_offset;
im_data[im_offset] += col_data[col_offset];
}
}
}
}
}
}
}
};
template class Im2ColFunctor<paddle::operators::math::ColFormat::kOCF,
platform::CPUPlace, float>;
template class Im2ColFunctor<paddle::operators::math::ColFormat::kOCF,
platform::CPUPlace, double>;
template class Col2ImFunctor<paddle::operators::math::ColFormat::kOCF,
platform::CPUPlace, float>;
template class Col2ImFunctor<paddle::operators::math::ColFormat::kOCF,
platform::CPUPlace, double>;
} // namespace math
} // namespace operators
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/im2col.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
namespace operators {
namespace math {
template <class T>
__global__ void im2col(const T* data_im, int num_outs, int height, int width,
int filter_height, int filter_width, int stride_height,
int stride_width, int padding_height, int padding_width,
int output_height, int output_width, T* data_col) {
int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
if (index < num_outs) {
int w_out = index % output_width;
index /= output_width;
int h_out = index % output_height;
int channel_in = index / output_height;
int channel_out = channel_in * filter_height * filter_width;
int h_in = h_out * stride_height;
int w_in = w_out * stride_width;
data_col += (channel_out * output_height + h_out) * output_width + w_out;
for (int i = 0; i < filter_height; ++i) {
for (int j = 0; j < filter_width; ++j) {
int rIdx = int(h_in + i);
int cIdx = int(w_in + j);
if ((rIdx - (int)padding_height) >= (int)height ||
(rIdx - (int)padding_height) < 0 ||
(cIdx - (int)padding_width) >= (int)width ||
(cIdx - (int)padding_width) < 0) {
*data_col = 0;
} else {
rIdx = rIdx + channel_in * height - padding_height;
cIdx = cIdx - padding_width;
*data_col = data_im[rIdx * width + cIdx];
}
data_col += output_height * output_width;
}
}
}
}
/*
* im = [input_channels, input_height, input_width]
* col =
* [input_channels, filter_height, filter_width, output_height, output_width]
*/
template <class T>
class Im2ColFunctor<paddle::operators::math::ColFormat::kCFO,
platform::GPUPlace, T> {
public:
void operator()(const framework::Tensor& im, framework::Tensor& col,
int stride_height, int stride_width, int padding_height,
int padding_width, platform::DeviceContext* context) {
PADDLE_ENFORCE(im.dims().size() == 3);
PADDLE_ENFORCE(col.dims().size() == 5);
int input_channels = im.dims()[0];
int input_height = im.dims()[1];
int input_width = im.dims()[2];
int filter_height = col.dims()[1];
int filter_width = col.dims()[2];
int output_height = col.dims()[3];
int output_width = col.dims()[4];
int num_outputs = input_channels * output_height * output_width;
int blocks = (num_outputs + 1024 - 1) / 1024;
int block_x = 512;
int block_y = (blocks + 512 - 1) / 512;
dim3 threads(1024, 1);
dim3 grid(block_x, block_y);
im2col<T><<<
grid, threads, 0,
reinterpret_cast<platform::CUDADeviceContext*>(context)->stream()>>>(
im.data<T>(), num_outputs, input_height, input_width, filter_height,
filter_width, stride_height, stride_width, padding_height,
padding_width, output_height, output_width, col.data<T>());
}
};
template <class T>
__global__ void col2im(size_t n, const T* data_col, size_t height, size_t width,
size_t channels, size_t filter_height,
size_t filter_width, size_t stride_height,
size_t stride_width, size_t padding_height,
size_t padding_width, size_t output_height,
size_t output_width, T* data_im) {
size_t index =
(blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
if (index < n) {
T val = 0;
int w = int(index % width);
int h = int((index / width) % height);
int c = int(index / (width * height));
if ((w - (int)padding_width) >= 0 &&
(w - (int)padding_width) < (width - 2 * padding_width) &&
(h - (int)padding_height) >= 0 &&
(h - padding_height) < (height - 2 * padding_height)) {
// compute the start and end of the output
int w_col_start = (w < (int)filter_width)
? 0
: (w - int(filter_width)) / (int)stride_width + 1;
int w_col_end =
min((int)(w / (int)stride_width + 1), (int)(output_width));
int h_col_start = (h < (int)filter_height)
? 0
: (h - (int)filter_height) / (int)stride_height + 1;
int h_col_end = min(int(h / stride_height + 1), int(output_height));
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
// the col location: [c * width * height + h_out, w_out]
int c_col = int(c * filter_height * filter_width) +
(h - h_col * (int)stride_height) * (int)filter_width +
(w - w_col * (int)stride_width);
val +=
data_col[(c_col * output_height + h_col) * output_width + w_col];
}
}
h -= padding_height;
w -= padding_width;
data_im[c * ((width - 2 * padding_width) *
(height - 2 * padding_height)) +
h * (width - 2 * padding_width) + w] += val;
}
}
}
/*
* im = [input_channels, input_height, input_width]
* col =
* [input_channels, filter_height, filter_width, output_height, output_width]
*/
template <class T>
class Col2ImFunctor<paddle::operators::math::ColFormat::kCFO,
platform::GPUPlace, T> {
public:
void operator()(framework::Tensor& im, const framework::Tensor& col,
int stride_height, int stride_width, int padding_height,
int padding_width, platform::DeviceContext* context) {
PADDLE_ENFORCE(im.dims().size() == 3);
PADDLE_ENFORCE(col.dims().size() == 5);
int input_channels = im.dims()[0];
int input_height = im.dims()[1];
int input_width = im.dims()[2];
int filter_height = col.dims()[1];
int filter_width = col.dims()[2];
int output_height = col.dims()[3];
int output_width = col.dims()[4];
size_t num_kernels = input_channels * (input_height + 2 * padding_height) *
(input_width + 2 * padding_width);
size_t blocks = (num_kernels + 1024 - 1) / 1024;
size_t block_x = 512;
size_t block_y = (blocks + 512 - 1) / 512;
dim3 threads(1024, 1);
dim3 grid(block_x, block_y);
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
col2im<T><<<
grid, threads, 0,
reinterpret_cast<platform::CUDADeviceContext*>(context)->stream()>>>(
num_kernels, col.data<T>(), input_height + 2 * padding_height,
input_width + 2 * padding_width, input_channels, filter_height,
filter_width, stride_height, stride_width, padding_height,
padding_width, output_height, output_width, im.data<T>());
}
};
template class Im2ColFunctor<paddle::operators::math::ColFormat::kCFO,
platform::GPUPlace, float>;
template class Im2ColFunctor<paddle::operators::math::ColFormat::kCFO,
platform::GPUPlace, double>;
template class Col2ImFunctor<paddle::operators::math::ColFormat::kCFO,
platform::GPUPlace, float>;
template class Col2ImFunctor<paddle::operators::math::ColFormat::kCFO,
platform::GPUPlace, double>;
template <class T>
__global__ void im2colOCF(const T* im_data, T* col_data, int input_channels,
int input_height, int input_width, int filter_height,
int filter_width, int stride_height, int stride_width,
int padding_height, int padding_width,
int output_height, int output_width) {
int swid = blockIdx.x;
int shid = blockIdx.y;
for (int channelid = threadIdx.z; channelid < input_channels;
channelid += blockDim.z) {
for (int idy = threadIdx.y; idy < filter_height; idy += blockDim.y) {
for (int idx = threadIdx.x; idx < filter_width; idx += blockDim.x) {
int width_offset = idx + swid * stride_width - padding_width;
int height_offset = idy + shid * stride_height - padding_height;
int im_offset = width_offset + height_offset * input_width +
channelid * input_height * input_width;
int col_offset = idx + idy * filter_width +
channelid * filter_height * filter_width +
(shid * output_width + swid) *
(input_channels * filter_height * filter_width);
if (height_offset >= input_height || height_offset < 0 ||
width_offset >= input_width || width_offset < 0) {
col_data[col_offset] = T(0);
} else {
col_data[col_offset] = im_data[im_offset];
}
}
}
}
}
/*
* im = [input_channels, input_height, input_width]
* col =
* [output_height, output_width, input_channels, filter_height, filter_width]
*/
template <class T>
class Im2ColFunctor<paddle::operators::math::ColFormat::kOCF,
platform::GPUPlace, T> {
public:
void operator()(const framework::Tensor& im, framework::Tensor& col,
int stride_height, int stride_width, int padding_height,
int padding_width, platform::DeviceContext* context) {
PADDLE_ENFORCE(im.dims().size() == 3);
PADDLE_ENFORCE(col.dims().size() == 5);
int input_channels = im.dims()[0];
int input_height = im.dims()[1];
int input_width = im.dims()[2];
int filter_height = col.dims()[3];
int filter_width = col.dims()[4];
int output_height = col.dims()[0];
int output_width = col.dims()[1];
int block_dim_x = 0;
int block_dim_y = 0;
if (filter_height <= 4 && filter_width <= 4) {
block_dim_x = 4;
block_dim_y = 4;
} else if (filter_height <= 8 && filter_width <= 8) {
block_dim_x = 8;
block_dim_y = 8;
} else if (filter_height <= 16 && filter_width <= 16) {
block_dim_x = 16;
block_dim_y = 16;
} else {
block_dim_x = 32;
block_dim_y = 32;
}
int block_dim_z = 1024 / block_dim_x / block_dim_y;
dim3 threads(block_dim_x, block_dim_y,
std::min(block_dim_z, input_channels));
dim3 grid(output_width, output_height);
im2colOCF<T><<<
grid, threads, 0,
reinterpret_cast<platform::CUDADeviceContext*>(context)->stream()>>>(
im.data<T>(), col.data<T>(), input_channels, input_height, input_width,
filter_height, filter_width, stride_height, stride_width,
padding_height, padding_width, output_height, output_width);
}
};
template <class T>
__global__ void col2imOCF(T* im_data, const T* col_data, int input_channels,
int input_height, int input_width, int filter_height,
int filter_width, int stride_height, int stride_width,
int padding_height, int padding_width,
int output_height, int output_width) {
int swid = blockIdx.x;
int shid = blockIdx.y;
for (int channelid = threadIdx.z; channelid < input_channels;
channelid += blockDim.z) {
for (int idy = threadIdx.y; idy < filter_height; idy += blockDim.y) {
for (int idx = threadIdx.x; idx < filter_width; idx += blockDim.x) {
int width_offset = idx + swid * stride_width - padding_width;
int height_offset = idy + shid * stride_height - padding_height;
int im_offset = width_offset + height_offset * input_width +
channelid * input_height * input_width;
int col_offset = idx + idy * filter_width +
channelid * filter_height * filter_width +
(shid * output_width + swid) *
(input_channels * filter_height * filter_width);
if (height_offset >= 0 && height_offset < input_height &&
width_offset >= 0 && width_offset < input_width) {
paddle::platform::CudaAtomicAdd(im_data + im_offset,
col_data[col_offset]);
}
}
}
}
}
/*
* im = [input_channels, input_height, input_width]
* col =
* [output_height, output_width, input_channels, filter_height, filter_width]
*/
template <class T>
class Col2ImFunctor<paddle::operators::math::ColFormat::kOCF,
platform::GPUPlace, T> {
public:
void operator()(framework::Tensor& im, const framework::Tensor& col,
int stride_height, int stride_width, int padding_height,
int padding_width, platform::DeviceContext* context) {
PADDLE_ENFORCE(im.dims().size() == 3);
PADDLE_ENFORCE(col.dims().size() == 5);
int input_channels = im.dims()[0];
int input_height = im.dims()[1];
int input_width = im.dims()[2];
int filter_height = col.dims()[3];
int filter_width = col.dims()[4];
int output_height = col.dims()[0];
int output_width = col.dims()[1];
int block_dim_x = 0;
int block_dim_y = 0;
if (filter_height <= 4 && filter_width <= 4) {
block_dim_x = 4;
block_dim_y = 4;
} else if (filter_height <= 8 && filter_width <= 8) {
block_dim_x = 8;
block_dim_y = 8;
} else if (filter_height <= 16 && filter_width <= 16) {
block_dim_x = 16;
block_dim_y = 16;
} else {
block_dim_x = 32;
block_dim_y = 32;
}
int block_dim_z = 1024 / block_dim_x / block_dim_y;
dim3 threads(block_dim_x, block_dim_y,
std::min(block_dim_z, input_channels));
dim3 grid(output_width, output_height);
col2imOCF<T><<<
grid, threads, 0,
reinterpret_cast<platform::CUDADeviceContext*>(context)->stream()>>>(
im.data<T>(), col.data<T>(), input_channels, input_height, input_width,
filter_height, filter_width, stride_height, stride_width,
padding_height, padding_width, output_height, output_width);
}
};
template class Im2ColFunctor<paddle::operators::math::ColFormat::kOCF,
platform::GPUPlace, float>;
template class Im2ColFunctor<paddle::operators::math::ColFormat::kOCF,
platform::GPUPlace, double>;
template class Col2ImFunctor<paddle::operators::math::ColFormat::kOCF,
platform::GPUPlace, float>;
template class Col2ImFunctor<paddle::operators::math::ColFormat::kOCF,
platform::GPUPlace, double>;
} // namespace math
} // namespace operators
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/tensor.h"
#include "paddle/platform/device_context.h"
namespace paddle {
namespace operators {
namespace math {
/* The storage format of the colData in the Im2ColFunctor and Col2ImFunctor. */
enum class ColFormat { kCFO = 0, kOCF = 1 };
/*
* \brief Converts image data of three dimensions (CHW) into colData of
* five dimensions in the Im2ColFunctor calculation;
* the Col2ImFunctor calculation performs the reverse conversion.
*
* \param imData Image data.
* \param imShape The shape of imData,
* [input_channels, input_height, input_width].
* \param colData Column data.
* \param colShape The shape of colData.
*
* If the template argument Format is kCFO, the shape of colData is:
* [input_channels, filter_height, filter_width, output_height, output_width]
* So, it is easy to reshape into a convolution matrix for convolution
* calculation based on matrix multiplication.
* The shape of the convolution matrix is [height, width], where the height equals
* input_channels * filter_height * filter_width, and the width equals
* output_height * output_width.
*
* Reshape:
* shape of colData shape of convolution matrix
* [input_channels,
* filter_height,
* filter_width, ======> [height, width]
* output_height,
* output_width]
*
* If the template argument Format is kOCF, the shape of colData is:
* [output_height, output_width, input_channels, filter_height, filter_width]
* So, it is easy to reshape into a sequence matrix for RNN calculation.
* The shape of the sequence matrix is [seq_length, step_size], where seq_length
* equals output_height * output_width, and step_size equals
* input_channels * filter_height * filter_width.
*
* Reshape:
* shape of colData shape of sequence matrix
* [output_height,
* output_width,
* input_channels, ======> [seqLength, stepSize]
* filter_height,
* filter_width]
*
* \note The caller needs to ensure that imShape.inputChannels is equal to
* colShape.inputChannels.
*/
template <ColFormat Format, typename Place, typename T>
class Im2ColFunctor {
public:
void operator()(const framework::Tensor& im, framework::Tensor& col,
int stride_height, int stride_width, int padding_height,
int padding_width, platform::DeviceContext* context);
};
template <ColFormat Format, typename Place, typename T>
class Col2ImFunctor {
public:
void operator()(framework::Tensor& im, const framework::Tensor& col,
int stride_height, int stride_width, int padding_height,
int padding_width, platform::DeviceContext* context);
};
} // namespace math
} // namespace operators
} // namespace paddle
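To make the two column layouts described in the header above concrete, here is a small standalone shape calculation (plain C++; the sizes are illustrative only). For an image of [C, H, W] = [3, 5, 5] with a 3x3 filter, stride 1 and no padding, the kCFO buffer reshapes into a convolution matrix of [input_channels * filter_height * filter_width, output_height * output_width], while the kOCF buffer reshapes into a sequence matrix with those two dimensions swapped:

```c++
#include <cstdio>

int main() {
  // Illustrative sizes only; any CHW image / filter / stride / padding works.
  int C = 3, H = 5, W = 5;  // input channels, height, width
  int FH = 3, FW = 3;       // filter height, width
  int stride = 1, padding = 0;

  int OH = (H - FH + 2 * padding) / stride + 1;  // output height
  int OW = (W - FW + 2 * padding) / stride + 1;  // output width

  // kCFO col: [C, FH, FW, OH, OW] -> convolution matrix [C*FH*FW, OH*OW]
  std::printf("kCFO col: [%d, %d, %d, %d, %d] -> matrix [%d, %d]\n",
              C, FH, FW, OH, OW, C * FH * FW, OH * OW);
  // kOCF col: [OH, OW, C, FH, FW] -> sequence matrix [OH*OW, C*FH*FW]
  std::printf("kOCF col: [%d, %d, %d, %d, %d] -> matrix [%d, %d]\n",
              OH, OW, C, FH, FW, OH * OW, C * FH * FW);
  return 0;
}
```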
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/im2col.h"
#include <gtest/gtest.h>
#include <iostream>
template <typename Place>
void testIm2col() {
paddle::framework::Tensor input_tmp;
paddle::framework::Tensor input;
paddle::framework::Tensor output_cfo;
paddle::framework::Tensor output_ocf;
paddle::framework::Tensor output_tmp;
/**
* input = [0, 1, 2,
* 3, 4, 5]
*
* output_cfo = [0, 1
* 1, 2
* 3, 4
* 4, 5]
*
* output_ocf = [0, 1, 3, 4
* 1, 2, 4, 5]
*/
int input_height = 2;
int input_width = 3;
int filter_size = 2;
int stride = 1;
int padding = 0;
int output_height = (input_height - filter_size + 2 * padding) / stride + 1;
int output_width = (input_width - filter_size + 2 * padding) / stride + 1;
float* input_ptr = input_tmp.mutable_data<float>(
{1, input_height, input_width}, paddle::platform::CPUPlace());
float arr[6] = {0, 1, 2, 3, 4, 5};
memcpy(input_ptr, arr, 6 * sizeof(float));
auto* place = new Place();
if (paddle::platform::is_cpu_place(*place)) {
input = input_tmp;
} else {
input.CopyFrom<float>(input_tmp, *place);
}
output_cfo.mutable_data<float>(
{1, filter_size, filter_size, output_height, output_width}, *place);
output_ocf.mutable_data<float>(
{output_height, output_width, 1, filter_size, filter_size}, *place);
paddle::operators::math::Im2ColFunctor<
paddle::operators::math::ColFormat::kCFO, Place, float>
im2col;
paddle::operators::math::Im2ColFunctor<
paddle::operators::math::ColFormat::kOCF, Place, float>
im2col_ocf;
paddle::platform::DeviceContext* context;
if (paddle::platform::is_cpu_place(*place)) {
context =
new paddle::platform::CPUDeviceContext(paddle::platform::CPUPlace());
} else {
#ifndef PADDLE_ONLY_CPU
context =
new paddle::platform::CUDADeviceContext(paddle::platform::GPUPlace());
#else
PADDLE_THROW("no GPU support");
#endif // PADDLE_ONLY_CPU
}
im2col(input, output_cfo, stride, stride, padding, padding, context);
im2col_ocf(input, output_ocf, stride, stride, padding, padding, context);
float* out_cfo_ptr;
if (paddle::platform::is_cpu_place(*place)) {
out_cfo_ptr = output_cfo.data<float>();
} else {
output_tmp.CopyFrom<float>(output_cfo, paddle::platform::CPUPlace());
out_cfo_ptr = output_tmp.data<float>();
}
EXPECT_EQ(out_cfo_ptr[0], 0);
EXPECT_EQ(out_cfo_ptr[1], 1);
EXPECT_EQ(out_cfo_ptr[2], 1);
EXPECT_EQ(out_cfo_ptr[3], 2);
EXPECT_EQ(out_cfo_ptr[4], 3);
EXPECT_EQ(out_cfo_ptr[5], 4);
EXPECT_EQ(out_cfo_ptr[6], 4);
EXPECT_EQ(out_cfo_ptr[7], 5);
float* out_ocf_ptr;
if (paddle::platform::is_cpu_place(*place)) {
out_ocf_ptr = output_ocf.data<float>();
} else {
output_tmp.CopyFrom<float>(output_ocf, paddle::platform::CPUPlace());
out_ocf_ptr = output_tmp.data<float>();
}
EXPECT_EQ(out_ocf_ptr[0], 0);
EXPECT_EQ(out_ocf_ptr[1], 1);
EXPECT_EQ(out_ocf_ptr[2], 3);
EXPECT_EQ(out_ocf_ptr[3], 4);
EXPECT_EQ(out_ocf_ptr[4], 1);
EXPECT_EQ(out_ocf_ptr[5], 2);
EXPECT_EQ(out_ocf_ptr[6], 4);
EXPECT_EQ(out_ocf_ptr[7], 5);
}
TEST(math, im2col) {
testIm2col<paddle::platform::CPUPlace>();
#ifndef PADDLE_ONLY_CPU
testIm2col<paddle::platform::GPUPlace>();
#endif
}
...@@ -49,12 +49,11 @@ class MeanGradKernel : public framework::OpKernel { ...@@ -49,12 +49,11 @@ class MeanGradKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto OG = context.Input<Tensor>(framework::GradVarName("Out")); auto OG = context.Input<Tensor>(framework::GradVarName("Out"));
PADDLE_ENFORCE(framework::product(OG->dims()) == 1, PADDLE_ENFORCE(OG->numel() == 1, "Mean Gradient should be scalar");
"Mean Gradient should be scalar");
auto IG = context.Output<Tensor>(framework::GradVarName("X")); auto IG = context.Output<Tensor>(framework::GradVarName("X"));
IG->mutable_data<T>(context.GetPlace()); IG->mutable_data<T>(context.GetPlace());
T ig_size = (T)framework::product(IG->dims()); T ig_size = static_cast<T>(IG->numel());
Eigen::DSizes<int, 1> bcast(ig_size); Eigen::DSizes<int, 1> bcast(ig_size);
EigenVector<T>::Flatten(*IG).device(context.GetEigenDevice<Place>()) = EigenVector<T>::Flatten(*IG).device(context.GetEigenDevice<Place>()) =
......
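For reference, the arithmetic behind the mean-gradient hunk above: with Out = mean(X) over N = IG->numel() elements, every element of X contributes with weight 1/N, so the backward pass broadcasts the scalar output gradient divided by N:

```latex
% Gradient of the mean reduction; N is the number of elements of X.
\text{Out} = \frac{1}{N}\sum_{i=1}^{N} X_i
\quad\Longrightarrow\quad
\frac{\partial \text{Out}}{\partial X_i} = \frac{1}{N},
\qquad
\frac{\partial L}{\partial X_i} = \frac{1}{N}\,\frac{\partial L}{\partial \text{Out}}
```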
...@@ -31,8 +31,7 @@ class MinusOp : public framework::OperatorWithKernel { ...@@ -31,8 +31,7 @@ class MinusOp : public framework::OperatorWithKernel {
auto *right_tensor = ctx.Input<framework::Tensor>("Y"); auto *right_tensor = ctx.Input<framework::Tensor>("Y");
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
framework::product(left_tensor->dims()), left_tensor->numel(), right_tensor->numel(),
framework::product(right_tensor->dims()),
"Minus operator must take two tensor with same num of elements"); "Minus operator must take two tensor with same num of elements");
ctx.Output<framework::Tensor>("Out")->Resize(left_tensor->dims()); ctx.Output<framework::Tensor>("Out")->Resize(left_tensor->dims());
} }
......
...@@ -25,18 +25,27 @@ class MulOp : public framework::OperatorWithKernel { ...@@ -25,18 +25,27 @@ class MulOp : public framework::OperatorWithKernel {
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
auto dim0 = ctx.Input<Tensor>("X")->dims(); auto x_dims = ctx.Input<Tensor>("X")->dims();
auto dim1 = ctx.Input<Tensor>("Y")->dims(); auto y_dims = ctx.Input<Tensor>("Y")->dims();
PADDLE_ENFORCE_EQ(dim0.size(), 2, int x_num_col_dims = Attr<int>("x_num_col_dims");
"input X(%s) should be a tensor with 2 dims, a matrix", int y_num_col_dims = Attr<int>("y_num_col_dims");
ctx.op().Input("X"));
PADDLE_ENFORCE_EQ(dim1.size(), 2, PADDLE_ENFORCE(x_dims.size() > x_num_col_dims,
"input Y(%s) should be a tensor with 2 dims, a matrix", "The rank of input tensor X(%s) should be larger than "
ctx.op().Input("Y")); "`mul_op`'s `x_num_col_dims`.",
ctx.op().Input("X"));
PADDLE_ENFORCE(y_dims.size() > y_num_col_dims,
"The rank of input tensor Y(%s) should be larger than "
"`mul_op`'s `y_num_col_dims`.",
ctx.op().Input("Y"));
auto x_mat_dims = framework::flatten_to_2d(x_dims, x_num_col_dims);
auto y_mat_dims = framework::flatten_to_2d(y_dims, y_num_col_dims);
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
dim0[1], dim1[0], x_mat_dims[1], y_mat_dims[0],
"First matrix's width must be equal with second matrix's height."); "First matrix's width must be equal with second matrix's height.");
ctx.Output<Tensor>("Out")->Resize({dim0[0], dim1[1]}); ctx.Output<Tensor>("Out")->Resize({x_mat_dims[0], y_mat_dims[1]});
} }
}; };
...@@ -47,6 +56,23 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -47,6 +56,23 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker {
AddInput("X", "The first input of mul op"); AddInput("X", "The first input of mul op");
AddInput("Y", "The second input of mul op"); AddInput("Y", "The second input of mul op");
AddOutput("Out", "The output of mul op"); AddOutput("Out", "The output of mul op");
AddAttr<int>(
"x_num_col_dims",
R"DOC(mul_op can take tensors with more than two dimensions as input `X`,
in that case, tensors will be reshaped to a matrix. The matrix's first
dimension(column length) will be the product of tensor's last
`num_col_dims` dimensions, and the matrix's second dimension(row length)
will be the product of tensor's first `rank - num_col_dims` dimensions.
)DOC")
.SetDefault(1)
.EqualGreaterThan(1);
AddAttr<int>(
"y_num_col_dims",
R"DOC(mul_op can take tensors with more than two dimensions as input `Y`,
in that case, tensors will be reshaped to a matrix. Just like input `X`.
)DOC")
.SetDefault(1)
.EqualGreaterThan(1);
AddComment(R"DOC( AddComment(R"DOC(
Two Element Mul Operator. Two Element Mul Operator.
...@@ -70,10 +96,20 @@ class MulOpGrad : public framework::OperatorWithKernel { ...@@ -70,10 +96,20 @@ class MulOpGrad : public framework::OperatorWithKernel {
auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims(); auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
auto *x_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto *x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *y_grad = ctx.Output<Tensor>(framework::GradVarName("Y")); auto *y_grad = ctx.Output<Tensor>(framework::GradVarName("Y"));
PADDLE_ENFORCE(x_dims[0] == out_dims[0],
"Out@GRAD M X N must equal to X dims 0, M "); auto x_mat_dims =
PADDLE_ENFORCE(y_dims[1] == out_dims[1], framework::flatten_to_2d(x_dims, Attr<int>("x_num_col_dims"));
"Out@GRAD M X N must equal to Y dims 1, N "); auto y_mat_dims =
framework::flatten_to_2d(y_dims, Attr<int>("y_num_col_dims"));
PADDLE_ENFORCE_EQ(
x_mat_dims[0], out_dims[0],
"The first dimension of Out@GRAD must equal to the first dimension of "
"the first operand.");
PADDLE_ENFORCE_EQ(
y_mat_dims[1], out_dims[1],
"The second dimension of Out@GRAD must equal to the second "
"dimension of the second operand.");
if (x_grad) x_grad->Resize(x_dims); if (x_grad) x_grad->Resize(x_dims);
if (y_grad) y_grad->Resize(y_dims); if (y_grad) y_grad->Resize(y_dims);
......
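A numeric sketch of the new shape check in MulOp::InferShape, assuming that `framework::flatten_to_2d(dims, k)` (a helper not shown in this diff) groups the leading `k` dimensions into the first matrix dimension and the remaining dimensions into the second; all names and shapes below are illustrative:

```c++
#include <cstdio>
#include <functional>
#include <numeric>
#include <utility>
#include <vector>

// Hypothetical helper mirroring the assumed flatten_to_2d behaviour:
// group the first k dims into the height, the remaining dims into the width.
std::pair<long, long> FlattenTo2D(const std::vector<long>& dims, int k) {
  long h = std::accumulate(dims.begin(), dims.begin() + k, 1L,
                           std::multiplies<long>());
  long w = std::accumulate(dims.begin() + k, dims.end(), 1L,
                           std::multiplies<long>());
  return {h, w};
}

int main() {
  std::vector<long> x_dims = {2, 3, 4};  // illustrative only
  std::vector<long> y_dims = {4, 5};
  auto x_mat = FlattenTo2D(x_dims, /*x_num_col_dims=*/2);  // -> [6, 4]
  auto y_mat = FlattenTo2D(y_dims, /*y_num_col_dims=*/1);  // -> [4, 5]
  // The InferShape check above requires x_mat[1] == y_mat[0];
  // Out is then resized to [x_mat[0], y_mat[1]].
  std::printf("x_mat [%ld, %ld], y_mat [%ld, %ld], Out [%ld, %ld]\n",
              x_mat.first, x_mat.second, y_mat.first, y_mat.second,
              x_mat.first, y_mat.second);
  return 0;
}
```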
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. You may not use this file except in compliance with the License.
You may obtain a copy of the License at You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 http://www.apache.org/licenses/LICENSE-2.0
...@@ -31,13 +31,25 @@ template <typename Place, typename T> ...@@ -31,13 +31,25 @@ template <typename Place, typename T>
class MulKernel : public framework::OpKernel { class MulKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<Tensor>("X"); const Tensor* x = context.Input<Tensor>("X");
auto* y = context.Input<Tensor>("Y"); const Tensor* y = context.Input<Tensor>("Y");
auto* z = context.Output<Tensor>("Out"); Tensor* z = context.Output<Tensor>("Out");
const Tensor x_matrix =
x->dims().size() > 2
? framework::ReshapeToMatrix<T>(
*x, context.template Attr<int>("x_num_col_dims"))
: *x;
const Tensor y_matrix =
y->dims().size() > 2
? framework::ReshapeToMatrix<T>(
*y, context.template Attr<int>("y_num_col_dims"))
: *y;
z->mutable_data<T>(context.GetPlace()); z->mutable_data<T>(context.GetPlace());
auto* device_context = auto* device_context =
const_cast<platform::DeviceContext*>(context.device_context_); const_cast<platform::DeviceContext*>(context.device_context_);
math::matmul<Place, T>(*x, false, *y, false, 1, z, 0, device_context); math::matmul<Place, T>(x_matrix, false, y_matrix, false, 1, z, 0,
device_context);
} }
}; };
...@@ -45,23 +57,39 @@ template <typename Place, typename T> ...@@ -45,23 +57,39 @@ template <typename Place, typename T>
class MulGradKernel : public framework::OpKernel { class MulGradKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& ctx) const override { void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<Tensor>("X"); int x_num_col_dims = ctx.template Attr<int>("x_num_col_dims");
auto* y = ctx.Input<Tensor>("Y"); int y_num_col_dims = ctx.template Attr<int>("y_num_col_dims");
auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out")); const Tensor* x = ctx.Input<Tensor>("X");
const Tensor* y = ctx.Input<Tensor>("Y");
const Tensor x_matrix =
x->dims().size() > 2 ? framework::ReshapeToMatrix<T>(*x, x_num_col_dims)
: *x;
const Tensor y_matrix =
y->dims().size() > 2 ? framework::ReshapeToMatrix<T>(*y, y_num_col_dims)
: *y;
const Tensor* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* dx = ctx.Output<Tensor>(framework::GradVarName("X")); Tensor* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
auto* dy = ctx.Output<Tensor>(framework::GradVarName("Y")); Tensor* dy = ctx.Output<Tensor>(framework::GradVarName("Y"));
auto* device_context = auto* device_context =
const_cast<platform::DeviceContext*>(ctx.device_context_); const_cast<platform::DeviceContext*>(ctx.device_context_);
if (dx) { if (dx) {
dx->mutable_data<T>(ctx.GetPlace()); dx->mutable_data<T>(ctx.GetPlace());
Tensor dx_matrix = dx->dims().size() > 2 ? framework::ReshapeToMatrix<T>(
*dx, x_num_col_dims)
: *dx;
// dx = dout * y'. dx: M x K, dout : M x N, y : K x N // dx = dout * y'. dx: M x K, dout : M x N, y : K x N
math::matmul<Place, T>(*dout, false, *y, true, 1, dx, 0, device_context); math::matmul<Place, T>(*dout, false, y_matrix, true, 1, &dx_matrix, 0,
device_context);
} }
if (dy) { if (dy) {
dy->mutable_data<T>(ctx.GetPlace()); dy->mutable_data<T>(ctx.GetPlace());
Tensor dy_matrix = dy->dims().size() > 2 ? framework::ReshapeToMatrix<T>(
*dy, y_num_col_dims)
: *dy;
// dy = x' * dout. dy K x N, dout : M x N, x : M x K // dy = x' * dout. dy K x N, dout : M x N, x : M x K
math::matmul<Place, T>(*x, true, *dout, false, 1, dy, 0, device_context); math::matmul<Place, T>(x_matrix, true, *dout, false, 1, &dy_matrix, 0,
device_context);
} }
} }
}; };
......
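The comments in the gradient kernel above ("dx = dout * y'", "dy = x' * dout") follow from standard matrix calculus for Out = X * Y on the flattened matrices (X: M x K, Y: K x N, L the scalar loss):

```latex
% Backward pass of Out = X Y for (flattened) matrices X: M x K, Y: K x N.
\frac{\partial L}{\partial X} = \frac{\partial L}{\partial \text{Out}}\, Y^{\top}
\quad (M \times K),
\qquad
\frac{\partial L}{\partial Y} = X^{\top} \frac{\partial L}{\partial \text{Out}}
\quad (K \times N)
```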
## Operator's Parameter Name Convention
To make the operator documentation clearer, we recommend that operator names obey the following conventions.
### OpProtoMaker names
When defining an operator in Paddle, a corresponding [OpProtoMaker](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L170) (TODO: OpProtoMaker Doc) needs to be defined. All the Inputs/Outputs and Attributes are written into the [OpProto](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto#L61), which is then used in the client language to create the operator.
- Input/Output.
  - Input/Output names follow **CamelCase**, e.g. `X`, `Y`, `Matrix`, `LastAxisInMatrix`. Since Inputs/Outputs behave much like Variables, meaningful English words are preferred.
  - If an operator's Inputs/Outputs are plain mathematical tensors that do not map to any meaningful word, input names should start from `X` (e.g. `X`, `Y`) and output names should start from `Out` (e.g. `Out`). This rule is intended to unify operators that have only a few inputs/outputs.
- Attribute.
  - Attribute names follow **camelCase**, e.g. `x`, `y`, `axis`, `rowwiseMatrix`. Meaningful English words are preferred here as well.
- Comments.
  - Input/Output/Attr comments follow the format **(type, default value) usage**, stating which type the entry can be and how it is used in the operator, e.g. the Accumulator attribute `"gamma"`: `(float, default 1.0) Accumulation multiplier`.
  - The operator comment uses the format `R"DOC(your comment here)DOC"`. Explain the operator's input/output first. If the operator performs a mathematical calculation, write the equation in the comment, e.g. `Out = X + Y`.
- Order.
  - Follow the order Input/Output, then Attribute, then Comments. See the example in the best practice section below.
### Best Practice
Here we give some examples to show how these rules will be used.
- The operator has one input and one output, e.g. `relu`: inputs: `X`, outputs: `Out`.
- The operator has two inputs and one output, e.g. `rowwise_add`: inputs: `X`, `Y`, outputs: `Out`.
- The operator contains an attribute, e.g. `cosine`: inputs: `X`, `axis`, outputs: `Out`.
We give a full example with the Accumulate operator.
```c++
class AccumulateOpMaker : public framework::OpProtoAndCheckerMaker {
public:
AccumulateOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "(Tensor) The input tensor that has to be accumulated to the output tensor. If the output size is not the same as input size, the output tensor is first reshaped and initialized to zero, and only then, accumulation is done.");
AddOutput("Out", "(Tensor) Accumulated output tensor");
AddAttr<float>("gamma", "(float, default 1.0) Accumulation multiplier");
AddComment(R"DOC(
Accumulate operator accumulates the input tensor to the output tensor. If the
output tensor already has the right size, we add to it; otherwise, we first
initialize the output tensor to all zeros, and then do accumulation. Any
further calls to the operator, given that no one else fiddles with the output
in the interim, will do simple accumulations.
Accumulation is done as shown:
Out = 1*X + gamma*Out
where X is the input tensor, Out is the output tensor and gamma is the multiplier
argument.
)DOC");
}
};
```
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/reshape_op.h"
namespace paddle {
namespace operators {
class ReshapeOp : public framework::OperatorWithKernel {
public:
ReshapeOp(const std::string &type, const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: OperatorWithKernel(type, inputs, outputs, attrs) {}
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
// input check
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) shouldn't be null");
auto shape = ctx.Attr<std::vector<int>>("shape");
PADDLE_ENFORCE(shape.size() > 0, "Attr(shape) shouldn't be empty.");
for (auto dim : shape) {
PADDLE_ENFORCE(dim > 0, "Each dimension of shape must be positive.");
}
// capacity check
int64_t capacity =
std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>());
auto *in = ctx.Input<framework::Tensor>("X");
int64_t in_size = framework::product(in->dims());
PADDLE_ENFORCE_EQ(capacity, in_size,
"The size of Input(X) mismatches with Attr(shape).");
// resize output
std::vector<int64_t> shape_int64(shape.size(), 0);
std::transform(shape.begin(), shape.end(), shape_int64.begin(),
[](int a) { return static_cast<int64_t>(a); });
auto out_dims = framework::make_ddim(shape_int64);
ctx.Output<framework::Tensor>("Out")->Resize(out_dims);
}
};
class ReshapeOpMaker : public framework::OpProtoAndCheckerMaker {
public:
ReshapeOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The input tensor of reshape operator.");
AddOutput("Out", "The output tensor of reshape operator.");
AddAttr<std::vector<int>>("shape", "Target shape of reshape operator.");
AddComment(R"DOC(Reshape operator
Reshape Input(X) into the shape specified by Attr(shape).
An example:
Given a 2-D tensor X with 2 rows and 2 columns
[[1, 2], [3, 4]]
with target shape = [1, 4], the reshape operator will transform
the tensor X into a 2-D tensor of shape [1, 4]:
[[1, 2, 3, 4]]
)DOC");
}
};
class ReshapeGradOp : public framework::OperatorWithKernel {
public:
ReshapeGradOp(const std::string &type,
const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: OperatorWithKernel(type, inputs, outputs, attrs) {}
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) shouldn't be null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
"Input(Out@GRAD) shouldn't be null.");
auto dims = ctx.Input<framework::Tensor>("X")->dims();
auto *d_in = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
d_in->Resize(dims);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP(reshape, ops::ReshapeOp, ops::ReshapeOpMaker, reshape_grad,
ops::ReshapeGradOp);
REGISTER_OP_CPU_KERNEL(reshape,
ops::ReshapeKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(
reshape_grad, ops::ReshapeGradKernel<paddle::platform::CPUPlace, float>);
...@@ -12,9 +12,11 @@ ...@@ -12,9 +12,11 @@
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#define EIGEN_USE_GPU #include "paddle/operators/reshape_op.h"
#include "paddle/operators/gather_op.h"
namespace ops = paddle::operators; REGISTER_OP_GPU_KERNEL(
REGISTER_OP_GPU_KERNEL(gather, reshape,
ops::GatherOpKernel<paddle::platform::GPUPlace, float>); paddle::operators::ReshapeKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
reshape_grad,
paddle::operators::ReshapeGradKernel<paddle::platform::GPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
template <typename Place, typename T>
class ReshapeKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const {
auto* out = ctx.Output<framework::Tensor>("Out");
auto* in = ctx.Input<framework::Tensor>("X");
out->mutable_data<T>(ctx.GetPlace());
auto shape = ctx.Attr<std::vector<int>>("shape");
std::vector<int64_t> shape_int64(shape.size(), 0);
std::transform(shape.begin(), shape.end(), shape_int64.begin(),
[](int a) { return static_cast<int64_t>(a); });
auto out_dims = framework::make_ddim(shape_int64);
out->CopyFrom<T>(*in, ctx.GetPlace());
out->Resize(out_dims);
}
};
template <typename Place, typename T>
class ReshapeGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const {
auto* d_out = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
auto* d_x = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
d_x->mutable_data<T>(ctx.GetPlace());
auto in_dims = d_x->dims();
d_x->CopyFrom<T>(*d_out, ctx.GetPlace());
d_x->Resize(in_dims);
}
};
} // namespace operators
} // namespace paddle
...@@ -109,7 +109,7 @@ void InitArgument(const ArgumentName& name, Argument* arg, ...@@ -109,7 +109,7 @@ void InitArgument(const ArgumentName& name, Argument* arg,
arg->step_scopes = op.Output(name.step_scopes); arg->step_scopes = op.Output(name.step_scopes);
auto inlinks = op.Inputs(name.inlinks); auto inlinks = op.Inputs(name.inlinks);
auto inlink_alias = op.GetAttr<std::vector<std::string>>(name.inlink_alias); auto inlink_alias = op.Attr<std::vector<std::string>>(name.inlink_alias);
PADDLE_ENFORCE(inlinks.size() == inlink_alias.size(), PADDLE_ENFORCE(inlinks.size() == inlink_alias.size(),
"the size of inlinks and inlink_alias don't match:%d,%d", "the size of inlinks and inlink_alias don't match:%d,%d",
inlinks.size(), inlink_alias.size()); inlinks.size(), inlink_alias.size());
...@@ -121,7 +121,7 @@ void InitArgument(const ArgumentName& name, Argument* arg, ...@@ -121,7 +121,7 @@ void InitArgument(const ArgumentName& name, Argument* arg,
} }
auto outlinks = op.Outputs(name.outlinks); auto outlinks = op.Outputs(name.outlinks);
auto outlink_alias = op.GetAttr<std::vector<std::string>>(name.outlink_alias); auto outlink_alias = op.Attr<std::vector<std::string>>(name.outlink_alias);
PADDLE_ENFORCE(outlinks.size() == outlink_alias.size(), PADDLE_ENFORCE(outlinks.size() == outlink_alias.size(),
"the size of outlinks and outlink_alias don't match:%d,%d", "the size of outlinks and outlink_alias don't match:%d,%d",
outlinks.size(), outlink_alias.size()); outlinks.size(), outlink_alias.size());
...@@ -135,8 +135,8 @@ void InitArgument(const ArgumentName& name, Argument* arg, ...@@ -135,8 +135,8 @@ void InitArgument(const ArgumentName& name, Argument* arg,
auto boot_memories = op.Inputs(name.boot_memories); auto boot_memories = op.Inputs(name.boot_memories);
// attributes // attributes
auto memories = op.GetAttr<std::vector<std::string>>(name.memories); auto memories = op.Attr<std::vector<std::string>>(name.memories);
auto pre_memories = op.GetAttr<std::vector<std::string>>(name.pre_memories); auto pre_memories = op.Attr<std::vector<std::string>>(name.pre_memories);
PADDLE_ENFORCE(memories.size() == boot_memories.size(), PADDLE_ENFORCE(memories.size() == boot_memories.size(),
"the size of memories, boot_memories don't match:%d,%d", "the size of memories, boot_memories don't match:%d,%d",
......
...@@ -25,14 +25,19 @@ class RowwiseAddOp : public framework::OperatorWithKernel { ...@@ -25,14 +25,19 @@ class RowwiseAddOp : public framework::OperatorWithKernel {
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
auto dim0 = ctx.Input<Tensor>("X")->dims(); auto x_dims = ctx.Input<Tensor>("X")->dims();
auto dim1 = ctx.Input<Tensor>("b")->dims(); auto b_dims = ctx.Input<Tensor>("b")->dims();
PADDLE_ENFORCE_GT(
PADDLE_ENFORCE(dim0.size() == 2, "Input 0 must be matrix"); x_dims.size(), b_dims.size(),
PADDLE_ENFORCE(dim1.size() == 1, "The second input must be vector"); "The rank of input `X` must be larger than the one of input `b`.");
PADDLE_ENFORCE(dim0[1] == dim1[0], "The width of two input must be same");
PADDLE_ENFORCE(ctx.OutputSize("Out") == 1, "The output size must be 1"); int num_col_dims = x_dims.size() - b_dims.size();
ctx.Output<Tensor>("Out")->Resize(ctx.Input<Tensor>("X")->dims());
PADDLE_ENFORCE_EQ(
framework::slice_ddim(x_dims, num_col_dims, x_dims.size()), b_dims,
"The width of two operands must be same");
PADDLE_ENFORCE_EQ(ctx.OutputSize("Out"), 1, "The output size must be 1");
ctx.Output<Tensor>("Out")->Resize(x_dims);
} }
}; };
...@@ -61,13 +66,20 @@ class RowwiseAddGradOp : public framework::OperatorWithKernel { ...@@ -61,13 +66,20 @@ class RowwiseAddGradOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("b"), "b should not be null"); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("b"), "b should not be null");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
"Input(Out@GRAD) should not be null"); "Input(Out@GRAD) should not be null");
auto dims0 = ctx.Input<Tensor>("X")->dims(); auto x_dims = ctx.Input<Tensor>("X")->dims();
auto dims1 = ctx.Input<Tensor>("b")->dims(); auto b_dims = ctx.Input<Tensor>("b")->dims();
PADDLE_ENFORCE_EQ(1, dims1.size(), "b dims should be 1") PADDLE_ENFORCE_GT(
x_dims.size(), b_dims.size(),
"The rank of input `X` must be larger than the one of input `b`.");
int num_col_dims = x_dims.size() - b_dims.size();
PADDLE_ENFORCE_EQ(
framework::slice_ddim(x_dims, num_col_dims, x_dims.size()), b_dims,
"The width of two operands must be same");
auto *dx = ctx.Output<Tensor>(framework::GradVarName("X")); auto *dx = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *db = ctx.Output<Tensor>(framework::GradVarName("b")); auto *db = ctx.Output<Tensor>(framework::GradVarName("b"));
if (dx) dx->Resize(dims0); if (dx) dx->Resize(x_dims);
if (db) db->Resize(dims1); if (db) db->Resize(b_dims);
} }
}; };
......
...@@ -33,10 +33,12 @@ class RowwiseAddKernel : public framework::OpKernel { ...@@ -33,10 +33,12 @@ class RowwiseAddKernel : public framework::OpKernel {
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto out = context.Output<Tensor>("Out"); auto out = context.Output<Tensor>("Out");
out->mutable_data<T>(context.GetPlace()); out->mutable_data<T>(context.GetPlace());
int num_col_dims = context.Input<Tensor>("X")->dims().size() -
auto input = EigenMatrix<T>::From(*context.Input<Tensor>("X")); context.Input<Tensor>("b")->dims().size();
auto bias = EigenVector<T>::From(*context.Input<Tensor>("b")); auto input =
auto output = EigenMatrix<T>::From(*out); EigenMatrix<T>::Reshape(*context.Input<Tensor>("X"), num_col_dims);
auto bias = EigenVector<T>::Flatten(*context.Input<Tensor>("b"));
auto output = EigenMatrix<T>::Reshape(*out, num_col_dims);
const int bias_size = bias.dimension(0); const int bias_size = bias.dimension(0);
const int rest_size = input.size() / bias_size; const int rest_size = input.size() / bias_size;
...@@ -54,12 +56,15 @@ class RowwiseAddGradKernel : public framework::OpKernel { ...@@ -54,12 +56,15 @@ class RowwiseAddGradKernel : public framework::OpKernel {
auto* dout = context.Input<Tensor>(framework::GradVarName("Out")); auto* dout = context.Input<Tensor>(framework::GradVarName("Out"));
auto* dx = context.Output<Tensor>(framework::GradVarName("X")); auto* dx = context.Output<Tensor>(framework::GradVarName("X"));
auto* db = context.Output<Tensor>(framework::GradVarName("b")); auto* db = context.Output<Tensor>(framework::GradVarName("b"));
int num_col_dims = context.Input<Tensor>("X")->dims().size() -
context.Input<Tensor>("b")->dims().size();
auto out_grad = EigenMatrix<T>::From(*dout); auto out_grad = EigenMatrix<T>::Reshape(*dout, num_col_dims);
auto place = context.GetEigenDevice<Place>(); auto place = context.GetEigenDevice<Place>();
if (dx) { if (dx) {
dx->mutable_data<T>(context.GetPlace()); dx->mutable_data<T>(context.GetPlace());
EigenMatrix<T>::From(*dx).device(place) = out_grad; EigenMatrix<T>::Reshape(*dx, num_col_dims).device(place) = out_grad;
} }
if (db) { if (db) {
......
...@@ -44,11 +44,13 @@ class ScaleOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -44,11 +44,13 @@ class ScaleOpMaker : public framework::OpProtoAndCheckerMaker {
The equation is: Out = scale*X The equation is: Out = scale*X
)DOC"); )DOC");
AddAttr<AttrType>("scale", "scale of scale operator.").SetDefault(1.0); AddAttr<AttrType>("scale", "The scaling factor of the scale operator.")
.SetDefault(1.0);
} }
}; };
// Identity Op's gradient is identity op, too. // The operator to calculate gradients of a scale operator is just the scale
// operator itself.
// Grad(Out=scale(X)) => Grad(X) = scale(Grad(Out)) // Grad(Out=scale(X)) => Grad(X) = scale(Grad(Out))
template <typename AttrType> template <typename AttrType>
class ScaleGradOp : public NetOp { class ScaleGradOp : public NetOp {
...@@ -60,38 +62,11 @@ class ScaleGradOp : public NetOp { ...@@ -60,38 +62,11 @@ class ScaleGradOp : public NetOp {
AppendOp(framework::OpRegistry::CreateOp( AppendOp(framework::OpRegistry::CreateOp(
"scale", {{"X", {Input(framework::GradVarName("Out"))}}}, "scale", {{"X", {Input(framework::GradVarName("Out"))}}},
{{"Out", {Output(framework::GradVarName("X"))}}}, {{"Out", {Output(framework::GradVarName("X"))}}},
{{"scale", GetAttr<AttrType>("scale")}})); {{"scale", Attr<AttrType>("scale")}}));
CompleteAddOp(false); CompleteAddOp(false);
} }
}; };
// identity is a alias of scale op. This is also a example for creating a alias
// operator.
template <typename AttrType>
class IdentityOpMaker : public framework::OpProtoAndCheckerMaker {
public:
IdentityOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "input tensor of identity op");
AddOutput("Out", "output tensor of identity op");
AddComment("identity operator. Just a alias of scale op which scale = 1.0");
}
};
template <typename AttrType>
class IdentityOp : public NetOp {
public:
IdentityOp(const std::string &type, const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: NetOp(type, inputs, outputs, attrs) {
AppendOp(framework::OpRegistry::CreateOp(
"scale", {{"X", {Input("X")}}}, {{"Out", {Output("Out")}}},
{{"scale", static_cast<AttrType>(1)}}));
}
};
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
...@@ -101,5 +76,3 @@ REGISTER_OP(scale, ops::ScaleOp, ops::ScaleOpMaker<float>, scale_grad, ...@@ -101,5 +76,3 @@ REGISTER_OP(scale, ops::ScaleOp, ops::ScaleOpMaker<float>, scale_grad,
ops::ScaleGradOp<float>); ops::ScaleGradOp<float>);
REGISTER_OP_CPU_KERNEL(scale, REGISTER_OP_CPU_KERNEL(scale,
ops::ScaleKernel<paddle::platform::CPUPlace, float>); ops::ScaleKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_WITHOUT_GRADIENT(identity, ops::IdentityOp<float>,
ops::IdentityOpMaker<float>);
...@@ -27,7 +27,7 @@ class ScaleKernel : public framework::OpKernel { ...@@ -27,7 +27,7 @@ class ScaleKernel : public framework::OpKernel {
auto* in = context.Input<framework::Tensor>("X"); auto* in = context.Input<framework::Tensor>("X");
tensor->mutable_data<T>(in->place()); tensor->mutable_data<T>(in->place());
auto scale = static_cast<T>(context.GetAttr<AttrType>("scale")); auto scale = static_cast<T>(context.Attr<AttrType>("scale"));
auto eigen_out = framework::EigenVector<T>::Flatten(*tensor); auto eigen_out = framework::EigenVector<T>::Flatten(*tensor);
auto eigen_in = framework::EigenVector<T>::Flatten(*in); auto eigen_in = framework::EigenVector<T>::Flatten(*in);
......
...@@ -31,7 +31,7 @@ class SGDOpKernel : public framework::OpKernel { ...@@ -31,7 +31,7 @@ class SGDOpKernel : public framework::OpKernel {
auto param = ctx.Input<Tensor>("param"); auto param = ctx.Input<Tensor>("param");
auto grad = ctx.Input<Tensor>("grad"); auto grad = ctx.Input<Tensor>("grad");
auto param_out = ctx.Output<Tensor>("param_out"); auto param_out = ctx.Output<Tensor>("param_out");
float lr = ctx.GetAttr<float>("learning_rate"); float lr = ctx.Attr<float>("learning_rate");
param_out->mutable_data<T>(ctx.GetPlace()); param_out->mutable_data<T>(ctx.GetPlace());
......
...@@ -51,7 +51,7 @@ the other dimensions in the K-dimensional vector input. Then the ratio of the ...@@ -51,7 +51,7 @@ the other dimensions in the K-dimensional vector input. Then the ratio of the
exponential of the given dimension and the sum of exponential values of all exponential of the given dimension and the sum of exponential values of all
the other dimensions is the output of the softmax operator. the other dimensions is the output of the softmax operator.
For each row `i` and each column `j` in X, we have: For each row `i` and each column `j` in input X, we have:
Y[i, j] = exp(X[i, j]) / sum_j(exp(X[i, j])) Y[i, j] = exp(X[i, j]) / sum_j(exp(X[i, j]))
)DOC"); )DOC");
...@@ -64,14 +64,15 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel { ...@@ -64,14 +64,15 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
protected: protected:
void InferShape(const framework::InferShapeContext &ctx) const override { void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE(ctx.InputVar("Y") != nullptr, "Input(Y) should not be null"); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should be not null.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Y")), PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Y")),
"Input(Y@GRAD) should not be null"); "Input(Y@GRAD) should be not null.");
PADDLE_ENFORCE(ctx.Input<Tensor>("Y")->dims() == PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("Y")->dims(),
ctx.Input<Tensor>(framework::GradVarName("Y"))->dims(), ctx.Input<Tensor>(framework::GradVarName("Y"))->dims(),
"the shape of Input(0) and Input(1) should be the same"); "Input(Y) and its gradients should have a same shape.");
ctx.Output<Tensor>(framework::GradVarName("X")) ctx.Output<Tensor>(framework::GradVarName("X"))
->Resize(ctx.Input<Tensor>("Y")->dims()); ->Resize(ctx.Input<Tensor>("X")->dims());
} }
}; };
......
...@@ -28,12 +28,12 @@ template <typename Place, typename T> ...@@ -28,12 +28,12 @@ template <typename Place, typename T>
class SoftmaxKernel : public framework::OpKernel { class SoftmaxKernel : public framework::OpKernel {
public: public:
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto input = context.Input<Tensor>("X"); auto X = context.Input<Tensor>("X");
auto output = context.Output<Tensor>("Y"); auto Y = context.Output<Tensor>("Y");
output->mutable_data<T>(context.GetPlace()); Y->mutable_data<T>(context.GetPlace());
auto logits = EigenMatrix<T>::From(*input); auto logits = EigenMatrix<T>::From(*X);
auto softmax = EigenMatrix<T>::From(*output); auto softmax = EigenMatrix<T>::From(*Y);
const int kBatchDim = 0; const int kBatchDim = 0;
const int kClassDim = 1; const int kClassDim = 1;
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/squared_l2_distance_op.h"
namespace paddle {
namespace operators {
class SquaredL2DistanceOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext& ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input of SquaredL2DistanceOp "
"must be initialized.");
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
"Target of SquaredL2DistanceOp "
"must be initialized.");
auto* x = ctx.Input<Tensor>("X");
auto x_dims = x->dims();
auto* y = ctx.Input<Tensor>("Y");
auto y_dims = y->dims();
PADDLE_ENFORCE_EQ(framework::arity(x_dims), framework::arity(y_dims),
"Tensor rank of both SquaredL2DistanceOp's "
"inputs must be same.");
int rank = framework::arity(x_dims);
PADDLE_ENFORCE_GE(rank, 2, "Tensor rank should be at least equal to 2.");
PADDLE_ENFORCE_EQ(x->numel() / x_dims[0], y->numel() / y_dims[0],
"Product of dimensions expcet the first dimension of "
"input and target must be equal.");
PADDLE_ENFORCE(y_dims[0] == 1 || y_dims[0] == x_dims[0],
"First dimension of target must be equal to input "
"or to 1.");
ctx.Output<Tensor>("sub_result")
->Resize({x_dims[0], x->numel() / x_dims[0]});
ctx.Output<Tensor>("Out")->Resize({x_dims[0], 1});
}
};
class SquaredL2DistanceOpMaker : public framework::OpProtoAndCheckerMaker {
public:
SquaredL2DistanceOpMaker(framework::OpProto* proto,
framework::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "Input of SquaredL2DistanceOp.");
AddInput("Y", "Target of SquaredL2DistanceOp.");
AddOutput("sub_result",
"Buffering substraction result which "
"will be reused in backward.")
.AsIntermediate();
AddOutput("Out", "Squared l2 distance between input and target.");
AddComment(R"DOC(
SquaredL2DistanceOp will calculate the squared L2 distance between
input and target. The number of distance values equals the
first dimension of input. The first dimension of target can be equal to
that of input or to 1. If the first dimension of target is 1, SquaredL2DistanceOp
will broadcast target's first dimension to input's first dimension.
You can decide whether to calculate the gradient of input and target.
)DOC");
}
};
class SquaredL2DistanceGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext& ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
"Gradient of Out should not be null");
auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
auto x_dims = ctx.Input<Tensor>("X")->dims();
auto y_dims = ctx.Input<Tensor>("Y")->dims();
PADDLE_ENFORCE_EQ(out_dims[0], x_dims[0],
"First dimension of output gradient and "
"input value must be equal.");
PADDLE_ENFORCE_EQ(out_dims[1], 1,
"Second dimension of output gradient "
"must be 1.");
auto* x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto* y_grad = ctx.Output<Tensor>(framework::GradVarName("Y"));
if (x_grad) x_grad->Resize(x_dims);
if (y_grad) y_grad->Resize(y_dims);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP(squared_l2_distance, ops::SquaredL2DistanceOp,
ops::SquaredL2DistanceOpMaker, squared_l2_distance_grad,
ops::SquaredL2DistanceGradOp);
REGISTER_OP_CPU_KERNEL(
squared_l2_distance,
ops::SquaredL2DistanceKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(
squared_l2_distance_grad,
ops::SquaredL2DistanceGradKernel<paddle::platform::CPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define EIGEN_USE_GPU
#include "paddle/operators/squared_l2_distance_op.h"
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(
squared_l2_distance,
ops::SquaredL2DistanceKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
squared_l2_distance_grad,
ops::SquaredL2DistanceGradKernel<paddle::platform::GPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
template <typename Place, typename T>
class SquaredL2DistanceKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* in0 = context.Input<Tensor>("X");
auto* in1 = context.Input<Tensor>("Y");
auto* out0 = context.Output<Tensor>("sub_result");
auto* out1 = context.Output<Tensor>("Out");
auto in0_dims = in0->dims();
auto in1_dims = in1->dims();
int cols = in0->numel() / in0_dims[0];
// reduce dimensions except the first
auto x =
EigenMatrix<T>::From(*in0, framework::make_ddim({in0_dims[0], cols}));
auto y =
EigenMatrix<T>::From(*in1, framework::make_ddim({in1_dims[0], cols}));
out0->mutable_data<T>(context.GetPlace());
out1->mutable_data<T>(context.GetPlace());
auto sub_result = EigenMatrix<T>::From(*out0);
auto z = EigenVector<T>::Flatten(*out1);
auto place = context.GetEigenDevice<Place>();
auto x_dims = x.dimensions();
auto y_dims = y.dimensions();
// buffer the subtraction result
if (y_dims[0] == 1 && x_dims[0] > y_dims[0]) {
sub_result.device(place) =
x -
y.broadcast(Eigen::array<int, 2>({{static_cast<int>(x_dims[0]), 1}}));
} else {
sub_result.device(place) = x - y;
}
auto sub_res_pow2 = sub_result * sub_result;
z.device(place) = sub_res_pow2.sum(Eigen::array<int, 1>({{1}}));
}
};
template <typename Place, typename T>
class SquaredL2DistanceGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* in0 = context.Input<Tensor>("sub_result");
auto* in1 = context.Input<Tensor>(framework::GradVarName("Out"));
auto* x_g = context.Output<Tensor>(framework::GradVarName("X"));
auto* y_g = context.Output<Tensor>(framework::GradVarName("Y"));
auto sub_result = EigenMatrix<T>::From(*in0);
auto out_grad = EigenMatrix<T>::From(*in1);
auto x_dims = x_g->dims();
auto y_dims = y_g->dims();
int cols = x_g->numel() / x_dims[0];
// calculate gradient
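// d(sum_j (X - Y)_j^2)/dX = 2 * (X - Y) = 2 * sub_result; the incoming Out
// gradient is broadcast across the `cols` columns before being multiplied in.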
auto grad_mat = 2 *
(out_grad.broadcast(Eigen::array<int, 2>({{1, cols}}))) *
sub_result;
// propagate back to input
auto eigen_place = context.GetEigenDevice<Place>();
if (x_g) {
x_g->mutable_data<T>(context.GetPlace());
// eigen matrix
auto x_grad =
EigenMatrix<T>::From(*x_g, framework::make_ddim({x_dims[0], cols}));
// dimensions are the same as those of sub_result
x_grad.device(eigen_place) = grad_mat;
}
if (y_g) {
y_g->mutable_data<T>(context.GetPlace());
PADDLE_ENFORCE_GE(sub_result.dimensions()[0], y_dims[0],
"First dimension of gradient must be greater or "
"equal than first dimension of target.");
if (sub_result.dimensions()[0] == y_dims[0]) {
auto y_grad =
EigenMatrix<T>::From(*y_g, framework::make_ddim({y_dims[0], cols}));
y_grad.device(eigen_place) = -1 * grad_mat;
} else {
auto col_sum_res = -1 * (grad_mat.sum(Eigen::array<int, 1>({{0}})));
auto y_grad = EigenVector<T>::Flatten(*y_g);
y_grad.device(eigen_place) = col_sum_res;
}
}
}
};
} // namespace operators
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/sum_op.h"
#include <vector>
namespace paddle {
namespace operators {
using framework::Tensor;
class SumOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
auto ins = ctx.MultiInput<framework::Tensor>("X");
auto *out = ctx.Output<framework::Tensor>("Out");
int N = ins.size();
auto in_dim = ins[0]->dims();
PADDLE_ENFORCE_GT(N, 1, "The number of input tensors should be > 1.");
for (int i = 1; i < N; i++) {
auto dim = ins[i]->dims();
PADDLE_ENFORCE(in_dim == dim, "Input tensors must have the same shape");
}
out->Resize(in_dim);
}
};
class SumOpMaker : public framework::OpProtoAndCheckerMaker {
public:
SumOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "the input tensors of sum operator.").AsDuplicable();
AddOutput("Out", "the output tensor of sum operator.");
AddComment(R"DOC(
Sum the input tensors.
)DOC");
}
};
class SumGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
auto outputs = ctx.MultiOutput<Tensor>(framework::GradVarName("X"));
auto dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
for (auto output : outputs) {
output->Resize(dims);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP(sum, ops::SumOp, ops::SumOpMaker, sum_grad, ops::SumGradOp);
REGISTER_OP_CPU_KERNEL(sum, ops::SumKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(sum_grad,
ops::SumGradKernel<paddle::platform::CPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define EIGEN_USE_GPU
#include "paddle/operators/sum_op.h"
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(sum, ops::SumKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(sum_grad,
ops::SumGradKernel<paddle::platform::GPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
template <typename Place, typename T>
class SumKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto ins = context.MultiInput<Tensor>("X");
auto* out = context.Output<Tensor>("Out");
out->mutable_data<T>(context.GetPlace());
auto place = context.GetEigenDevice<Place>();
auto result = EigenVector<T>::Flatten(*out);
int N = ins.size();
auto in = EigenVector<T>::Flatten(*(ins[0]));
result.device(place) = in;
for (int i = 1; i < N; i++) {
auto in = EigenVector<T>::Flatten(*(ins[i]));
result.device(place) = result + in;
}
}
};
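// Since d(x_0 + x_1 + ... + x_{N-1})/dx_i = 1 for every input, the gradient
// kernel below simply copies the Out gradient into each input gradient tensor.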
template <typename Place, typename T>
class SumGradKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* input = context.Input<Tensor>(framework::GradVarName("Out"));
auto outs = context.MultiOutput<Tensor>(framework::GradVarName("X"));
for (auto out : outs) {
out->mutable_data<T>(context.GetPlace());
}
auto place = context.GetEigenDevice<Place>();
auto in = EigenVector<T>::Flatten(*input);
for (auto out : outs) {
auto result = EigenVector<T>::Flatten(*out);
result.device(place) = in;
}
}
};
} // namespace operators
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/top_k_op.h"
namespace paddle {
namespace operators {
class TopkOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
"Input of TopkOP must be initialized.");
auto *input = ctx.Input<framework::Tensor>("X");
const int k = static_cast<int>(ctx.Attr<int>("k"));
PADDLE_ENFORCE_GE(k, 1, "k must be >= 1");
PADDLE_ENFORCE_GE(input->dims().size(), 1, "input must have at least 1 dimension");
PADDLE_ENFORCE_GE(input->dims()[input->dims().size() - 1], k,
"input must have >= k columns");
framework::DDim dims = input->dims();
dims[dims.size() - 1] = k;
ctx.Output<Tensor>("Out")->Resize(dims);
ctx.Output<Tensor>("Indices")->Resize(dims);
}
};
class TopkOpMaker : public framework::OpProtoAndCheckerMaker {
public:
TopkOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The input of Topk op");
AddOutput("Out", "The output tensor of Topk op");
AddOutput("Indices", "The indices of Topk elements of input");
AddComment(
R"DOC(If the input is a vector (1d tensor), finds the k largest entries in the vector and outputs their values and indices as vectors. Thus values[j] is the j-th largest entry in input, and its index is indices[j].
For matrices, computes the top k entries in each row. )DOC");
AddAttr<int>("k",
"Number of top elements to look for along the last "
"dimension (along each row for matrices).")
.SetDefault(1);
}
};
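// An illustrative example (assumed values, not from the original source): with
// input X = [[1, 5, 3], [7, 2, 9]] and k = 2, Out would be [[5, 3], [9, 7]]
// and Indices would be [[1, 2], [2, 0]].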
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_WITHOUT_GRADIENT(top_k, ops::TopkOp, ops::TopkOpMaker);
REGISTER_OP_CPU_KERNEL(top_k,
ops::TopkKernel<paddle::platform::CPUPlace, float>);
/* Copyright (c) 2016 PaddlePaddle Authors All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/op_registry.h"
#include "paddle/platform/assert.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T>
struct Pair {
__device__ __forceinline__ Pair() {}
__device__ __forceinline__ Pair(T value, int id) : v(value), id(id) {}
__device__ __forceinline__ void set(T value, int id) {
v = value;
this->id = id;  // qualify with this-> so the member is assigned, not the shadowing parameter
}
__device__ __forceinline__ void operator=(const Pair<T>& in) {
v = in.v;
id = in.id;
}
__device__ __forceinline__ bool operator<(const T value) const {
return (v < value);
}
__device__ __forceinline__ bool operator<(const Pair<T>& in) const {
return (v < in.v) || ((v == in.v) && (id > in.id));
}
__device__ __forceinline__ bool operator>(const Pair<T>& in) const {
return (v > in.v) || ((v == in.v) && (id < in.id));
}
T v;
int id;
};
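// AddTo inserts `p` into the descending array `topk` (sorted from largest to
// smallest), shifting smaller entries down and dropping the former last entry.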
template <typename T>
__device__ __forceinline__ void AddTo(Pair<T> topk[], const Pair<T>& p,
int beam_size) {
for (int k = beam_size - 2; k >= 0; k--) {
if (topk[k] < p) {
topk[k + 1] = topk[k];
} else {
topk[k + 1] = p;
return;
}
}
topk[0] = p;
}
template <typename T, int beam_size>
__device__ __forceinline__ void AddTo(Pair<T> topk[], const Pair<T>& p) {
for (int k = beam_size - 2; k >= 0; k--) {
if (topk[k] < p) {
topk[k + 1] = topk[k];
} else {
topk[k + 1] = p;
return;
}
}
topk[0] = p;
}
template <typename T, int BlockSize>
__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* src, int idx,
int dim, int beam_size) {
while (idx < dim) {
if (topk[beam_size - 1] < src[idx]) {
Pair<T> tmp(src[idx], idx);
AddTo<T>(topk, tmp, beam_size);
}
idx += BlockSize;
}
}
template <typename T, int BlockSize>
__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* src, int idx,
int dim, const Pair<T>& max,
int beam_size) {
while (idx < dim) {
if (topk[beam_size - 1] < src[idx]) {
Pair<T> tmp(src[idx], idx);
if (tmp < max) {
AddTo<T>(topk, tmp, beam_size);
}
}
idx += BlockSize;
}
}
template <typename T, int BlockSize>
__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* val, int* col,
int idx, int dim, int beam_size) {
while (idx < dim) {
if (topk[beam_size - 1] < val[idx]) {
Pair<T> tmp(val[idx], col[idx]);
AddTo<T>(topk, tmp, beam_size);
}
idx += BlockSize;
}
}
template <typename T, int BlockSize>
__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* val, int* col,
int idx, int dim, const Pair<T>& max,
int beam_size) {
while (idx < dim) {
if (topk[beam_size - 1] < val[idx]) {
Pair<T> tmp(val[idx], col[idx]);
if (tmp < max) {
AddTo<T>(topk, tmp, beam_size);
}
}
idx += BlockSize;
}
}
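// ThreadGetTopK maintains, per thread, a private descending list of up to
// MaxLength candidates drawn from the thread's strided slice of the row.
// `beam` is the number of entries already consumed (to be refilled), and `max`
// caps new candidates so values that were already output are not picked again.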
template <typename T, int MaxLength, int BlockSize>
__device__ __forceinline__ void ThreadGetTopK(Pair<T> topk[], int& beam,
int beam_size, const T* src,
bool& firstStep, bool& is_empty,
Pair<T>& max, int dim,
const int tid) {
if (beam > 0) {
int length = beam < beam_size ? beam : beam_size;
if (firstStep) {
firstStep = false;
GetTopK<T, BlockSize>(topk, src, tid, dim, length);
} else {
for (int k = 0; k < MaxLength; k++) {
if (k < MaxLength - beam) {
topk[k] = topk[k + beam];
} else {
topk[k].set(-INFINITY, -1);
}
}
if (!is_empty) {
GetTopK<T, BlockSize>(topk + MaxLength - beam, src, tid, dim, max,
length);
}
}
max = topk[MaxLength - 1];
if (max.v == -1) is_empty = true;
beam = 0;
}
}
template <typename T, int MaxLength, int BlockSize>
__device__ __forceinline__ void ThreadGetTopK(Pair<T> topk[], int& beam,
int beam_size, const T* val,
int* col, bool& firstStep,
bool& is_empty, Pair<T>& max,
int dim, const int tid) {
if (beam > 0) {
int length = beam < beam_size ? beam : beam_size;
if (firstStep) {
firstStep = false;
GetTopK<T, BlockSize>(topk, val, col, tid, dim, length);
} else {
for (int k = 0; k < MaxLength; k++) {
if (k < MaxLength - beam) {
topk[k] = topk[k + beam];
} else {
topk[k].set(-INFINITY, -1);
}
}
if (!is_empty) {
GetTopK<T, BlockSize>(topk + MaxLength - beam, val, col, tid, dim, max,
length);
}
}
max = topk[MaxLength - 1];
if (max.v == -1) is_empty = true;
beam = 0;
}
}
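// BlockReduce repeatedly finds the largest candidate among all threads' shared
// entries (a tree reduction over sh_topk using maxid), writes it to the output,
// and lets the winning thread expose its next private candidate, until k values
// have been produced or the winner's beam is exhausted.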
template <typename T, int MaxLength, int BlockSize>
__device__ __forceinline__ void BlockReduce(Pair<T>* sh_topk, int* maxid,
Pair<T> topk[], T** topVal,
int** topIds, int& beam, int& k,
const int tid, const int warp) {
while (true) {
__syncthreads();
if (tid < BlockSize / 2) {
if (sh_topk[tid] < sh_topk[tid + BlockSize / 2]) {
maxid[tid] = tid + BlockSize / 2;
} else {
maxid[tid] = tid;
}
}
__syncthreads();
for (int stride = BlockSize / 4; stride > 0; stride = stride / 2) {
if (tid < stride) {
if (sh_topk[maxid[tid]] < sh_topk[maxid[tid + stride]]) {
maxid[tid] = maxid[tid + stride];
}
}
__syncthreads();
}
__syncthreads();
if (tid == 0) {
**topVal = sh_topk[maxid[0]].v;
**topIds = sh_topk[maxid[0]].id;
(*topVal)++;
(*topIds)++;
}
if (tid == maxid[0]) beam++;
if (--k == 0) break;
__syncthreads();
if (tid == maxid[0]) {
if (beam < MaxLength) {
sh_topk[tid] = topk[beam];
}
}
if (maxid[0] / 32 == warp) {
if (__shfl(beam, (maxid[0]) % 32, 32) == MaxLength) break;
}
}
}
/**
* Each block computes one sample.
* In a block:
* 1. every thread gets its top MaxLength values;
* 2. merge them into sh_topk, block-reduce and get the max value;
* 3. go to the second step, until one thread's topk values are exhausted;
* 4. go back to the first step, until all top k values are obtained.
*/
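// One thread block therefore handles one row (sample) of the input; the host
// code below launches a grid of `input_height` blocks with 256 threads each.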
template <typename T, int MaxLength, int BlockSize>
__global__ void KeMatrixTopK(T* output, int output_stride, int* indices,
const T* src, int lds, int dim, int k) {
__shared__ Pair<T> sh_topk[BlockSize];
__shared__ int maxid[BlockSize / 2];
const int tid = threadIdx.x;
const int warp = threadIdx.x / 32;
output += blockIdx.x * output_stride;
indices += blockIdx.x * k;
Pair<T> topk[MaxLength];
int beam = MaxLength;
Pair<T> max;
bool is_empty = false;
bool firststep = true;
for (int k = 0; k < MaxLength; k++) {
topk[k].set(-INFINITY, -1);
}
while (k) {
ThreadGetTopK<T, MaxLength, BlockSize>(topk, beam, k,
src + blockIdx.x * lds, firststep,
is_empty, max, dim, tid);
sh_topk[tid] = topk[0];
BlockReduce<T, MaxLength, BlockSize>(sh_topk, maxid, topk, &output,
&indices, beam, k, tid, warp);
}
}
template <typename T>
class TopkOpCUDAKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"It must use GPUPlace.");
auto* input = ctx.Input<Tensor>("X");
auto* output = ctx.Output<Tensor>("Out");
auto* indices = ctx.Output<Tensor>("Indices");
size_t k = static_cast<int>(ctx.Attr<int>("k"));
const T* input_data = input->data<T>();
T* output_data = output->mutable_data<T>(ctx.GetPlace());
// FIXME(typhoonzero): data is always converted to type T?
int* indices_data = indices->mutable_data<int>(ctx.GetPlace());
size_t input_height = input->dims()[0];
size_t input_width = input->dims()[1];
if (k > input_width) k = input_width;
// NOTE: pass lds and dim the same as the input width.
// NOTE: the old matrix implementation of stride is different from eigen.
// TODO(typhoonzero): launch kernel on specified stream.
// TODO(typhoonzero): refine this kernel.
dim3 threads(256, 1);
dim3 grid(input_height, 1);
KeMatrixTopK<T, 5, 256><<<grid, threads>>>(
output_data, output->dims()[1], indices_data, input_data, input_width,
input_width, int(k));
}
};
} // namespace operators
} // namespace paddle
REGISTER_OP_GPU_KERNEL(top_k, paddle::operators::TopkOpCUDAKernel<float>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <iostream>
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
template <typename Place, typename T>
class TopkKernel : public framework::OpKernel {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
// Get the top k elements of each row of input tensor
// FIXME: only deals with matrices (2-d tensors).
auto* input = ctx.Input<Tensor>("X");
auto* output = ctx.Output<Tensor>("Out");
auto* indices = ctx.Output<Tensor>("Indices");
// k is determined by Attr
const size_t k = static_cast<int>(ctx.Attr<int>("k"));
T* output_data = output->mutable_data<T>(ctx.GetPlace());
T* indices_data = indices->mutable_data<T>(ctx.GetPlace());
auto eg_input = EigenMatrix<T>::From(*input);
// reshape input to a flattened matrix (like flat_inner_dims)
framework::DDim inputdims = input->dims();
const size_t row = framework::product(
framework::slice_ddim(inputdims, 0, inputdims.size() - 1));
const size_t col = inputdims[inputdims.size() - 1];
Eigen::DSizes<int, 2> flat2dims(row, col);
// NOTE: eigen shape doesn't affect paddle tensor.
eg_input.reshape(flat2dims);
for (size_t i = 0; i < row; i++) {
std::vector<std::pair<T, size_t>> vec;
for (size_t j = 0; j < col; j++) {
vec.push_back(std::pair<T, size_t>(eg_input(i, j), j));
}
std::partial_sort(
vec.begin(), vec.begin() + k, vec.end(),
[](const std::pair<T, size_t>& l, const std::pair<T, size_t>& r) {
return l.first > r.first;
});
for (size_t j = 0; j < k; j++) {
output_data[i * k + j] = vec[j].first;
indices_data[i * k + j] = vec[j].second;
}
}
}
};
} // namespace operators
} // namespace paddle
...@@ -26,16 +26,16 @@ class CPUUniformRandomKernel : public framework::OpKernel { ...@@ -26,16 +26,16 @@ class CPUUniformRandomKernel : public framework::OpKernel {
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto* tensor = context.Output<framework::Tensor>("Out"); auto* tensor = context.Output<framework::Tensor>("Out");
T* data = tensor->mutable_data<T>(context.GetPlace()); T* data = tensor->mutable_data<T>(context.GetPlace());
unsigned int seed = static_cast<unsigned int>(context.GetAttr<int>("seed")); unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
std::minstd_rand engine; std::minstd_rand engine;
if (seed == 0) { if (seed == 0) {
seed = std::random_device()(); seed = std::random_device()();
} }
engine.seed(seed); engine.seed(seed);
std::uniform_real_distribution<T> dist( std::uniform_real_distribution<T> dist(
static_cast<T>(context.GetAttr<float>("min")), static_cast<T>(context.Attr<float>("min")),
static_cast<T>(context.GetAttr<float>("max"))); static_cast<T>(context.Attr<float>("max")));
int64_t size = framework::product(tensor->dims()); int64_t size = tensor->numel();
for (int64_t i = 0; i < size; ++i) { for (int64_t i = 0; i < size; ++i) {
data[i] = dist(engine); data[i] = dist(engine);
} }
...@@ -48,10 +48,10 @@ class UniformRandomOp : public framework::OperatorWithKernel { ...@@ -48,10 +48,10 @@ class UniformRandomOp : public framework::OperatorWithKernel {
protected: protected:
void InferShape(const framework::InferShapeContext& ctx) const override { void InferShape(const framework::InferShapeContext& ctx) const override {
PADDLE_ENFORCE(GetAttr<float>("min") < GetAttr<float>("max"), PADDLE_ENFORCE(Attr<float>("min") < Attr<float>("max"),
"uniform_random's min must less then max"); "uniform_random's min must less then max");
auto* tensor = ctx.Output<framework::Tensor>("Out"); auto* tensor = ctx.Output<framework::Tensor>("Out");
auto dims = GetAttr<std::vector<int>>("dims"); auto dims = Attr<std::vector<int>>("dims");
std::vector<int64_t> temp; std::vector<int64_t> temp;
temp.reserve(dims.size()); temp.reserve(dims.size());
for (auto dim : dims) { for (auto dim : dims) {
......
...@@ -45,16 +45,16 @@ class GPUUniformRandomKernel : public framework::OpKernel { ...@@ -45,16 +45,16 @@ class GPUUniformRandomKernel : public framework::OpKernel {
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
auto* tensor = context.Output<framework::Tensor>("Out"); auto* tensor = context.Output<framework::Tensor>("Out");
T* data = tensor->mutable_data<T>(context.GetPlace()); T* data = tensor->mutable_data<T>(context.GetPlace());
unsigned int seed = static_cast<unsigned int>(context.GetAttr<int>("seed")); unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
if (seed == 0) { if (seed == 0) {
std::random_device rd; std::random_device rd;
seed = rd(); seed = rd();
} }
T min = static_cast<T>(context.GetAttr<float>("min")); T min = static_cast<T>(context.Attr<float>("min"));
T max = static_cast<T>(context.GetAttr<float>("max")); T max = static_cast<T>(context.Attr<float>("max"));
thrust::counting_iterator<unsigned int> index_sequence_begin(0); thrust::counting_iterator<unsigned int> index_sequence_begin(0);
ssize_t N = framework::product(tensor->dims()); int64_t size = tensor->numel();
thrust::transform(index_sequence_begin, index_sequence_begin + N, thrust::transform(index_sequence_begin, index_sequence_begin + size,
thrust::device_ptr<T>(data), thrust::device_ptr<T>(data),
UniformGenerator<T>(min, max, seed)); UniformGenerator<T>(min, max, seed));
} }
......
...@@ -14,6 +14,7 @@ limitations under the License. */ ...@@ -14,6 +14,7 @@ limitations under the License. */
#pragma once #pragma once
#include <vector>
#include "paddle/platform/dynload/cudnn.h" #include "paddle/platform/dynload/cudnn.h"
#include "paddle/platform/enforce.h" #include "paddle/platform/enforce.h"
#include "paddle/platform/macros.h" #include "paddle/platform/macros.h"
......
...@@ -25,10 +25,6 @@ limitations under the License. */ ...@@ -25,10 +25,6 @@ limitations under the License. */
#include "paddle/string/printf.h" #include "paddle/string/printf.h"
#include "paddle/string/to_string.h" #include "paddle/string/to_string.h"
#ifdef __GNUC__
#include <cxxabi.h> // for __cxa_demangle
#endif
#ifndef PADDLE_ONLY_CPU #ifndef PADDLE_ONLY_CPU
#include "paddle/platform/dynload/cublas.h" #include "paddle/platform/dynload/cublas.h"
...@@ -46,19 +42,6 @@ limitations under the License. */ ...@@ -46,19 +42,6 @@ limitations under the License. */
namespace paddle { namespace paddle {
namespace platform { namespace platform {
namespace {
#ifdef __GNUC__
inline std::string demangle(std::string name) {
int status = -4; // some arbitrary value to eliminate the compiler warning
std::unique_ptr<char, void (*)(void*)> res{
abi::__cxa_demangle(name.c_str(), NULL, NULL, &status), std::free};
return (status == 0) ? res.get() : name;
}
#else
inline std::string demangle(std::string name) { return name; }
#endif
}
struct EnforceNotMet : public std::exception { struct EnforceNotMet : public std::exception {
std::exception_ptr exp_; std::exception_ptr exp_;
std::string err_str_; std::string err_str_;
...@@ -79,7 +62,7 @@ struct EnforceNotMet : public std::exception { ...@@ -79,7 +62,7 @@ struct EnforceNotMet : public std::exception {
Dl_info info; Dl_info info;
for (int i = 0; i < size; ++i) { for (int i = 0; i < size; ++i) {
if (dladdr(call_stack[i], &info)) { if (dladdr(call_stack[i], &info)) {
auto demangled = demangle(info.dli_sname); auto demangled = info.dli_sname;
auto addr_offset = static_cast<char*>(call_stack[i]) - auto addr_offset = static_cast<char*>(call_stack[i]) -
static_cast<char*>(info.dli_saddr); static_cast<char*>(info.dli_saddr);
sout << string::Sprintf("%-3d %*0p %s + %zd\n", i, sout << string::Sprintf("%-3d %*0p %s + %zd\n", i,
......
...@@ -17,6 +17,7 @@ limitations under the License. */ ...@@ -17,6 +17,7 @@ limitations under the License. */
#include <vector> #include <vector>
#include "paddle/framework/backward.h" #include "paddle/framework/backward.h"
#include "paddle/framework/lod_tensor.h"
#include "paddle/framework/op_registry.h" #include "paddle/framework/op_registry.h"
#include "paddle/operators/net_op.h" #include "paddle/operators/net_op.h"
#include "paddle/operators/recurrent_op.h" #include "paddle/operators/recurrent_op.h"
...@@ -30,7 +31,7 @@ limitations under the License. */ ...@@ -30,7 +31,7 @@ limitations under the License. */
namespace py = pybind11; namespace py = pybind11;
USE_OP(add_two); USE_OP(add);
USE_OP(onehot_cross_entropy); USE_OP(onehot_cross_entropy);
USE_OP(sgd); USE_OP(sgd);
USE_OP(mul); USE_OP(mul);
...@@ -49,11 +50,18 @@ USE_OP(minus); ...@@ -49,11 +50,18 @@ USE_OP(minus);
USE_OP(cos_sim); USE_OP(cos_sim);
USE_CPU_ONLY_OP(gather); USE_CPU_ONLY_OP(gather);
USE_CPU_ONLY_OP(scatter); USE_CPU_ONLY_OP(scatter);
USE_CPU_ONLY_OP(concat);
USE_OP(top_k);
USE_OP(squared_l2_distance);
USE_OP(sum);
USE_OP(reshape);
namespace paddle { namespace paddle {
namespace framework { namespace framework {
using Tensor = framework::Tensor; using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using LoD = framework::LoD;
static size_t UniqueIntegerGenerator() { static size_t UniqueIntegerGenerator() {
static std::atomic<size_t> generator; static std::atomic<size_t> generator;
...@@ -113,6 +121,60 @@ PYBIND11_PLUGIN(core) { ...@@ -113,6 +121,60 @@ PYBIND11_PLUGIN(core) {
return self.data<float>()[offset]; return self.data<float>()[offset];
}); });
py::class_<LoDTensor>(m, "LoDTensor", R"DOC(LoD(Leval of Ddetails) Tensor.
The tensor and LoD info should be created before creating the LoDTensor, then
call the set_tensor and set_lod functions to set them.
)DOC")
.def("__init__",
[](LoDTensor &instance,
const std::vector<std::vector<size_t>> &lod,
Tensor *t) {
#ifdef PADDLE_ONLY_CPU
new (&instance) LoDTensor(lod, t);
#else
paddle::framework::LoD new_lod;
new_lod.reserve(lod.size());
std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
new (&instance) LoDTensor(new_lod, t);
#endif
})
.def("set_tensor",
[](LoDTensor &self, Tensor *tensor) { self.set_tensor(tensor); })
.def("set_lod",
[](LoDTensor &self, const std::vector<std::vector<size_t>> &lod) {
#ifdef PADDLE_ONLY_CPU
self.set_lod(lod);
#else
paddle::framework::LoD new_lod;
new_lod.reserve(lod.size());
std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
self.set_lod(new_lod);
#endif
})
.def("tensor",
[](LoDTensor &self) -> Tensor & { return self.tensor(); },
py::return_value_policy::reference)
.def("lod", [](LoDTensor &self) -> std::vector<std::vector<size_t>> {
#ifdef PADDLE_ONLY_CPU
return self.lod();
#else
auto lod = self.lod();
std::vector<std::vector<size_t>> new_lod;
new_lod.reserve(lod.size());
std::transform(lod.begin(), lod.end(), std::back_inserter(new_lod),
[](paddle::framework::Vector<size_t> item) ->
std::vector<size_t> {
std::vector<size_t> v;
v.reserve(item.size());
std::copy(item.begin(), item.end(), std::back_inserter(v));
return v;
});
return new_lod;
#endif
});
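// A hedged usage sketch from the Python side (everything except the methods
// bound above is an assumption for illustration):
//   t = core.LoDTensor([[0, 2, 5]], some_tensor)  # a lod plus an existing Tensor
//   t.set_lod([[0, 3, 5]])
//   print(t.lod())  # -> [[0, 3, 5]]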
py::class_<Variable>(m, "Variable", R"DOC(Variable Class. py::class_<Variable>(m, "Variable", R"DOC(Variable Class.
All parameter, weight, gradient are variables in Paddle. All parameter, weight, gradient are variables in Paddle.
...@@ -124,6 +186,11 @@ All parameter, weight, gradient are variables in Paddle. ...@@ -124,6 +186,11 @@ All parameter, weight, gradient are variables in Paddle.
.def("get_tensor", .def("get_tensor",
[](Variable &self) -> Tensor * { return self.GetMutable<Tensor>(); }, [](Variable &self) -> Tensor * { return self.GetMutable<Tensor>(); },
py::return_value_policy::reference) py::return_value_policy::reference)
.def("get_lod_tensor",
[](Variable &self) -> LoDTensor * {
return self.GetMutable<LoDTensor>();
},
py::return_value_policy::reference)
.def("get_net", .def("get_net",
[](Variable &self) -> operators::NetOp * { [](Variable &self) -> operators::NetOp * {
return self.GetMutable<operators::NetOp>(); return self.GetMutable<operators::NetOp>();
...@@ -214,7 +281,10 @@ All parameter, weight, gradient are variables in Paddle. ...@@ -214,7 +281,10 @@ All parameter, weight, gradient are variables in Paddle.
-> std::map<std::string, std::vector<std::string>> { -> std::map<std::string, std::vector<std::string>> {
return op.Outputs(); return op.Outputs();
}) })
.def("output_vars",
[](const OperatorBase &op) { return op.OutputVars(true); })
.def("inputs", [](const OperatorBase &op) { return op.Inputs(); }) .def("inputs", [](const OperatorBase &op) { return op.Inputs(); })
.def("input_vars", [](const OperatorBase &op) { return op.InputVars(); })
.def("__str__", &OperatorBase::DebugString) .def("__str__", &OperatorBase::DebugString)
.def("no_intermediate_outputs", .def("no_intermediate_outputs",
[](const OperatorBase &op) { return op.OutputVars(false); }) [](const OperatorBase &op) { return op.OutputVars(false); })
......
...@@ -30,6 +30,8 @@ Configuring cmake in /paddle/build ... ...@@ -30,6 +30,8 @@ Configuring cmake in /paddle/build ...
-DCMAKE_BUILD_TYPE=Release -DCMAKE_BUILD_TYPE=Release
-DWITH_DOC=OFF -DWITH_DOC=OFF
-DWITH_GPU=${WITH_GPU:-OFF} -DWITH_GPU=${WITH_GPU:-OFF}
-DWITH_MKLDNN=${WITH_MKLDNN:-ON}
-DWITH_MKLML=${WITH_MKLML:-ON}
-DWITH_AVX=${WITH_AVX:-OFF} -DWITH_AVX=${WITH_AVX:-OFF}
-DWITH_GOLANG=${WITH_GOLANG:-ON} -DWITH_GOLANG=${WITH_GOLANG:-ON}
-DWITH_SWIG_PY=ON -DWITH_SWIG_PY=ON
...@@ -37,7 +39,7 @@ Configuring cmake in /paddle/build ... ...@@ -37,7 +39,7 @@ Configuring cmake in /paddle/build ...
-DWITH_PYTHON=${WITH_PYTHON:-ON} -DWITH_PYTHON=${WITH_PYTHON:-ON}
-DWITH_SWIG_PY=${WITH_SWIG_PY:-ON} -DWITH_SWIG_PY=${WITH_SWIG_PY:-ON}
-DCUDNN_ROOT=/usr/ -DCUDNN_ROOT=/usr/
-DWITH_STYLE_CHECK=${WITH_STYLE_CHECK:-OFF} -DWITH_STYLE_CHECK=${WITH_STYLE_CHECK:-ON}
-DWITH_TESTING=${WITH_TESTING:-ON} -DWITH_TESTING=${WITH_TESTING:-ON}
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_EXPORT_COMPILE_COMMANDS=ON
======================================== ========================================
...@@ -50,6 +52,8 @@ cmake .. \ ...@@ -50,6 +52,8 @@ cmake .. \
-DCMAKE_BUILD_TYPE=Release \ -DCMAKE_BUILD_TYPE=Release \
-DWITH_DOC=OFF \ -DWITH_DOC=OFF \
-DWITH_GPU=${WITH_GPU:-OFF} \ -DWITH_GPU=${WITH_GPU:-OFF} \
-DWITH_MKLDNN=${WITH_MKLDNN:-ON} \
-DWITH_MKLML=${WITH_MKLML:-ON} \
-DWITH_AVX=${WITH_AVX:-OFF} \ -DWITH_AVX=${WITH_AVX:-OFF} \
-DWITH_GOLANG=${WITH_GOLANG:-ON} \ -DWITH_GOLANG=${WITH_GOLANG:-ON} \
-DWITH_SWIG_PY=${WITH_SWIG_PY:-ON} \ -DWITH_SWIG_PY=${WITH_SWIG_PY:-ON} \
......
...@@ -2,22 +2,58 @@ ...@@ -2,22 +2,58 @@
set -xe set -xe
mkdir -p /paddle/build_android BUILD_ROOT=/paddle/build_android
cd /paddle/build_android DEST_ROOT=/paddle/install
rm -rf /paddle/install 2>/dev/null || true
cmake -DCMAKE_SYSTEM_NAME=Android \ rm -rf $BUILD_ROOT 2>/dev/null || true
-DANDROID_STANDALONE_TOOLCHAIN=$ANDROID_STANDALONE_TOOLCHAIN \ mkdir -p $BUILD_ROOT
-DANDROID_ABI=armeabi-v7a \ cd $BUILD_ROOT
-DANDROID_ARM_NEON=ON \
-DANDROID_ARM_MODE=ON \ if [ $ANDROID_ABI == "armeabi-v7a" ]; then
-DHOST_C_COMPILER=/usr/bin/gcc \ cmake -DCMAKE_SYSTEM_NAME=Android \
-DHOST_CXX_COMPILER=/usr/bin/g++ \ -DANDROID_STANDALONE_TOOLCHAIN=$ANDROID_ARM_STANDALONE_TOOLCHAIN \
-DCMAKE_INSTALL_PREFIX=/paddle/install \ -DANDROID_ABI=$ANDROID_ABI \
-DCMAKE_BUILD_TYPE=RelWithDebInfo \ -DANDROID_ARM_NEON=ON \
-DCMAKE_C_FLAGS_RELWITHDEBINFO="-O3" \ -DANDROID_ARM_MODE=ON \
-DCMAKE_CXX_FLAGS_RELWITHDEBINFO="-O3" \ -DHOST_C_COMPILER=/usr/bin/gcc \
-DWITH_C_API=ON \ -DHOST_CXX_COMPILER=/usr/bin/g++ \
-DWITH_SWIG_PY=OFF \ -DCMAKE_INSTALL_PREFIX=$DEST_ROOT \
.. -DCMAKE_BUILD_TYPE=Release \
-DUSE_EIGEN_FOR_BLAS=ON \
-DWITH_C_API=ON \
-DWITH_SWIG_PY=OFF \
-DWITH_STYLE_CHECK=OFF \
..
elif [ $ANDROID_ABI == "arm64-v8a" ]; then
cmake -DCMAKE_SYSTEM_NAME=Android \
-DANDROID_STANDALONE_TOOLCHAIN=$ANDROID_ARM64_STANDALONE_TOOLCHAIN \
-DANDROID_ABI=$ANDROID_ABI \
-DANDROID_ARM_MODE=ON \
-DHOST_C_COMPILER=/usr/bin/gcc \
-DHOST_CXX_COMPILER=/usr/bin/g++ \
-DCMAKE_INSTALL_PREFIX=$DEST_ROOT \
-DCMAKE_BUILD_TYPE=Release \
-DUSE_EIGEN_FOR_BLAS=OFF \
-DWITH_C_API=ON \
-DWITH_SWIG_PY=OFF \
-DWITH_STYLE_CHECK=OFF \
..
elif [ $ANDROID_ABI == "armeabi" ]; then
cmake -DCMAKE_SYSTEM_NAME=Android \
-DANDROID_STANDALONE_TOOLCHAIN=$ANDROID_ARM_STANDALONE_TOOLCHAIN \
-DANDROID_ABI=$ANDROID_ABI \
-DANDROID_ARM_MODE=ON \
-DHOST_C_COMPILER=/usr/bin/gcc \
-DHOST_CXX_COMPILER=/usr/bin/g++ \
-DCMAKE_INSTALL_PREFIX=/paddle/install \
-DCMAKE_BUILD_TYPE=Release \
-DWITH_C_API=ON \
-DWITH_SWIG_PY=OFF \
-DWITH_STYLE_CHECK=OFF \
..
else
echo "Invalid ANDROID_ABI: $ANDROID_ABI"
fi
make -j `nproc` make -j `nproc`
make install -j `nproc` make install -j `nproc`
...@@ -22,6 +22,7 @@ cmake -DCMAKE_SYSTEM_NAME=Android \ ...@@ -22,6 +22,7 @@ cmake -DCMAKE_SYSTEM_NAME=Android \
-DANDROID_ABI=armeabi-v7a \ -DANDROID_ABI=armeabi-v7a \
-DANDROID_ARM_NEON=ON \ -DANDROID_ARM_NEON=ON \
-DANDROID_ARM_MODE=ON \ -DANDROID_ARM_MODE=ON \
-DUSE_EIGEN_FOR_BLAS=ON \
-DWITH_C_API=ON \ -DWITH_C_API=ON \
-DWITH_SWIG_PY=OFF \ -DWITH_SWIG_PY=OFF \
-DWITH_STYLE_CHECK=OFF \ -DWITH_STYLE_CHECK=OFF \
......
...@@ -320,6 +320,9 @@ void loadFileList(const std::string& fileListFileName, ...@@ -320,6 +320,9 @@ void loadFileList(const std::string& fileListFileName,
} }
double getMemoryUsage() { double getMemoryUsage() {
#if defined(__ANDROID__)
return 0.0;
#else
FILE* fp = fopen("/proc/meminfo", "r"); FILE* fp = fopen("/proc/meminfo", "r");
CHECK(fp) << "failed to fopen /proc/meminfo"; CHECK(fp) << "failed to fopen /proc/meminfo";
size_t bufsize = 256 * sizeof(char); size_t bufsize = 256 * sizeof(char);
...@@ -357,6 +360,7 @@ double getMemoryUsage() { ...@@ -357,6 +360,7 @@ double getMemoryUsage() {
delete[] buf; delete[] buf;
double usedMem = 1.0 - 1.0 * (freeMem + bufMem + cacheMem) / totalMem; double usedMem = 1.0 - 1.0 * (freeMem + bufMem + cacheMem) / totalMem;
return usedMem; return usedMem;
#endif
} }
SyncThreadPool* getGlobalSyncThreadPool() { SyncThreadPool* getGlobalSyncThreadPool() {
......
...@@ -33,6 +33,13 @@ limitations under the License. */ ...@@ -33,6 +33,13 @@ limitations under the License. */
#include "Flags.h" #include "Flags.h"
#include "hl_gpu.h" #include "hl_gpu.h"
#if defined(__ANDROID__) && (__ANDROID_API__ < 21)
inline int rand_r(unsigned int* seedp) {
(void)seedp;
return rand();
}
#endif
/** /**
* Loop over the elements in a container * Loop over the elements in a container
* TODO(yuyang18): It's this foreach useful? Why not use C++ 11 foreach, * TODO(yuyang18): It's this foreach useful? Why not use C++ 11 foreach,
......
...@@ -271,6 +271,7 @@ message ImageConfig { ...@@ -271,6 +271,7 @@ message ImageConfig {
// The size of input feature map. // The size of input feature map.
required uint32 img_size = 8; required uint32 img_size = 8;
optional uint32 img_size_y = 9; optional uint32 img_size_y = 9;
optional uint32 img_size_z = 10 [ default = 1 ];
} }
message PriorBoxConfig { message PriorBoxConfig {
...@@ -287,6 +288,11 @@ message PadConfig { ...@@ -287,6 +288,11 @@ message PadConfig {
repeated uint32 pad_w = 4; repeated uint32 pad_w = 4;
} }
message ReshapeConfig {
repeated uint32 height_axis = 1;
repeated uint32 width_axis = 2;
}
message MultiBoxLossConfig { message MultiBoxLossConfig {
required uint32 num_classes = 1; required uint32 num_classes = 1;
required float overlap_threshold = 2; required float overlap_threshold = 2;
...@@ -339,7 +345,6 @@ message LayerInputConfig { ...@@ -339,7 +345,6 @@ message LayerInputConfig {
} }
message LayerConfig { message LayerConfig {
required string name = 1; required string name = 1;
required string type = 2; required string type = 2;
optional uint64 size = 3; optional uint64 size = 3;
...@@ -515,7 +520,11 @@ message LayerConfig { ...@@ -515,7 +520,11 @@ message LayerConfig {
// for HuberRegressionLoss // for HuberRegressionLoss
optional double delta = 57 [ default = 1.0 ]; optional double delta = 57 [ default = 1.0 ];
// for 3D data
optional uint64 depth = 58 [ default = 1 ]; optional uint64 depth = 58 [ default = 1 ];
// for switch order layer
optional ReshapeConfig reshape_conf = 59;
} }
message EvaluatorConfig { message EvaluatorConfig {
......
...@@ -1332,6 +1332,12 @@ def parse_image(image, input_layer_name, image_conf): ...@@ -1332,6 +1332,12 @@ def parse_image(image, input_layer_name, image_conf):
get_img_size(input_layer_name, image_conf.channels) get_img_size(input_layer_name, image_conf.channels)
def parse_image3d(image, input_layer_name, image_conf):
image_conf.channels = image.channels
image_conf.img_size, image_conf.img_size_y, image_conf.img_size_z = \
get_img3d_size(input_layer_name, image_conf.channels)
def parse_norm(norm, input_layer_name, norm_conf): def parse_norm(norm, input_layer_name, norm_conf):
norm_conf.norm_type = norm.norm_type norm_conf.norm_type = norm.norm_type
config_assert( config_assert(
...@@ -2028,6 +2034,7 @@ class ParameterReluLayer(LayerBase): ...@@ -2028,6 +2034,7 @@ class ParameterReluLayer(LayerBase):
config_assert(input_layer.size % partial_sum == 0, config_assert(input_layer.size % partial_sum == 0,
"a wrong setting for partial_sum") "a wrong setting for partial_sum")
self.set_layer_size(input_layer.size) self.set_layer_size(input_layer.size)
self.config.partial_sum = partial_sum
self.create_input_parameter(0, input_layer.size / partial_sum) self.create_input_parameter(0, input_layer.size / partial_sum)
...@@ -2365,9 +2372,11 @@ class BatchNormLayer(LayerBase): ...@@ -2365,9 +2372,11 @@ class BatchNormLayer(LayerBase):
name, name,
inputs, inputs,
bias=True, bias=True,
img3D=False,
use_global_stats=True, use_global_stats=True,
moving_average_fraction=0.9, moving_average_fraction=0.9,
batch_norm_type=None, batch_norm_type=None,
mean_var_names=None,
**xargs): **xargs):
if inputs is None: if inputs is None:
inputs = [] inputs = []
...@@ -2409,24 +2418,69 @@ class BatchNormLayer(LayerBase): ...@@ -2409,24 +2418,69 @@ class BatchNormLayer(LayerBase):
input_layer = self.get_input_layer(0) input_layer = self.get_input_layer(0)
image_conf = self.config.inputs[0].image_conf image_conf = self.config.inputs[0].image_conf
parse_image(self.inputs[0].image, input_layer.name, image_conf) if img3D:
parse_image3d(self.inputs[0].image, input_layer.name, image_conf)
# Only pass the width and height of input to batch_norm layer # Only pass the width and height of input to batch_norm layer
# when either of it is non-zero. # when either of it is non-zero.
if input_layer.width != 0 or input_layer.height != 0: if input_layer.width != 0 or input_layer.height != 0:
self.set_cnn_layer(name, image_conf.img_size_y, image_conf.img_size, self.set_cnn_layer(
image_conf.channels, False) input_layer_name=name,
depth=image_conf.img_size_z,
height=image_conf.img_size_y,
width=image_conf.img_size,
channels=image_conf.channels,
is_print=True)
else:
self.set_layer_size(input_layer.size)
else: else:
self.set_layer_size(input_layer.size) parse_image(self.inputs[0].image, input_layer.name, image_conf)
# Only pass the width and height of input to batch_norm layer
# when either of them is non-zero.
if input_layer.width != 0 or input_layer.height != 0:
self.set_cnn_layer(
input_layer_name=name,
height=image_conf.img_size_y,
width=image_conf.img_size,
channels=image_conf.channels,
is_print=True)
else:
self.set_layer_size(input_layer.size)
psize = self.calc_parameter_size(image_conf) psize = self.calc_parameter_size(image_conf)
dims = [1, psize] dims = [1, psize]
if mean_var_names is not None:
assert len(mean_var_names) == 2
self.inputs[1].parameter_name = mean_var_names[0]
self.inputs[2].parameter_name = mean_var_names[1]
self.create_input_parameter(0, psize) self.create_input_parameter(0, psize)
self.create_input_parameter(1, psize, dims) self.create_input_parameter(1, psize, dims)
self.create_input_parameter(2, psize, dims) self.create_input_parameter(2, psize, dims)
self.create_bias_parameter(bias, psize) self.create_bias_parameter(bias, psize)
def set_cnn_layer(self,
input_layer_name,
depth=None,
height=None,
width=None,
channels=None,
is_print=True):
depthIsNone = False
if depth is None:
depth = 1
depthIsNone = True
size = depth * height * width * channels
self.set_layer_size(size)
self.set_layer_height_width(height, width)
self.set_layer_depth(depth)
if is_print and depthIsNone:
print("output for %s: c = %d, h = %d, w = %d, size = %d" %
(input_layer_name, channels, height, width, size))
elif is_print:
print("output for %s: c = %d, d = %d, h = %d, w = %d, size = %d" %
(input_layer_name, channels, depth, height, width, size))
def calc_parameter_size(self, image_conf): def calc_parameter_size(self, image_conf):
return image_conf.channels return image_conf.channels
...@@ -2688,9 +2742,20 @@ class AddToLayer(LayerBase): ...@@ -2688,9 +2742,20 @@ class AddToLayer(LayerBase):
super(AddToLayer, self).__init__( super(AddToLayer, self).__init__(
name, 'addto', 0, inputs=inputs, **xargs) name, 'addto', 0, inputs=inputs, **xargs)
config_assert(len(inputs) > 0, 'inputs cannot be empty for AddToLayer') config_assert(len(inputs) > 0, 'inputs cannot be empty for AddToLayer')
for input_index in xrange(len(self.inputs)):
input_layer = self.get_input_layer(input_index) if len(self.inputs) > 1:
self.set_layer_size(input_layer.size) for input_index in xrange(len(self.inputs)):
assert self.get_input_layer(0).height == self.get_input_layer(
input_index).height
assert self.get_input_layer(0).width == self.get_input_layer(
input_index).width
assert self.get_input_layer(0).depth == self.get_input_layer(
input_index).depth
self.set_layer_size(self.get_input_layer(0).size)
self.set_layer_height_width(self.get_input_layer(0).height, \
self.get_input_layer(0).width)
self.set_layer_depth(self.get_input_layer(0).depth)
self.create_bias_parameter(bias, self.config.size) self.create_bias_parameter(bias, self.config.size)
...@@ -3370,11 +3435,20 @@ class ConcatenateLayer(LayerBase): ...@@ -3370,11 +3435,20 @@ class ConcatenateLayer(LayerBase):
name, 'concat', 0, inputs=inputs, **xargs) name, 'concat', 0, inputs=inputs, **xargs)
size = 0 size = 0
for input_index in xrange(len(self.inputs)): for input_index in xrange(len(self.inputs)):
assert self.get_input_layer(0).height == self.get_input_layer(
input_index).height
assert self.get_input_layer(0).width == self.get_input_layer(
input_index).width
assert self.get_input_layer(0).depth == self.get_input_layer(
input_index).depth
input_layer = self.get_input_layer(input_index) input_layer = self.get_input_layer(input_index)
input = self.inputs[input_index] input = self.inputs[input_index]
if self.config.size == 0: if self.config.size == 0:
size += input_layer.size size += input_layer.size
self.set_layer_height_width(self.get_input_layer(0).height, \
self.get_input_layer(0).width)
self.set_layer_depth(self.get_input_layer(0).depth)
self.set_layer_size(size) self.set_layer_size(size)
...@@ -3670,6 +3744,15 @@ class RecurrentLayerGroup(LayerBase): ...@@ -3670,6 +3744,15 @@ class RecurrentLayerGroup(LayerBase):
name, 'recurrent_layer_group', 0, inputs=[], device=device) name, 'recurrent_layer_group', 0, inputs=[], device=device)
@config_layer('switch_order')
class SwitchOrderLayer(LayerBase):
def __init__(self, name, inputs, reshape, **xargs):
super(SwitchOrderLayer, self).__init__(
name, 'switch_order', 0, inputs=inputs, **xargs)
self.config.reshape_conf.height_axis.extend(reshape['height'])
self.config.reshape_conf.width_axis.extend(reshape['width'])
# Deprecated, use a new layer specific class instead # Deprecated, use a new layer specific class instead
@config_func @config_func
def Layer(name, type, **xargs): def Layer(name, type, **xargs):
......
...@@ -131,6 +131,7 @@ __all__ = [ ...@@ -131,6 +131,7 @@ __all__ = [
'row_conv_layer', 'row_conv_layer',
'dropout_layer', 'dropout_layer',
'prelu_layer', 'prelu_layer',
'switch_order_layer',
'gated_unit_layer', 'gated_unit_layer',
'crop_layer', 'crop_layer',
'sub_nested_seq_layer', 'sub_nested_seq_layer',
...@@ -239,6 +240,7 @@ class LayerType(object): ...@@ -239,6 +240,7 @@ class LayerType(object):
SMOOTH_L1 = 'smooth_l1' SMOOTH_L1 = 'smooth_l1'
PRELU = 'prelu' PRELU = 'prelu'
SWITCH_ORDER_LAYER = 'switch_order'
CROP_LAYER = 'crop' CROP_LAYER = 'crop'
SUB_NESTED_SEQ = 'sub_nested_seq' SUB_NESTED_SEQ = 'sub_nested_seq'
CLIP_LAYER = 'clip' CLIP_LAYER = 'clip'
...@@ -352,6 +354,10 @@ class LayerOutput(object): ...@@ -352,6 +354,10 @@ class LayerOutput(object):
def height(self): def height(self):
return cp.g_layer_map[self.full_name].height return cp.g_layer_map[self.full_name].height
@property
def depth(self):
return cp.g_layer_map[self.full_name].depth
def set_input(self, input): def set_input(self, input):
""" """
Set the input for a memory layer. Can only be used for memory layer Set the input for a memory layer. Can only be used for memory layer
...@@ -941,7 +947,7 @@ def data_layer(name, size, depth=None, height=None, width=None, ...@@ -941,7 +947,7 @@ def data_layer(name, size, depth=None, height=None, width=None,
if height is not None and width is not None: if height is not None and width is not None:
num_filters = size / (width * height * depth) num_filters = size / (width * height * depth)
assert num_filters * width * height * depth == size, \ assert num_filters * width * height * depth == size, \
"size=%s width=%s height=%s depth=%s" % (size, width, height, depth) "size=%s width=%s height=%s depth=%s" % (size, width, height, depth)
return LayerOutput(name, LayerType.DATA, size=size, num_filters=num_filters) return LayerOutput(name, LayerType.DATA, size=size, num_filters=num_filters)
...@@ -1217,7 +1223,8 @@ def detection_output_layer(input_loc, ...@@ -1217,7 +1223,8 @@ def detection_output_layer(input_loc,
name=None): name=None):
""" """
Apply the NMS to the output of network and compute the predict bounding Apply the NMS to the output of network and compute the predict bounding
box location. box location. The output of this layer could be None if there is no valid
bounding box.
:param name: The Layer Name. :param name: The Layer Name.
:type name: basestring :type name: basestring
...@@ -2951,13 +2958,15 @@ def img_cmrnorm_layer(input, ...@@ -2951,13 +2958,15 @@ def img_cmrnorm_layer(input,
def batch_norm_layer(input, def batch_norm_layer(input,
act=None, act=None,
name=None, name=None,
img3D=False,
num_channels=None, num_channels=None,
bias_attr=None, bias_attr=None,
param_attr=None, param_attr=None,
layer_attr=None, layer_attr=None,
batch_norm_type=None, batch_norm_type=None,
moving_average_fraction=0.9, moving_average_fraction=0.9,
use_global_stats=None): use_global_stats=None,
mean_var_names=None):
""" """
Batch Normalization Layer. The notation of this layer as follow. Batch Normalization Layer. The notation of this layer as follow.
...@@ -3024,6 +3033,8 @@ def batch_norm_layer(input, ...@@ -3024,6 +3033,8 @@ def batch_norm_layer(input,
:math:`runningMean = newMean*(1-factor) :math:`runningMean = newMean*(1-factor)
+ runningMean*factor` + runningMean*factor`
:type moving_average_fraction: float. :type moving_average_fraction: float.
:param mean_var_names: the names of the moving mean and variance parameters, i.e. [mean name, variance name]
:type mean_var_names: string list
:return: LayerOutput object. :return: LayerOutput object.
:rtype: LayerOutput :rtype: LayerOutput
""" """
...@@ -3037,6 +3048,7 @@ def batch_norm_layer(input, ...@@ -3037,6 +3048,7 @@ def batch_norm_layer(input,
(batch_norm_type == "cudnn_batch_norm") (batch_norm_type == "cudnn_batch_norm")
l = Layer( l = Layer(
name=name, name=name,
img3D=img3D,
inputs=Input( inputs=Input(
input.name, image=Image(channels=num_channels), **param_attr.attr), input.name, image=Image(channels=num_channels), **param_attr.attr),
active_type=act.name, active_type=act.name,
...@@ -3045,6 +3057,7 @@ def batch_norm_layer(input, ...@@ -3045,6 +3057,7 @@ def batch_norm_layer(input,
bias=ParamAttr.to_bias(bias_attr), bias=ParamAttr.to_bias(bias_attr),
moving_average_fraction=moving_average_fraction, moving_average_fraction=moving_average_fraction,
use_global_stats=use_global_stats, use_global_stats=use_global_stats,
mean_var_names=mean_var_names,
**ExtraLayerAttribute.to_kwargs(layer_attr)) **ExtraLayerAttribute.to_kwargs(layer_attr))
return LayerOutput( return LayerOutput(
...@@ -6404,6 +6417,55 @@ def gated_unit_layer(input, ...@@ -6404,6 +6417,55 @@ def gated_unit_layer(input,
layer_attr=layer_attr) layer_attr=layer_attr)
@layer_support()
@wrap_name_default('switch_order')
def switch_order_layer(input,
name=None,
reshape_axis=None,
act=None,
layer_attr=None):
"""
This layer switches the dimension order of the image input,
from "batchSize, channels, height, width"
to "batchSize, height, width, channels".
The example usage is:
.. code-block:: python
   reshape_axis = 3
   switch = switch_order_layer(input=layer, name='switch', reshape_axis=reshape_axis)
   # equivalent to reshape = {'height': [0, 1, 2], 'width': [3]}
:param input: The input layer.
:type input: LayerOutput
:param name: Name of this layer.
:type name: basestring
:param reshape_axis: the axis that splits the input dimensions into the
    "height" group [0, reshape_axis) and the "width" group [reshape_axis, 4).
:type reshape_axis: int
:return: LayerOutput object.
:rtype: LayerOutput
"""
assert isinstance(input, LayerOutput)
assert reshape_axis is not None and 0 < reshape_axis < 4
height = [ele for ele in xrange(reshape_axis)]
width = [ele for ele in range(reshape_axis, 4)]
reshape = {'height': height, 'width': width}
l = Layer(
name=name,
inputs=input.name,
reshape=reshape,
type=LayerType.SWITCH_ORDER_LAYER,
active_type=act.name,
**ExtraLayerAttribute.to_kwargs(layer_attr))
return LayerOutput(
name=name,
layer_type=LayerType.SWITCH_ORDER_LAYER,
activation=act,
parents=input,
size=l.config.size)
@wrap_name_default() @wrap_name_default()
@layer_support() @layer_support()
def crop_layer(input, offset, axis=2, shape=None, name=None, layer_attr=None): def crop_layer(input, offset, axis=2, shape=None, name=None, layer_attr=None):
......
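For reference, a minimal usage sketch of the two layer options added above. This is not part of the diff; the layer names, sizes, parameter names, and the LinearActivation choice are illustrative assumptions that mirror the 3-D batch-norm test config further below.

from paddle.trainer_config_helpers import *

settings(batch_size=100, learning_rate=1e-4)

# 3-D input: depth=3, height=6, width=20, one channel (size = 3 * 6 * 20 = 360).
data3D = data_layer(name='data3D', size=3 * 6 * 20, depth=3, height=6, width=20)

# img3D=True makes batch_norm parse the 3-D image shape; mean_var_names pins
# the names of the mean/variance parameters (hypothetical names here).
bn = batch_norm_layer(
    data3D,
    num_channels=1,
    img3D=True,
    mean_var_names=['bn_3d.mean', 'bn_3d.var'])

# reshape_axis=3 groups axes [0, 1, 2] as "height" and [3] as "width",
# i.e. it switches NCHW order to NHWC.
switched = switch_order_layer(
    input=bn, name='nhwc_out', reshape_axis=3, act=LinearActivation())

outputs(switched)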
...@@ -10,6 +10,6 @@ test_prelu_layer test_row_conv test_detection_output_layer test_multibox_loss_la ...@@ -10,6 +10,6 @@ test_prelu_layer test_row_conv test_detection_output_layer test_multibox_loss_la
test_recursive_topology test_gated_unit_layer test_clip_layer test_row_l2_norm_layer test_recursive_topology test_gated_unit_layer test_clip_layer test_row_l2_norm_layer
test_kmax_seq_socre_layer test_sub_nested_seq_select_layer test_scale_shift_layer test_kmax_seq_socre_layer test_sub_nested_seq_select_layer test_scale_shift_layer
test_seq_slice_layer test_cross_entropy_over_beam test_pooling3D_layer test_seq_slice_layer test_cross_entropy_over_beam test_pooling3D_layer
test_conv3d_layer test_deconv3d_layer) test_conv3d_layer test_deconv3d_layer test_BatchNorm3D)
export whole_configs=(test_split_datasource) export whole_configs=(test_split_datasource)
...@@ -62,6 +62,7 @@ layers { ...@@ -62,6 +62,7 @@ layers {
moving_average_fraction: 0.9 moving_average_fraction: 0.9
height: 227 height: 227
width: 227 width: 227
depth: 1
} }
layers { layers {
name: "__crmnorm_0__" name: "__crmnorm_0__"
......
...@@ -62,6 +62,7 @@ layers { ...@@ -62,6 +62,7 @@ layers {
moving_average_fraction: 0.9 moving_average_fraction: 0.9
height: 256 height: 256
width: 256 width: 256
depth: 1
} }
layers { layers {
name: "__crmnorm_0__" name: "__crmnorm_0__"
......
type: "nn"
layers {
name: "data3D"
type: "data"
size: 360
active_type: ""
height: 6
width: 20
depth: 3
}
layers {
name: "__batch_norm_0__"
type: "batch_norm"
size: 360
active_type: "relu"
inputs {
input_layer_name: "data3D"
input_parameter_name: "___batch_norm_0__.w0"
image_conf {
channels: 1
img_size: 20
img_size_y: 6
img_size_z: 3
}
}
inputs {
input_layer_name: "data3D"
input_parameter_name: "___batch_norm_0__.w1"
}
inputs {
input_layer_name: "data3D"
input_parameter_name: "___batch_norm_0__.w2"
}
bias_parameter_name: "___batch_norm_0__.wbias"
moving_average_fraction: 0.9
height: 6
width: 20
depth: 3
}
parameters {
name: "___batch_norm_0__.w0"
size: 1
initial_mean: 1.0
initial_std: 0.0
initial_strategy: 0
initial_smart: false
}
parameters {
name: "___batch_norm_0__.w1"
size: 1
initial_mean: 0.0
initial_std: 0.0
dims: 1
dims: 1
initial_strategy: 0
initial_smart: false
is_static: true
is_shared: true
}
parameters {
name: "___batch_norm_0__.w2"
size: 1
initial_mean: 0.0
initial_std: 0.0
dims: 1
dims: 1
initial_strategy: 0
initial_smart: false
is_static: true
is_shared: true
}
parameters {
name: "___batch_norm_0__.wbias"
size: 1
initial_mean: 0.0
initial_std: 0.0
dims: 1
dims: 1
initial_strategy: 0
initial_smart: false
}
input_layer_names: "data3D"
output_layer_names: "__batch_norm_0__"
sub_models {
name: "root"
layer_names: "data3D"
layer_names: "__batch_norm_0__"
input_layer_names: "data3D"
output_layer_names: "__batch_norm_0__"
is_recurrent_layer_group: false
}
...@@ -74,6 +74,9 @@ layers { ...@@ -74,6 +74,9 @@ layers {
inputs { inputs {
input_layer_name: "__bidirectional_gru_0___bw" input_layer_name: "__bidirectional_gru_0___bw"
} }
height: 0
width: 0
depth: 1
} }
parameters { parameters {
name: "___bidirectional_gru_0___fw_transform.w0" name: "___bidirectional_gru_0___fw_transform.w0"
......
...@@ -14,6 +14,29 @@ layers { ...@@ -14,6 +14,29 @@ layers {
input_layer_name: "input" input_layer_name: "input"
input_parameter_name: "___prelu_layer_0__.w0" input_parameter_name: "___prelu_layer_0__.w0"
} }
partial_sum: 1
}
layers {
name: "__prelu_layer_1__"
type: "prelu"
size: 300
active_type: ""
inputs {
input_layer_name: "input"
input_parameter_name: "___prelu_layer_1__.w0"
}
partial_sum: 1
}
layers {
name: "__prelu_layer_2__"
type: "prelu"
size: 300
active_type: ""
inputs {
input_layer_name: "input"
input_parameter_name: "___prelu_layer_2__.w0"
}
partial_sum: 5
} }
parameters { parameters {
name: "___prelu_layer_0__.w0" name: "___prelu_layer_0__.w0"
...@@ -23,14 +46,32 @@ parameters { ...@@ -23,14 +46,32 @@ parameters {
initial_strategy: 0 initial_strategy: 0
initial_smart: true initial_smart: true
} }
parameters {
name: "___prelu_layer_1__.w0"
size: 300
initial_mean: 0.0
initial_std: 0.057735026919
initial_strategy: 0
initial_smart: true
}
parameters {
name: "___prelu_layer_2__.w0"
size: 60
initial_mean: 0.0
initial_std: 0.129099444874
initial_strategy: 0
initial_smart: true
}
input_layer_names: "input" input_layer_names: "input"
output_layer_names: "__prelu_layer_0__" output_layer_names: "__prelu_layer_2__"
sub_models { sub_models {
name: "root" name: "root"
layer_names: "input" layer_names: "input"
layer_names: "__prelu_layer_0__" layer_names: "__prelu_layer_0__"
layer_names: "__prelu_layer_1__"
layer_names: "__prelu_layer_2__"
input_layer_names: "input" input_layer_names: "input"
output_layer_names: "__prelu_layer_0__" output_layer_names: "__prelu_layer_2__"
is_recurrent_layer_group: false is_recurrent_layer_group: false
} }
...@@ -16,6 +16,9 @@ layers { ...@@ -16,6 +16,9 @@ layers {
inputs { inputs {
input_layer_name: "data" input_layer_name: "data"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_1__" name: "__addto_1__"
...@@ -28,6 +31,9 @@ layers { ...@@ -28,6 +31,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_0__" input_layer_name: "__addto_0__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_2__" name: "__addto_2__"
...@@ -40,6 +46,9 @@ layers { ...@@ -40,6 +46,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_1__" input_layer_name: "__addto_1__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_3__" name: "__addto_3__"
...@@ -52,6 +61,9 @@ layers { ...@@ -52,6 +61,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_2__" input_layer_name: "__addto_2__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_4__" name: "__addto_4__"
...@@ -64,6 +76,9 @@ layers { ...@@ -64,6 +76,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_3__" input_layer_name: "__addto_3__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_5__" name: "__addto_5__"
...@@ -76,6 +91,9 @@ layers { ...@@ -76,6 +91,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_4__" input_layer_name: "__addto_4__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_6__" name: "__addto_6__"
...@@ -88,6 +106,9 @@ layers { ...@@ -88,6 +106,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_5__" input_layer_name: "__addto_5__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_7__" name: "__addto_7__"
...@@ -100,6 +121,9 @@ layers { ...@@ -100,6 +121,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_6__" input_layer_name: "__addto_6__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_8__" name: "__addto_8__"
...@@ -112,6 +136,9 @@ layers { ...@@ -112,6 +136,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_7__" input_layer_name: "__addto_7__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_9__" name: "__addto_9__"
...@@ -124,6 +151,9 @@ layers { ...@@ -124,6 +151,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_8__" input_layer_name: "__addto_8__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_10__" name: "__addto_10__"
...@@ -136,6 +166,9 @@ layers { ...@@ -136,6 +166,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_9__" input_layer_name: "__addto_9__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_11__" name: "__addto_11__"
...@@ -148,6 +181,9 @@ layers { ...@@ -148,6 +181,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_10__" input_layer_name: "__addto_10__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_12__" name: "__addto_12__"
...@@ -160,6 +196,9 @@ layers { ...@@ -160,6 +196,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_11__" input_layer_name: "__addto_11__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_13__" name: "__addto_13__"
...@@ -172,6 +211,9 @@ layers { ...@@ -172,6 +211,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_12__" input_layer_name: "__addto_12__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_14__" name: "__addto_14__"
...@@ -184,6 +226,9 @@ layers { ...@@ -184,6 +226,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_13__" input_layer_name: "__addto_13__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_15__" name: "__addto_15__"
...@@ -196,6 +241,9 @@ layers { ...@@ -196,6 +241,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_14__" input_layer_name: "__addto_14__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_16__" name: "__addto_16__"
...@@ -208,6 +256,9 @@ layers { ...@@ -208,6 +256,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_15__" input_layer_name: "__addto_15__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_17__" name: "__addto_17__"
...@@ -220,6 +271,9 @@ layers { ...@@ -220,6 +271,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_16__" input_layer_name: "__addto_16__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_18__" name: "__addto_18__"
...@@ -232,6 +286,9 @@ layers { ...@@ -232,6 +286,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_17__" input_layer_name: "__addto_17__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_19__" name: "__addto_19__"
...@@ -244,6 +301,9 @@ layers { ...@@ -244,6 +301,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_18__" input_layer_name: "__addto_18__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_20__" name: "__addto_20__"
...@@ -256,6 +316,9 @@ layers { ...@@ -256,6 +316,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_19__" input_layer_name: "__addto_19__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_21__" name: "__addto_21__"
...@@ -268,6 +331,9 @@ layers { ...@@ -268,6 +331,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_20__" input_layer_name: "__addto_20__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_22__" name: "__addto_22__"
...@@ -280,6 +346,9 @@ layers { ...@@ -280,6 +346,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_21__" input_layer_name: "__addto_21__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_23__" name: "__addto_23__"
...@@ -292,6 +361,9 @@ layers { ...@@ -292,6 +361,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_22__" input_layer_name: "__addto_22__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_24__" name: "__addto_24__"
...@@ -304,6 +376,9 @@ layers { ...@@ -304,6 +376,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_23__" input_layer_name: "__addto_23__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_25__" name: "__addto_25__"
...@@ -316,6 +391,9 @@ layers { ...@@ -316,6 +391,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_24__" input_layer_name: "__addto_24__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_26__" name: "__addto_26__"
...@@ -328,6 +406,9 @@ layers { ...@@ -328,6 +406,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_25__" input_layer_name: "__addto_25__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_27__" name: "__addto_27__"
...@@ -340,6 +421,9 @@ layers { ...@@ -340,6 +421,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_26__" input_layer_name: "__addto_26__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_28__" name: "__addto_28__"
...@@ -352,6 +436,9 @@ layers { ...@@ -352,6 +436,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_27__" input_layer_name: "__addto_27__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_29__" name: "__addto_29__"
...@@ -364,6 +451,9 @@ layers { ...@@ -364,6 +451,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_28__" input_layer_name: "__addto_28__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_30__" name: "__addto_30__"
...@@ -376,6 +466,9 @@ layers { ...@@ -376,6 +466,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_29__" input_layer_name: "__addto_29__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__addto_31__" name: "__addto_31__"
...@@ -388,6 +481,9 @@ layers { ...@@ -388,6 +481,9 @@ layers {
inputs { inputs {
input_layer_name: "__addto_30__" input_layer_name: "__addto_30__"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__fc_layer_0__" name: "__fc_layer_0__"
......
...@@ -22,6 +22,9 @@ layers { ...@@ -22,6 +22,9 @@ layers {
inputs { inputs {
input_layer_name: "b" input_layer_name: "b"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__concat_0__" name: "__concat_0__"
...@@ -34,6 +37,9 @@ layers { ...@@ -34,6 +37,9 @@ layers {
inputs { inputs {
input_layer_name: "b" input_layer_name: "b"
} }
height: 0
width: 0
depth: 1
} }
layers { layers {
name: "__concat_1__" name: "__concat_1__"
......
from paddle.trainer_config_helpers import *
settings(batch_size=1000, learning_rate=1e-4)
#data = data_layer(name='data', size=180, width=30, height=6)
#batchNorm = batch_norm_layer(data, num_channels=1)
#outputs(batchNorm)
data3D = data_layer(name='data3D', size=120 * 3, width=20, height=6, depth=3)
batchNorm3D = batch_norm_layer(data3D, num_channels=1, img3D=True)
outputs(batchNorm3D)
...@@ -2,5 +2,7 @@ from paddle.trainer_config_helpers import * ...@@ -2,5 +2,7 @@ from paddle.trainer_config_helpers import *
data = data_layer(name='input', size=300) data = data_layer(name='input', size=300)
prelu = prelu_layer(input=data) prelu = prelu_layer(input=data)
prelu = prelu_layer(input=data, partial_sum=1)
prelu = prelu_layer(input=data, partial_sum=5)
outputs(prelu) outputs(prelu)
...@@ -53,10 +53,13 @@ class BeginPass(object): ...@@ -53,10 +53,13 @@ class BeginPass(object):
class EndPass(WithMetric): class EndPass(WithMetric):
""" """
Event On One Pass Training Complete. Event On One Pass Training Complete.
To get the output of a specific layer, add "event.gm.getLayerOutputs('predict_layer')"
in your event_handler callback.
""" """
def __init__(self, pass_id, evaluator): def __init__(self, pass_id, evaluator, gm):
self.pass_id = pass_id self.pass_id = pass_id
self.gm = gm
WithMetric.__init__(self, evaluator) WithMetric.__init__(self, evaluator)
...@@ -73,10 +76,13 @@ class BeginIteration(object): ...@@ -73,10 +76,13 @@ class BeginIteration(object):
class EndIteration(WithMetric): class EndIteration(WithMetric):
""" """
Event On One Batch Training Complete. Event On One Batch Training Complete.
To get the output of a specific layer, add "event.gm.getLayerOutputs('predict_layer')"
in your event_handler callback.
""" """
def __init__(self, pass_id, batch_id, cost, evaluator): def __init__(self, pass_id, batch_id, cost, evaluator, gm):
self.pass_id = pass_id self.pass_id = pass_id
self.batch_id = batch_id self.batch_id = batch_id
self.cost = cost self.cost = cost
self.gm = gm
WithMetric.__init__(self, evaluator) WithMetric.__init__(self, evaluator)
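A minimal sketch of an event handler that uses the newly exposed `gm` attribute documented above. The layer name 'predict_layer' and the surrounding trainer setup are assumptions, not part of the diff.

import paddle.v2 as paddle

def event_handler(event):
    if isinstance(event, paddle.event.EndIteration):
        if event.batch_id % 100 == 0:
            print("Pass %d, Batch %d, Cost %f" %
                  (event.pass_id, event.batch_id, event.cost))
    if isinstance(event, paddle.event.EndPass):
        # event.gm is the gradient machine used for training; fetch the
        # output of a specific layer once the pass has finished.
        outs = event.gm.getLayerOutputs('predict_layer')
        print("Pass %d finished, predict_layer output: %s" % (event.pass_id, outs))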
...@@ -4,8 +4,8 @@ import paddle.v2.framework.proto.framework_pb2 as framework_pb2 ...@@ -4,8 +4,8 @@ import paddle.v2.framework.proto.framework_pb2 as framework_pb2
def get_all_op_protos(): def get_all_op_protos():
""" """
Get all registered op proto from Paddle C++ Get all registered op proto from PaddlePaddle C++ end.
:return: list of OpProto :return: A list of registered OpProto.
""" """
protostrs = core.get_all_op_protos() protostrs = core.get_all_op_protos()
ret_values = [] ret_values = []
...@@ -21,8 +21,8 @@ def is_str(s): ...@@ -21,8 +21,8 @@ def is_str(s):
class OpDescCreationMethod(object): class OpDescCreationMethod(object):
""" """
A Functor object to convert user input(use key word args) to OpDesc based on Convert the user's input(only keyword arguments are supported) to OpDesc
OpProto. based on the OpProto.
:param op_proto: The OpProto object. :param op_proto: The OpProto object.
:type op_proto: op_proto_pb2.OpProto :type op_proto: op_proto_pb2.OpProto
...@@ -30,27 +30,28 @@ class OpDescCreationMethod(object): ...@@ -30,27 +30,28 @@ class OpDescCreationMethod(object):
def __init__(self, op_proto): def __init__(self, op_proto):
if not isinstance(op_proto, framework_pb2.OpProto): if not isinstance(op_proto, framework_pb2.OpProto):
raise TypeError("Argument should be OpProto") raise TypeError(
"Type of op_proto should be OpProto in PaddlePaddle.")
self.__op_proto__ = op_proto self.__op_proto__ = op_proto
def __call__(self, *args, **kwargs): def __call__(self, *args, **kwargs):
""" """
Convert user input to OpDesc. Only key-word args are supported. Convert user's input to OpDesc. Only keyword arguments are supported.
:return: OpDesc based on user input :return: The OpDesc based on user input.
:rtype: op_desc_pb2.OpDesc :rtype: op_desc_pb2.OpDesc
""" """
if len(args) != 0: if len(args) != 0:
raise ValueError("Only keyword arguments is supported by Paddle") raise ValueError("Only keyword arguments are supported.")
op_desc = framework_pb2.OpDesc() op_desc = framework_pb2.OpDesc()
for input_parameter in self.__op_proto__.inputs: for input_parameter in self.__op_proto__.inputs:
input_arguments = kwargs.get(input_parameter.name, []) input_arguments = kwargs.get(input_parameter.name, [])
if is_str(input_arguments): if is_str(input_arguments):
input_arguments = [input_arguments] input_arguments = [input_arguments]
if not input_parameter.duplicable and len(input_arguments) > 1: if not input_parameter.duplicable and len(input_arguments) > 1:
raise ValueError("Input %s only accepts one input, but give %d" raise ValueError(
% (input_parameter.name, len(input_arguments))) "Input %s expects only one input, but %d are given." %
(input_parameter.name, len(input_arguments)))
ipt = op_desc.inputs.add() ipt = op_desc.inputs.add()
ipt.parameter = input_parameter.name ipt.parameter = input_parameter.name
...@@ -63,7 +64,7 @@ class OpDescCreationMethod(object): ...@@ -63,7 +64,7 @@ class OpDescCreationMethod(object):
if not output_parameter.duplicable and len(output_arguments) > 1: if not output_parameter.duplicable and len(output_arguments) > 1:
raise ValueError( raise ValueError(
"Output %s only accepts one output, but give %d" % "Output %s expects only one output, but %d are given." %
(output_parameter.name, len(output_arguments))) (output_parameter.name, len(output_arguments)))
out = op_desc.outputs.add() out = op_desc.outputs.add()
...@@ -100,15 +101,17 @@ class OpDescCreationMethod(object): ...@@ -100,15 +101,17 @@ class OpDescCreationMethod(object):
pair.first = p[0] pair.first = p[0]
pair.second = p[1] pair.second = p[1]
else: else:
raise NotImplementedError("Not support attribute type " + raise NotImplementedError(
str(attr.type)) "A not supported attribute type: %s." % (
str(attr.type)))
return op_desc return op_desc
@staticmethod @staticmethod
def any_is_true(generator): def any_is_true(generator):
""" """
Reduce a bool array to one. If any of them is True, then return True. Reduce a boolean array to a single boolean parameter. If any element in
the array is True, this function will return True, otherwise False.
""" """
for flag in generator: for flag in generator:
if flag: if flag:
...@@ -127,7 +130,7 @@ class OpInfo(object): ...@@ -127,7 +130,7 @@ class OpInfo(object):
def create_op_creation_method(op_proto): def create_op_creation_method(op_proto):
""" """
Generate op creation method for an OpProto Generate op creation method for an OpProto.
""" """
method = OpDescCreationMethod(op_proto) method = OpDescCreationMethod(op_proto)
...@@ -138,28 +141,31 @@ def create_op_creation_method(op_proto): ...@@ -138,28 +141,31 @@ def create_op_creation_method(op_proto):
return OpInfo( return OpInfo(
method=__impl__, method=__impl__,
name=op_proto.type, name=op_proto.type,
inputs=[var.name for var in op_proto.inputs], inputs=[(var.name, var.duplicable) for var in op_proto.inputs],
outputs=[var.name for var in op_proto.outputs], outputs=[(var.name, var.duplicable) for var in op_proto.outputs],
attrs=[attr.name for attr in op_proto.attrs]) attrs=[attr.name for attr in op_proto.attrs])
class OperatorFactory(object): class OperatorFactory(object):
def __init__(self): def __init__(self):
self.op_methods = dict() self.op_methods = dict()
for op_proto in get_all_op_protos(): for op_proto in get_all_op_protos():
method = create_op_creation_method(op_proto) method = create_op_creation_method(op_proto)
self.op_methods[method.name] = method self.op_methods[method.name] = method
def __call__(self, *args, **kwargs): def __call__(self, *args, **kwargs):
if 'type' in kwargs: if "type" in kwargs:
if len(args) != 0: if len(args) != 0:
raise ValueError("All Paddle argument should be key-word " raise ValueError(
"argument except type") "Except the argument \"type\","
t = kwargs.pop('type') "all of the other arguments should be keyword arguments.")
t = kwargs.pop("type")
else: else:
if len(args) != 1: if len(args) != 1:
raise ValueError("All Paddle argument should be key-word " raise ValueError(
"argument except type") "Except the argument \"type\","
"all of the other arguments should be keyword arguments.")
t = args[0] t = args[0]
return self.get_op_info(t).method(**kwargs) return self.get_op_info(t).method(**kwargs)
...@@ -169,13 +175,19 @@ class OperatorFactory(object): ...@@ -169,13 +175,19 @@ class OperatorFactory(object):
def get_op_info(self, t): def get_op_info(self, t):
if t not in self.op_methods: if t not in self.op_methods:
raise ValueError("operator %s is not registered", t) raise ValueError("The operator: %s is not registered." % t)
return self.op_methods.get(t) return self.op_methods.get(t)
def get_op_input_names(self, type): def get_op_input_names(self, type):
return map(lambda x: x[0], self.get_op_info(type).inputs)
def get_op_inputs(self, type):
return self.get_op_info(type).inputs return self.get_op_info(type).inputs
def get_op_output_names(self, type): def get_op_output_names(self, type):
return map(lambda x: x[0], self.get_op_info(type).outputs)
def get_op_outputs(self, type):
return self.get_op_info(type).outputs return self.get_op_info(type).outputs
def get_op_attr_names(self, type): def get_op_attr_names(self, type):
...@@ -184,7 +196,7 @@ class OperatorFactory(object): ...@@ -184,7 +196,7 @@ class OperatorFactory(object):
class __RecurrentOp__(object): class __RecurrentOp__(object):
__proto__ = None __proto__ = None
type = 'recurrent' type = "recurrent"
def __init__(self): def __init__(self):
# cache recurrent_op's proto # cache recurrent_op's proto
...@@ -194,8 +206,8 @@ class __RecurrentOp__(object): ...@@ -194,8 +206,8 @@ class __RecurrentOp__(object):
self.__proto__ = op_proto self.__proto__ = op_proto
def __call__(self, *args, **kwargs): def __call__(self, *args, **kwargs):
if self.type not in args and 'type' not in kwargs: if self.type not in args and "type" not in kwargs:
kwargs['type'] = self.type kwargs["type"] = self.type
# create proto # create proto
create_method = OpDescCreationMethod(self.__proto__) create_method = OpDescCreationMethod(self.__proto__)
proto = create_method(*args, **kwargs) proto = create_method(*args, **kwargs)
...@@ -203,5 +215,5 @@ class __RecurrentOp__(object): ...@@ -203,5 +215,5 @@ class __RecurrentOp__(object):
return core.RecurrentOp.create(proto.SerializeToString()) return core.RecurrentOp.create(proto.SerializeToString())
Operator = OperatorFactory() # Default global factory Operator = OperatorFactory() # The default global factory
RecurrentOp = __RecurrentOp__() RecurrentOp = __RecurrentOp__()
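A small sketch of how the refactored factory is used. It assumes an operator of type "add" with inputs X, Y and output Out is registered, matching the updated test further below; the printed values are indicative only.

from paddle.v2.framework.op import Operator

# Besides the operator type, only keyword arguments are accepted.
add_op = Operator("add", X="x", Y="y", Out="out")

# The metadata accessors now come in two flavors: the *_names variants keep
# the old behavior, while get_op_inputs/get_op_outputs also expose the
# `duplicable` flag for each slot.
print(Operator.get_op_input_names("add"))   # e.g. ['X', 'Y']
print(Operator.get_op_inputs("add"))        # e.g. [('X', False), ('Y', False)]
print(Operator.get_op_attr_names("add"))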
py_test(test_net SRCS test_net.py) file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py")
string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
py_test(test_scope SRCS test_scope.py) foreach(src ${TEST_OPS})
py_test(${src} SRCS ${src}.py)
py_test(test_tensor SRCS test_tensor.py) endforeach()
py_test(test_mul_op SRCS test_mul_op.py)
py_test(test_cos_sim_op SRCS test_cos_sim_op.py)
py_test(test_mean_op SRCS test_mean_op.py)
py_test(test_protobuf SRCS test_protobuf.py)
py_test(test_add_two_op SRCS test_add_two_op.py)
py_test(test_sigmoid_op SRCS test_sigmoid_op.py)
py_test(test_softmax_op SRCS test_softmax_op.py)
py_test(test_cross_entropy_op SRCS test_cross_entropy_op.py)
py_test(test_gather_op SRCS test_gather_op.py)
py_test(test_scatter_op SRCS test_scatter_op.py)
py_test(test_fill_zeros_like_op SRCS test_fill_zeros_like_op.py)
py_test(gradient_checker SRCS gradient_checker.py)
py_test(test_rowwise_add_op SRCS test_rowwise_add_op.py)
py_test(test_default_scope_funcs SRCS test_default_scope_funcs.py)
py_test(test_operator SRCS test_operator.py)
py_test(test_gaussian_random_op SRCS test_gaussian_random_op.py)
py_test(test_uniform_random_op SRCS test_uniform_random_op.py)
py_test(test_recurrent_op SRCS test_recurrent_op.py)
py_test(test_sgd_op SRCS test_sgd_op.py)
py_test(test_gradient_checker SRCS test_gradient_checker.py)
py_test(test_lookup_table SRCS test_lookup_table.py)
py_test(test_scale_and_identity_op SRCS test_scale_and_identity_op.py)
py_test(mnist SRCS mnist.py)
import unittest
import numpy
import itertools
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator
__all__ = ['get_numeric_gradient']
def create_op(op_type):
# TODO need to set attrs
kwargs = dict()
for in_name in Operator.get_op_input_names(op_type):
kwargs[in_name] = in_name
for out_name in Operator.get_op_output_names(op_type):
kwargs[out_name] = out_name
return Operator(op_type, **kwargs)
def grad_var_name(var_name):
return var_name + "@GRAD"
def empty_var_name():
return "@EMPTY@"
def get_numeric_gradient(op,
input_values,
output_name,
input_to_check,
delta=0.005,
local_scope=None,
in_place=False):
"""
Get the numeric gradient of an operator's input.
:param op: C++ operator instance, could also be a network.
:param input_values: The input variables. Should be a dictionary whose key is
the variable name and whose value is a numpy array.
:param output_name: The final output variable name.
:param input_to_check: The input variable whose gradient should be computed.
:param delta: The perturbation value used by the numeric gradient method. The
smaller delta is, the more accurate the result will be, but a delta that is
too small can cause numerical stability problems.
:param local_scope: The local scope used for get_numeric_gradient.
:return: The gradient array in numpy format.
"""
if local_scope is None:
local_scope = core.Scope()
# Create all input variable in local_scope
for var_name in input_values:
var = local_scope.new_var(var_name)
tensor = var.get_tensor()
tensor.set_dims(input_values[var_name].shape)
tensor.alloc_float(core.CPUPlace())
tensor.set(input_values[var_name], core.CPUPlace())
# Create all output variable in local_scope
opts = op.outputs()
for key in opts:
for output in opts[key]:
if local_scope.find_var(output) is None:
local_scope.new_var(output).get_tensor()
op.infer_shape(local_scope)
# allocate output memory
for key in opts:
for output in opts[key]:
local_scope.find_var(output).get_tensor().alloc_float(core.CPUPlace(
))
cpu_ctx = core.DeviceContext.create(core.CPUPlace())
def get_output():
op.run(local_scope, cpu_ctx)
return numpy.array(local_scope.find_var(output_name).get_tensor()).sum()
def product(dim):
return reduce(lambda a, b: a * b, dim, 1)
def restore_inputs():
for var_name in input_values:
tensor_ = local_scope.find_var(var_name).get_tensor()
tensor_.set(numpy.copy(input_values[var_name]), core.CPUPlace())
# get the input tensor whose numeric gradient we want to compute.
tensor_to_check = local_scope.find_var(input_to_check).get_tensor()
tensor_size = product(tensor_to_check.get_dims())
# prepare a numpy array to store the gradient.
gradient_flat = numpy.zeros(shape=(tensor_size, ), dtype='float32')
# we only compute gradient of one element each time.
# we use a for loop to compute the gradient of every element.
for i in xrange(tensor_size):
if in_place:
restore_inputs()
# get one input element through its index i.
origin = tensor_to_check.get_float_element(i)
# add delta to it, run op and then get the sum of the result tensor.
x_pos = origin + delta
tensor_to_check.set_float_element(i, x_pos)
y_pos = get_output()
# subtract delta from this element, run op and get the sum of the result tensor.
if in_place:
restore_inputs()
x_neg = origin - delta
tensor_to_check.set_float_element(i, x_neg)
y_neg = get_output()
# restore old value
tensor_to_check.set_float_element(i, origin)
# compute the gradient of this element and store it into a numpy array.
gradient_flat[i] = (y_pos - y_neg) / delta / 2
# reshape the gradient result to the shape of the source tensor.
return gradient_flat.reshape(tensor_to_check.get_dims())
class GradientChecker(unittest.TestCase):
def __get_gradient(self, forward_op, backward_op, input_value, grad_names,
place):
"""Get the input gradients after running forward and backward operators
on the given places.
:param forward_op: forward operator
:type forward_op: Operator
:param backward_op: backward operator
:type backward_op: Operator
:param input_value: input values.
:type input_value: dict{string:numpy.array}
:param grad_names: the names of the returned input gradients.
:type grad_names: a list of string
:param place: the device type.
:type place: CPUPlace or GPUPlace
:return: the input gradients of the given grad_names.
:rtype: a list of numpy.array
"""
scope = core.Scope()
ctx = core.DeviceContext.create(place)
inputs = forward_op.inputs()
in_names = [item for k in inputs for item in inputs[k]]
outputs = forward_op.outputs()
out_names = [item for k in outputs for item in outputs[k]]
# create input var and set value
for name, value in input_value.iteritems():
if name not in in_names:
raise ValueError(name + "does not exist in Op's inputs.")
var = scope.new_var(name).get_tensor()
var.set_dims(value.shape)
var.set(value, place)
# run forward op
for out_name in out_names:
scope.new_var(out_name)
forward_op.infer_shape(scope)
forward_op.run(scope, ctx)
# set output var's shape
# set output grad to ones
for name in out_names:
out_tensor = scope.find_var(name).get_tensor()
grad_tensor = scope.new_var(grad_var_name(name)).get_tensor()
grad_tensor.set_dims(out_tensor.shape())
data = numpy.ones(out_tensor.shape(), dtype=numpy.float32)
grad_tensor.set(data, place)
# run backward op
backward_outs = backward_op.outputs()
backward_names = [
item for key in backward_outs for item in backward_outs[key]
]
for name in backward_names:
scope.new_var(name)
backward_op.infer_shape(scope)
backward_op.run(scope, ctx)
outs = [
numpy.array(scope.find_var(name).get_tensor())
for name in grad_names
]
return outs
def compare_grad(self, forward_op, input_value, no_grad_set=None):
""" Compare the input gradients between CPU and GPU for the given forward
operator.
:param forward_op: forward operator
:type forward_op: Operator
:param input_value: input values.
:type input_value: dict{string:numpy.array}
:param no_grad_set: the set of variables names without gradients.
:type no_grad_set: a set of string
:raises: AssertionError, if the gradient values computed on CPU and GPU differ.
"""
if no_grad_set is None:
no_grad_set = set()
backward_op = core.Operator.backward(forward_op, no_grad_set)
# return if not compile with GPU or not implementing GPU kernel
if not (core.is_compile_gpu() and backward_op.support_gpu()):
return
outputs = backward_op.outputs()
out_names = [item for k in outputs for item in outputs[k]]
out_names = filter(lambda x: x != empty_var_name(), out_names)
cpu_grads = self.__get_gradient(forward_op, backward_op, input_value,
out_names, core.CPUPlace())
gpu_grads = self.__get_gradient(forward_op, backward_op, input_value,
out_names, core.GPUPlace(0))
for c_grad, g_grad, name in itertools.izip(cpu_grads, gpu_grads,
out_names):
self.assertTrue(
numpy.allclose(
c_grad, g_grad, atol=1e-4),
"output name: " + name + " has diff")
def __assert_is_close(self, numeric_grads, analytic_grads, names,
max_relative_error, msg_prefix):
"""Use relative error for the comparison.
:param numeric_grads: the numerical gradients.
:type numeric_grads: a list of numpy.array
:param analytic_grads: the analytical gradients.
:type analytic_grads: a list of numpy.array
:param names: the names of the gradients, used for debug printing.
:type names: a list of string
:param msg_prefix: string info, used for debug printing.
:type msg_prefix: string
"""
for a, b, name in itertools.izip(numeric_grads, analytic_grads, names):
abs_a = numpy.abs(a)
# if abs_a is nearly zero, then use abs error for a, not relative
# error.
abs_a[abs_a < 1e-3] = 1
diff_mat = numpy.abs(a - b) / abs_a
max_diff = numpy.max(diff_mat)
def err_msg():
offset = numpy.argmax(diff_mat > max_relative_error)
return "%s Variable %s max gradient diff %f over limit %f, the first " \
"error element is %d" % (
msg_prefix, name, max_diff, max_relative_error, offset)
self.assertLessEqual(max_diff, max_relative_error, err_msg())
def check_grad(self,
forward_op,
input_vars,
inputs_to_check,
output_name,
no_grad_set=None,
only_cpu=False,
in_place=False,
max_relative_error=0.005):
"""
:param forward_op: the forward operator, used to create the backward operator.
:param input_vars: numpy values of the input variables. The following
computation will use these variables.
:param inputs_to_check: the names of the input variables whose gradients
should be checked.
:param output_name: the output variable name of the forward network.
:param max_relative_error: the relative tolerance parameter.
:param no_grad_set: the set of variable names skipped when creating the backward ops.
:param only_cpu: only compute and check the gradient on the CPU kernel.
:return:
"""
if no_grad_set is None:
no_grad_set = set()
no_tmp_out = forward_op.no_intermediate_outputs()
if len(no_tmp_out) != 1:
raise ValueError("non temp out_names should be 1")
inputs = forward_op.inputs()
in_names = [item for k in inputs for item in inputs[k]]
for no_grad in no_grad_set:
if no_grad not in in_names:
raise ValueError("no_grad should be in in_names")
if no_grad in inputs_to_check:
raise ValueError("no_grad should not be in inputs_to_check")
backward_op = core.Operator.backward(forward_op, no_grad_set)
places = [core.CPUPlace()]
if not only_cpu and core.is_compile_gpu() and backward_op.support_gpu():
places.append(core.GPUPlace(0))
# get numerical gradients
numeric_grads = [
get_numeric_gradient(
forward_op, input_vars, output_name, name, in_place=in_place)
for name in inputs_to_check
]
check_names = [grad_var_name(name) for name in inputs_to_check]
for place in places:
analytic_grads = self.__get_gradient(forward_op, backward_op,
input_vars, check_names, place)
self.__assert_is_close(numeric_grads, analytic_grads, check_names,
max_relative_error,
"Gradient Check On %s" % str(place))
import unittest
import numpy as np
import itertools
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator
def grad_var_name(var_name):
return var_name + "@GRAD"
def create_op(scope, op_type, inputs, outputs, attrs):
kwargs = dict()
for in_name, in_dup in Operator.get_op_inputs(op_type):
if in_name in inputs:
kwargs[in_name] = []
if in_dup:
sub_in = inputs[in_name]
for sub_in_name, _ in sub_in:
var = scope.new_var(sub_in_name)
kwargs[in_name].append(sub_in_name)
else:
var = scope.new_var(in_name)
kwargs[in_name].append(in_name)
for out_name, out_dup in Operator.get_op_outputs(op_type):
if out_name in outputs:
kwargs[out_name] = []
if out_dup:
sub_in = outputs[out_name]
for sub_in_name, _ in sub_in:
var = scope.new_var(sub_in_name)
kwargs[out_name].append(sub_in_name)
else:
var = scope.new_var(out_name)
kwargs[out_name].append(out_name)
for attr_name in Operator.get_op_attr_names(op_type):
if attr_name in attrs:
kwargs[attr_name] = attrs[attr_name]
return Operator(op_type, **kwargs)
def set_input(scope, op, inputs, place):
for in_name, in_dup in Operator.get_op_inputs(op.type()):
if in_name in inputs:
if in_dup:
sub_in = inputs[in_name]
for sub_in_name, sub_in_array in sub_in:
var = scope.find_var(sub_in_name)
tensor = var.get_tensor()
tensor.set_dims(sub_in_array.shape)
tensor.set(sub_in_array, place)
else:
var = scope.find_var(in_name)
tensor = var.get_tensor()
arr = inputs[in_name]
tensor.set_dims(arr.shape)
tensor.set(arr, place)
def set_output_grad(scope, op, outputs, place):
for out_name, out_dup in Operator.get_op_outputs(op.type()):
if out_name in outputs:
if out_dup:
sub_out = outputs[out_name]
for sub_out_name, _ in sub_out:
out_tensor = scope.find_var(sub_out_name).get_tensor()
grad_tensor = scope.new_var(grad_var_name(
sub_out_name)).get_tensor()
grad_tensor.set_dims(out_tensor.shape())
data = np.ones(out_tensor.shape(), dtype=np.float32)
grad_tensor.set(data, place)
else:
out_tensor = scope.find_var(out_name).get_tensor()
grad_tensor = scope.new_var(grad_var_name(out_name)).get_tensor(
)
grad_tensor.set_dims(out_tensor.shape())
data = np.ones(out_tensor.shape(), dtype=np.float32)
grad_tensor.set(data, place)
def get_numeric_gradient(scope,
op,
inputs,
input_to_check,
output_name,
delta=0.005,
in_place=False):
set_input(scope, op, inputs, core.CPUPlace())
op.infer_shape(scope)
tensor_to_check = scope.find_var(input_to_check).get_tensor()
def product(dim):
return reduce(lambda a, b: a * b, dim, 1)
ctx = core.DeviceContext.create(core.CPUPlace())
def get_output():
op.run(scope, ctx)
return np.array(scope.find_var(output_name).get_tensor()).sum()
tensor_to_check = scope.find_var(input_to_check).get_tensor()
tensor_size = product(tensor_to_check.get_dims())
gradient_flat = np.zeros(shape=(tensor_size, ), dtype='float32')
# we only compute gradient of one element each time.
# we use a for loop to compute the gradient of every element.
for i in xrange(tensor_size):
if in_place:
set_input(scope, op, inputs, core.CPUPlace())
# get one input element through its index i.
origin = tensor_to_check.get_float_element(i)
# add delta to it, run op and then get the sum of the result tensor.
x_pos = origin + delta
tensor_to_check.set_float_element(i, x_pos)
y_pos = get_output()
if in_place:
set_input(scope, op, inputs, core.CPUPlace())
x_neg = origin - delta
tensor_to_check.set_float_element(i, x_neg)
y_neg = get_output()
tensor_to_check.set_float_element(i, origin)
gradient_flat[i] = (y_pos - y_neg) / delta / 2
return gradient_flat.reshape(tensor_to_check.get_dims())
def get_backward_op(scope, op, no_grad_set):
backward_op = core.Operator.backward(op, no_grad_set)
for input in backward_op.input_vars():
var = scope.new_var(input)
var.get_tensor()
for output in backward_op.output_vars():
var = scope.new_var(output)
var.get_tensor()
return backward_op
def get_gradient(scope, op, inputs, outputs, grad_name, place,
no_grad_set=None):
ctx = core.DeviceContext.create(place)
set_input(scope, op, inputs, place)
op.infer_shape(scope)
op.run(scope, ctx)
if no_grad_set is None:
no_grad_set = set()
backward_op = get_backward_op(scope, op, no_grad_set)
set_output_grad(scope, op, outputs, place)
backward_op.infer_shape(scope)
backward_op.run(scope, ctx)
out = np.array(scope.find_var(grad_name).get_tensor())
return out
class OpTest(unittest.TestCase):
def check_output_with_place(self, place):
self.scope = core.Scope()
op_inputs = self.inputs if hasattr(self, "inputs") else dict()
op_attrs = self.attrs if hasattr(self, "attrs") else dict()
self.op = create_op(self.scope, self.op_type, op_inputs, self.outputs,
op_attrs)
if isinstance(place, core.GPUPlace) and not self.op.support_gpu():
return
set_input(self.scope, self.op, self.inputs, place)
self.op.infer_shape(self.scope)
ctx = core.DeviceContext.create(place)
self.op.run(self.scope, ctx)
for out_name, out_dup in Operator.get_op_outputs(self.op.type()):
if out_dup:
sub_out = self.outputs[out_name]
for sub_out_name in sub_out:
actual = np.array(
self.scope.find_var(sub_out_name).get_tensor())
expect = sub_out[sub_out_name]
self.assertTrue(
np.allclose(
actual, expect, atol=1e-05),
"output name: " + out_name + "has diff")
else:
actual = np.array(self.scope.find_var(out_name).get_tensor())
expect = self.outputs[out_name]
self.assertTrue(
np.allclose(
actual, expect, atol=1e-05),
"output name: " + out_name + "has diff")
def check_output(self):
places = [core.CPUPlace()]
if core.is_compile_gpu():
places.append(core.GPUPlace(0))
for place in places:
self.check_output_with_place(place)
def __assert_is_close(self, numeric_grads, analytic_grads, names,
max_relative_error, msg_prefix):
for a, b, name in itertools.izip(numeric_grads, analytic_grads, names):
abs_a = np.abs(a)
abs_a[abs_a < 1e-3] = 1
diff_mat = np.abs(a - b) / abs_a
max_diff = np.max(diff_mat)
def err_msg():
offset = np.argmax(diff_mat > max_relative_error)
return "%s Variable %s max gradient diff %f over limit %f, the first " \
"error element is %d" % (
msg_prefix, name, max_diff, max_relative_error, offset)
self.assertLessEqual(max_diff, max_relative_error, err_msg())
def check_grad(self,
inputs_to_check,
output_name,
no_grad_set=None,
in_place=False,
max_relative_error=0.005):
self.scope = core.Scope()
op_inputs = self.inputs if hasattr(self, "inputs") else dict()
op_attrs = self.attrs if hasattr(self, "attrs") else dict()
self.op = create_op(self.scope, self.op_type, op_inputs, self.outputs,
op_attrs)
if no_grad_set is None:
no_grad_set = set()
numeric_grads = [
get_numeric_gradient(
self.scope,
self.op,
self.inputs,
input_to_check,
output_name,
in_place=in_place) for input_to_check in inputs_to_check
]
grad_names = [
grad_var_name(input_to_check) for input_to_check in inputs_to_check
]
cpu_place = core.CPUPlace()
cpu_analytic_grads = [
get_gradient(self.scope, self.op, self.inputs, self.outputs,
grad_name, cpu_place, no_grad_set)
for grad_name in grad_names
]
self.__assert_is_close(numeric_grads, cpu_analytic_grads, grad_names,
max_relative_error,
"Gradient Check On %s" % str(cpu_place))
if core.is_compile_gpu() and self.op.support_gpu():
gpu_place = core.GPUPlace(0)
gpu_analytic_grads = [
get_gradient(self.scope, self.op, self.inputs, self.outputs,
grad_name, gpu_place, no_grad_set)
for grad_name in grad_names
]
self.__assert_is_close(numeric_grads, gpu_analytic_grads,
grad_names, max_relative_error,
"Gradient Check On %s" % str(gpu_place))
for c_grad, g_grad, name in itertools.izip(
cpu_analytic_grads, gpu_analytic_grads, grad_names):
self.assertTrue(
np.allclose(
c_grad, g_grad, atol=1e-4),
"output name: " + name + " has diff")
import numpy
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator
class OpTestMeta(type):
"""
Operator Test ClassMeta.
It injects a `test_all` method into the user's OperatorTest class, so that the
Python unittest module runs that method.
`test_all` reads the values stored in `self`, uses them to create and run an
operator, and checks whether that operator works correctly.
See `test_add_two_op` for example usage.
"""
def __new__(cls, name, bases, attrs):
obj = super(OpTestMeta, cls).__new__(cls, name, bases, attrs)
def test_all(self):
scope = core.Scope()
kwargs = dict()
places = [core.CPUPlace()]
if core.is_compile_gpu():
places.append(core.GPUPlace(0))
for place in places:
for in_name in Operator.get_op_input_names(self.type):
if hasattr(self, "inputs") and in_name in self.inputs:
kwargs[in_name] = in_name
var = scope.new_var(in_name).get_tensor()
arr = self.inputs[in_name]
var.set_dims(arr.shape)
var.set(arr, place)
else:
kwargs[in_name] = "@EMPTY@"
for out_name in Operator.get_op_output_names(self.type):
if not hasattr(self, "outputs"):
raise ValueError(
"The test op must set self.outputs dict.")
if out_name not in self.outputs:
raise ValueError("The %s is not in self.outputs dict." %
(out_name))
kwargs[out_name] = out_name
scope.new_var(out_name).get_tensor()
for attr_name in Operator.get_op_attr_names(self.type):
if hasattr(self, "attrs") and attr_name in self.attrs:
kwargs[attr_name] = self.attrs[attr_name]
op = Operator(self.type, **kwargs)
if isinstance(place, core.GPUPlace) and not op.support_gpu():
return
op.infer_shape(scope)
ctx = core.DeviceContext.create(place)
op.run(scope, ctx)
for out_name in Operator.get_op_output_names(self.type):
actual = numpy.array(scope.find_var(out_name).get_tensor())
expect = self.outputs[out_name]
self.assertTrue(
numpy.allclose(
actual, expect, atol=1e-05),
"output name: " + out_name + "has diff")
obj.test_all = test_all
return obj
 import unittest
+import numpy as np
+from op_test import OpTest
-import numpy
-import paddle.v2.framework.core as core
-from paddle.v2.framework.op import Operator
-from op_test_util import OpTestMeta
-class TestAddOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+class TestAddOp(OpTest):
     def setUp(self):
-        self.type = "add_two"
+        self.op_type = "add"
         self.inputs = {
-            'X': numpy.random.random((102, 105)).astype("float32"),
-            'Y': numpy.random.random((102, 105)).astype("float32")
+            'X': np.random.random((102, 105)).astype("float32"),
+            'Y': np.random.random((102, 105)).astype("float32")
         }
         self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']}
+    def test_check_output(self):
+        self.check_output()
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
import unittest
import numpy as np
from op_test import OpTest
class TestConcatOp(OpTest):
def setUp(self):
self.op_type = "concat"
x0 = np.random.random((2, 3, 2, 5)).astype('float32')
x1 = np.random.random((2, 3, 3, 5)).astype('float32')
x2 = np.random.random((2, 3, 4, 5)).astype('float32')
axis = 2
self.inputs = {'X': [('x0', x0), ('x1', x1), ('x2', x2)]}
self.attrs = {'axis': axis}
self.outputs = {'Out': np.concatenate((x0, x1, x2), axis=axis)}
def test_check_output(self):
self.check_output()
if __name__ == '__main__':
unittest.main()
 import unittest
 import numpy as np
-from gradient_checker import GradientChecker, create_op
-from op_test_util import OpTestMeta
+from op_test import OpTest
-class TestCosSimOpWithRank2(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+class TestCosSimOp(OpTest):
     def setUp(self):
-        self.type = "cos_sim"
+        self.op_type = "cos_sim"
         self.inputs = {
-            'X': np.random.random((32, 64)).astype("float32"),
-            'Y': np.random.random((32, 64)).astype("float32")
+            'X': np.random.random((6, 5)).astype("float32"),
+            'Y': np.random.random((6, 5)).astype("float32")
         }
         expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1)
         expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1)
@@ -23,15 +20,27 @@ class TestCosSimOpWithRank2(unittest.TestCase):
             'Out': np.expand_dims(expect_out, 1)
         }
+    def test_check_output(self):
+        self.check_output()
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.05)
+    def test_check_grad_ingore_x(self):
+        self.check_grad(
+            ['Y'], 'Out', max_relative_error=0.05, no_grad_set=set("X"))
+    def test_check_grad_ingore_y(self):
+        self.check_grad(
+            ['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Y'))
-class TestCosSimOpWithRank2Bcast(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+class TestCosSimOp2(TestCosSimOp):
     def setUp(self):
-        self.type = "cos_sim"
+        self.op_type = "cos_sim"
         self.inputs = {
-            'X': np.random.random((32, 64)).astype("float32"),
-            'Y': np.random.random((1, 64)).astype("float32")
+            'X': np.random.random((6, 5)).astype("float32"),
+            'Y': np.random.random((1, 5)).astype("float32")
         }
         expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1)
         expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1)
@@ -44,14 +53,12 @@ class TestCosSimOpWithRank2Bcast(unittest.TestCase):
         }
-class TestCosSimOpWithRank3(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+class TestCosSimOp3(TestCosSimOp):
     def setUp(self):
-        self.type = "cos_sim"
+        self.op_type = "cos_sim"
         self.inputs = {
-            'X': np.random.random((32, 64, 10)).astype("float32"),
-            'Y': np.random.random((32, 64, 10)).astype("float32")
+            'X': np.random.random((6, 5, 2)).astype("float32"),
+            'Y': np.random.random((6, 5, 2)).astype("float32")
         }
         expect_x_norm = np.linalg.norm(self.inputs['X'], axis=(1, 2))
         expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=(1, 2))
@@ -64,14 +71,12 @@ class TestCosSimOpWithRank3(unittest.TestCase):
         }
-class TestCosSimOpWithRank3Bcast(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+class TestCosSimOp4(TestCosSimOp):
     def setUp(self):
-        self.type = "cos_sim"
+        self.op_type = "cos_sim"
         self.inputs = {
-            'X': np.random.random((32, 64, 10)).astype("float32"),
-            'Y': np.random.random((1, 64, 10)).astype("float32")
+            'X': np.random.random((6, 5, 2)).astype("float32"),
+            'Y': np.random.random((1, 5, 2)).astype("float32")
        }
         expect_x_norm = np.linalg.norm(self.inputs['X'], axis=(1, 2))
         expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=(1, 2))
@@ -84,64 +89,5 @@ class TestCosSimOpWithRank3Bcast(unittest.TestCase):
         }
-class TestCosSimGradOp(GradientChecker):
-    def setUp(self):
-        self.op = create_op("cos_sim")
-        self.inputs = {
-            'X': np.random.random((6, 5)).astype("float32"),
-            'Y': np.random.random((6, 5)).astype("float32")
-        }
-    def test_cpu_gpu_compare(self):
-        self.compare_grad(self.op, self.inputs)
-    def test_normal(self):
-        self.check_grad(
-            self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.05)
-    def test_ignore_x(self):
-        self.check_grad(
-            self.op,
-            self.inputs, ["Y"],
-            "Out",
-            max_relative_error=0.05,
-            no_grad_set={"X"})
-    def test_ignore_y(self):
-        self.check_grad(
-            self.op,
-            self.inputs, ["X"],
-            "Out",
-            max_relative_error=0.05,
-            no_grad_set={"Y"})
-class TestCosSimGradOpWithRank2Bcast(TestCosSimGradOp):
-    def setUp(self):
-        self.op = create_op("cos_sim")
-        self.inputs = {
-            'X': np.random.random((6, 5)).astype("float32"),
-            'Y': np.random.random((1, 5)).astype("float32")
-        }
-class TestCosSimGradOpWithRank3(TestCosSimGradOp):
-    def setUp(self):
-        self.op = create_op("cos_sim")
-        self.inputs = {
-            'X': np.random.random((6, 5, 2)).astype("float32"),
-            'Y': np.random.random((6, 5, 2)).astype("float32")
-        }
-class TestCosSimGradOpWithRank3Bcast(TestCosSimGradOp):
-    def setUp(self):
-        self.op = create_op("cos_sim")
-        self.inputs = {
-            'X': np.random.random((6, 5, 2)).astype("float32"),
-            'Y': np.random.random((1, 5, 2)).astype("float32")
-        }
 if __name__ == '__main__':
     unittest.main()
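For reference, the expected values these cos_sim tests build by hand are plain cosine similarity over the non-batch axes; a minimal numpy sketch for the rank-2 case (names are illustrative only, not part of the framework):

import numpy as np

def cos_sim_rank2(X, Y):
    # Row-wise cosine similarity; Y may have a single row that broadcasts
    # against every row of X, mirroring the broadcast test cases above.
    xy = (X * Y).sum(axis=1)
    x_norm = np.linalg.norm(X, axis=1)
    y_norm = np.linalg.norm(Y, axis=1)
    return np.expand_dims(xy / (x_norm * y_norm), 1)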
 import unittest
 import numpy
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
+from op_test import OpTest
-class TestCrossEntropy(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+class TestCrossEntropy(OpTest):
     def setUp(self):
-        self.type = "onehot_cross_entropy"
+        self.op_type = "onehot_cross_entropy"
         batch_size = 30
         class_num = 10
-        X = numpy.random.random((batch_size, class_num)).astype("float32")
-        label = 5 * numpy.ones(batch_size).astype("int32")
+        X = numpy.random.uniform(0.1, 1.0,
+                                 [batch_size, class_num]).astype("float32")
+        label = (class_num / 2) * numpy.ones(batch_size).astype("int32")
         self.inputs = {'X': X, 'label': label}
         Y = []
         for i in range(0, batch_size):
             Y.append(-numpy.log(X[i][label[i]]))
         self.outputs = {'Y': numpy.array(Y).astype("float32")}
+    def test_check_output(self):
+        self.check_output()
-class CrossEntropyGradOpTest(GradientChecker):
     def test_check_grad(self):
-        op = create_op("onehot_cross_entropy")
-        batch_size = 30
-        class_num = 10
-        inputs = {
-            "X": numpy.random.uniform(
-                0.1, 1.0, [batch_size, class_num]).astype("float32"),
-            "label": (class_num / 2) * numpy.ones(batch_size).astype("int32")
-        }
-        self.check_grad(op, inputs, set("X"), "Y")
+        self.check_grad(['X'], 'Y')
 if __name__ == "__main__":
......
 import unittest
-from op_test_util import OpTestMeta
-import numpy
+import numpy as np
+from op_test import OpTest
-class TestFillZerosLikeOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+class TestFillZerosLikeOp(OpTest):
     def setUp(self):
-        self.type = "fill_zeros_like"
-        self.inputs = {'Src': numpy.random.random((219, 232)).astype("float32")}
-        self.outputs = {'Dst': numpy.zeros_like(self.inputs['Src'])}
+        self.op_type = "fill_zeros_like"
+        self.inputs = {'Src': np.random.random((219, 232)).astype("float32")}
+        self.outputs = {'Dst': np.zeros_like(self.inputs["Src"])}
+    def test_check_output(self):
+        self.check_output()
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
 import unittest
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
-import numpy
-import paddle.v2.framework.core as core
-from paddle.v2.framework.op import Operator
+import numpy as np
+from op_test import OpTest
-class TestGatherOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+class TestGatherOp(OpTest):
     def setUp(self):
-        self.type = "gather"
-        xnp = numpy.random.random((10, 20)).astype("float32")
-        self.inputs = {
-            'X': xnp,
-            'Index': numpy.array([1, 3, 5]).astype("int32")
-        }
-        self.outputs = {'Out': self.inputs['X'][self.inputs['Index']]}
+        self.op_type = "gather"
+        xnp = np.random.random((10, 20)).astype("float32")
+        self.inputs = {'X': xnp, 'Index': np.array([1, 3, 5]).astype("int32")}
+        self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}
+    def test_check_output(self):
+        self.check_output()
-class TestGatherGradOp(GradientChecker):
-    def test_gather_grad(self):
-        op = create_op("gather")
-        xnp = numpy.random.random((10, 20)).astype("float32")
-        inputs = {'X': xnp, 'Index': numpy.array([1, 3, 5]).astype("int32")}
-        self.check_grad(op, inputs, set("X"), "Out")
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
 if __name__ == "__main__":
......
@@ -14,11 +14,11 @@ class GaussianRandomTest(unittest.TestCase):
     def gaussian_random_test(self, place):
         scope = core.Scope()
-        scope.new_var("Out").get_tensor()
+        scope.new_var('Out').get_tensor()
         op = Operator(
             "gaussian_random",
-            Out="Out",
+            Out='Out',
             dims=[1000, 784],
             mean=.0,
             std=1.,
@@ -27,10 +27,10 @@ class GaussianRandomTest(unittest.TestCase):
         op.infer_shape(scope)
         context = core.DeviceContext.create(place)
         op.run(scope, context)
-        tensor = numpy.array(scope.find_var("Out").get_tensor())
+        tensor = numpy.array(scope.find_var('Out').get_tensor())
         self.assertAlmostEqual(numpy.mean(tensor), .0, delta=0.1)
         self.assertAlmostEqual(numpy.std(tensor), 1., delta=0.1)
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
 import unittest
-import numpy
-from paddle.v2.framework.op import Operator
-from gradient_checker import GradientChecker
-from gradient_checker import get_numeric_gradient
+import numpy as np
+import paddle.v2.framework.core as core
+from op_test import get_numeric_gradient
+from op_test import create_op
 class GetNumericGradientTest(unittest.TestCase):
     def test_add_op(self):
-        add_op = Operator('add_two', X="X", Y="Y", Out="Z")
-        x = numpy.random.random((10, 1)).astype("float32")
-        y = numpy.random.random((10, 1)).astype("float32")
-        arr = get_numeric_gradient(add_op, {'X': x, "Y": y}, 'Z', 'X')
+        x = np.random.random((10, 1)).astype("float32")
+        y = np.random.random((10, 1)).astype("float32")
+        z = x + y
+        scope = core.Scope()
+        add_op = create_op(scope, "add", {'X': x, 'Y': y}, {'Out': z}, dict())
+        arr = get_numeric_gradient(scope, add_op, {'X': x, 'Y': y}, 'X', 'Out')
         self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-4)
     def test_softmax_op(self):
         def stable_softmax(x):
             """Compute the softmax of vector x in a numerically stable way."""
-            shiftx = x - numpy.max(x)
-            exps = numpy.exp(shiftx)
-            return exps / numpy.sum(exps)
+            shiftx = x - np.max(x)
+            exps = np.exp(shiftx)
+            return exps / np.sum(exps)
         def label_softmax_grad(Y, dY):
             dX = Y * 0.0
             for i in range(Y.shape[0]):
-                d = numpy.dot(Y[i, :], dY[i, :])
+                d = np.dot(Y[i, :], dY[i, :])
                 dX[i, :] = Y[i, :] * (dY[i, :] - d)
             return dX
-        softmax_op = Operator("softmax", X="X", Y="Y")
-        X = numpy.random.random((2, 2)).astype("float32")
-        Y = numpy.apply_along_axis(stable_softmax, 1, X)
-        dY = numpy.ones(Y.shape)
+        X = np.random.random((2, 2)).astype("float32")
+        Y = np.apply_along_axis(stable_softmax, 1, X)
+        dY = np.ones(Y.shape)
         dX = label_softmax_grad(Y, dY)
-        arr = get_numeric_gradient(softmax_op, {"X": X}, 'Y', 'X')
-        numpy.testing.assert_almost_equal(arr, dX, decimal=1e-2)
+        scope = core.Scope()
+        softmax_op = create_op(scope, "softmax", {"X": X}, {"Y": Y}, dict())
+        arr = get_numeric_gradient(scope, softmax_op, {"X": X}, "X", "Y")
+        np.testing.assert_almost_equal(arr, dX, decimal=1e-2)
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
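The get_numeric_gradient helper exercised above approximates gradients by finite differences over the flattened input. A minimal standalone sketch of the idea (illustrative only, not the framework's actual implementation):

import numpy as np

def numeric_gradient(f, x, delta=1e-5):
    # Perturb each element of x in turn and difference the scalar sum(f(x));
    # this approximates d sum(f) / dx element-wise via central differences.
    grad = np.zeros_like(x)
    for i in range(x.size):
        orig = x.flat[i]
        x.flat[i] = orig + delta
        plus = np.sum(f(x))
        x.flat[i] = orig - delta
        minus = np.sum(f(x))
        x.flat[i] = orig
        grad.flat[i] = (plus - minus) / (2 * delta)
    return grad

For the add case above, numeric_gradient(lambda x: x + y, x) is approximately a tensor of ones, which is why the test asserts arr.mean() is close to 1.0.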
 import unittest
 import numpy as np
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
+from op_test import OpTest
-class TestSigmoidOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+class TestLookupTableOp(OpTest):
     def setUp(self):
-        self.type = 'lookup_table'
-        table = np.random.random((17, 31)).astype('float32')
-        ids = np.random.randint(0, 17, 4).astype('int32')
+        self.op_type = "lookup_table"
+        table = np.random.random((17, 31)).astype("float32")
+        ids = np.random.randint(0, 17, 4).astype("int32")
         self.inputs = {'W': table, 'Ids': ids}
         self.outputs = {'Out': table[ids]}
+    def test_check_output(self):
+        self.check_output()
-class TestSigmoidGradOp(GradientChecker):
-    def test_grad(self):
-        op = create_op('lookup_table')
-        table = np.random.random((17, 31)).astype('float32')
-        ids = np.random.randint(0, 17, 4).astype('int32')
-        inputs = {'W': table, 'Ids': ids}
-        # comapre gradients
-        self.compare_grad(op, inputs, set(['Ids']))
-        # check gradients
-        self.check_grad(op, inputs, set('W'), 'Out')
+    def test_check_grad(self):
+        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'))
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
 import unittest
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
 import numpy as np
+from op_test import OpTest
-class TestMeanOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+class TestMeanOp(OpTest):
     def setUp(self):
-        self.type = "mean"
-        self.inputs = {'X': np.random.random((32, 784)).astype("float32")}
-        self.outputs = {'Out': np.mean(self.inputs['X'])}
+        self.op_type = "mean"
+        self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
+        self.outputs = {'Out': np.mean(self.inputs["X"])}
+    def test_check_output(self):
+        self.check_output()
-class MeanGradOpTest(GradientChecker):
-    def test_normal(self):
-        op = create_op("mean")
-        inputs = {"X": np.random.random((10, 10)).astype("float32")}
-        self.check_grad(op, inputs, set("X"), "Out")
+    def test_checkout_grad(self):
+        self.check_grad(['X'], 'Out')
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
 import unittest
 import numpy as np
-from gradient_checker import GradientChecker, create_op
-from op_test_util import OpTestMeta
+from op_test import OpTest
-class MinusOpTest(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+class MinusOpTest(OpTest):
     def setUp(self):
-        self.type = "minus"
+        self.op_type = "minus"
         self.inputs = {
             'X': np.random.random((32, 84)).astype("float32"),
             'Y': np.random.random((32, 84)).astype("float32")
         }
         self.outputs = {'Out': (self.inputs['X'] - self.inputs['Y'])}
+    def test_check_output(self):
+        self.check_output()
-class MinusGradTest(GradientChecker):
-    def test_left(self):
-        op = create_op("minus")
-        inputs = {
-            "X": np.random.random((10, 10)).astype("float32"),
-            "Y": np.random.random((10, 10)).astype("float32")
-        }
-        self.check_grad(op, inputs, ["X", 'Y'], "Out")
+    def test_check_grad(self):
+        self.check_grad(['X', 'Y'], 'Out')
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
@@ -38,9 +38,9 @@ def feed_data(name, data):
     assert isinstance(data, numpy.ndarray)
     tensor = scope.find_var(name).get_tensor()
     tensor.set_dims(data.shape)
-    if data.dtype == numpy.dtype('int32'):
+    if data.dtype == numpy.dtype("int32"):
         tensor.alloc_int(place)
-    elif data.dtype == numpy.dtype('float32'):
+    elif data.dtype == numpy.dtype("float32"):
         tensor.alloc_float(place)
     else:
         raise ValueError("data type not supported")
@@ -74,22 +74,25 @@ def init_param(net, param_name, dims):
 # fc_layer
 def fc_layer(net, input, size, act="softmax", bias=True, param=None, name=None):
     """
-    Add a fc layer to net
-    :param input: input variable name.
+    The fully connected layer.
+    :param input: The name of input variable.
     :type input: str
-    :param size: fully connected layer size.
-    :param act: activation name
-    :param param: parameter attribute, used for initialize parameters.
-    :param bias: bias attribute. False will not have a bias.
-    :param name: the name of fc layer. If not set, model will generate a
-        readable name
-    :return: output variable name.
+    :param size: The size of fully connected layer.
+    :param act: The name of activation.
+    :param param: The attribute of learnable parameter which can be used to
+        modify initialization mean and std of the parameter.
+    :param bias: The attribute of bias. If set False, this layer does not have
+        a bias.
+    :param name: The name of this layer. If it is not set explictly, a name
+        will be generated automatically.
+    :return: The name of the output variable.
     """
     if name is None:
-        name = 'fc_%d' % uniq_id()
+        name = "fc_%d" % uniq_id()
     if not isinstance(name, str):
-        raise ValueError("name should be string")
+        raise ValueError("The name of a layer should be a string.")
     input_dims = scope.find_var(input).get_tensor().get_dims()
@@ -123,7 +126,7 @@ def fc_layer(net, input, size, act="softmax", bias=True, param=None, name=None):
 def cross_entropy_layer(net, input, label):
-    cost_name = 'cross_entropy_%d' % uniq_id()
+    cost_name = "cross_entropy_%d" % uniq_id()
     cross_entropy_op = Operator(
         "onehot_cross_entropy", X=input, label=label, Y=cost_name)
     net.append_op(cross_entropy_op)
@@ -177,8 +180,8 @@ def error_rate(predict, label):
     return error_num / float(len(label))
-images = data_layer(name='pixel', dims=[BATCH_SIZE, 784])
-labels = data_layer(name='label', dims=[BATCH_SIZE])
+images = data_layer(name="pixel", dims=[BATCH_SIZE, 784])
+labels = data_layer(name="label", dims=[BATCH_SIZE])
 fc1 = fc_layer(net=forward_net, input=images, size=100, act="sigmoid")
 fc2 = fc_layer(net=forward_net, input=fc1, size=100, act="sigmoid")
 predict = fc_layer(net=forward_net, input=fc2, size=10, act="softmax")
......
 import unittest
 import numpy as np
-from gradient_checker import GradientChecker, create_op
-from op_test_util import OpTestMeta
+from op_test import OpTest
-class TestMulOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+class TestMulOp(OpTest):
     def setUp(self):
-        self.type = "mul"
+        self.op_type = "mul"
         self.inputs = {
             'X': np.random.random((32, 84)).astype("float32"),
             'Y': np.random.random((84, 100)).astype("float32")
         }
         self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}
+    def test_check_output(self):
+        self.check_output()
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5)
+    def test_check_grad_ingore_x(self):
+        self.check_grad(
+            ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X"))
+    def test_check_grad_ingore_y(self):
+        self.check_grad(
+            ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y'))
-class TestMulGradOp(GradientChecker):
+class TestMulOp2(OpTest):
     def setUp(self):
-        self.op = create_op("mul")
+        self.op_type = "mul"
         self.inputs = {
-            'X': np.random.random((32, 84)).astype("float32"),
-            'Y': np.random.random((84, 100)).astype("float32")
+            'X': np.random.random((15, 4, 12, 10)).astype("float32"),
+            'Y': np.random.random((4, 30, 8, 2, 9)).astype("float32")
         }
+        self.attrs = {'x_num_col_dims': 2, 'y_num_col_dims': 2}
+        self.outputs = {
+            'Out': np.dot(self.inputs['X'].reshape(15 * 4, 12 * 10),
+                          self.inputs['Y'].reshape(4 * 30, 8 * 2 * 9))
+        }
-    def test_cpu_gpu_compare(self):
-        self.compare_grad(self.op, self.inputs)
-    def test_normal(self):
-        # mul op will enlarge the relative error
-        self.check_grad(
-            self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.5)
-    def test_ignore_x(self):
-        self.check_grad(
-            self.op,
-            self.inputs, ["Y"],
-            "Out",
-            max_relative_error=0.5,
-            no_grad_set={"X"})
-    def test_ignore_y(self):
-        self.check_grad(
-            self.op,
-            self.inputs, ["X"],
-            "Out",
-            max_relative_error=0.5,
-            no_grad_set={"Y"})
-# TODO(dzh,qijun) : mulgrad test case need transpose feature of blas library
+    def test_check_output(self):
+        self.check_output()
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5)
+    def test_check_grad_ingore_x(self):
+        self.check_grad(
+            ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set('X'))
+    def test_check_grad_ignore_y(self):
+        self.check_grad(
+            ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y'))
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
@@ -15,7 +15,7 @@ def fc(X, W, Y):
 class TestNet(unittest.TestCase):
     def test_net_all(self):
         net = core.Net.create()
-        op1 = Operator("add_two", X="X", Y="Y", Out="Out")
+        op1 = Operator("add", X="X", Y="Y", Out="Out")
         net.append_op(op1)
         net2 = core.Net.create()
@@ -26,7 +26,7 @@ class TestNet(unittest.TestCase):
         expected = '''
 Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]}.
-    Op(add_two), inputs:{X[X], Y[Y]}, outputs:{Out[Out]}.
+    Op(add), inputs:{X[X], Y[Y]}, outputs:{Out[Out]}.
     Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}.
     Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}.
     Op(mul), inputs:{X[X], Y[W]}, outputs:{Out[pre_activation]}.
@@ -35,5 +35,5 @@ Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]}.
         self.assertEqual(expected, "\n" + str(net))
-if __name__ == '__main__':
+if __name__ == "__main__":
@@ -193,10 +193,10 @@ class TestOpDescCreationMethod(unittest.TestCase):
 class TestOpCreations(unittest.TestCase):
     def test_all(self):
-        add_op = op.Operator("add_two", X="a", Y="b", Out="z")
+        add_op = op.Operator("add", X="a", Y="b", Out="z")
         self.assertIsNotNone(add_op)
         # Invoke C++ DebugString()
-        self.assertEqual('Op(add_two), inputs:{X[a], Y[b]}, outputs:{Out[z]}.',
+        self.assertEqual('Op(add), inputs:{X[a], Y[b]}, outputs:{Out[z]}.',
                          str(add_op))
......
@@ -146,7 +146,7 @@ class TestRecurrentOp(unittest.TestCase):
         stepnet = core.Net.create()
         x_fc_op = Operator("mul", X="x@alias", Y="W", Out="Wx")
         h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh")
-        sum_op = Operator("add_two", X="Wx", Y="Uh", Out="sum")
+        sum_op = Operator("add", X="Wx", Y="Uh", Out="sum")
         sig_op = Operator("sigmoid", X="sum", Y="h@alias")
         for op in [x_fc_op, h_fc_op, sum_op, sig_op]:
......
import unittest
import numpy as np
from op_test import OpTest
class TestReshapeOp(OpTest):
def setUp(self):
self.op_type = "reshape"
self.inputs = {'X': np.random.random((10, 20)).astype("float32")}
self.attrs = {'shape': [10 * 20]}
self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Out")
if __name__ == '__main__':
unittest.main()
 import unittest
 import numpy as np
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
+from op_test import OpTest
-class TestRowwiseAddOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+class TestRowwiseAddOp(OpTest):
     def setUp(self):
-        self.type = "rowwise_add"
+        self.op_type = "rowwise_add"
         self.inputs = {
-            'X': np.random.random((32, 84)).astype("float32"),
-            'b': np.random.random(84).astype("float32")
+            'X': np.random.uniform(0.1, 1, [5, 10]).astype("float32"),
+            'b': np.random.uniform(0.1, 1, [10]).astype("float32")
         }
         self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}
+    def test_check_output(self):
+        self.check_output()
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'b'], 'Out')
+    def test_check_grad_ingore_b(self):
+        self.check_grad(['X'], 'Out', no_grad_set=set('b'))
+    def test_check_grad_ingore_x(self):
+        self.check_grad(['b'], 'Out', no_grad_set=set('X'))
-class TestRowwiseAddGradOp(GradientChecker):
+class TestRowwiseAddOp2(OpTest):
     def setUp(self):
-        self.op = create_op("rowwise_add")
+        self.op_type = "rowwise_add"
         self.inputs = {
-            "X": np.random.uniform(0.1, 1, [5, 10]).astype("float32"),
-            "b": np.random.uniform(0.1, 1, [10]).astype("float32")
+            'X': np.random.uniform(0.1, 1, [2, 3, 2, 5]).astype("float32"),
+            'b': np.random.uniform(0.1, 1, [2, 5]).astype("float32")
         }
+        self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])}
+    def test_check_output(self):
+        self.check_output()
-    def test_normal(self):
-        self.check_grad(self.op, self.inputs, ["X", "b"], "Out")
-    def test_ignore_b(self):
-        self.check_grad(self.op, self.inputs, ["X"], "Out", no_grad_set={"b"})
-    def test_ignore_x(self):
-        self.check_grad(self.op, self.inputs, ["b"], "Out", no_grad_set={"X"})
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'b'], 'Out')
+    def test_check_grad_ignore_b(self):
+        self.check_grad(['X'], 'Out', no_grad_set=set('b'))
+    def test_check_grad_ignore_x(self):
+        self.check_grad(['b'], 'Out', no_grad_set=set('X'))
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
 import unittest
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
 import numpy as np
-from paddle.v2.framework.op import Operator
+from op_test import OpTest
-class IdentityTest(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+class IdentityTest(OpTest):
     def setUp(self):
-        self.type = "identity"
-        self.inputs = {'X': np.random.random((32, 784)).astype("float32")}
+        self.op_type = "identity"
+        self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
         self.outputs = {'Out': self.inputs['X']}
+    def test_check_output(self):
+        self.check_output()
-class IdentityGradOpTest(GradientChecker):
-    def test_normal(self):
-        op = create_op("identity")
-        inputs = {"X": np.random.random((10, 10)).astype("float32")}
-        self.check_grad(op, inputs, set("X"), "Out")
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
-class ScaleTest(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+class ScaleTest(OpTest):
     def setUp(self):
-        self.type = "scale"
-        self.inputs = {'X': np.random.random((32, 784)).astype("float32")}
+        self.op_type = "scale"
+        self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
         self.attrs = {'scale': -2.3}
         self.outputs = {'Out': self.inputs['X'] * self.attrs['scale']}
+    def test_check_output(self):
+        self.check_output()
-class ScaleGradTest(GradientChecker):
-    def test_normal(self):
-        op = Operator("scale", X="X", Out="Out", scale=3.2)
-        self.check_grad(op,
-                        {"X": np.random.random((10, 10)).astype("float32")},
-                        set("X"), "Out")
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
 import unittest
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
-import numpy
-import paddle.v2.framework.core as core
-from paddle.v2.framework.op import Operator
+import numpy as np
+from op_test import OpTest
-class TestScatterOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+class TestScatterOp(OpTest):
     def setUp(self):
-        self.type = "scatter"
-        ref_np = numpy.ones((3, 3)).astype("float32")
-        index_np = numpy.array([1, 2]).astype("int32")
-        updates_np = numpy.random.random((2, 3)).astype("float32")
-        output_np = numpy.copy(ref_np)
+        self.op_type = "scatter"
+        ref_np = np.ones((3, 3)).astype("float32")
+        index_np = np.array([1, 2]).astype("int32")
+        updates_np = np.random.random((2, 3)).astype("float32")
+        output_np = np.copy(ref_np)
         output_np[index_np] += updates_np
         self.inputs = {'Ref': ref_np, 'Index': index_np, 'Updates': updates_np}
         self.outputs = {'Out': output_np}
+    def test_check_output(self):
+        self.check_output()
-class TestScatterGradOp(GradientChecker):
-    def test_scatter_grad(self):
-        op = create_op("scatter")
-        # test data setup
-        ref_np = numpy.ones((3, 10)).astype("float32")
-        index_np = numpy.array([1, 2]).astype("int32")
-        updates_np = numpy.random.random((2, 10)).astype("float32")
-        output_np = numpy.copy(ref_np)
-        output_np[index_np] += updates_np
-        inputs = {'Ref': ref_np, 'Index': index_np, 'Updates': updates_np}
-        self.check_grad(
-            op, inputs, set(["Updates", "Ref"]), "Out", in_place=True)
+    def test_check_grad(self):
+        self.check_grad(['Updates', 'Ref'], 'Out', in_place=True)
 if __name__ == "__main__":
......
 import unittest
-import numpy
-from op_test_util import OpTestMeta
+import numpy as np
+from op_test import OpTest
-class TestSGD(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+class TestSGD(OpTest):
     def setUp(self):
-        self.type = "sgd"
-        w = numpy.random.random((102, 105)).astype("float32")
-        g = numpy.random.random((102, 105)).astype("float32")
+        self.op_type = "sgd"
+        w = np.random.random((102, 105)).astype("float32")
+        g = np.random.random((102, 105)).astype("float32")
         lr = 0.1
         self.inputs = {'param': w, 'grad': g}
         self.attrs = {'learning_rate': lr}
         self.outputs = {'param_out': w - lr * g}
+    def test_check_output(self):
+        self.check_output()
 if __name__ == "__main__":
     unittest.main()
 import unittest
 import numpy as np
-from op_test_util import OpTestMeta
-from gradient_checker import GradientChecker, create_op
+from op_test import OpTest
-class TestSigmoidOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+class TestSigmoid(OpTest):
     def setUp(self):
-        self.type = "sigmoid"
-        self.inputs = {'X': np.random.random((15, 31)).astype("float32")}
+        self.op_type = "sigmoid"
+        self.inputs = {
+            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
+        }
         self.outputs = {'Y': 1 / (1 + np.exp(-self.inputs['X']))}
+    def test_check_output(self):
+        self.check_output()
-class TestSigmoidGradOp(GradientChecker):
-    def test_grad(self):
-        op = create_op("sigmoid")
-        inputs = {"X": np.random.uniform(0.1, 1, [11, 17]).astype("float32")}
-        # compare gpu and cpu results for backward op.
-        # this test will be skiped if only compiling CPU version.
-        self.compare_grad(op, inputs)
-        # check gradients
-        self.check_grad(op, inputs, set("X"), "Y", max_relative_error=0.007)
+    def test_check_grad(self):
+        self.check_grad(["X"], "Y", max_relative_error=0.007)
 if __name__ == '__main__':
......
 import unittest
 import numpy as np
+from op_test import OpTest
-from gradient_checker import GradientChecker, create_op
-from op_test_util import OpTestMeta
 def stable_softmax(x):
@@ -13,23 +10,22 @@ def stable_softmax(x):
     return exps / np.sum(exps)
-class TestSoftmaxOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+class TestSoftmaxOp(OpTest):
     def setUp(self):
-        self.type = "softmax"
-        self.inputs = {'X': np.random.random((32, 100)).astype("float32")}
+        self.op_type = "softmax"
+        self.inputs = {
+            'X': np.random.uniform(0.1, 1, [10, 10]).astype("float32")
+        }
         self.outputs = {
             'Y': np.apply_along_axis(stable_softmax, 1, self.inputs['X'])
         }
+    def test_check_output(self):
+        self.check_output()
-class SoftmaxGradOpTest(GradientChecker):
-    def test_softmax(self):
-        op = create_op("softmax")
-        inputs = {"X": np.random.uniform(0.1, 1, [10, 10]).astype("float32")}
-        self.check_grad(op, inputs, set("X"), "Y")
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Y')
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
import unittest
import numpy as np
from op_test import OpTest
class TestSquaredL2DistanceOp_f0(OpTest):
def setUp(self):
self.op_type = "squared_l2_distance"
self.inputs = {
'X': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32"),
'Y': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32")
}
sub_res = self.inputs['X'] - self.inputs['Y']
output = sub_res * sub_res
self.outputs = {
'sub_result': sub_res,
'Out': np.expand_dims(output.sum(1), 1)
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X', 'Y'], 'Out')
class TestSquaredL2DistanceOp_f1(OpTest):
def setUp(self):
self.op_type = "squared_l2_distance"
self.inputs = {
'X': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32"),
'Y': np.random.uniform(0.1, 0.6, (1, 3)).astype("float32")
}
sub_res = self.inputs['X'] - self.inputs['Y']
output = sub_res * sub_res
self.outputs = {
'sub_result': sub_res,
'Out': np.expand_dims(output.sum(1), 1)
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X', 'Y'], 'Out')
class TestSquaredL2DistanceOp_f2(OpTest):
def setUp(self):
self.op_type = "squared_l2_distance"
self.inputs = {
'X': np.random.uniform(0.1, 0.6, (2, 3, 4)).astype("float32"),
'Y': np.random.uniform(0.1, 0.6, (1, 3, 4)).astype("float32")
}
sub_res = self.inputs['X'] - self.inputs['Y']
sub_res = sub_res.reshape((2, 3 * 4))
output = sub_res * sub_res
self.outputs = {
'sub_result': sub_res,
'Out': np.expand_dims(output.sum(1), 1)
}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X', 'Y'], 'Out')
if __name__ == "__main__":
unittest.main()
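In all three layouts above, the expected output reduces to Out[i] = ((X - Y) ** 2).sum() over the non-batch elements of row i (i.e. after flattening trailing dimensions), with Y broadcast across the batch when its first dimension is 1; sub_result holds the raw element-wise difference X - Y.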
import unittest
import numpy as np
from op_test import OpTest
class TestSumOp(OpTest):
def setUp(self):
self.op_type = "sum"
x0 = np.random.random((3, 4)).astype('float32')
x1 = np.random.random((3, 4)).astype('float32')
x2 = np.random.random((3, 4)).astype('float32')
self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]}
y = x0 + x1 + x2
self.outputs = {'Out': y}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['x0'], 'Out')
if __name__ == "__main__":
unittest.main()
@@ -3,7 +3,7 @@ import unittest
 import numpy
-class TestScope(unittest.TestCase):
+class TestTensor(unittest.TestCase):
     def test_int_tensor(self):
         scope = core.Scope()
         var = scope.new_var("test_tensor")
@@ -20,8 +20,8 @@ class TestScope(unittest.TestCase):
         tensor.set(tensor_array, place)
         tensor_array_2 = numpy.array(tensor)
-        self.assertEqual(1.0, tensor_array_2[3, 9])
-        self.assertEqual(2.0, tensor_array_2[19, 11])
+        self.assertEqual(1, tensor_array_2[3, 9])
+        self.assertEqual(2, tensor_array_2[19, 11])
     def test_float_tensor(self):
         scope = core.Scope()
@@ -43,6 +43,84 @@ class TestScope(unittest.TestCase):
         self.assertAlmostEqual(1.0, tensor_array_2[3, 9])
         self.assertAlmostEqual(2.0, tensor_array_2[19, 11])
def test_int_lod_tensor(self):
places = [core.CPUPlace(), core.GPUPlace(0)]
for place in places:
scope = core.Scope()
var = scope.new_var("test_tensor")
var_lod = scope.new_var("test_lod_tensor")
tensor = var.get_tensor()
lod_tensor = var_lod.get_lod_tensor()
tensor.set_dims([4, 4, 6])
tensor.alloc_int(place)
array = numpy.array(tensor)
array[0, 0, 0] = 3
array[3, 3, 5] = 10
tensor.set(array, place)
lod_tensor.set_tensor(tensor)
lod_tensor.set_lod([[0, 2, 4]])
lod_v = numpy.array(lod_tensor.tensor())
self.assertTrue(numpy.alltrue(array == lod_v))
lod = lod_tensor.lod()
self.assertEqual(0, lod[0][0])
self.assertEqual(2, lod[0][1])
self.assertEqual(4, lod[0][2])
def test_float_lod_tensor(self):
places = [core.CPUPlace(), core.GPUPlace(0)]
for place in places:
scope = core.Scope()
var = scope.new_var("test_tensor")
var_lod = scope.new_var("test_lod_tensor")
tensor = var.get_tensor()
lod_tensor = var_lod.get_lod_tensor()
tensor.set_dims([5, 2, 3, 4])
tensor.alloc_float(place)
tensor_array = numpy.array(tensor)
self.assertEqual((5, 2, 3, 4), tensor_array.shape)
tensor_array[0, 0, 0, 0] = 1.0
tensor_array[0, 0, 0, 1] = 2.0
tensor.set(tensor_array, place)
lod_tensor.set_tensor(tensor)
lod_v = numpy.array(lod_tensor.tensor())
self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0])
self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1])
self.assertEqual(len(lod_tensor.lod()), 0)
lod_py = [[0, 2, 5], [0, 2, 4, 5]]
lod_tensor.set_lod(lod_py)
lod = lod_tensor.lod()
self.assertListEqual(lod_py, lod)
def test_lod_tensor_init(self):
scope = core.Scope()
var = scope.new_var("test_tensor")
place = core.CPUPlace()
tensor = var.get_tensor()
tensor.set_dims([5, 2, 3, 4])
tensor.alloc_float(place)
tensor_array = numpy.array(tensor)
tensor_array[0, 0, 0, 0] = 1.0
tensor_array[0, 0, 0, 1] = 2.0
tensor.set(tensor_array, place)
lod_py = [[0, 2, 5], [0, 2, 4, 5]]
lod_tensor = core.LoDTensor(lod_py, tensor)
lod_v = numpy.array(lod_tensor.tensor())
self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0])
self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1])
self.assertListEqual(lod_py, lod_tensor.lod())
 if __name__ == '__main__':
     unittest.main()
import unittest
import numpy as np
from op_test import OpTest
class TestTopkOp(OpTest):
def setUp(self):
self.op_type = "top_k"
k = 1
input = np.random.random((32, 84)).astype("float32")
output = np.ndarray((32, k))
indices = np.ndarray((32, k))
self.inputs = {'X': input}
self.attrs = {'k': k}
for rowid in xrange(32):
row = input[rowid]
output[rowid] = np.sort(row)[-k:]
indices[rowid] = row.argsort()[-k:]
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp3d(OpTest):
def setUp(self):
self.op_type = "top_k"
k = 1
input = np.random.random((32, 2, 84)).astype("float32")
input_flat_2d = input.reshape(64, 84)
output = np.ndarray((64, k))
indices = np.ndarray((64, k)).astype("int")
# FIXME: should use 'X': input for a 3d input
self.inputs = {'X': input_flat_2d}
self.attrs = {'k': k}
for rowid in xrange(64):
row = input_flat_2d[rowid]
output[rowid] = np.sort(row)[-k:]
indices[rowid] = row.argsort()[-k:]
self.outputs = {'Out': output, 'Indices': indices}
if __name__ == "__main__":
unittest.main()
@@ -14,11 +14,11 @@ class UniformRandomTest(unittest.TestCase):
     def uniform_random_test(self, place):
         scope = core.Scope()
-        scope.new_var("X").get_tensor()
+        scope.new_var('X').get_tensor()
         op = Operator(
             "uniform_random",
-            Out="X",
+            Out='X',
             dims=[1000, 784],
             min=-5.0,
             max=10.0,
@@ -27,9 +27,9 @@ class UniformRandomTest(unittest.TestCase):
         op.infer_shape(scope)
         ctx = core.DeviceContext.create(place)
         op.run(scope, ctx)
-        tensor = numpy.array(scope.find_var("X").get_tensor())
+        tensor = numpy.array(scope.find_var('X').get_tensor())
         self.assertAlmostEqual(tensor.mean(), 2.5, delta=0.1)
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
@@ -2,6 +2,7 @@ import numpy
 import collections
 import topology
 import minibatch
+import cPickle
 __all__ = ['infer', 'Inference']
@@ -25,11 +26,23 @@ class Inference(object):
     :type parameters: paddle.v2.parameters.Parameters
     """
-    def __init__(self, output_layer, parameters):
+    def __init__(self, parameters, output_layer=None, fileobj=None):
         import py_paddle.swig_paddle as api
-        topo = topology.Topology(output_layer)
-        gm = api.GradientMachine.createFromConfigProto(
-            topo.proto(), api.CREATE_MODE_TESTING, [api.PARAMETER_VALUE])
+        if output_layer is not None:
+            topo = topology.Topology(output_layer)
+            gm = api.GradientMachine.createFromConfigProto(
+                topo.proto(), api.CREATE_MODE_TESTING, [api.PARAMETER_VALUE])
+            self.__data_types__ = topo.data_type()
+        elif fileobj is not None:
+            tmp = cPickle.load(fileobj)
+            gm = api.GradientMachine.createByConfigProtoStr(
+                tmp['protobin'], api.CREATE_MODE_TESTING,
+                [api.PARAMETER_VALUE])
+            self.__data_types__ = tmp['data_type']
+        else:
+            raise ValueError("Either output_layer or fileobj must be set")
         for param in gm.getParameters():
             val = param.getBuf(api.PARAMETER_VALUE)
             name = param.getName()
@@ -43,7 +56,6 @@ class Inference(object):
         # called here, but it's better to call this function in one place.
         param.setValueUpdated()
         self.__gradient_machine__ = gm
-        self.__data_types__ = topo.data_type()
     def iter_infer(self, input, feeding=None):
         from data_feeder import DataFeeder
......
@@ -18,6 +18,7 @@ from paddle.proto.ModelConfig_pb2 import ModelConfig
 import paddle.trainer_config_helpers as conf_helps
 import layer as v2_layer
 import config_base
+import cPickle
 __all__ = ['Topology']
@@ -100,6 +101,14 @@ class Topology(object):
             return layer
         return None
+    def serialize_for_inference(self, stream):
+        protobin = self.proto().SerializeToString()
+        data_type = self.data_type()
+        cPickle.dump({
+            'protobin': protobin,
+            'data_type': data_type
+        }, stream, cPickle.HIGHEST_PROTOCOL)
 def __check_layer_type__(layer):
     if not isinstance(layer, config_base.Layer):
......
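Taken together, the inference.py and topology.py changes above let a topology be serialized once and reloaded later for inference without rebuilding the layer graph. A minimal usage sketch under those changes (the file name and the prediction_layer/params variables are illustrative only):

# Save: pickle the inference topology plus its input data types.
with open('inference_topology.pkl', 'wb') as f:
    topology.Topology(prediction_layer).serialize_for_inference(f)

# Load: build the Inference engine from the pickled stream instead of
# from the original output_layer object.
with open('inference_topology.pkl', 'rb') as f:
    inferer = Inference(parameters=params, fileobj=f)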
@@ -174,13 +174,18 @@ class SGD(object):
                         pass_id=pass_id,
                         batch_id=batch_id,
                         cost=cost,
-                        evaluator=batch_evaluator))
+                        evaluator=batch_evaluator,
+                        gm=self.__gradient_machine__))
                 self.__parameter_updater__.finishBatch(cost)
                 batch_evaluator.finish()
             self.__parameter_updater__.finishPass()
             pass_evaluator.finish()
-            event_handler(v2_event.EndPass(pass_id, evaluator=pass_evaluator))
+            event_handler(
+                v2_event.EndPass(
+                    pass_id,
+                    evaluator=pass_evaluator,
+                    gm=self.__gradient_machine__))
         self.__gradient_machine__.finish()
     def test(self, reader, feeding=None):
......