Commit 090b8114 authored by Q qijun

merge baidu/develop

@@ -4,7 +4,6 @@ cache:
- $HOME/.ccache
- $HOME/.cache/pip
- $TRAVIS_BUILD_DIR/build/third_party
- - $TRAVIS_BUILD_DIR/build_android/third_party
sudo: required
dist: trusty
os:
@@ -12,7 +11,6 @@ os:
env:
- JOB=build_doc
- JOB=check_style
- - JOB=build_android
addons:
apt:
packages:
@@ -23,7 +21,6 @@ addons:
- python
- python-pip
- python2.7-dev
- - python-numpy
- python-wheel
- libboost-dev
- curl
@@ -37,8 +34,8 @@ before_install:
- if [[ "$JOB" == "check_style" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi
# Paddle is using protobuf 3.1 currently. Protobuf 3.2 breaks the compatibility. So we specify the python
# protobuf version.
- - pip install -r $TRAVIS_BUILD_DIR/python/requirements.txt
- - pip install wheel sphinx==1.5.6 recommonmark sphinx-rtd-theme==0.1.9 virtualenv pre-commit LinkChecker
+ - sudo pip install -r $TRAVIS_BUILD_DIR/python/requirements.txt
+ - sudo pip install wheel sphinx==1.5.6 recommonmark sphinx-rtd-theme==0.1.9 virtualenv pre-commit LinkChecker
- curl https://glide.sh/get | bash
- eval "$(GIMME_GO_VERSION=1.8.3 gimme)"
- go get -u github.com/alecthomas/gometalinter
......
@@ -65,8 +65,8 @@ if(NOT CMAKE_BUILD_TYPE)
endif()
if(ANDROID)
- if(${CMAKE_SYSTEM_VERSION} VERSION_LESS "21")
- message(FATAL_ERROR "Unsupport standalone toolchains with Android API level lower than 21")
+ if(${CMAKE_SYSTEM_VERSION} VERSION_LESS "16")
+ message(FATAL_ERROR "Unsupport standalone toolchains with Android API level lower than 16")
endif()
set(WITH_GPU OFF CACHE STRING
......
@@ -4,9 +4,15 @@ MAINTAINER PaddlePaddle Authors <paddle-dev@baidu.com>
ARG UBUNTU_MIRROR
RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ubuntu.com/ubuntu#${UBUNTU_MIRROR}#g' /etc/apt/sources.list; fi'
+ # ENV variables
+ ARG ANDROID_ABI
+ ENV ANDROID_ABI=${ANDROID_ABI:-"armeabi-v7a"}
ENV HOME=/root \
ANDROID_NDK_HOME=/opt/android-ndk-linux \
- ANDROID_STANDALONE_TOOLCHAIN=/opt/android-toolchain-gcc
+ ANDROID_ARM_STANDALONE_TOOLCHAIN=/opt/arm-toolchain \
+ ANDROID_ARM64_STANDALONE_TOOLCHAIN=/opt/arm64-toolchain
RUN apt-get update && \
apt-get install -y \
@@ -15,12 +21,11 @@ RUN apt-get update && \
apt-get clean -y
# Install Go and glide
- RUN wget -O go.tgz https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz && \
- tar -C /usr/local -xzf go.tgz && \
+ RUN wget -qO- go.tgz https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz | \
+ tar -xz -C /usr/local && \
mkdir /root/gopath && \
mkdir /root/gopath/bin && \
- mkdir /root/gopath/src && \
- rm go.tgz
+ mkdir /root/gopath/src
ENV GOROOT=/usr/local/go GOPATH=/root/gopath
# should not be in the same line with GOROOT definition, otherwise docker build could not find GOROOT.
ENV PATH=${PATH}:${GOROOT}/bin:${GOPATH}/bin
@@ -42,7 +47,8 @@ RUN mkdir /opt/android-ndk-tmp && \
wget -q https://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip && \
unzip -q android-ndk-r14b-linux-x86_64.zip && \
mv android-ndk-r14b ${ANDROID_NDK_HOME} && \
- ${ANDROID_NDK_HOME}/build/tools/make-standalone-toolchain.sh --arch=arm --platform=android-21 --install-dir=${ANDROID_STANDALONE_TOOLCHAIN} && \
+ ${ANDROID_NDK_HOME}/build/tools/make-standalone-toolchain.sh --arch=arm --platform=android-23 --install-dir=${ANDROID_ARM_STANDALONE_TOOLCHAIN} && \
+ ${ANDROID_NDK_HOME}/build/tools/make-standalone-toolchain.sh --arch=arm64 --platform=android-23 --install-dir=${ANDROID_ARM64_STANDALONE_TOOLCHAIN} && \
rm -rf /opt/android-ndk-tmp && \
rm -rf ${ANDROID_NDK_HOME}
......
@@ -20,6 +20,7 @@
# The supported variables are listed belows:
#
# ANDROID_STANDALONE_TOOLCHAIN
+ # ANDROID_TOOLCHAIN
# ANDROID_ABI
# ANDROID_NATIVE_API_LEVEL
# ANDROID_ARM_MODE
@@ -57,6 +58,10 @@ IF(NOT DEFINED CMAKE_SYSTEM_VERSION AND ANDROID_NATIVE_API_LEVEL)
ENDIF()
ENDIF()
+ IF(NOT DEFINED ANDROID_TOOLCHAIN)
+ SET(ANDROID_TOOLCHAIN clang)
+ ENDIF()
IF(NOT DEFINED ANDROID_ABI)
SET(ANDROID_ABI "armeabi-v7a")
ENDIF()
@@ -82,6 +87,7 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0")
"${CMAKE_VERSION}), when cross-compiling for Android.")
IF(ANDROID_STANDALONE_TOOLCHAIN)
+ # Use standalone toolchain
SET(CMAKE_SYSROOT "${ANDROID_STANDALONE_TOOLCHAIN}/sysroot")
IF(NOT CMAKE_SYSTEM_VERSION)
@@ -96,26 +102,44 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0")
ENDIF()
# Toolchain
- SET(ANDROID_TOOLCHAIN "gcc")
SET(ANDROID_TOOLCHAIN_ROOT ${ANDROID_STANDALONE_TOOLCHAIN})
- IF(ANDROID_ABI MATCHES "^armeabi(-v7a)?$")
- SET(ANDROID_TOOLCHAIN_NAME arm-linux-androideabi)
- IF(ANDROID_ABI STREQUAL "armeabi")
- SET(CMAKE_SYSTEM_PROCESSOR armv5te)
- ELSEIF(ANDROID_ABI STREQUAL "armeabi-v7a")
- SET(CMAKE_SYSTEM_PROCESSOR armv7-a)
- ENDIF()
- ENDIF()
- IF(ANDROID_ABI STREQUAL "arm64-v8a")
- SET(ANDROID_TOOLCHAIN_NAME aarch64-linux-android)
- SET(CMAKE_SYSTEM_PROCESSOR aarch64)
- ENDIF()
- SET(ANDROID_TOOLCHAIN_PREFIX "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_NAME}-")
+ ELSE(ANDROID_NDK)
+ # TODO: use android ndk
+ ENDIF()
+ IF(ANDROID_ABI MATCHES "^armeabi(-v7a)?$")
+ SET(ANDROID_TOOLCHAIN_NAME arm-linux-androideabi)
+ IF(ANDROID_ABI STREQUAL "armeabi")
+ SET(CMAKE_SYSTEM_PROCESSOR armv5te)
+ SET(ANDROID_CLANG_TRIPLE armv5te-none-linux-androideabi)
+ ELSEIF(ANDROID_ABI STREQUAL "armeabi-v7a")
+ SET(CMAKE_SYSTEM_PROCESSOR armv7-a)
+ SET(ANDROID_CLANG_TRIPLE armv7-none-linux-androideabi)
+ ENDIF()
+ ELSEIF(ANDROID_ABI STREQUAL "arm64-v8a")
+ SET(ANDROID_TOOLCHAIN_NAME aarch64-linux-android)
+ SET(CMAKE_SYSTEM_PROCESSOR aarch64)
+ SET(ANDROID_CLANG_TRIPLE aarch64-none-linux-android)
+ ELSE()
+ MESSAGE(FATAL_ERROR "Invalid Android ABI: ${ANDROID_ABI}.")
+ ENDIF()
+ SET(ANDROID_TOOLCHAIN_PREFIX "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_NAME}-")
+ IF(ANDROID_TOOLCHAIN STREQUAL clang)
+ SET(ANDROID_C_COMPILER_NAME clang)
+ SET(ANDROID_CXX_COMPILER_NAME clang++)
+ SET(CMAKE_C_COMPILER_TARGET ${ANDROID_CLANG_TRIPLE})
+ SET(CMAKE_CXX_COMPILER_TARGET ${ANDROID_CLANG_TRIPLE})
+ ELSEIF(ANDROID_TOOLCHAIN STREQUAL gcc)
+ SET(ANDROID_C_COMPILER_NAME gcc)
+ SET(ANDROID_CXX_COMPILER_NAME g++)
+ ELSE()
+ MESSAGE(FATAL_ERROR "Invalid Android toolchain: ${ANDROID_TOOLCHAIN}")
+ ENDIF()
ENDIF()
# C compiler
IF(NOT CMAKE_C_COMPILER)
- SET(ANDROID_C_COMPILER "${ANDROID_TOOLCHAIN_PREFIX}gcc")
+ SET(ANDROID_C_COMPILER "${ANDROID_TOOLCHAIN_PREFIX}${ANDROID_C_COMPILER_NAME}")
ELSE()
GET_FILENAME_COMPONENT(ANDROID_C_COMPILER ${CMAKE_C_COMPILER} PROGRAM)
ENDIF()
@@ -125,7 +149,7 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0")
# CXX compiler
IF(NOT CMAKE_CXX_COMPILER)
- SET(ANDROID_CXX_COMPILER "${ANDROID_TOOLCHAIN_PREFIX}g++")
+ SET(ANDROID_CXX_COMPILER "${ANDROID_TOOLCHAIN_PREFIX}${ANDROID_CXX_COMPILER_NAME}")
ELSE()
GET_FILENAME_COMPONENT(ANDROID_CXX_COMPILER ${CMAKE_CXX_COMPILER} PROGRAM)
ENDIF()
@@ -137,7 +161,7 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0")
SET(CMAKE_CXX_COMPILER ${ANDROID_CXX_COMPILER} CACHE PATH "CXX compiler" FORCE)
# Toolchain and ABI specific flags.
- SET(ANDROID_COMPILER_FLAGS "-ffunction-sections -fdata-sections -finline-limit=64")
+ SET(ANDROID_COMPILER_FLAGS "-ffunction-sections -fdata-sections")
SET(ANDROID_LINKER_FLAGS "-Wl,--gc-sections")
IF(ANDROID_ABI STREQUAL "armeabi")
@@ -145,8 +169,7 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0")
-march=armv5te
-mtune=xscale
-msoft-float)
- ENDIF()
- IF(ANDROID_ABI STREQUAL "armeabi-v7a")
+ ELSEIF(ANDROID_ABI STREQUAL "armeabi-v7a")
LIST(APPEND ANDROID_COMPILER_FLAGS
-march=armv7-a
-mfloat-abi=softfp)
@@ -156,6 +179,8 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0")
LIST(APPEND ANDROID_COMPILER_FLAGS -mfpu=vfpv3-d16)
ENDIF()
LIST(APPEND ANDROID_LINKER_FLAGS -Wl,--fix-cortex-a8)
+ ELSEIF(ANDROID_ABI STREQUAL "arm64-v8a")
+ LIST(APPEND ANDROID_COMPILER_FLAGS -march=armv8-a)
ENDIF()
IF(ANDROID_ABI MATCHES "^armeabi(-v7a)?$")
@@ -164,10 +189,18 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0")
ELSE()
LIST(APPEND ANDROID_COMPILER_FLAGS -mthumb)
ENDIF()
+ IF(ANDROID_TOOLCHAIN STREQUAL clang)
+ # Disable integrated-as for better compatibility.
+ LIST(APPEND ANDROID_COMPILER_FLAGS -fno-integrated-as)
+ ENDIF()
ENDIF()
- IF(ANDROID_ABI STREQUAL "arm64-v8a")
- LIST(APPEND ANDROID_COMPILER_FLAGS -march=armv8-a)
+ IF(ANDROID_TOOLCHAIN STREQUAL clang)
+ # CMake automatically forwards all compiler flags to the linker,
+ # and clang doesn't like having -Wa flags being used for linking.
+ # To prevent CMake from doing this would require meddling with
+ # the CMAKE_<LANG>_COMPILE_OBJECT rules, which would get quite messy.
+ LIST(APPEND ANDROID_LINKER_FLAGS -Qunused-arguments)
ENDIF()
STRING(REPLACE ";" " " ANDROID_COMPILER_FLAGS "${ANDROID_COMPILER_FLAGS}")
......
@@ -12,6 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+ IF(USE_EIGEN_FOR_BLAS)
+ return()
+ ENDIF(USE_EIGEN_FOR_BLAS)
INCLUDE(cblas)
IF(NOT ${CBLAS_FOUND})
......
@@ -86,12 +86,13 @@ def layer.fc(X):
We'd like to have Python bindings to operators in package `paddle.operator`, and Python compositions of operators in package `paddle.layer`. So we have the following concepts in above illustrative example:
```
| C++ functions/functors | mul | add | | |
+ |------------------------|--------------|--------------|-------------|----------|
| C++ operator class | mulOp | addOp | FCOp | |
| Python binding | operator.mul | operator.add | operator.fc | |
| Python function | | | | layer.fc |
```
This is how we differentiate layer and operators in PaddlePaddle:
......
- # Design Doc: Computations as Graphs
+ # Design Doc: Computations as a Graph
A primary goal of the refactorization of PaddlePaddle is a more flexible representation of deep learning computation, in particular, a graph of operators and variables, instead of sequences of layers as before.
@@ -8,6 +8,8 @@ This document explains that the construction of a graph as three steps:
- construct the backward part
- construct the optimization part
+ ## The Construction of a Graph
Let us take the problem of image classification as a simple example. The application program that trains the model looks like:
```python
@@ -25,7 +27,9 @@ The first four lines of above program build the forward part of the graph.
![](images/graph_construction_example_forward_only.png)
- In particular, the first line `x = layer.data("images")` creates variable x and a Feed operator that copies a column from the minibatch to x. `y = layer.fc(x)` creates not only the FC operator and output variable y, but also two parameters, W and b.
+ In particular, the first line `x = layer.data("images")` creates variable x and a Feed operator that copies a column from the minibatch to x. `y = layer.fc(x)` creates not only the FC operator and output variable y, but also two parameters, W and b, and the initialization operators.
Initialization operators are kind of "run-once" operators -- the `Run` method increments a class data member counter so to run at most once. By doing so, a parameter wouldn't be initialized repeatedly, say, in every minibatch.
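To make the "run-once" behavior concrete, here is a minimal sketch (not PaddlePaddle's actual implementation; the class name and counter are purely illustrative):

```cpp
#include <cstdint>

// Illustrative only: an initialization operator whose Run() counts its
// invocations and does the real work at most once.
class InitOpSketch {
 public:
  void Run() {
    if (run_count_++ > 0) return;  // already initialized; skip on later minibatches
    // ... fill the parameter tensor here, e.g. with random values ...
  }

 private:
  int64_t run_count_ = 0;
};
```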
In this example, all operators are created as `OpDesc` protobuf messages, and all variables are `VarDesc`. These protobuf messages are saved in a `BlockDesc` protobuf message.
@@ -49,3 +53,18 @@ According to the chain rule of gradient computation, `ConstructBackwardGraph` wo
For each parameter, like W and b created by `layer.fc`, marked as double circles in above graphs, `ConstructOptimizationGraph` creates an optimization operator to apply its gradient. Here results in the complete graph:
![](images/graph_construction_example_all.png)
## Block and Graph
The words block and graph are interchangeable in the design of PaddlePaddle. A [Block](https://github.com/PaddlePaddle/Paddle/pull/3708) is a metaphor for the code and local variables in a pair of curly braces in programming languages, where operators are like statements or instructions. A graph of operators and variables is a representation of the block.
A Block keeps operators in an array `BlockDesc::ops`
```protobuf
message BlockDesc {
repeated OpDesc ops = 1;
repeated VarDesc vars = 2;
}
```
in the order that they appear in user programs, like the Python program at the beginning of this article. We can imagine that in `ops`, we have some forward operators, followed by some gradient operators, and then some optimization operators.
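As a rough illustration of that ordering, the sketch below fills a `BlockDesc` through the C++ API that protoc would generate from the messages above; it assumes `OpDesc` carries a `type` string field, which is not shown in this snippet.

```cpp
#include "framework.pb.h"  // hypothetical name of the protoc-generated header

// Hypothetical: ops are appended in program order -- forward operators first,
// then the gradient operators, then the optimization operators.
BlockDesc MakeExampleBlock() {
  BlockDesc block;
  block.add_ops()->set_type("fc");        // forward
  block.add_ops()->set_type("mse");       // forward
  block.add_ops()->set_type("mse_grad");  // backward
  block.add_ops()->set_type("fc_grad");   // backward
  block.add_ops()->set_type("sgd");       // optimization
  return block;
}
```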
@@ -2,6 +2,8 @@ digraph ImageClassificationGraph {
///////// The forward part /////////
FeedX [label="Feed", color=blue, shape=box];
FeedY [label="Feed", color=blue, shape=box];
+ InitW [label="Init", color=blue, shape=diamond];
+ Initb [label="Init", color=blue, shape=diamond];
FC [label="FC", color=blue, shape=box];
MSE [label="MSE", color=blue, shape=box];
@@ -14,6 +16,8 @@ digraph ImageClassificationGraph {
FeedX -> x -> FC -> y -> MSE -> cost [color=blue];
FeedY -> l [color=blue];
+ InitW -> W [color=blue];
+ Initb -> b [color=blue];
W -> FC [color=blue];
b -> FC [color=blue];
l -> MSE [color=blue];
......
@@ -147,7 +147,7 @@ class CosineOp {
struct CosineOpProtoMaker : public OpProtoMaker {
CosineOpProtoMaker(OpProto* proto) : OpProtoMaker(proto) {
AddInput("input", "input of cosine op");
- AddAttr("scale", "scale of cosine op", float).Default(1.0).LargerThan(0.0);
+ AddAttr("scale", "scale of cosine op", float).Default(1.0).GreaterThan(0.0);
AddType("cos");
AddComment("This is cos op");
}
......
## Background
PaddlePaddle divides the description of neural network computation graph into two stages: compile time and runtime.
PaddlePaddle uses proto messages to describe the compile-time graph because:
1. The computation graph should be able to be saved to a file.
1. In distributed training, the graph will be serialized and sent to multiple workers.
The computation graph is constructed from Data Nodes and Operation Nodes. The concepts that represent them are listed in the table below.
| |compile time|runtime|
|---|---|---|
|Data|VarDesc(proto)|Variable(cpp)|
|Operation|OpDesc(proto)|Operator(cpp)|
## Definition of VarDesc
A VarDesc should have a name and a value; in PaddlePaddle, the value will always be a tensor. Since we use LoDTensor most of the time, we add a LoDTensorDesc to represent it.
```proto
message VarDesc {
required string name = 1;
optional LoDTensorDesc lod_tensor = 2;
}
```
## Definition of LoDTensorDesc
```proto
enum DataType {
BOOL = 0;
INT16 = 1;
INT32 = 2;
INT64 = 3;
FP16 = 4;
FP32 = 5;
FP64 = 6;
}
message LoDTensorDesc {
required DataType data_type = 1;
repeated int32 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480]
optional int32 lod_level = 3 [default=0];
}
```
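For concreteness, this is roughly how the two messages could be filled in through the protoc-generated C++ API (a sketch only; the header name and the bare enum/class names are assumptions about the generated code):

```cpp
#include "framework.pb.h"  // hypothetical generated header

VarDesc MakeImageDesc() {
  VarDesc var;
  var.set_name("image");
  LoDTensorDesc* tensor = var.mutable_lod_tensor();
  tensor->set_data_type(FP32);
  // [UNK, 640, 480] is stored as [-1, 640, 480].
  tensor->add_dims(-1);
  tensor->add_dims(640);
  tensor->add_dims(480);
  // lod_level keeps its default of 0 for a plain dense tensor.
  return var;
}
```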
## Definition of Variable in Python
In the Python API, a layer takes Variables as input and returns Variables as output. There should be a class `Variable` in Python to help create and manage Variables.
```python
image = Variable(dims=[-1, 640, 480])
# fc1 and fc2 are both Variable
fc1 = layer.fc(input=image, output_size=10)
fc2 = layer.fc(input=fc1, output_size=20)
```
### What should class `Variable` have
1. `name`. A name of string type is used to identify the value of the Variable.
1. `initializer`. Since our Tensor does not have a value initially, we will always use some Operator to fill it at run time. So we should have an initialize method to help add the init operator.
1. `operator`. A Variable should record which operator produces it. The reason is:
- we use `pd.eval(targets=[var1, var2])` to run the related ops to get the values of var1 and var2, and `var.op` is used to trace the dependencies of the current variable.
In PaddlePaddle, we use a Block to describe the computation graph, so in the code we will use Block rather than Graph.
```python
import VarDesc
import LoDTensorDesc
import framework
def AddInitialOperator(variable, initializer):
# add an initialize Operator to block to init this Variable
class Variable(object):
def __init__(self, name, dims, type, initializer):
self._block = get_default_block()
self._name = name
self.op = None
tensor_desc = LoDTensorDesc(data_type=type, dims=dims)
_var_desc = VarDesc(name=name, lod_tensor=tensor_desc)
self._var = framework.CreateVar(_var_desc)
self._block.add_var(self)
# add initial op according to initializer
if initializer is not None:
AddInitialOperator(self, initializer)
def dims(self):
return self._var.dims()
def data_type(self):
return self._var.data_type()
def to_proto(self):
pass
```
Then we can use this Variable to create a fc layer in Python.
```python
import paddle as pd
def flatten_size(X, num_flatten_dims):
prod = 1 # of last num_flatten_dims
for i in xrange(num_flatten_dims):
prod = prod * X.dims[-i-1]
return prod
def layer.fc(X, output_size, num_flatten_dims):
W = Variable(pd.random_uniform(), type=FP32, dims=[flatten_size(X, num_flatten_dims), output_size])
b = Variable(pd.random_uniform(), type=FP32, dims=[output_size])
out = Variable(type=FP32)
y = operator.fc(X, W, b, output=out) # fc will put fc op input into out
pd.InferShape(y)
return out
x = Variable(dims=[-1, 640, 480])
y = layer.fc(x, output_size=100)
z = layer.fc(y, output_size=200)
paddle.eval(targets=[z], ...)
print(z)
```
## How to use Eigen in Paddle

Essentially, a neural network is a computation graph. The data needed by the computation are stored in `Tensor`s, and the computation itself is described by `Operator`s. At execution time, an `Operator` calls the `Compute` interface of its corresponding `OpKernel` to operate on the `Tensor`s.

### The Eigen Tensor module

The Eigen Tensor module provides strong support for element-wise computation: the same code can run on both CPU and GPU. However, Eigen Tensor is a module still under development, so its tests may be incomplete and its documentation is sparse.

For a detailed introduction to the Eigen Tensor module, please refer to [document 1](https://github.com/RLovelett/eigen/blob/master/unsupported/Eigen/CXX11/src/Tensor/README.md) and [document 2](https://bitbucket.org/eigen/eigen/src/default/unsupported/Eigen/CXX11/src/Tensor/README.md).

### paddle::framework::Tensor

The Paddle Tensor is defined in the framework directory, and its main interface is as follows:
```cpp
class Tensor {
public:
/*! Return a pointer to mutable memory block. */
template <typename T>
inline T* data();
/**
* @brief Return a pointer to mutable memory block.
* @note If not exist, then allocation.
*/
template <typename T>
inline T* mutable_data(platform::Place place);
/**
* @brief Return a pointer to mutable memory block.
*
* @param[in] dims The dimensions of the memory block.
* @param[in] place The place of the memory block.
*
* @note If not exist, then allocation.
*/
template <typename T>
inline T* mutable_data(DDim dims, platform::Place place);
/*! Resize the dimensions of the memory block. */
inline Tensor& Resize(const DDim& dims);
/*! Return the dimensions of the memory block. */
inline const DDim& dims() const;
private:
/*! holds the memory block if allocated. */
std::shared_ptr<Placeholder> holder_;
/*! points to dimensions of memory block. */
DDim dim_;
};
```
The purpose of `Placeholder` is to defer memory allocation; that is, we can first define a Tensor, then set its size with the Resize interface, and finally call the mutable_data interface to allocate the actual memory.
```cpp
paddle::framework::Tensor t;
paddle::platform::CPUPlace place;
// set size first
t.Resize({2, 3});
// allocate memory on CPU later
t.mutable_data(place);
```
### Usage example of paddle::framework::Tensor

The following uses AddOp as an example to show how a Tensor is used.

- InferShape

When running the computation graph of a neural network, we first call the `InferShape` interface of each `Operator` to set the size of the output Tensors based on the sizes of the input Tensors; the `Resize` interface is called in this step.
```cpp
void InferShape(const framework::InferShapeContext &ctx) const override {
PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims(),
ctx.Input<Tensor>("Y")->dims(),
"Two input of Add Op's dimension must be same.");
ctx.Output<Tensor>("Out")->Resize(ctx.Input<Tensor>("X")->dims());
}
```
- Run
The `Run` interface of an `Operator` eventually calls the `Compute` interface of the corresponding `OpKernel`. This is where memory is actually allocated, and the `mutable_data` interface is called.
```cpp
void Compute(const framework::ExecutionContext& context) const override {
auto* input0 = context.Input<Tensor>("X");
auto* input1 = context.Input<Tensor>("Y");
auto* output = context.Output<Tensor>("Out");
output->mutable_data<T>(context.GetPlace());
auto x = EigenVector<T>::Flatten(*input0);
auto y = EigenVector<T>::Flatten(*input1);
auto z = EigenVector<T>::Flatten(*output);
auto place = context.GetEigenDevice<Place>();
z.device(place) = x + y;
}
```
### Converting paddle::framework::Tensor to EigenTensor

As shown in the previous section, in the actual computation we first need to convert the input and output Tensors into a format that Eigen supports. In [eigen.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/eigen.h) we provide a set of global functions that convert a paddle::framework::Tensor into an EigenTensor/EigenMatrix/EigenVector/EigenScalar.

Taking EigenTensor as an example:
```cpp
Tensor t;
float* p = t.mutable_data<float>(make_ddim({1, 2, 3}), platform::CPUPlace());
for (int i = 0; i < 1 * 2 * 3; i++) {
p[i] = static_cast<float>(i);
}
EigenTensor<float, 3>::Type et = EigenTensor<float, 3>::From(t);
```
From is an interface provided by the EigenTensor template that converts a paddle::framework::Tensor into an EigenTensor. Since the rank of the Tensor is a template parameter, it must be specified explicitly at conversion time.

In Eigen, Tensors of different ranks are different types, and a Vector is a rank-1 Tensor. Note in particular that the EigenVector<T>::From method maps a one-dimensional paddle Tensor to a one-dimensional Eigen Tensor, represented here as an EigenVector, while the EigenVector<T>::Flatten method reshapes a paddle Tensor of any rank, flattening it into a one-dimensional Eigen Tensor; the resulting type is still EigenVector.
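A small sketch contrasting the two (namespaces and includes omitted; it uses only the Tensor and eigen.h interfaces described in this document):

```cpp
// Flatten: a Tensor of any rank is reshaped into a rank-1 EigenVector.
Tensor t2d;
t2d.mutable_data<float>(make_ddim({2, 3}), platform::CPUPlace());
auto flat = EigenVector<float>::Flatten(t2d);  // 6 elements

// From: the Tensor must already be one-dimensional; no reshape happens.
Tensor t1d;
t1d.mutable_data<float>(make_ddim({6}), platform::CPUPlace());
auto vec = EigenVector<float>::From(t1d);      // 6 elements
```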
For more conversion methods, please refer to the [unit tests](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/eigen_test.cc) in eigen_test.cc.

### Implementing the computation

When the computation is to be carried out, the EigenTensor on the left-hand side of the assignment has to call the device interface. Note that operations between EigenTensors here only change the data inside the underlying Tensors; they do not change the shape information of the original Tensors.
```cpp
auto x = EigenVector<T>::Flatten(*input0);
auto y = EigenVector<T>::Flatten(*input1);
auto z = EigenVector<T>::Flatten(*output);
auto place = context.GetEigenDevice<Place>();
z.device(place) = x + y;
```
In this code, input0/input1/output can be Tensors of any rank. We call EigenVector's Flatten interface to turn a Tensor of arbitrary rank into a one-dimensional EigenVector, and after the computation finishes the original shape information of input0/input1/output is unchanged. If you want to change the shape of the original Tensor, you can call the Resize interface to do so.

Since the Eigen Tensor module has little documentation, we can refer to the computation code of the relevant `OpKernel`s in TensorFlow's [kernels](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/kernels) module.
@@ -18,14 +18,6 @@ limitations under the License. */
#ifndef __NVCC__
#include "paddle/math/MathFunctions.h"
#ifndef PADDLE_TYPE_DOUBLE
#define CBLAS_GEMM paddle::gemm<float>
#else
#define CBLAS_GEMM paddle::gemm<double>
#endif
template<class OpResetOutput>
void hl_naive_gru_forward_reset_output(OpResetOutput opResetOutput,
real *gateValue,
@@ -210,51 +202,6 @@ inline void forward_final_output(OpFinalOutput opFinalOutput,
}
}
template<class OpResetOutput, class OpFinalOutput>
void hl_cpu_gru_forward(OpResetOutput opResetOutput,
OpFinalOutput opFinalOutput,
hl_gru_value value,
int frameSize,
int batchSize,
hl_activation_mode_t active_node,
hl_activation_mode_t active_gate) {
if (value.prevOutValue) {
CBLAS_GEMM(CblasNoTrans,
CblasNoTrans,
batchSize,
2 * frameSize,
frameSize,
1,
value.prevOutValue,
frameSize,
value.gateWeight,
frameSize * 2,
1,
value.gateValue,
frameSize * 3);
}
forward_reset_output(opResetOutput, value, frameSize, batchSize, active_gate);
if (value.prevOutValue) {
CBLAS_GEMM(CblasNoTrans,
CblasNoTrans,
batchSize,
frameSize,
frameSize,
1,
value.resetOutputValue,
frameSize,
value.stateWeight,
frameSize,
1,
value.gateValue + frameSize * 2,
frameSize * 3);
}
forward_final_output(opFinalOutput, value, frameSize, batchSize, active_node);
}
template<class OpStateGrad>
void hl_naive_gru_backward_state_grad(OpStateGrad opStateGrad,
real *gateValue,
@@ -525,86 +472,6 @@ inline void backward_reset_grad(OpResetGrad opResetGrad,
}
}
template<class OpStateGrad, class OpResetGrad>
void hl_cpu_gru_backward(OpStateGrad opStateGrad,
OpResetGrad opResetGrad,
hl_gru_value value,
hl_gru_grad grad,
int frameSize,
int batchSize,
hl_activation_mode_t active_node,
hl_activation_mode_t active_gate) {
backward_state_grad(opStateGrad, value, grad,
frameSize, batchSize, active_node);
if (value.prevOutValue && grad.prevOutGrad) {
CBLAS_GEMM(CblasNoTrans,
CblasTrans,
batchSize,
frameSize,
frameSize,
1,
grad.gateGrad + frameSize * 2,
frameSize * 3,
value.stateWeight,
frameSize,
0,
grad.resetOutputGrad,
frameSize);
if (grad.stateWeightGrad) {
CBLAS_GEMM(CblasTrans,
CblasNoTrans,
frameSize,
frameSize,
batchSize,
1,
value.resetOutputValue,
frameSize,
grad.gateGrad + frameSize * 2,
frameSize * 3,
1,
grad.stateWeightGrad,
frameSize);
}
}
backward_reset_grad(opResetGrad, value, grad,
frameSize, batchSize, active_gate);
if (grad.prevOutGrad && value.prevOutValue) {
CBLAS_GEMM(CblasNoTrans,
CblasTrans,
batchSize,
frameSize,
frameSize * 2,
1,
grad.gateGrad,
frameSize * 3,
value.gateWeight,
frameSize * 2,
1,
grad.prevOutGrad,
frameSize);
if (grad.gateWeightGrad) {
CBLAS_GEMM(CblasTrans,
CblasNoTrans,
frameSize,
frameSize * 2,
batchSize,
1,
value.prevOutValue,
frameSize,
grad.gateGrad,
frameSize * 3,
1,
grad.gateWeightGrad,
frameSize * 2);
}
}
}
#endif
#endif // HL_CPU_GRU_CUH_
@@ -43,6 +43,10 @@ template <>
AttrType AttrTypeID<std::vector<std::string>>() {
return STRINGS;
}
template <>
AttrType AttrTypeID<std::vector<std::pair<int, int>>>() {
return INT_PAIRS;
}
Attribute GetAttrValue(const OpDesc::Attr& attr_desc) {
switch (attr_desc.type()) {
@@ -76,6 +80,14 @@ Attribute GetAttrValue(const OpDesc::Attr& attr_desc) {
}
return val;
}
case paddle::framework::AttrType::INT_PAIRS: {
std::vector<std::pair<int, int>> val(attr_desc.int_pairs_size());
for (int i = 0; i < attr_desc.int_pairs_size(); ++i) {
val[i].first = attr_desc.int_pairs(i).first();
val[i].second = attr_desc.int_pairs(i).second();
}
return val;
}
}
PADDLE_ENFORCE(false, "Unknown OpDesc::AttrDesc::type !");
return boost::blank();
......
@@ -28,7 +28,8 @@ namespace paddle {
namespace framework {
typedef boost::variant<boost::blank, int, float, std::string, std::vector<int>,
- std::vector<float>, std::vector<std::string>>
+ std::vector<float>, std::vector<std::string>,
+ std::vector<std::pair<int, int>>>
Attribute;
typedef std::unordered_map<std::string, Attribute> AttributeMap;
@@ -40,11 +41,23 @@ Attribute GetAttrValue(const OpDesc::Attr& attr_desc);
// check whether a value(attribute) fit a certain limit
template <typename T>
- class LargerThanChecker {
+ class GreaterThanChecker {
public:
- explicit LargerThanChecker(T lower_bound) : lower_bound_(lower_bound) {}
+ explicit GreaterThanChecker(T lower_bound) : lower_bound_(lower_bound) {}
void operator()(T& value) const {
- PADDLE_ENFORCE(value > lower_bound_, "larger_than check fail");
+ PADDLE_ENFORCE(value > lower_bound_, "larger_than check fails.");
}
private:
T lower_bound_;
};
template <typename T>
class EqualGreaterThanChecker {
public:
explicit EqualGreaterThanChecker(T lower_bound) : lower_bound_(lower_bound) {}
void operator()(T& value) const {
PADDLE_ENFORCE_GE(value, lower_bound_, "equal_larger_than check fails.");
}
private:
@@ -109,8 +122,13 @@ class TypedAttrChecker {
return *this;
}
- TypedAttrChecker& LargerThan(const T& lower_bound) {
+ TypedAttrChecker& GreaterThan(const T& lower_bound) {
- value_checkers_.push_back(LargerThanChecker<T>(lower_bound));
+ value_checkers_.push_back(GreaterThanChecker<T>(lower_bound));
return *this;
}
TypedAttrChecker& EqualGreaterThan(const T& lower_bound) {
value_checkers_.push_back(EqualGreaterThanChecker<T>(lower_bound));
return *this;
}
......
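A tiny usage sketch of the two renamed checkers (illustrative values only; both checkers report a failure through PADDLE_ENFORCE when the condition does not hold):

```cpp
// GreaterThanChecker: the value must be strictly greater than the bound.
paddle::framework::GreaterThanChecker<float> positive(0.0f);
float scale = 1.0f;
positive(scale);          // passes: 1.0 > 0.0

// EqualGreaterThanChecker: the value may also equal the bound.
paddle::framework::EqualGreaterThanChecker<int> non_negative(0);
int lod_level = 0;
non_negative(lod_level);  // passes: 0 >= 0
```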
@@ -2,20 +2,20 @@
## Motivation
- In Neural Network, the backpropagation algorithm follows the chain rule, so we need to compound the fundmental gradient operators/expressions together with chain rule . Every forward network need a backward network to construct the full computation graph, the operator/expression's backward pass will be generated respect to forward pass.
+ In Neural Network, the backpropagation algorithm follows the chain rule, so we need to compound the gradient operators/expressions together with the chain rule. Every forward network needs a backward network to construct the full computation graph, the operator/expression's backward pass will be generated respect to forward pass.
## Backward Operator Registry
- A backward network is built up with several backward operators. Backward operators take forward operators' inputs, outputs and output gradients and then calculate its input gradients.
+ A backward network is built up with several backward operators. Backward operators take forward operators' inputs outputs, and output gradients and then calculate its input gradients.
| | forward operator | backward operator
| ---------------------- | ---------------- |------------------------- |
| **Operator::inputs_** | Inputs | Inputs, Outputs, OutputGradients |
| **Operator::outputs_** | Outputs | InputGradients |
- In most cases, there is a one-to-one correspondence between forward and backward operators. These correspondences are recorded by a global hash map(`OpInfoMap`). To follow the philosophy of minimum core and make operators pluggable, the registry mechanism is introduced.
+ In most cases, there is a one-to-one correspondence between the forward and backward operators. These correspondences are recorded by a global hash map(`OpInfoMap`). To follow the philosophy of minimum core and make operators pluggable, the registry mechanism is introduced.
- For example, we have got a `mul_op`, and we can register it's information and corresponding backward operator by the following macro:
+ For example, we have got a `mul_op`, and we can register its information and corresponding backward operator by the following macro:
```cpp
REGISTER_OP(mul, MulOp, MulOpMaker, mul_grad, MulOpGrad);
@@ -27,17 +27,17 @@ REGISTER_OP(mul, MulOp, MulOpMaker, mul_grad, MulOpGrad);
## Backward Opeartor Creating
- Given a certain forward operator, we can get its corresponding backward opeartor by calling:
+ Given a certain forward operator, we can get its corresponding backward operator by calling:
```cpp
OperatorBase* bwd_op = BuildGradOp(const OperatorBase* fwd_op);
```
The function `BuildGradOp` will sequentially execute following processes:
1. Get the `type_` of given forward operator, and then get the corresponding backward operator's type by looking up the `OpInfoMap`.
- 2. Build two maps named `inputs` and `outputs` to temporary storage backward operator's inputs and outputs. Copy forward operator's `inputs_` and `outputs_` to map `inputs`, except these are not necessary for gradient computing.
+ 2. Build two maps named `inputs` and `outputs` to temporary storage backward operator's inputs and outputs. Copy forward operator's `inputs_` and `outputs_` to map `inputs`, except these, are not necessary for gradient computing.
3. Add forward inputs' gradient variables into map `output`, adding forward outputs' gradient variables into map `input`.
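Schematically, the three steps amount to something like the sketch below. The helper names (`VarNameMap`, `Merge`, `AppendGradNames`, `LookUpGradOpType`, `CreateOperator`) are invented stand-ins for illustration, not the real API:

```cpp
// Pseudocode-level sketch of what BuildGradOp does conceptually.
OperatorBase* BuildGradOpSketch(const OperatorBase* fwd_op) {
  // 1. Look up the backward operator type registered for this forward type.
  const std::string grad_type = LookUpGradOpType(fwd_op->Type());

  // 2. The backward op sees the forward inputs and outputs (minus anything
  //    not needed for gradient computation) ...
  VarNameMap inputs = fwd_op->Inputs();
  Merge(&inputs, fwd_op->Outputs());

  // 3. ... plus the gradients of the forward outputs as extra inputs, while
  //    its outputs are the gradients of the forward inputs.
  Merge(&inputs, AppendGradNames(fwd_op->Outputs()));
  VarNameMap outputs = AppendGradNames(fwd_op->Inputs());

  return CreateOperator(grad_type, inputs, outputs, fwd_op->Attrs());
}
```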
@@ -49,31 +49,31 @@ A backward network is a series of backward operators. The main idea of building
In our design, the network itself is also a kind of operator. So the operators contained by a big network may be some small network.
- given a forward network, it generates the backward network. We only care about the Gradients—`OutputGradients`,`InputGradients`.
+ given a forward network, it generates the backward network. We only care about the Gradients—`OutputGradients`, `InputGradients`.
1. Op
- when the input forward network is a Op, return its gradient Operator Immediately.
+ when the input forward network is an Op, return its gradient Operator Immediately.
2. NetOp
- when the input forward network is a NetOp, it need to call the sub NetOp/Operators backward function recursively. During the process, we need to collect the `OutputGradients` name according to forward NetOp.
+ when the input forward network is a NetOp, it needs to call the sub NetOp/Operators backward function recursively. During the process, we need to collect the `OutputGradients` name according to the forward NetOp.
- **shared variable**. As illustrated in the pictures, two operator's `Output` `Gradient` will overwirte their shared input variable.
+ **shared variable**. As illustrated in the pictures, two operator's `Output` `Gradient` will overwrite their shared input variable.
<p align="center">
- <img src="./images/duplicate_op.png" width="70%" ><br/>
+ <img src="./images/duplicate_op.png" width="50%" ><br/>
- 1. shared variable in two operators.
+ 1. Shared variable in operators.
</p>
- Share variable between operators or same input variable used in multiple operators lead to a duplicate gradient variable. As demo show above, we need to rename gradient name recursively, and add a generic add operator replace the overwirte links.
+ Share variable between operators or same input variable used in multiple operators leads to a duplicate gradient variable. As demo show above, we need to rename gradient name recursively and add a generic add operator replace the overwrite links.
<p align="center">
- <img src="images/duplicate_op2.png" width="90%" ><br/>
+ <img src="images/duplicate_op2.png" width="50%" ><br/>
- 2. replace shared variable gradient with `Add` Operator
+ 2. Replace shared variable's gradient with `Add` operator.
</p>
......
@@ -21,16 +21,16 @@ namespace framework {
/// @cond HIDDEN
template <int i>
- Dim<i> make_dim(const int* d) {
+ Dim<i> make_dim(const int64_t* d) {
return Dim<i>(*d, make_dim<i - 1>(d + 1));
}
template <>
- Dim<1> make_dim<1>(const int* d) {
+ Dim<1> make_dim<1>(const int64_t* d) {
return Dim<1>(*d);
}
- void make_ddim(DDim& ddim, const int* dims, int n) {
+ void make_ddim(DDim& ddim, const int64_t* dims, int n) {
switch (n) {
case 1:
ddim = make_dim<1>(dims);
@@ -67,13 +67,13 @@ void make_ddim(DDim& ddim, const int* dims, int n) {
/// @endcond
- DDim make_ddim(std::initializer_list<int> dims) {
+ DDim make_ddim(std::initializer_list<int64_t> dims) {
DDim result(make_dim(0));
make_ddim(result, dims.begin(), dims.size());
return result;
}
- DDim make_ddim(const std::vector<int>& dims) {
+ DDim make_ddim(const std::vector<int64_t>& dims) {
DDim result(make_dim(0));
make_ddim(result, &dims[0], dims.size());
return result;
@@ -81,12 +81,12 @@ DDim make_ddim(const std::vector<int>& dims) {
/// @cond HIDDEN
// XXX For some reason, putting this in an anonymous namespace causes errors
- class DynamicMutableIndexer : public boost::static_visitor<int&> {
+ class DynamicMutableIndexer : public boost::static_visitor<int64_t&> {
public:
explicit DynamicMutableIndexer(int idx) : idx_(idx) {}
template <int D>
- int& operator()(Dim<D>& dim) const {
+ int64_t& operator()(Dim<D>& dim) const {
return dim[idx_];
}
@@ -94,12 +94,12 @@ class DynamicMutableIndexer : public boost::static_visitor<int&> {
int idx_;
};
- class DynamicConstIndexer : public boost::static_visitor<int> {
+ class DynamicConstIndexer : public boost::static_visitor<int64_t> {
public:
explicit DynamicConstIndexer(int idx) : idx_(idx) {}
template <int D>
- int operator()(const Dim<D>& dim) const {
+ int64_t operator()(const Dim<D>& dim) const {
return dim[idx_];
}
@@ -109,22 +109,22 @@ class DynamicConstIndexer : public boost::static_visitor<int> {
/// @endcond
- int& DDim::operator[](int idx) {
+ int64_t& DDim::operator[](int idx) {
return boost::apply_visitor(DynamicMutableIndexer(idx), var);
}
- int DDim::operator[](int idx) const {
+ int64_t DDim::operator[](int idx) const {
return boost::apply_visitor(DynamicConstIndexer(idx), var);
}
- ssize_t DDim::size() const { return arity(*this); }
+ int64_t DDim::size() const { return arity(*this); }
bool DDim::operator==(DDim d) const {
if (var.which() != d.getVar().which()) {
return false;
} else {
- std::vector<int> v1 = vectorize(*this);
- std::vector<int> v2 = vectorize(d);
+ std::vector<int64_t> v1 = vectorize(*this);
+ std::vector<int64_t> v2 = vectorize(d);
for (unsigned int i = 0; i < v1.size(); i++) {
if (v1[i] != v2[i]) {
@@ -139,10 +139,10 @@ bool DDim::operator==(DDim d) const {
bool DDim::operator!=(DDim d) const { return !(*this == d); }
DDim DDim::operator+(DDim d) const {
- std::vector<int> v1 = vectorize(*this);
- std::vector<int> v2 = vectorize(d);
- std::vector<int> v3;
+ std::vector<int64_t> v1 = vectorize(*this);
+ std::vector<int64_t> v2 = vectorize(d);
+ std::vector<int64_t> v3;
assert(v1.size() == v2.size());
@@ -154,10 +154,10 @@ DDim DDim::operator+(DDim d) const {
}
DDim DDim::operator*(DDim d) const {
- std::vector<int> v1 = vectorize(*this);
- std::vector<int> v2 = vectorize(d);
- std::vector<int> v3;
+ std::vector<int64_t> v1 = vectorize(*this);
+ std::vector<int64_t> v2 = vectorize(d);
+ std::vector<int64_t> v3;
assert(v1.size() == v2.size());
@@ -168,15 +168,15 @@ DDim DDim::operator*(DDim d) const {
return make_ddim(v3);
}
- int get(const DDim& ddim, int idx) { return ddim[idx]; }
+ int64_t get(const DDim& ddim, int idx) { return ddim[idx]; }
void set(DDim& ddim, int idx, int value) { ddim[idx] = value; }
/// @cond HIDDEN
struct VectorizeVisitor : public boost::static_visitor<> {
- std::vector<int>& vector;
+ std::vector<int64_t>& vector;
- explicit VectorizeVisitor(std::vector<int>& v) : vector(v) {}
+ explicit VectorizeVisitor(std::vector<int64_t>& v) : vector(v) {}
template <typename T>
void operator()(const T& t) {
@@ -188,31 +188,31 @@ struct VectorizeVisitor : public boost::static_visitor<> {
};
/// @endcond
- std::vector<int> vectorize(const DDim& ddim) {
- std::vector<int> result;
+ std::vector<int64_t> vectorize(const DDim& ddim) {
+ std::vector<int64_t> result;
VectorizeVisitor visitor(result);
boost::apply_visitor(visitor, ddim);
return result;
}
- struct ProductVisitor : public boost::static_visitor<ssize_t> {
+ struct ProductVisitor : public boost::static_visitor<int64_t> {
template <int D>
- ssize_t operator()(const Dim<D>& dim) {
+ int64_t operator()(const Dim<D>& dim) {
return product(dim);
}
};
- ssize_t product(const DDim& ddim) {
+ int64_t product(const DDim& ddim) {
ProductVisitor visitor;
return boost::apply_visitor(visitor, ddim);
}
struct SliceVectorizeVisitor : public boost::static_visitor<> {
- std::vector<int>& vector;
+ std::vector<int64_t>& vector;
int begin;
int end;
- SliceVectorizeVisitor(std::vector<int>& v, int b, int e)
+ SliceVectorizeVisitor(std::vector<int64_t>& v, int b, int e)
: vector(v), begin(b), end(e) {
PADDLE_ENFORCE(begin < end,
"Begin index must be less than end index in ddim slice.");
@@ -240,7 +240,7 @@ struct SliceVectorizeVisitor : public boost::static_visitor<> {
};
DDim slice_ddim(const DDim& dim, int begin, int end) {
- std::vector<int> vec;
+ std::vector<int64_t> vec;
vec.reserve(end - begin);
SliceVectorizeVisitor visitor(vec, begin, end);
boost::apply_visitor(visitor, dim);
@@ -280,8 +280,17 @@ std::ostream& operator<<(std::ostream& os, const DDim& ddim) {
return os;
}
- DDim::DDim(std::initializer_list<int> init_list) {
+ DDim::DDim(std::initializer_list<int64_t> init_list) {
*this = make_ddim(init_list);
}
+ DDim flatten_to_2d(const DDim& src, int num_col_dims) {
+ int rank = src.size();
+ return make_ddim({product(slice_ddim(src, 0, num_col_dims)),
+ product(slice_ddim(src, num_col_dims, rank))});
+ }
+ DDim flatten_to_1d(const DDim& src) { return make_ddim({product(src)}); }
} // namespace framework
} // namespace paddle
@@ -40,7 +40,7 @@ struct DDim {
template <int D>
explicit DDim(const Dim<D>& in) : var(in) {}
- /*implicit*/ DDim(std::initializer_list<int> init_list);
+ /*implicit*/ DDim(std::initializer_list<int64_t> init_list);
template <int D>
DDim& operator=(const Dim<D>& in) {
@@ -48,8 +48,8 @@ struct DDim {
return *this;
}
- int& operator[](int idx);
- int operator[](int idx) const;
+ int64_t& operator[](int idx);
+ int64_t operator[](int idx) const;
template <typename Visitor>
typename Visitor::result_type apply_visitor(Visitor& visitor) {
@@ -71,15 +71,15 @@ struct DDim {
DDim operator*(DDim d) const;
- ssize_t size() const;
+ int64_t size() const;
};
/**
- * \brief Make a DDim from std::vector<int>
+ * \brief Make a DDim from std::vector<int64_t>
*
* \param dims An vector of ints. Must be sized between [1, 9]
*/
- DDim make_ddim(const std::vector<int>& dims);
+ DDim make_ddim(const std::vector<int64_t>& dims);
/**
* \brief Make a DDim from an initializer list
@@ -87,14 +87,14 @@ DDim make_ddim(const std::vector<int>& dims);
* \param dims An initializer list of ints. Must be sized between [1, 9]
*
*/
- DDim make_ddim(std::initializer_list<int> dims);
+ DDim make_ddim(std::initializer_list<int64_t> dims);
- int get(const DDim& dim, int idx);
+ int64_t get(const DDim& dim, int idx);
void set(DDim& dim, int idx, int val);
- std::vector<int> vectorize(const DDim& ddim);
+ std::vector<int64_t> vectorize(const DDim& ddim);
- ssize_t product(const DDim& ddim);
+ int64_t product(const DDim& ddim);
/**
* \brief Slice a ddim
@@ -115,6 +115,12 @@ int arity(const DDim& ddim);
std::ostream& operator<<(std::ostream&, const DDim&);
// Reshape a tensor to a matrix. The matrix's first dimension(column length)
// will be the product of tensor's first `num_col_dims` dimensions.
DDim flatten_to_2d(const DDim& src, int num_col_dims);
DDim flatten_to_1d(const DDim& src);
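A quick worked example of the two helpers (values computed by hand from the rule in the comment above; namespaces omitted):

```cpp
DDim d = make_ddim({2, 3, 4, 5});

DDim m = flatten_to_2d(d, 2);  // first 2 dims merged: {2 * 3, 4 * 5} == {6, 20}
DDim v = flatten_to_1d(d);     // all dims merged:     {2 * 3 * 4 * 5} == {120}
```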
} // namespace framework
} // namespace paddle
......
@@ -12,7 +12,7 @@ TEST(DDim, Equality) {
EXPECT_EQ(ddim[2], 5);
// construct a DDim from a vector
- std::vector<int> vec({9, 1, 5});
+ std::vector<int64_t> vec({9, 1, 5});
paddle::framework::DDim vddim = paddle::framework::make_ddim(vec);
EXPECT_EQ(ddim[0], 9);
EXPECT_EQ(ddim[1], 1);
@@ -25,7 +25,7 @@ TEST(DDim, Equality) {
EXPECT_EQ(paddle::framework::get(ddim, 0), 6);
// vectorize a DDim
- std::vector<int> res_vec = paddle::framework::vectorize(vddim);
+ std::vector<int64_t> res_vec = paddle::framework::vectorize(vddim);
EXPECT_EQ(res_vec[0], 9);
EXPECT_EQ(res_vec[1], 1);
EXPECT_EQ(res_vec[2], 5);
......
@@ -17,13 +17,13 @@ struct Dim {
static constexpr int dimensions = i;
template <typename... Args>
- HOSTDEVICE Dim(int _head, Args... _tail) : head(_head), tail(_tail...) {
+ HOSTDEVICE Dim(int64_t _head, Args... _tail) : head(_head), tail(_tail...) {
static_assert(sizeof...(_tail) == i - 1,
"Dim initialized with the wrong number of parameters");
}
HOSTDEVICE
- Dim(int _head, const Dim<i - 1>& _tail) : head(_head), tail(_tail) {}
+ Dim(int64_t _head, const Dim<i - 1>& _tail) : head(_head), tail(_tail) {}
HOSTDEVICE
Dim() : head(0), tail() {}
@@ -31,12 +31,12 @@ struct Dim {
/** Construct a Dim from a linear index and size. Uses Fortran order
* indexing. */
HOSTDEVICE
- Dim(int idx, const Dim<i>& size)
+ Dim(int64_t idx, const Dim<i>& size)
: head(idx % size.head), tail(idx / size.head, size.tail) {}
/** Construct a Dim with each dimension set to the given index */
HOSTDEVICE
- Dim(int idx) : head(idx), tail(idx) {}
+ Dim(int64_t idx) : head(idx), tail(idx) {}
HOSTDEVICE
bool operator==(const Dim<i>& o) const {
@@ -47,13 +47,13 @@ struct Dim {
bool operator!=(const Dim<i>& o) const { return !(*this == o); }
HOSTDEVICE
- int& operator[](int idx);
+ int64_t& operator[](int idx);
HOSTDEVICE
- int operator[](int idx) const;
+ int64_t operator[](int idx) const;
HOST std::string to_string() const;
- int head;
+ int64_t head;
Dim<i - 1> tail;
};
@@ -63,7 +63,7 @@ struct Dim<1> {
static constexpr int dimensions = 1;
HOSTDEVICE
- Dim(int _head) : head(_head) {}
+ Dim(int64_t _head) : head(_head) {}
HOSTDEVICE
Dim() : head(0) {}
@@ -86,11 +86,11 @@ struct Dim<1> {
bool operator!=(const Dim<1>& o) const { return !(*this == o); }
HOSTDEVICE
- int& operator[](int idx);
+ int64_t& operator[](int idx);
HOSTDEVICE
- int operator[](int idx) const;
+ int64_t operator[](int idx) const;
- int head;
+ int64_t head;
};
namespace {
@@ -100,12 +100,12 @@ template <int i>
struct DimGetter {
// Return a copy if Dim is const
template <typename D>
- HOSTDEVICE static int impl(const D& d) {
+ HOSTDEVICE static int64_t impl(const D& d) {
return DimGetter<i - 1>::impl(d.tail);
}
// Return a reference if Dim is mutable
template <typename D>
- HOSTDEVICE static int& impl(D& d) {
+ HOSTDEVICE static int64_t& impl(D& d) {
return DimGetter<i - 1>::impl(d.tail);
}
};
@@ -115,18 +115,18 @@ template <>
struct DimGetter<0> {
// Return a copy if Dim is const
template <typename D>
- HOSTDEVICE static int impl(const D& d) {
+ HOSTDEVICE static int64_t impl(const D& d) {
return d.head;
}
// Return a reference if Dim is mutable
template <typename D>
- HOSTDEVICE static int& impl(D& d) {
+ HOSTDEVICE static int64_t& impl(D& d) {
return d.head;
}
};
template <int D>
- HOSTDEVICE int& indexer(Dim<D>& dim, int idx) {
+ HOSTDEVICE int64_t& indexer(Dim<D>& dim, int idx) {
#ifndef __CUDA_ARCH__
if (idx < 0) {
throw std::invalid_argument("Tried to access a negative dimension");
@@ -141,7 +141,7 @@ HOSTDEVICE int& indexer(Dim<D>& dim, int idx) {
}
template <>
- HOSTDEVICE int& indexer<1>(Dim<1>& dim, int idx) {
+ HOSTDEVICE int64_t& indexer<1>(Dim<1>& dim, int idx) {
#ifndef __CUDA_ARCH__
if (idx != 0) {
throw std::invalid_argument("Invalid index");
@@ -153,7 +153,7 @@ HOSTDEVICE int& indexer<1>(Dim<1>& dim, int idx) {
}
template <int D>
- HOSTDEVICE int indexer(const Dim<D>& dim, int idx) {
+ HOSTDEVICE int64_t indexer(const Dim<D>& dim, int idx) {
#ifndef __CUDA_ARCH__
if (idx < 0) {
throw std::invalid_argument("Tried to access a negative dimension");
@@ -168,7 +168,7 @@ HOSTDEVICE int indexer(const Dim<D>& dim, int idx) {
}
template <>
- HOSTDEVICE int indexer<1>(const Dim<1>& dim, int idx) {
+ HOSTDEVICE int64_t indexer<1>(const Dim<1>& dim, int idx) {
#ifndef __CUDA_ARCH__
if (idx != 0) {
throw std::invalid_argument("Invalid index");
@@ -182,73 +182,76 @@ HOSTDEVICE int indexer<1>(const Dim<1>& dim, int idx) {
} // namespace
// Static access to constant Dim
template <int i, int l>
- HOSTDEVICE int get(const Dim<l>& d) {
+ HOSTDEVICE int64_t get(const Dim<l>& d) {
return DimGetter<i>::impl(d);
}
// Static access to mutable Dim
template <int i, int l>
- HOSTDEVICE int& get(Dim<l>& d) {
+ HOSTDEVICE int64_t& get(Dim<l>& d) {
return DimGetter<i>::impl(d);
}
// Dynamic access to constant Dim
template <int l>
- HOSTDEVICE int Dim<l>::operator[](int i) const {
+ HOSTDEVICE int64_t Dim<l>::operator[](int i) const {
return indexer(*this, i);
}
// Dynamic access to mutable Dim
template <int l>
- HOSTDEVICE int& Dim<l>::operator[](int i) {
+ HOSTDEVICE int64_t& Dim<l>::operator[](int i) {
return indexer(*this, i);
} }
// Dynamic access to constant Dim // Dynamic access to constant Dim
inline HOSTDEVICE int Dim<1>::operator[](int i) const { inline HOSTDEVICE int64_t Dim<1>::operator[](int i) const {
return indexer(*this, i); return indexer(*this, i);
} }
// Dynamic access to mutable Dim // Dynamic access to mutable Dim
inline HOSTDEVICE int& Dim<1>::operator[](int i) { return indexer(*this, i); } inline HOSTDEVICE int64_t& Dim<1>::operator[](int i) {
return indexer(*this, i);
}
// Dynamic access to constant Dim // Dynamic access to constant Dim
// without std::enable_if will try to instantiate this on get<0>(d) // without std::enable_if will try to instantiate this on get<0>(d)
template <int l> template <int l>
HOSTDEVICE typename std::enable_if<(l > 0), int>::type get(const Dim<l>& d, HOSTDEVICE typename std::enable_if<(l > 0), int64_t>::type get(const Dim<l>& d,
int i) { int i) {
return d[i]; return d[i];
} }
// Dynamic access to mutable Dim // Dynamic access to mutable Dim
template <int l> template <int l>
HOSTDEVICE typename std::enable_if<(l > 0), int&>::type get(Dim<l>& d, int i) { HOSTDEVICE typename std::enable_if<(l > 0), int64_t&>::type get(Dim<l>& d,
int i) {
return d[i]; return d[i];
} }
// Dot product of two dims // Dot product of two dims
template <int i> template <int i>
HOSTDEVICE int linearize(const Dim<i>& a, const Dim<i>& b) { HOSTDEVICE int64_t linearize(const Dim<i>& a, const Dim<i>& b) {
return a.head * b.head + linearize(a.tail, b.tail); return a.head * b.head + linearize(a.tail, b.tail);
} }
// Base case dot product of two Dims // Base case dot product of two Dims
// Notice it is inline because it is no longer a template // Notice it is inline because it is no longer a template
template <> template <>
HOSTDEVICE inline int linearize(const Dim<1>& a, const Dim<1>& b) { HOSTDEVICE inline int64_t linearize(const Dim<1>& a, const Dim<1>& b) {
return a.head * b.head; return a.head * b.head;
} }
// Product of a Dim // Product of a Dim
template <int i> template <int i>
HOSTDEVICE int product(const Dim<i>& a, int prod = 1) { HOSTDEVICE int64_t product(const Dim<i>& a, int prod = 1) {
return prod * a.head * product(a.tail); return prod * a.head * product(a.tail);
} }
// Base case product of a Dim // Base case product of a Dim
// Notice it is inline because it is no longer a template // Notice it is inline because it is no longer a template
template <> template <>
HOSTDEVICE inline int product(const Dim<1>& a, int prod) { HOSTDEVICE inline int64_t product(const Dim<1>& a, int prod) {
return prod * a.head; return prod * a.head;
} }
......
...@@ -8,7 +8,7 @@ __global__ void test(paddle::framework::Dim<2>* o) { ...@@ -8,7 +8,7 @@ __global__ void test(paddle::framework::Dim<2>* o) {
o[0] = paddle::framework::make_dim(5, 6); o[0] = paddle::framework::make_dim(5, 6);
} }
__global__ void dyn_idx_gpu(int* o) { __global__ void dyn_idx_gpu(int64_t* o) {
auto d = paddle::framework::make_dim(5, 6); auto d = paddle::framework::make_dim(5, 6);
o[0] = d[1]; o[0] = d[1];
} }
...@@ -47,9 +47,9 @@ TEST(Dim, Equality) { ...@@ -47,9 +47,9 @@ TEST(Dim, Equality) {
EXPECT_EQ(b[1], 11); EXPECT_EQ(b[1], 11);
// dynamic access on GPU // dynamic access on GPU
thrust::device_vector<int> r(1); thrust::device_vector<int64_t> r(1);
dyn_idx_gpu<<<1, 1>>>(thrust::raw_pointer_cast(r.data())); dyn_idx_gpu<<<1, 1>>>(thrust::raw_pointer_cast(r.data()));
int res = r[0]; int64_t res = r[0];
EXPECT_EQ(res, 6); EXPECT_EQ(res, 6);
// ex_prefix_mul // ex_prefix_mul
......
...@@ -28,7 +28,7 @@ struct EigenDim { ...@@ -28,7 +28,7 @@ struct EigenDim {
static Type From(const DDim& dims) { static Type From(const DDim& dims) {
PADDLE_ENFORCE(arity(dims) == D, "D must match arity(DDim)"); PADDLE_ENFORCE(arity(dims) == D, "D must match arity(DDim)");
Type ret; Type ret;
for (int d = 0; d < arity(dims); d++) { for (int64_t d = 0; d < arity(dims); d++) {
ret[d] = dims[d]; ret[d] = dims[d];
} }
return ret; return ret;
...@@ -63,20 +63,35 @@ struct EigenTensor { ...@@ -63,20 +63,35 @@ struct EigenTensor {
template <typename T, int MajorType = Eigen::RowMajor, template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex> typename IndexType = Eigen::DenseIndex>
struct EigenMatrix : public EigenTensor<T, 2, MajorType, IndexType> {}; struct EigenMatrix : public EigenTensor<T, 2, MajorType, IndexType> {
static typename EigenMatrix::Type Reshape(Tensor& tensor, int num_col_dims) {
int rank = tensor.dims_.size();
PADDLE_ENFORCE(num_col_dims > 0 && num_col_dims < rank,
"`num_col_dims` must be between (0, rank_of_tensor).");
return EigenMatrix::From(tensor,
flatten_to_2d(tensor.dims(), num_col_dims));
}
static typename EigenMatrix::ConstType Reshape(const Tensor& tensor,
int num_col_dims) {
int rank = tensor.dims_.size();
PADDLE_ENFORCE(num_col_dims > 0 && num_col_dims < rank,
"`num_col_dims` must be between (0, rank_of_tensor).");
return EigenMatrix::From(tensor,
flatten_to_2d(tensor.dims(), num_col_dims));
}
};
template <typename T, int MajorType = Eigen::RowMajor, template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex> typename IndexType = Eigen::DenseIndex>
struct EigenVector : public EigenTensor<T, 1, MajorType, IndexType> { struct EigenVector : public EigenTensor<T, 1, MajorType, IndexType> {
// Flatten reshapes a Tensor into an EigenVector. // Flatten reshapes a Tensor into an EigenVector.
static typename EigenVector::Type Flatten(Tensor& tensor) { static typename EigenVector::Type Flatten(Tensor& tensor) {
return EigenVector::From( return EigenVector::From(tensor, {product(tensor.dims_)});
tensor, make_ddim({static_cast<int>(product(tensor.dims_))}));
} }
static typename EigenVector::ConstType Flatten(const Tensor& tensor) { static typename EigenVector::ConstType Flatten(const Tensor& tensor) {
return EigenVector::From( return EigenVector::From(tensor, {product(tensor.dims_)});
tensor, make_ddim({static_cast<int>(product(tensor.dims_))}));
} }
}; };
......
...@@ -108,5 +108,24 @@ TEST(Eigen, Matrix) { ...@@ -108,5 +108,24 @@ TEST(Eigen, Matrix) {
} }
} }
TEST(Eigen, MatrixReshape) {
Tensor t;
float* p = t.mutable_data<float>({2, 3, 6, 4}, platform::CPUPlace());
for (int i = 0; i < 2 * 3 * 6 * 4; ++i) {
p[i] = static_cast<float>(i);
}
EigenMatrix<float>::Type em = EigenMatrix<float>::Reshape(t, 2);
ASSERT_EQ(2 * 3, em.dimension(0));
ASSERT_EQ(6 * 4, em.dimension(1));
for (int i = 0; i < 2 * 3; i++) {
for (int j = 0; j < 6 * 4; j++) {
ASSERT_NEAR(i * 6 * 4 + j, em(i, j), 1e-6f);
}
}
}
} // namespace framework } // namespace framework
} // namespace paddle } // namespace paddle
...@@ -22,8 +22,14 @@ enum AttrType { ...@@ -22,8 +22,14 @@ enum AttrType {
INTS = 3; INTS = 3;
FLOATS = 4; FLOATS = 4;
STRINGS = 5; STRINGS = 5;
INT_PAIRS = 6;
} }
message IntPair {
required int32 first = 1;
required int32 second = 2;
};
// OpDesc describes an instance of a C++ framework::OperatorBase // OpDesc describes an instance of a C++ framework::OperatorBase
// derived class type. // derived class type.
message OpDesc { message OpDesc {
...@@ -37,6 +43,7 @@ message OpDesc { ...@@ -37,6 +43,7 @@ message OpDesc {
repeated int32 ints = 6; repeated int32 ints = 6;
repeated float floats = 7; repeated float floats = 7;
repeated string strings = 8; repeated string strings = 8;
repeated IntPair int_pairs = 9;
}; };
message Var { message Var {
...@@ -80,3 +87,24 @@ message OpProto { ...@@ -80,3 +87,24 @@ message OpProto {
repeated Attr attrs = 4; repeated Attr attrs = 4;
required string comment = 5; required string comment = 5;
} }
enum DataType {
BOOL = 0;
INT16 = 1;
INT32 = 2;
INT64 = 3;
FP16 = 4;
FP32 = 5;
FP64 = 6;
}
message LoDTensorDesc {
required DataType data_type = 1;
repeated int32 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480]
optional int32 lod_level = 3 [ default = 0 ];
}
message VarDesc {
required string name = 1;
optional LoDTensorDesc lod_tensor = 2;
}
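As an illustration of how these descriptor messages fit together, the sketch below fills in a `VarDesc` for the `[-1, 640, 480]` example mentioned in the comment above, using the accessors that protoc generates for these fields. The generated header path and the C++ namespace (`paddle::framework`) are assumptions here; they depend on the .proto file name and package declaration, which are not shown in this diff.

```c++
#include <iostream>

#include "paddle/framework/framework.pb.h"  // assumed location of the generated header

int main() {
  namespace pb = paddle::framework;  // assumed package of the .proto

  pb::VarDesc var;
  var.set_name("image");  // hypothetical variable name

  // Describe a float32 LoD tensor whose batch dimension is unknown:
  // [UNK, 640, 480] is stored as [-1, 640, 480].
  pb::LoDTensorDesc* desc = var.mutable_lod_tensor();
  desc->set_data_type(pb::FP32);
  desc->add_dims(-1);
  desc->add_dims(640);
  desc->add_dims(480);
  desc->set_lod_level(0);  // a plain tensor carries no sequence information

  std::cout << var.DebugString();
  return 0;
}
```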
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
#include "paddle/framework/op_registry.h" #include "paddle/framework/op_registry.h"
#include "paddle/framework/operator.h" #include "paddle/framework/operator.h"
USE_OP(add_two); USE_OP(add);
namespace paddle { namespace paddle {
namespace framework { namespace framework {
...@@ -41,7 +41,7 @@ namespace f = paddle::framework; ...@@ -41,7 +41,7 @@ namespace f = paddle::framework;
TEST(GradOpBuilder, AddTwo) { TEST(GradOpBuilder, AddTwo) {
std::shared_ptr<f::OperatorBase> add_op(f::OpRegistry::CreateOp( std::shared_ptr<f::OperatorBase> add_op(f::OpRegistry::CreateOp(
"add_two", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {})); "add", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {}));
std::shared_ptr<f::OperatorBase> grad_add_op = std::shared_ptr<f::OperatorBase> grad_add_op =
f::OpRegistry::CreateGradOp(*add_op); f::OpRegistry::CreateGradOp(*add_op);
EXPECT_EQ(grad_add_op->Inputs().size(), 4UL); EXPECT_EQ(grad_add_op->Inputs().size(), 4UL);
......
...@@ -19,8 +19,8 @@ ...@@ -19,8 +19,8 @@
namespace paddle { namespace paddle {
namespace framework { namespace framework {
LOD SliceLevels(const LOD& in, size_t level_begin, size_t level_end) { LoD SliceLevels(const LoD& in, size_t level_begin, size_t level_end) {
LOD new_lod; LoD new_lod;
new_lod.reserve(level_end - level_begin); new_lod.reserve(level_end - level_begin);
for (size_t i = level_begin; i < level_end; i++) { for (size_t i = level_begin; i < level_end; i++) {
new_lod.emplace_back(in.at(i)); new_lod.emplace_back(in.at(i));
...@@ -28,10 +28,10 @@ LOD SliceLevels(const LOD& in, size_t level_begin, size_t level_end) { ...@@ -28,10 +28,10 @@ LOD SliceLevels(const LOD& in, size_t level_begin, size_t level_end) {
return new_lod; return new_lod;
} }
LOD SliceInLevel(const LOD& in, size_t level, size_t elem_begin, LoD SliceInLevel(const LoD& in, size_t level, size_t elem_begin,
size_t elem_end) { size_t elem_end) {
// slice the lod. // slice the lod.
LOD new_lod; LoD new_lod;
new_lod.reserve(in.size() - level); new_lod.reserve(in.size() - level);
auto start = in.at(level)[elem_begin]; auto start = in.at(level)[elem_begin];
auto end = in.at(level)[elem_end]; auto end = in.at(level)[elem_end];
...@@ -46,13 +46,13 @@ LOD SliceInLevel(const LOD& in, size_t level, size_t elem_begin, ...@@ -46,13 +46,13 @@ LOD SliceInLevel(const LOD& in, size_t level, size_t elem_begin,
std::transform(new_lod.back().begin(), new_lod.back().end(), std::transform(new_lod.back().begin(), new_lod.back().end(),
new_lod.back().begin(), new_lod.back().begin(),
[start](int v) { return v - start; }); [start](int v) { return v - start; });
PADDLE_ENFORCE_EQ(new_lod.back().front(), 0, "error in slice LOD"); PADDLE_ENFORCE_EQ(new_lod.back().front(), 0, "error in slice LoD");
} }
PADDLE_ENFORCE_LE(new_lod.size(), in.size()); PADDLE_ENFORCE_LE(new_lod.size(), in.size());
return new_lod; return new_lod;
} }
bool operator==(const LOD& a, const LOD& b) { bool operator==(const LoD& a, const LoD& b) {
if (a.size() != b.size()) { if (a.size() != b.size()) {
return false; return false;
} }
...@@ -72,12 +72,12 @@ bool operator==(const LOD& a, const LOD& b) { ...@@ -72,12 +72,12 @@ bool operator==(const LOD& a, const LOD& b) {
return true; return true;
} }
void LODTensor::SliceLevels(size_t level_begin, size_t level_end) { void LoDTensor::SliceLevels(size_t level_begin, size_t level_end) {
auto new_lod = framework::SliceLevels(lod_, level_begin, level_end); auto new_lod = framework::SliceLevels(lod_, level_begin, level_end);
lod_ = new_lod; lod_ = new_lod;
} }
void LODTensor::SliceInLevel(size_t level, size_t elem_begin, size_t elem_end) { void LoDTensor::SliceInLevel(size_t level, size_t elem_begin, size_t elem_end) {
PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level, PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
NumLevels()); NumLevels());
PADDLE_ENFORCE(elem_begin < NumElements(level), PADDLE_ENFORCE(elem_begin < NumElements(level),
......
...@@ -35,34 +35,34 @@ template <typename T> ...@@ -35,34 +35,34 @@ template <typename T>
using Vector = thrust::host_vector<T>; using Vector = thrust::host_vector<T>;
#endif #endif
using LOD = std::vector<Vector<size_t>>; using LoD = std::vector<Vector<size_t>>;
LOD SliceLevels(const LOD& in, size_t level_begin, size_t level_end); LoD SliceLevels(const LoD& in, size_t level_begin, size_t level_end);
LOD SliceInLevel(const LOD& in, size_t level, size_t elem_begin, LoD SliceInLevel(const LoD& in, size_t level, size_t elem_begin,
size_t elem_end); size_t elem_end);
bool operator==(const LOD& a, const LOD& b); bool operator==(const LoD& a, const LoD& b);
/* /*
* LODTensor (Level of details Tensor) * LoDTensor (Level of details Tensor)
* see https://en.wikipedia.org/wiki/Level_of_details for reference. * see https://en.wikipedia.org/wiki/Level_of_details for reference.
*/ */
class LODTensor { class LoDTensor {
public: public:
LODTensor() {} LoDTensor() {}
LODTensor(const LOD& lod, Tensor* t) : lod_(lod), tensor_(t) {} LoDTensor(const LoD& lod, Tensor* t) : lod_(lod), tensor_(t) {}
void set_lod(const LOD& lod) { lod_ = lod; } void set_lod(const LoD& lod) { lod_ = lod; }
void set_tensor(Tensor* tensor) { tensor_ = tensor; } void set_tensor(Tensor* tensor) { tensor_ = tensor; }
Tensor& tensor() { return *tensor_; } Tensor& tensor() { return *tensor_; }
LOD lod() { return lod_; } LoD lod() { return lod_; }
/* /*
 * Get a element from LOD. * Get an element from LoD.
*/ */
size_t lod_element(size_t level, size_t elem) const { size_t lod_element(size_t level, size_t elem) const {
PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level, PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
...@@ -74,7 +74,7 @@ class LODTensor { ...@@ -74,7 +74,7 @@ class LODTensor {
} }
/* /*
* Number of LODTensor's levels, each level has units of data, for example, * Number of LoDTensor's levels, each level has units of data, for example,
* in the sentence's view, article, paragraph, sentence are 3 levels. * in the sentence's view, article, paragraph, sentence are 3 levels.
*/ */
size_t NumLevels() const { return lod_.size(); } size_t NumLevels() const { return lod_.size(); }
...@@ -100,7 +100,7 @@ class LODTensor { ...@@ -100,7 +100,7 @@ class LODTensor {
void SliceInLevel(size_t level, size_t elem_begin, size_t elem_end); void SliceInLevel(size_t level, size_t elem_begin, size_t elem_end);
private: private:
LOD lod_; LoD lod_;
Tensor* tensor_; // not owned Tensor* tensor_; // not owned
}; };
} // namespace framework } // namespace framework
......
...@@ -94,7 +94,7 @@ Let's go on slicing this slice. Its <1,1>-slice is ...@@ -94,7 +94,7 @@ Let's go on slicing this slice. Its <1,1>-slice is
||| |||
``` ```
### The General Slicing Algorithm ### The Slicing Algorithm
The algorithm, with over-simplified data structure, is defined as The algorithm, with over-simplified data structure, is defined as
...@@ -106,17 +106,41 @@ struct LoDTensor { ...@@ -106,17 +106,41 @@ struct LoDTensor {
float* tensor_; float* tensor_;
}; };
LoDTensor Slice(const LoDTensor& lodt, int level, int sequence) { LoDTensor Slice(const LoDTensor& lodt, int level, int sequence);
```
Let us revisit the example above
} ```
3
3 1 2
3 2 4 1 2 3
||| || |||| | || |||
``` ```
### Slicing the Top Level Suppose that we want to retrieve the <1,2>-slice
Please be aware that an RNN operator only slices the top level of a LoD Tensor to get the step inputs. ```
2
2 3
|| |||
```
```c++ we will need to find out the starting position of this slice by summing over all leaf nodes in `LoD` to the left of the slice, i.e., 3 + 2 + 4 + 1 = 10.
LoDTensor Slice(const LoDTensor& lodt, int sequence) {
To avoid traversing the LoD tree at slicing time, we can do this work at construction time -- instead of saving the lengths of the next level in the LoD tree, we save the starting offsets of the next level. For example, the above LoD Tensor can be transformed into
```
0
0 9 10
0 3 5 9 10 12
||| || |||| | || |||
```
We don't really need the 0 on top, so the LoD Tensor could be
} ```
0 9 10
0 3 5 9 10 12
||| || |||| | || |||
``` ```
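The length-to-offset transform described above is mechanical: the leaf level becomes an exclusive prefix sum of its sequence lengths, and every upper level maps its cumulative sub-sequence counts through the offsets of the level below. A minimal standalone sketch of that construction, using plain `std::vector` in place of the framework's `Vector` type and a hypothetical helper name `LengthsToOffsets`:

```c++
#include <cassert>
#include <iostream>
#include <vector>

// Per-level LoD, top level first, as in the examples above.
using LoD = std::vector<std::vector<size_t>>;

// Convert per-level sequence lengths into per-level starting offsets.
LoD LengthsToOffsets(const LoD& lengths) {
  assert(!lengths.empty());
  LoD offsets(lengths.size());

  // Leaf level: exclusive prefix sum of the element counts.
  size_t running = 0;
  for (size_t len : lengths.back()) {
    offsets.back().push_back(running);
    running += len;
  }

  // Upper levels, bottom-up: each sequence starts where its first
  // sub-sequence starts in the level below.
  for (int level = static_cast<int>(lengths.size()) - 2; level >= 0; --level) {
    size_t child = 0;  // cumulative number of sub-sequences consumed so far
    for (size_t len : lengths[level]) {
      offsets[level].push_back(offsets[level + 1][child]);
      child += len;
    }
  }
  return offsets;
}

int main() {
  // The example LoD tensor from the text, written as lengths per level.
  LoD lengths = {{3}, {3, 1, 2}, {3, 2, 4, 1, 2, 3}};
  for (const auto& level : LengthsToOffsets(lengths)) {
    for (size_t v : level) std::cout << v << ' ';
    std::cout << '\n';
  }
  // Prints:
  // 0
  // 0 9 10
  // 0 3 5 9 10 12
  return 0;
}
```

With the offsets stored this way, the starting position of the <1,2>-slice is read directly from the second level (10) instead of being recomputed by summing leaf lengths.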
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
namespace paddle { namespace paddle {
namespace framework { namespace framework {
class LODTensorTester : public ::testing::Test { class LoDTensorTester : public ::testing::Test {
public: public:
virtual void SetUp() override { virtual void SetUp() override {
// tensor's batch_size: 30 // tensor's batch_size: 30
...@@ -29,7 +29,7 @@ class LODTensorTester : public ::testing::Test { ...@@ -29,7 +29,7 @@ class LODTensorTester : public ::testing::Test {
// 0 10 20 // 0 10 20
// 0 5 10 15 20 // 0 5 10 15 20
// 0 2 5 7 10 12 15 20 // 0 2 5 7 10 12 15 20
LOD lod; LoD lod;
lod.push_back(std::vector<size_t>{0, 10, 20}); lod.push_back(std::vector<size_t>{0, 10, 20});
lod.push_back(std::vector<size_t>{0, 5, 10, 15, 20}); lod.push_back(std::vector<size_t>{0, 5, 10, 15, 20});
lod.push_back(std::vector<size_t>{0, 2, 5, 7, 10, 12, 15, 17, 20}); lod.push_back(std::vector<size_t>{0, 2, 5, 7, 10, 12, 15, 17, 20});
...@@ -47,21 +47,21 @@ class LODTensorTester : public ::testing::Test { ...@@ -47,21 +47,21 @@ class LODTensorTester : public ::testing::Test {
protected: protected:
platform::CPUPlace place; platform::CPUPlace place;
Tensor tensor; Tensor tensor;
LODTensor lod_tensor; LoDTensor lod_tensor;
}; };
TEST_F(LODTensorTester, NumLevels) { ASSERT_EQ(lod_tensor.NumLevels(), 3UL); } TEST_F(LoDTensorTester, NumLevels) { ASSERT_EQ(lod_tensor.NumLevels(), 3UL); }
TEST_F(LODTensorTester, NumElements) { TEST_F(LoDTensorTester, NumElements) {
ASSERT_EQ(lod_tensor.NumElements(0), 2UL); ASSERT_EQ(lod_tensor.NumElements(0), 2UL);
ASSERT_EQ(lod_tensor.NumElements(1), 4UL); ASSERT_EQ(lod_tensor.NumElements(1), 4UL);
ASSERT_EQ(lod_tensor.NumElements(2), 8UL); ASSERT_EQ(lod_tensor.NumElements(2), 8UL);
} }
TEST_F(LODTensorTester, SliceLevels) { TEST_F(LoDTensorTester, SliceLevels) {
// slice 1 level // slice 1 level
for (size_t level = 0; level < 3UL; ++level) { for (size_t level = 0; level < 3UL; ++level) {
LODTensor new_lod_tensor = lod_tensor; LoDTensor new_lod_tensor = lod_tensor;
new_lod_tensor.SliceLevels(level, level + 1); new_lod_tensor.SliceLevels(level, level + 1);
ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL); ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL);
ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor.NumElements(level)); ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor.NumElements(level));
...@@ -70,7 +70,7 @@ TEST_F(LODTensorTester, SliceLevels) { ...@@ -70,7 +70,7 @@ TEST_F(LODTensorTester, SliceLevels) {
} }
// slice 2 level // slice 2 level
for (size_t level = 0; level < 2UL; ++level) { for (size_t level = 0; level < 2UL; ++level) {
LODTensor new_lod_tensor = lod_tensor; LoDTensor new_lod_tensor = lod_tensor;
new_lod_tensor.SliceLevels(level, level + 2); new_lod_tensor.SliceLevels(level, level + 2);
ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL); ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor.NumElements(level)); ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor.NumElements(level));
...@@ -80,9 +80,9 @@ TEST_F(LODTensorTester, SliceLevels) { ...@@ -80,9 +80,9 @@ TEST_F(LODTensorTester, SliceLevels) {
} }
} }
TEST_F(LODTensorTester, SliceInLevel) { TEST_F(LoDTensorTester, SliceInLevel) {
size_t level = 0; size_t level = 0;
LODTensor new_lod_tensor = lod_tensor; LoDTensor new_lod_tensor = lod_tensor;
new_lod_tensor.SliceInLevel(level, 0, 2); new_lod_tensor.SliceInLevel(level, 0, 2);
EXPECT_EQ(new_lod_tensor.NumLevels(), 3UL); EXPECT_EQ(new_lod_tensor.NumLevels(), 3UL);
EXPECT_EQ(new_lod_tensor.NumElements(0), 2UL); EXPECT_EQ(new_lod_tensor.NumElements(0), 2UL);
......
...@@ -21,7 +21,7 @@ class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { ...@@ -21,7 +21,7 @@ class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
AddOutput("output", "output of cosine op"); AddOutput("output", "output of cosine op");
AddAttr<float>("scale", "scale of cosine op") AddAttr<float>("scale", "scale of cosine op")
.SetDefault(1.0) .SetDefault(1.0)
.LargerThan(0.0); .GreaterThan(0.0);
AddComment("This is cos op"); AddComment("This is cos op");
} }
}; };
...@@ -80,7 +80,7 @@ TEST(OpRegistry, CreateOp) { ...@@ -80,7 +80,7 @@ TEST(OpRegistry, CreateOp) {
paddle::framework::Scope scope; paddle::framework::Scope scope;
paddle::platform::CPUDeviceContext dev_ctx; paddle::platform::CPUDeviceContext dev_ctx;
op->Run(scope, dev_ctx); op->Run(scope, dev_ctx);
float scale_get = op->GetAttr<float>("scale"); float scale_get = op->Attr<float>("scale");
ASSERT_EQ(scale_get, scale); ASSERT_EQ(scale_get, scale);
} }
...@@ -121,7 +121,7 @@ TEST(OpRegistry, DefaultValue) { ...@@ -121,7 +121,7 @@ TEST(OpRegistry, DefaultValue) {
paddle::framework::Scope scope; paddle::framework::Scope scope;
paddle::platform::CPUDeviceContext dev_ctx; paddle::platform::CPUDeviceContext dev_ctx;
op->Run(scope, dev_ctx); op->Run(scope, dev_ctx);
ASSERT_EQ(op->GetAttr<float>("scale"), 1.0); ASSERT_EQ(op->Attr<float>("scale"), 1.0);
} }
TEST(OpRegistry, CustomChecker) { TEST(OpRegistry, CustomChecker) {
...@@ -172,38 +172,6 @@ TEST(OpRegistry, CustomChecker) { ...@@ -172,38 +172,6 @@ TEST(OpRegistry, CustomChecker) {
paddle::platform::CPUDeviceContext dev_ctx; paddle::platform::CPUDeviceContext dev_ctx;
paddle::framework::Scope scope; paddle::framework::Scope scope;
op->Run(scope, dev_ctx); op->Run(scope, dev_ctx);
int test_attr = op->GetAttr<int>("test_attr"); int test_attr = op->Attr<int>("test_attr");
ASSERT_EQ(test_attr, 4); ASSERT_EQ(test_attr, 4);
} }
\ No newline at end of file
class TestAttrProtoMaker : public pd::OpProtoAndCheckerMaker {
public:
TestAttrProtoMaker(pd::OpProto* proto, pd::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddAttr<float>("scale", "scale of test op");
AddAttr<float>("scale", "scale of test op");
}
};
TEST(ProtoMaker, DuplicatedAttr) {
pd::OpProto op_proto;
pd::OpAttrChecker op_checker;
auto proto_maker = TestAttrProtoMaker(&op_proto, &op_checker);
ASSERT_THROW(proto_maker.Validate(), paddle::platform::EnforceNotMet);
}
class TestInOutProtoMaker : public pd::OpProtoAndCheckerMaker {
public:
TestInOutProtoMaker(pd::OpProto* proto, pd::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("input", "input of test op");
AddInput("input", "input of test op");
}
};
TEST(ProtoMaker, DuplicatedInOut) {
pd::OpProto op_proto;
pd::OpAttrChecker op_checker;
auto proto_maker = TestInOutProtoMaker(&op_proto, &op_checker);
ASSERT_THROW(proto_maker.Validate(), paddle::platform::EnforceNotMet);
}
...@@ -69,7 +69,7 @@ class OperatorBase { ...@@ -69,7 +69,7 @@ class OperatorBase {
virtual ~OperatorBase() {} virtual ~OperatorBase() {}
template <typename T> template <typename T>
inline const T& GetAttr(const std::string& name) const { inline const T& Attr(const std::string& name) const {
PADDLE_ENFORCE(attrs_.count(name) != 0, "%s should be in AttributeMap", PADDLE_ENFORCE(attrs_.count(name) != 0, "%s should be in AttributeMap",
name); name);
return boost::get<T>(attrs_.at(name)); return boost::get<T>(attrs_.at(name));
...@@ -241,8 +241,8 @@ class InferShapeContext { ...@@ -241,8 +241,8 @@ class InferShapeContext {
const Scope& scope() const { return scope_; } const Scope& scope() const { return scope_; }
template <typename T> template <typename T>
inline const T& GetAttr(const std::string& name) const { inline const T& Attr(const std::string& name) const {
return op_.GetAttr<T>(name); return op_.Attr<T>(name);
} }
size_t InputSize(const std::string& name) const { size_t InputSize(const std::string& name) const {
......
...@@ -102,7 +102,7 @@ class OpKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker { ...@@ -102,7 +102,7 @@ class OpKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
AddOutput("y", "output of test op"); AddOutput("y", "output of test op");
AddAttr<float>("scale", "scale of cosine op") AddAttr<float>("scale", "scale of cosine op")
.SetDefault(1.0) .SetDefault(1.0)
.LargerThan(0.0); .GreaterThan(0.0);
AddComment("This is test op"); AddComment("This is test op");
} }
}; };
...@@ -140,7 +140,7 @@ class OpKernelTestMultiInputsProtoAndCheckerMaker ...@@ -140,7 +140,7 @@ class OpKernelTestMultiInputsProtoAndCheckerMaker
AddOutput("ys", "outputs of test op").AsDuplicable(); AddOutput("ys", "outputs of test op").AsDuplicable();
AddAttr<float>("scale", "scale of cosine op") AddAttr<float>("scale", "scale of cosine op")
.SetDefault(1.0) .SetDefault(1.0)
.LargerThan(0.0); .GreaterThan(0.0);
AddComment("This is test op"); AddComment("This is test op");
} }
}; };
...@@ -263,4 +263,38 @@ TEST(Operator, Clone) { ...@@ -263,4 +263,38 @@ TEST(Operator, Clone) {
OperatorClone a("ABC", {}, {}, {}); OperatorClone a("ABC", {}, {}, {});
auto b = a.Clone(); auto b = a.Clone();
ASSERT_EQ(a.Type(), b->Type()); ASSERT_EQ(a.Type(), b->Type());
}
class TestAttrProtoMaker : public paddle::framework::OpProtoAndCheckerMaker {
public:
TestAttrProtoMaker(paddle::framework::OpProto* proto,
paddle::framework::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddAttr<float>("scale", "scale of test op");
AddAttr<float>("scale", "scale of test op");
}
};
TEST(ProtoMaker, DuplicatedAttr) {
paddle::framework::OpProto op_proto;
paddle::framework::OpAttrChecker op_checker;
auto proto_maker = TestAttrProtoMaker(&op_proto, &op_checker);
ASSERT_THROW(proto_maker.Validate(), paddle::platform::EnforceNotMet);
}
class TestInOutProtoMaker : public paddle::framework::OpProtoAndCheckerMaker {
public:
TestInOutProtoMaker(paddle::framework::OpProto* proto,
paddle::framework::OpAttrChecker* op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("input", "input of test op");
AddInput("input", "input of test op");
}
};
TEST(ProtoMaker, DuplicatedInOut) {
paddle::framework::OpProto op_proto;
paddle::framework::OpAttrChecker op_checker;
auto proto_maker = TestInOutProtoMaker(&op_proto, &op_checker);
ASSERT_THROW(proto_maker.Validate(), paddle::platform::EnforceNotMet);
} }
\ No newline at end of file
...@@ -43,6 +43,9 @@ class Tensor { ...@@ -43,6 +43,9 @@ class Tensor {
template <typename T, size_t D, int MajorType, typename IndexType> template <typename T, size_t D, int MajorType, typename IndexType>
friend struct EigenTensor; friend struct EigenTensor;
template <typename T, int MajorType, typename IndexType>
friend struct EigenMatrix;
template <typename T, int MajorType, typename IndexType> template <typename T, int MajorType, typename IndexType>
friend struct EigenVector; friend struct EigenVector;
......
...@@ -58,7 +58,7 @@ inline T* Tensor::mutable_data(platform::Place place) { ...@@ -58,7 +58,7 @@ inline T* Tensor::mutable_data(platform::Place place) {
"Tensor's numel must be larger than zero to call " "Tensor's numel must be larger than zero to call "
"Tensor::mutable_data. Call Tensor::set_dim first."); "Tensor::mutable_data. Call Tensor::set_dim first.");
/* some versions of boost::variant don't have operator!= */ /* some versions of boost::variant don't have operator!= */
size_t size = product(dims_) * sizeof(T); int64_t size = product(dims_) * sizeof(T);
if (holder_ == nullptr || !(holder_->place() == place) || if (holder_ == nullptr || !(holder_->place() == place) ||
holder_->size() < size + offset_) { holder_->size() < size + offset_) {
if (platform::is_cpu_place(place)) { if (platform::is_cpu_place(place)) {
...@@ -131,7 +131,7 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const { ...@@ -131,7 +131,7 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const {
PADDLE_ENFORCE_LT(begin_idx, end_idx, PADDLE_ENFORCE_LT(begin_idx, end_idx,
"Begin index must be less than end index."); "Begin index must be less than end index.");
PADDLE_ENFORCE_NE(dims_[0], 1, "Can not slice a tensor with dims_[0] = 1."); PADDLE_ENFORCE_NE(dims_[0], 1, "Can not slice a tensor with dims_[0] = 1.");
int base = product(dims_) / dims_[0]; size_t base = product(dims_) / dims_[0];
Tensor dst; Tensor dst;
dst.holder_ = holder_; dst.holder_ = holder_;
DDim dst_dims = dims_; DDim dst_dims = dims_;
...@@ -148,5 +148,13 @@ inline Tensor& Tensor::Resize(const DDim& dims) { ...@@ -148,5 +148,13 @@ inline Tensor& Tensor::Resize(const DDim& dims) {
inline const DDim& Tensor::dims() const { return dims_; } inline const DDim& Tensor::dims() const { return dims_; }
template <typename T>
inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) {
Tensor res;
res.ShareDataWith<T>(src);
res.Resize(flatten_to_2d(src.dims(), num_col_dims));
return res;
}
} // namespace framework } // namespace framework
} // namespace paddle } // namespace paddle
...@@ -262,3 +262,16 @@ TEST(Tensor, CopyFrom) { ...@@ -262,3 +262,16 @@ TEST(Tensor, CopyFrom) {
} }
#endif #endif
} }
TEST(Tensor, ReshapeToMatrix) {
using namespace paddle::framework;
using namespace paddle::platform;
Tensor src;
int* src_ptr = src.mutable_data<int>({2, 3, 4, 9}, CPUPlace());
for (int i = 0; i < 2 * 3 * 4 * 9; ++i) {
src_ptr[i] = i;
}
Tensor res = ReshapeToMatrix<int>(src, 2);
ASSERT_EQ(res.dims()[0], 2 * 3);
ASSERT_EQ(res.dims()[1], 4 * 9);
}
\ No newline at end of file
...@@ -44,6 +44,7 @@ if(WITH_GPU) ...@@ -44,6 +44,7 @@ if(WITH_GPU)
add_simple_unittest(RowConvOpTest) add_simple_unittest(RowConvOpTest)
add_simple_unittest(BlockExpandOpTest) add_simple_unittest(BlockExpandOpTest)
add_simple_unittest(CropOpTest) add_simple_unittest(CropOpTest)
add_simple_unittest(SwitchOpTest)
endif() endif()
add_simple_unittest(Im2ColTest) add_simple_unittest(Im2ColTest)
......
...@@ -83,9 +83,9 @@ struct EigenBlasGemm { ...@@ -83,9 +83,9 @@ struct EigenBlasGemm {
}; };
#ifdef PADDLE_TYPE_DOUBLE #ifdef PADDLE_TYPE_DOUBLE
template class EigenBlasGemm<double>; template struct EigenBlasGemm<double>;
#else #else
template class EigenBlasGemm<float>; template struct EigenBlasGemm<float>;
#endif #endif
} // namespace paddle } // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "GemmFunctor.h"
#include "hl_cpu_gru.cuh"
namespace paddle {
template <DeviceType Device, class T>
struct GruFunctor {
template <class OpResetOutput, class OpFinalOutput>
static void compute(OpResetOutput opResetOutput,
OpFinalOutput opFinalOutput,
hl_gru_value value,
int frameSize,
int batchSize,
hl_activation_mode_t active_node,
hl_activation_mode_t active_gate) {
#ifndef __NVCC__
if (value.prevOutValue) {
BlasGemm<Device, T>::compute(false,
false,
batchSize,
2 * frameSize,
frameSize,
1,
value.prevOutValue,
frameSize,
value.gateWeight,
frameSize * 2,
1,
value.gateValue,
frameSize * 3);
}
forward_reset_output(
opResetOutput, value, frameSize, batchSize, active_gate);
if (value.prevOutValue) {
BlasGemm<Device, T>::compute(false,
false,
batchSize,
frameSize,
frameSize,
1,
value.resetOutputValue,
frameSize,
value.stateWeight,
frameSize,
1,
value.gateValue + frameSize * 2,
frameSize * 3);
}
forward_final_output(
opFinalOutput, value, frameSize, batchSize, active_node);
#endif
}
};
template <DeviceType Device, class T>
struct GruGradFunctor {
template <class OpStateGrad, class OpResetGrad>
static void compute(OpStateGrad opStateGrad,
OpResetGrad opResetGrad,
hl_gru_value value,
hl_gru_grad grad,
int frameSize,
int batchSize,
hl_activation_mode_t active_node,
hl_activation_mode_t active_gate) {
#ifndef __NVCC__
backward_state_grad(
opStateGrad, value, grad, frameSize, batchSize, active_node);
if (value.prevOutValue && grad.prevOutGrad) {
BlasGemm<Device, T>::compute(false,
true,
batchSize,
frameSize,
frameSize,
1,
grad.gateGrad + frameSize * 2,
frameSize * 3,
value.stateWeight,
frameSize,
0,
grad.resetOutputGrad,
frameSize);
if (grad.stateWeightGrad) {
BlasGemm<Device, T>::compute(true,
false,
frameSize,
frameSize,
batchSize,
1,
value.resetOutputValue,
frameSize,
grad.gateGrad + frameSize * 2,
frameSize * 3,
1,
grad.stateWeightGrad,
frameSize);
}
}
backward_reset_grad(
opResetGrad, value, grad, frameSize, batchSize, active_gate);
if (grad.prevOutGrad && value.prevOutValue) {
BlasGemm<Device, T>::compute(false,
true,
batchSize,
frameSize,
frameSize * 2,
1,
grad.gateGrad,
frameSize * 3,
value.gateWeight,
frameSize * 2,
1,
grad.prevOutGrad,
frameSize);
if (grad.gateWeightGrad) {
BlasGemm<Device, T>::compute(true,
false,
frameSize,
frameSize * 2,
batchSize,
1,
value.prevOutValue,
frameSize,
grad.gateGrad,
frameSize * 3,
1,
grad.gateWeightGrad,
frameSize * 2);
}
}
#endif
}
};
} // namespace paddle
...@@ -94,95 +94,4 @@ public: ...@@ -94,95 +94,4 @@ public:
int paddingWidth); int paddingWidth);
}; };
template <class T>
struct Padding {
static void run(const T* src,
T* dest,
int channels,
int inputHeight,
int inputWidth,
int paddingHeight,
int paddingWidth) {
const int destWidth = inputWidth + 2 * paddingWidth;
for (int c = 0; c < channels; c++) {
if (paddingHeight > 0) {
memset(dest, 0, destWidth * paddingHeight * sizeof(T));
dest += destWidth * paddingHeight;
}
for (int i = 0; i < inputHeight; i++) {
// padding head
for (int j = 0; j < paddingWidth; j++) {
*dest++ = T(0);
}
memcpy(dest, src, inputWidth * sizeof(T));
dest += inputWidth;
src += inputWidth;
// padding tail
for (int j = 0; j < paddingWidth; j++) {
*dest++ = T(0);
}
}
if (paddingHeight > 0) {
memset(dest, 0, destWidth * paddingHeight * sizeof(T));
dest += destWidth * paddingHeight;
}
}
}
};
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
template <>
struct Padding<float> {
static void run(const float* src,
float* dest,
int channels,
int inputHeight,
int inputWidth,
int paddingHeight,
int paddingWidth) {
const int destWidth = inputWidth + 2 * paddingWidth;
for (int c = 0; c < channels; c++) {
if (paddingHeight > 0) {
memset(dest, 0, destWidth * paddingHeight * sizeof(float));
dest += destWidth * paddingHeight;
}
for (int i = 0; i < inputHeight; i++) {
// padding head
for (int j = 0; j < paddingWidth; j++) {
*dest++ = float(0);
}
int step = inputWidth >> 2;
int remain = inputWidth & 3;
for (int s = 0; s < step; s++) {
float32x4_t s0 = vld1q_f32(src);
vst1q_f32(dest, s0);
src += 4;
dest += 4;
}
for (int r = 0; r < remain; r++) {
*dest++ = *src++;
}
// padding tail
for (int j = 0; j < paddingWidth; j++) {
*dest++ = float(0);
}
}
if (paddingHeight > 0) {
memset(dest, 0, destWidth * paddingHeight * sizeof(float));
dest += destWidth * paddingHeight;
}
}
}
};
#endif
} // namespace paddle } // namespace paddle
...@@ -13,18 +13,10 @@ See the License for the specific language governing permissions and ...@@ -13,18 +13,10 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "MulOp.h" #include "MulOp.h"
/// todo(tianbing), delete it #include "GemmFunctor.h"
#include <iostream>
#include "paddle/math/MathFunctions.h"
#include "paddle/math/SIMDFunctions.h" #include "paddle/math/SIMDFunctions.h"
#include "paddle/utils/ThreadLocal.h" #include "paddle/utils/ThreadLocal.h"
#ifndef PADDLE_TYPE_DOUBLE
#define GEMM paddle::gemm<float>
#else
#define GEMM paddle::gemm<double>
#endif
namespace { namespace {
inline void vecAddTo(real* a, const real* b, real scaleB, size_t len) { inline void vecAddTo(real* a, const real* b, real scaleB, size_t len) {
for (unsigned int i = 0; i < len; ++i) { for (unsigned int i = 0; i < len; ++i) {
...@@ -114,19 +106,20 @@ void MulOp<DEVICE_TYPE_CPU>(CpuMatrix& out, ...@@ -114,19 +106,20 @@ void MulOp<DEVICE_TYPE_CPU>(CpuMatrix& out,
real scaleT, real scaleT,
bool aTrans, bool aTrans,
bool bTrans) { bool bTrans) {
GEMM(aTrans ? CblasTrans : CblasNoTrans, BlasGemm<DEVICE_TYPE_CPU, real>::compute(
bTrans ? CblasTrans : CblasNoTrans, aTrans,
out.getHeight(), bTrans,
out.getWidth(), out.getHeight(),
!aTrans ? a.getWidth() : a.getHeight(), out.getWidth(),
scaleAB, !aTrans ? a.getWidth() : a.getHeight(),
a.getData(), scaleAB,
a.getStride(), a.getData(),
b.getData(), a.getStride(),
b.getStride(), b.getData(),
scaleT, b.getStride(),
out.getData(), scaleT,
out.getStride()); out.getData(),
out.getStride());
} }
/// dense matrix (+)= sparse matrix * dense matrix /// dense matrix (+)= sparse matrix * dense matrix
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "SwitchOp.h"
#include "paddle/math/Vector.h"
namespace paddle {
template <>
void NCHW2NHWC<DEVICE_TYPE_CPU>(real* outputs,
const real* inputs,
const int num,
const int inC,
const int inH,
const int inW,
const int argType) {
for (int n = 0; n < num; ++n) {
for (int c = 0; c < inC; ++c) {
for (int h = 0; h < inH; ++h) {
for (int w = 0; w < inW; ++w) {
if (argType == ADD_TO) {
outputs[((n * inH + h) * inW + w) * inC + c] += *(inputs++);
} else {
outputs[((n * inH + h) * inW + w) * inC + c] = *(inputs++);
}
}
}
}
}
}
template <>
void NHWC2NCHW<DEVICE_TYPE_CPU>(real* outputs,
const real* inputs,
const int num,
const int inH,
const int inW,
const int inC,
const int argType) {
for (int n = 0; n < num; ++n) {
for (int h = 0; h < inH; ++h) {
for (int w = 0; w < inW; ++w) {
for (int c = 0; c < inC; ++c) {
if (argType == ADD_TO) {
outputs[((n * inC + c) * inH + h) * inW + w] += *(inputs++);
} else {
outputs[((n * inC + c) * inH + h) * inW + w] = *(inputs++);
}
}
}
}
}
}
/**
* \brief Switch dimension order of image input.
 * The input and output are 4D tensors. Switch order
 * 'batch_size, channels, height, width' to
* order 'batch_size, height, width, channels'.
*
 * Arguments in this Function:
 * \param inputs input data with order 'batch_size, channels, height, width'.
* \param outputs output data with order 'batch_size, height, width, channels'.
*/
template <DeviceType Device>
class NCHW2NHWCFunc : public FunctionBase {
public:
void init(const FuncConfig& config) override {}
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
CHECK_EQ(1UL, inputs.size());
CHECK_EQ(1UL, outputs.size());
size_t num = inputs[0].shape()[0];
size_t inC = inputs[0].shape()[1];
size_t inH = inputs[0].shape()[2];
size_t inW = inputs[0].shape()[3];
NCHW2NHWC<Device>(outputs[0].data<real>(),
inputs[0].data<real>(),
num,
inC,
inH,
inW,
outputs[0].getArgType());
}
};
/**
* \brief Switch dimension order of image input.
 * The input and output are 4D tensors. Switch order
* 'batch_size, height, width, channels' to
* order 'batch_size, channels, height, width'.
*
 * Arguments in this Function:
* \param inputs input data with order 'batch_size, height, width, channels'.
* \param outputs output data with order 'batch_size, channels, height, width'.
*/
template <DeviceType Device>
class NHWC2NCHWFunc : public FunctionBase {
public:
void init(const FuncConfig& config) override {}
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
CHECK_EQ(1UL, inputs.size());
CHECK_EQ(1UL, outputs.size());
size_t num = inputs[0].shape()[0];
size_t inH = inputs[0].shape()[1];
size_t inW = inputs[0].shape()[2];
size_t inC = inputs[0].shape()[3];
NHWC2NCHW<Device>(outputs[0].data<real>(),
inputs[0].data<real>(),
num,
inH,
inW,
inC,
outputs[0].getArgType());
}
};
REGISTER_TYPED_FUNC(NCHW2NHWC, CPU, NCHW2NHWCFunc);
REGISTER_TYPED_FUNC(NHWC2NCHW, CPU, NHWC2NCHWFunc);
#ifndef PADDLE_ONLY_CPU
REGISTER_TYPED_FUNC(NCHW2NHWC, GPU, NCHW2NHWCFunc);
REGISTER_TYPED_FUNC(NHWC2NCHW, GPU, NHWC2NCHWFunc);
#endif
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "Function.h"
namespace paddle {
/**
 * \brief This function switches the dimension order of the image input.
 * The input and output are 4D tensors. Switch order 'batch_size,
 * channels, height, width' to
 * order 'batch_size, height, width, channels'.
*
* \param[out] outputs save results.
* \param[in] inputs input data.
* \param[in] num batch size of input data.
* \param[in] inC channel number of input data.
* \param[in] inH height of input data.
 * \param[in] inW width of input data.
* \param[in] argType type of output argument.
*/
template <DeviceType Device>
void NCHW2NHWC(real* outputs,
const real* inputs,
const int num,
const int inC,
const int inH,
const int inW,
const int argtype);
/**
 * \brief This function switches the dimension order of the image input.
 * The input and output are 4D tensors. Switch order 'batch_size,
 * height, width, channels' to
 * order 'batch_size, channels, height, width'.
*
* \param[out] inGrad gradients of previous layer.
* \param[in] outGrad output gradients.
* \param[in] num batch size of input data.
* \param[in] inH height of input data.
 * \param[in] inW width of input data.
* \param[in] inC channel number of input data.
* \param[in] argType type of output argument.
*/
template <DeviceType Device>
void NHWC2NCHW(real* inGrad,
const real* outGrad,
const int num,
const int inH,
const int inW,
const int inC,
const int argType);
} // namespace paddle
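The two declarations above are inverse index permutations, as their parameter comments describe. A standalone sketch of the forward mapping, using plain C++ rather than the Function/BufferArgs machinery and a hypothetical helper name `nchw_to_nhwc`:

```c++
#include <iostream>
#include <vector>

// Move element (n, c, h, w) of an NCHW buffer to position (n, h, w, c)
// of an NHWC buffer; ASSIGN_TO semantics (overwrite, no accumulation).
std::vector<float> nchw_to_nhwc(const std::vector<float>& in, int num, int inC,
                                int inH, int inW) {
  std::vector<float> out(in.size());
  for (int n = 0; n < num; ++n) {
    for (int c = 0; c < inC; ++c) {
      for (int h = 0; h < inH; ++h) {
        for (int w = 0; w < inW; ++w) {
          out[((n * inH + h) * inW + w) * inC + c] =
              in[((n * inC + c) * inH + h) * inW + w];
        }
      }
    }
  }
  return out;
}

int main() {
  // 1 sample, 2 channels, 2x2 image: channel 0 holds 0..3, channel 1 holds 4..7.
  std::vector<float> nchw = {0, 1, 2, 3, 4, 5, 6, 7};
  for (float v : nchw_to_nhwc(nchw, 1, 2, 2, 2)) std::cout << v << ' ';
  std::cout << '\n';  // prints: 0 4 1 5 2 6 3 7
  return 0;
}
```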
/* Copyright (c) 2016 Paddle
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "SwitchOp.h"
#include "hl_base.h"
namespace paddle {
__global__ void KeNCHW2NHWC(real* outputs,
const real* inputs,
int inC,
int inH,
int inW,
int nthreads,
int argType) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < nthreads) {
const int w = idx % inW;
const int h = (idx / inW) % inH;
const int c = (idx / inW / inH) % inC;
const int n = idx / inW / inH / inC;
const int off = ((n * inH + h) * inW + w) * inC + c;
if (argType == ADD_TO) {
outputs[off] += inputs[idx];
} else {
outputs[off] = inputs[idx];
}
}
}
template <>
void NCHW2NHWC<DEVICE_TYPE_GPU>(real* outputs,
const real* inputs,
const int num,
const int inC,
const int inH,
const int inW,
const int argType) {
size_t nth = num * inC * inH * inW;
int blockSize = 1024;
int gridSize = (nth + 1024 - 1) / 1024;
KeNCHW2NHWC<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>(
outputs, inputs, inC, inH, inW, nth, argType);
CHECK_SYNC("NCHW2NHWC");
}
__global__ void KeNHWC2NCHW(real* outputs,
const real* inputs,
int inH,
int inW,
int inC,
int nthreads,
int argType) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < nthreads) {
const int c = idx % inC;
const int w = (idx / inC) % inW;
const int h = (idx / inC / inW) % inH;
const int n = idx / inW / inH / inC;
const int off = ((n * inC + c) * inH + h) * inW + w;
if (argType == ADD_TO) {
outputs[off] += inputs[idx];
} else {
outputs[off] = inputs[idx];
}
}
}
template <>
void NHWC2NCHW<DEVICE_TYPE_GPU>(real* outputs,
const real* inputs,
const int num,
const int inH,
const int inW,
const int inC,
const int argType) {
int nth = num * inC * inH * inW;
int blockSize = 1024;
int gridSize = (nth + 1024 - 1) / 1024;
KeNHWC2NCHW<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>(
outputs, inputs, inH, inW, inC, nth, argType);
CHECK_SYNC("NHWC2NCHW");
}
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include "FunctionTest.h"
namespace paddle {
TEST(Pad, real) {
for (size_t numSamples : {1, 4, 8, 16}) {
for (size_t channels : {1, 4, 8, 16}) {
for (size_t imgSizeH : {1, 4, 8, 16}) {
for (size_t imgSizeW : {1, 4, 8, 16}) {
VLOG(3) << " numSamples=" << numSamples << " channels=" << channels
<< " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW;
for (bool test_grad : {true, false}) {
CpuGpuFuncCompare compare(test_grad ? "NHWC2NCHW" : "NCHW2NHWC",
FuncConfig());
TensorShape inDims{numSamples, channels, imgSizeH, imgSizeW};
TensorShape outDims{numSamples, imgSizeH, imgSizeW, channels};
compare.addInputs(
BufferArg(VALUE_TYPE_FLOAT, test_grad ? outDims : inDims));
compare.addOutputs(BufferArg(
VALUE_TYPE_FLOAT, test_grad ? inDims : outDims, ASSIGN_TO));
compare.run();
}
}
}
}
}
}
} // namespace paddle
This diff has been collapsed.
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "NeonDepthwiseConv.h"
#include "paddle/function/ConvOp.h"
namespace paddle {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
template <DeviceType Device>
class NeonDepthwiseConvTransposeFunction : public ConvFunctionBase {
public:
void init(const FuncConfig& config) override {
ConvFunctionBase::init(config);
}
void check(const BufferArgs& inputs, const BufferArgs& outputs) override {
const TensorShape& input = inputs[0].shape();
const TensorShape& filter = inputs[1].shape();
const TensorShape& output = outputs[0].shape();
checkShape(input, filter, output);
}
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
CHECK_EQ(numInputs_, inputs.size());
CHECK_EQ(numOutputs_, outputs.size());
check(inputs, outputs);
const TensorShape& input = inputs[0].shape();
const TensorShape& filter = inputs[1].shape();
const TensorShape& output = outputs[0].shape();
int batchSize = input[0];
int inputChannels = input[1];
int inputHeight = input[2];
int inputWidth = input[3];
int filterHeight = getFilterHeight(filter);
int filterWidth = getFilterWidth(filter);
int outputChannels = output[1];
int outputHeight = output[2];
int outputWidth = output[3];
int filterMultiplier = outputChannels / groups_;
CHECK_EQ(inputChannels, groups_);
// only support strideH() == strideW() and filterHeight == filterWidth.
CHECK_EQ(strideH(), strideW());
CHECK_EQ(paddingH(), paddingW());
CHECK_EQ(filterHeight, filterWidth);
float* inputData = inputs[0].data<float>();
float* filterData = inputs[1].data<float>();
float* outputData = outputs[0].data<float>();
// padding the input, input -> inputPadding
float* inputPadding = inputData;
int padInputHeight =
(inputHeight - 1) * strideH() + 2 * filterHeight - 1 - 2 * paddingH();
int padInputWidth =
(inputWidth - 1) * strideW() + 2 * filterWidth - 1 - 2 * paddingW();
if (padInputHeight > inputHeight || padInputWidth > inputWidth) {
int newSize = batchSize * inputChannels * padInputHeight * padInputWidth;
resizeBuffer<Device>(newSize);
inputPadding = reinterpret_cast<float*>(memory_->getBuf());
if (strideH() == 1) {
neon::Padding<float>::run(inputData,
inputPadding,
batchSize * inputChannels,
inputHeight,
inputWidth,
padInputHeight,
padInputWidth);
} else if (strideH() == 2) {
neon::StridePadding::run(inputData,
inputPadding,
batchSize * inputChannels,
inputHeight,
inputWidth,
padInputHeight,
padInputWidth);
} else {
LOG(FATAL) << "Not supported";
}
}
std::function<void(
const float*, const float*, int, int, int, int, int, int, float*)>
DepthWiseConv;
if (filterWidth == 3) {
DepthWiseConv = neon::DepthwiseConvKernel<3, 1>::run;
} else if (filterWidth == 4) {
DepthWiseConv = neon::DepthwiseConvKernel<4, 1>::run;
} else {
LOG(FATAL) << "Not supported";
}
for (int i = 0; i < batchSize; i++) {
DepthWiseConv(inputPadding,
filterData,
padInputHeight,
padInputWidth,
outputChannels,
outputHeight,
outputWidth,
filterMultiplier,
outputData);
inputPadding += inputChannels * padInputHeight * padInputWidth;
outputData += outputChannels * outputHeight * outputWidth;
}
}
};
#ifndef PADDLE_TYPE_DOUBLE
REGISTER_TYPED_FUNC(NeonDepthwiseConvTranspose,
CPU,
NeonDepthwiseConvTransposeFunction);
#endif
#endif
} // namespace paddle
...@@ -33,12 +33,8 @@ inline float32_t vaddvq_f32(float32x4_t a) { ...@@ -33,12 +33,8 @@ inline float32_t vaddvq_f32(float32x4_t a) {
return vget_lane_f32(vpadd_f32(v, v), 0); return vget_lane_f32(vpadd_f32(v, v), 0);
} }
inline float32x4_t vmlaq_laneq_f32(float32x4_t a, #define vmlaq_laneq_f32(a, b, v, lane) \
float32x4_t b, vmlaq_n_f32(a, b, vgetq_lane_f32(v, lane))
float32x4_t v,
const int lane) {
return vmlaq_n_f32(a, b, vgetq_lane_f32(v, lane));
}
#endif #endif
} // namespace neon } // namespace neon
......
...@@ -42,10 +42,10 @@ bool Conv3DLayer::init(const LayerMap &layerMap, ...@@ -42,10 +42,10 @@ bool Conv3DLayer::init(const LayerMap &layerMap,
if (sharedBiases_) { if (sharedBiases_) {
CHECK_EQ((size_t)numFilters_, biasParameter_->getSize()); CHECK_EQ((size_t)numFilters_, biasParameter_->getSize());
biases_ = biases_ =
std::unique_ptr<Weight>(new Weight(1, numFilters_, biasParameter_)); std::unique_ptr<Weight>(new Weight(numFilters_, 1, biasParameter_));
} else { } else {
biases_ = biases_ =
std::unique_ptr<Weight>(new Weight(1, getSize(), biasParameter_)); std::unique_ptr<Weight>(new Weight(getSize(), 1, biasParameter_));
} }
} }
return true; return true;
...@@ -83,8 +83,8 @@ void Conv3DLayer::forward(PassType passType) { ...@@ -83,8 +83,8 @@ void Conv3DLayer::forward(PassType passType) {
int outWidth = getSize(); int outWidth = getSize();
resetOutput(batchSize, outWidth); resetOutput(batchSize, outWidth);
REGISTER_TIMER_INFO("FwdConv3D", getName().c_str());
for (size_t i = 0; i != inputLayers_.size(); ++i) { for (size_t i = 0; i != inputLayers_.size(); ++i) {
REGISTER_TIMER_INFO("FwdConv3D", getName().c_str());
const MatrixPtr &inMat = getInputValue(i); const MatrixPtr &inMat = getInputValue(i);
const MatrixPtr &outMat = getOutputValue(); const MatrixPtr &outMat = getOutputValue();
int M = M_[i]; int M = M_[i];
...@@ -120,7 +120,6 @@ void Conv3DLayer::forward(PassType passType) { ...@@ -120,7 +120,6 @@ void Conv3DLayer::forward(PassType passType) {
} }
} }
if (nullptr != this->biasParameter_) { if (nullptr != this->biasParameter_) {
REGISTER_TIMER_INFO("FwBiasTimer", getName().c_str());
this->addBias(); this->addBias();
} }
forwardActivation(); forwardActivation();
...@@ -134,15 +133,14 @@ void Conv3DLayer::backward(const UpdateCallback &callback) { ...@@ -134,15 +133,14 @@ void Conv3DLayer::backward(const UpdateCallback &callback) {
biases_->getParameterPtr()->incUpdate(callback); biases_->getParameterPtr()->incUpdate(callback);
} }
REGISTER_TIMER_INFO("BwdConv3D", getName().c_str());
for (size_t i = 0; i != inputLayers_.size(); ++i) { for (size_t i = 0; i != inputLayers_.size(); ++i) {
REGISTER_TIMER_INFO("BwdConv3D", getName().c_str());
if (weights_[i]->getWGrad()) { if (weights_[i]->getWGrad()) {
bpropWeights(i); bpropWeights(i);
} }
if (getInputGrad(i)) { if (getInputGrad(i)) {
bpropData(i); bpropData(i);
} }
REGISTER_TIMER_INFO("WeightUpdate", getName().c_str());
weights_[i]->getParameterPtr()->incUpdate(callback); weights_[i]->getParameterPtr()->incUpdate(callback);
} }
} }
...@@ -224,20 +222,31 @@ void Conv3DLayer::bpropData(int i) { ...@@ -224,20 +222,31 @@ void Conv3DLayer::bpropData(int i) {
} }
void Conv3DLayer::bpropBiases() { void Conv3DLayer::bpropBiases() {
MatrixPtr biases = Matrix::create(biases_->getWGrad()->getData(),
1,
biases_->getWGrad()->getElementCnt(),
false,
useGpu_);
MatrixPtr outGradMat = getOutputGrad(); MatrixPtr outGradMat = getOutputGrad();
if (this->sharedBiases_) { if (this->sharedBiases_) {
biases_->getWGrad()->collectSharedBias(*outGradMat, 1.0f); biases->collectSharedBias(*outGradMat, 1.0f);
} else { } else {
biases_->getWGrad()->collectBias(*outGradMat, 1.0f); biases->collectBias(*outGradMat, 1.0f);
} }
} }
void Conv3DLayer::addBias() { void Conv3DLayer::addBias() {
MatrixPtr outMat = getOutputValue(); MatrixPtr outMat = getOutputValue();
MatrixPtr bias = Matrix::create(biases_->getW()->getData(),
1,
biases_->getW()->getElementCnt(),
false,
useGpu_);
if (this->sharedBiases_) { if (this->sharedBiases_) {
outMat->addSharedBias(*(biases_->getW()), 1.0f); outMat->addSharedBias(*(bias), 1.0f);
} else { } else {
outMat->addBias(*(biases_->getW()), 1.0f); outMat->addBias(*(bias), 1.0f);
} }
} }
......
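In both Conv3DLayer and DeConv3DLayer (next file) the bias parameter is now created as a numFilters x 1 column Weight, while addBias() and bpropBiases() wrap the same buffer in a temporary 1 x N row matrix through Matrix::create(data, 1, elementCnt, false, useGpu_) before calling addSharedBias/collectBias. The stand-alone sketch below (hypothetical MatView type, not Paddle's Matrix) illustrates that zero-copy reinterpretation:

    // The column storage and the row view share one buffer; only the shape
    // bookkeeping differs. Illustrative sketch, not Paddle code.
    #include <cstdio>
    #include <vector>

    struct MatView {          // hypothetical minimal row-major matrix view
      float* data;
      int height, width;
    };

    int main() {
      std::vector<float> bias = {0.1f, 0.2f, 0.3f, 0.4f};  // parameter storage
      MatView column{bias.data(), 4, 1};  // how the Weight is stored now (N x 1)
      MatView row{column.data, 1, 4};     // the transient view used by addBias()
      std::printf("column %dx%d and row %dx%d share data at %p\n",
                  column.height, column.width, row.height, row.width,
                  static_cast<void*>(row.data));
      return 0;
    }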
...@@ -42,10 +42,10 @@ bool DeConv3DLayer::init(const LayerMap &layerMap, ...@@ -42,10 +42,10 @@ bool DeConv3DLayer::init(const LayerMap &layerMap,
if (sharedBiases_) { if (sharedBiases_) {
CHECK_EQ((size_t)numFilters_, biasParameter_->getSize()); CHECK_EQ((size_t)numFilters_, biasParameter_->getSize());
biases_ = biases_ =
std::unique_ptr<Weight>(new Weight(1, numFilters_, biasParameter_)); std::unique_ptr<Weight>(new Weight(numFilters_, 1, biasParameter_));
} else { } else {
biases_ = biases_ =
std::unique_ptr<Weight>(new Weight(1, getSize(), biasParameter_)); std::unique_ptr<Weight>(new Weight(getSize(), 1, biasParameter_));
} }
} }
return true; return true;
...@@ -84,8 +84,8 @@ void DeConv3DLayer::forward(PassType passType) { ...@@ -84,8 +84,8 @@ void DeConv3DLayer::forward(PassType passType) {
resetOutput(batchSize, outWidth); resetOutput(batchSize, outWidth);
const MatrixPtr outMat = getOutputValue(); const MatrixPtr outMat = getOutputValue();
REGISTER_TIMER_INFO("FwdDeConv3D", getName().c_str());
for (size_t i = 0; i != inputLayers_.size(); ++i) { for (size_t i = 0; i != inputLayers_.size(); ++i) {
REGISTER_TIMER_INFO("FwdDeConv3D", getName().c_str());
const MatrixPtr &inMat = getInputValue(i); const MatrixPtr &inMat = getInputValue(i);
int M = M_[i]; int M = M_[i];
int N = N_[i]; int N = N_[i];
...@@ -120,7 +120,6 @@ void DeConv3DLayer::forward(PassType passType) { ...@@ -120,7 +120,6 @@ void DeConv3DLayer::forward(PassType passType) {
} }
} }
if (nullptr != this->biasParameter_) { if (nullptr != this->biasParameter_) {
REGISTER_TIMER_INFO("FwBiasTimer", getName().c_str());
this->addBias(); this->addBias();
} }
forwardActivation(); forwardActivation();
...@@ -133,12 +132,12 @@ void DeConv3DLayer::backward(const UpdateCallback &callback) { ...@@ -133,12 +132,12 @@ void DeConv3DLayer::backward(const UpdateCallback &callback) {
bpropBiases(); bpropBiases();
biases_->getParameterPtr()->incUpdate(callback); biases_->getParameterPtr()->incUpdate(callback);
} }
REGISTER_TIMER_INFO("BwdDeConv3D", getName().c_str());
for (size_t i = 0; i < inputLayers_.size(); ++i) { for (size_t i = 0; i < inputLayers_.size(); ++i) {
if (weights_[i]->getWGrad() || this->needGradient_) { if (weights_[i]->getWGrad() || this->needGradient_) {
int M = M_[i]; int M = M_[i];
int N = N_[i]; int N = N_[i];
int K = K_[i]; int K = K_[i];
REGISTER_TIMER_INFO("BwdDeConv3D", getName().c_str());
Matrix::resizeOrCreate(colBuf_, K * groups_[i], N, false, useGpu_); Matrix::resizeOrCreate(colBuf_, K * groups_[i], N, false, useGpu_);
const MatrixPtr &inMat = getInputValue(i); const MatrixPtr &inMat = getInputValue(i);
for (int n = 0; n < batchSize; ++n) { for (int n = 0; n < batchSize; ++n) {
...@@ -182,7 +181,6 @@ void DeConv3DLayer::backward(const UpdateCallback &callback) { ...@@ -182,7 +181,6 @@ void DeConv3DLayer::backward(const UpdateCallback &callback) {
} }
} }
} }
REGISTER_TIMER_INFO("WeightUpdate", getName().c_str());
weights_[i]->getParameterPtr()->incUpdate(callback); weights_[i]->getParameterPtr()->incUpdate(callback);
} }
} }
...@@ -191,21 +189,31 @@ void DeConv3DLayer::bpropWeights(int i) {} ...@@ -191,21 +189,31 @@ void DeConv3DLayer::bpropWeights(int i) {}
void DeConv3DLayer::bpropData(int i) {} void DeConv3DLayer::bpropData(int i) {}
void DeConv3DLayer::bpropBiases() { void DeConv3DLayer::bpropBiases() {
MatrixPtr biases = Matrix::create(biases_->getWGrad()->getData(),
1,
biases_->getWGrad()->getElementCnt(),
false,
useGpu_);
const MatrixPtr &outGradMat = getOutputGrad(); const MatrixPtr &outGradMat = getOutputGrad();
if (this->sharedBiases_) { if (this->sharedBiases_) {
biases_->getWGrad()->collectSharedBias(*outGradMat, 1.0f); biases->collectSharedBias(*outGradMat, 1.0f);
} else { } else {
biases_->getWGrad()->collectBias(*outGradMat, 1.0f); biases->collectBias(*outGradMat, 1.0f);
} }
} }
void DeConv3DLayer::addBias() { void DeConv3DLayer::addBias() {
MatrixPtr outMat = getOutputValue(); MatrixPtr outMat = getOutputValue();
MatrixPtr bias = Matrix::create(biases_->getW()->getData(),
1,
biases_->getW()->getElementCnt(),
false,
useGpu_);
if (this->sharedBiases_) { if (this->sharedBiases_) {
outMat->addSharedBias(*(biases_->getW()), 1.0f); outMat->addSharedBias(*(bias), 1.0f);
} else { } else {
outMat->addBias(*(biases_->getW()), 1.0f); outMat->addBias(*(bias), 1.0f);
} }
} }
......
...@@ -14,6 +14,7 @@ limitations under the License. */ ...@@ -14,6 +14,7 @@ limitations under the License. */
#include "GruCompute.h" #include "GruCompute.h"
#include "hl_recurrent_apply.cuh" #include "hl_recurrent_apply.cuh"
#include "paddle/function/GruFunctor.h"
#include "paddle/utils/Util.h" #include "paddle/utils/Util.h"
namespace paddle { namespace paddle {
...@@ -25,13 +26,13 @@ void GruCompute::init(LayerConfig &config) { ...@@ -25,13 +26,13 @@ void GruCompute::init(LayerConfig &config) {
template <> template <>
void GruCompute::forward<0>(hl_gru_value value, int frameSize, int batchSize) { void GruCompute::forward<0>(hl_gru_value value, int frameSize, int batchSize) {
hl_cpu_gru_forward(hppl::forward::gru_resetOutput(), GruFunctor<DEVICE_TYPE_CPU, real>::compute(hppl::forward::gru_resetOutput(),
hppl::forward::gru_finalOutput(), hppl::forward::gru_finalOutput(),
value, value,
frameSize, frameSize,
batchSize, batchSize,
activeNode_, activeNode_,
activeGate_); activeGate_);
} }
template <> template <>
...@@ -39,14 +40,15 @@ void GruCompute::backward<0>(hl_gru_value value, ...@@ -39,14 +40,15 @@ void GruCompute::backward<0>(hl_gru_value value,
hl_gru_grad grad, hl_gru_grad grad,
int frameSize, int frameSize,
int batchSize) { int batchSize) {
hl_cpu_gru_backward(hppl::backward::gru_stateGrad(), GruGradFunctor<DEVICE_TYPE_CPU, real>::compute(
hppl::backward::gru_resetGrad(), hppl::backward::gru_stateGrad(),
value, hppl::backward::gru_resetGrad(),
grad, value,
frameSize, grad,
batchSize, frameSize,
activeNode_, batchSize,
activeGate_); activeNode_,
activeGate_);
} }
} // namespace paddle } // namespace paddle
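The CPU specializations of GruCompute now go through GruFunctor / GruGradFunctor from paddle/function/GruFunctor.h instead of calling hl_cpu_gru_forward / hl_cpu_gru_backward directly, so CPU and GPU paths can share one call shape selected by a device template parameter. Below is a simplified, hypothetical sketch of that device-dispatch pattern, demonstrated with an elementwise sigmoid gate rather than the full GRU kernel:

    // Illustrative pattern only; simplified names, not Paddle's GruFunctor.
    #include <cmath>
    #include <cstdio>

    enum DeviceType { DEVICE_TYPE_CPU, DEVICE_TYPE_GPU };

    // The primary template is only declared; each device supplies a
    // specialization, so Functor<DEVICE_TYPE_CPU, T>::compute(...) resolves
    // to the right kernel at compile time.
    template <DeviceType Device, class T>
    struct GateFunctor;

    template <class T>
    struct GateFunctor<DEVICE_TYPE_CPU, T> {
      static void compute(const T* x, T* y, int n) {
        for (int i = 0; i < n; ++i) {
          y[i] = static_cast<T>(1) / (static_cast<T>(1) + std::exp(-x[i]));
        }
      }
    };

    int main() {
      float x[3] = {-1.0f, 0.0f, 1.0f}, y[3];
      GateFunctor<DEVICE_TYPE_CPU, float>::compute(x, y, 3);
      std::printf("%.3f %.3f %.3f\n", y[0], y[1], y[2]);  // ~0.269 0.500 0.731
      return 0;
    }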
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "SwitchOrderLayer.h"
#include "paddle/utils/Stat.h"
namespace paddle {
REGISTER_LAYER(switch_order, SwitchOrderLayer);
bool SwitchOrderLayer::init(const LayerMap& layerMap,
const ParameterMap& parameterMap) {
/* Initialize the basic parent class */
Layer::init(layerMap, parameterMap);
auto& img_conf = config_.inputs(0).image_conf();
size_t inH =
img_conf.has_img_size_y() ? img_conf.img_size_y() : img_conf.img_size();
size_t inW = img_conf.img_size();
size_t inC = img_conf.channels();
inDims_ = TensorShape({0, inC, inH, inW});
outDims_ = TensorShape(4);
auto& reshape_conf = config_.reshape_conf();
for (int i = 0; i < reshape_conf.height_axis_size(); i++) {
heightAxis_.push_back(reshape_conf.height_axis(i));
}
for (int i = 0; i < reshape_conf.width_axis_size(); i++) {
widthAxis_.push_back(reshape_conf.width_axis(i));
}
createFunction(nchw2nhwc_, "NCHW2NHWC", FuncConfig());
createFunction(nhwc2nchw_, "NHWC2NCHW", FuncConfig());
return true;
}
void SwitchOrderLayer::setOutDims() {
outDims_.setDim(0, inDims_[0]);
outDims_.setDim(1, inDims_[2]);
outDims_.setDim(2, inDims_[3]);
outDims_.setDim(3, inDims_[1]);
reshapeHeight_ = 1;
for (size_t i = 0; i < heightAxis_.size(); i++) {
reshapeHeight_ *= outDims_[heightAxis_[i]];
}
output_.setFrameHeight(reshapeHeight_);
reshapeWidth_ = 1;
for (size_t i = 0; i < widthAxis_.size(); i++) {
reshapeWidth_ *= outDims_[widthAxis_[i]];
}
output_.setFrameWidth(reshapeWidth_);
}
void SwitchOrderLayer::setInDims() {
MatrixPtr input = inputLayers_[0]->getOutputValue();
size_t batchSize = input->getHeight();
inDims_.setDim(0, batchSize);
int h = inputLayers_[0]->getOutput().getFrameHeight();
if (h != 0) inDims_.setDim(2, h);
int w = inputLayers_[0]->getOutput().getFrameWidth();
if (w != 0) inDims_.setDim(3, w);
int totalCount = input->getElementCnt();
int channels = totalCount / (inDims_[0] * inDims_[2] * inDims_[3]);
if (channels != 0) inDims_.setDim(1, channels);
}
void SwitchOrderLayer::forward(PassType passType) {
Layer::forward(passType);
setInDims();
setOutDims();
resetOutput(outDims_[0], outDims_[1] * outDims_[2] * outDims_[3]);
if (heightAxis_.size() > 0) {
getOutputValue()->reshape(reshapeHeight_, reshapeWidth_);
getOutputGrad()->reshape(reshapeHeight_, reshapeWidth_);
}
// switch NCHW to NHWC
BufferArgs inputs;
BufferArgs outputs;
inputs.addArg(*getInputValue(0), inDims_);
outputs.addArg(*getOutputValue(), outDims_);
nchw2nhwc_[0]->calc(inputs, outputs);
forwardActivation();
}
void SwitchOrderLayer::backward(const UpdateCallback& callback) {
(void)callback;
backwardActivation();
// switch NHWC to NCHW
BufferArgs inputs;
BufferArgs outputs;
inputs.addArg(*getOutputGrad(), outDims_);
outputs.addArg(*getInputGrad(0), inDims_, ADD_TO);
nhwc2nchw_[0]->calc(inputs, outputs);
}
} // namespace paddle
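SwitchOrderLayer delegates the actual data movement to the registered "NCHW2NHWC" and "NHWC2NCHW" functions. The minimal stand-alone reference below spells out the forward permutation (illustrative index arithmetic, not the Paddle function):

    // NCHW -> NHWC: element (n, c, h, w) moves to (n, h, w, c).
    // Illustrative reference only.
    #include <cstdio>
    #include <vector>

    void nchw_to_nhwc(const std::vector<float>& in, std::vector<float>& out,
                      int n, int c, int h, int w) {
      out.resize(in.size());
      for (int ni = 0; ni < n; ++ni)
        for (int ci = 0; ci < c; ++ci)
          for (int hi = 0; hi < h; ++hi)
            for (int wi = 0; wi < w; ++wi)
              out[((ni * h + hi) * w + wi) * c + ci] =
                  in[((ni * c + ci) * h + hi) * w + wi];
    }

    int main() {
      // 1 sample, 2 channels, 2x2 spatial; channel 0 = {0,1,2,3}, channel 1 = {4,5,6,7}
      std::vector<float> nchw = {0, 1, 2, 3, 4, 5, 6, 7}, nhwc;
      nchw_to_nhwc(nchw, nhwc, 1, 2, 2, 2);
      for (float v : nhwc) std::printf("%g ", v);  // prints: 0 4 1 5 2 6 3 7
      std::printf("\n");
      return 0;
    }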
...@@ -12,9 +12,36 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,9 +12,36 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#define EIGEN_USE_GPU #pragma once
#include "paddle/operators/scatter_op.h"
namespace ops = paddle::operators; #include "Layer.h"
REGISTER_OP_GPU_KERNEL(scatter,
ops::ScatterOpKernel<paddle::platform::GPUPlace, float>); namespace paddle {
/**
 * \brief This layer switches the data layout from NCHW to NHWC in the
 * forward pass and back from NHWC to NCHW in the backward pass.
*/
class SwitchOrderLayer : public Layer {
public:
explicit SwitchOrderLayer(const LayerConfig& config) : Layer(config) {}
~SwitchOrderLayer() {}
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
void forward(PassType passType) override;
void backward(const UpdateCallback& callback = nullptr) override;
void setInDims();
void setOutDims();
protected:
std::vector<std::shared_ptr<FunctionBase>> nchw2nhwc_;
std::vector<std::shared_ptr<FunctionBase>> nhwc2nchw_;
TensorShape inDims_;
TensorShape outDims_;
std::vector<int> heightAxis_;
std::vector<int> widthAxis_;
size_t reshapeHeight_;
size_t reshapeWidth_;
};
} // namespace paddle
...@@ -2008,6 +2008,31 @@ TEST(Layer, CropLayer) { ...@@ -2008,6 +2008,31 @@ TEST(Layer, CropLayer) {
} }
} }
TEST(Layer, SwitchOrderLayer) {
TestConfig config;
// config input_0
config.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 0});
LayerInputConfig* input = config.layerConfig.add_inputs();
ImageConfig* img = input->mutable_image_conf();
img->set_channels(4);
img->set_img_size(16);
img->set_img_size_y(16);
ReshapeConfig* reshape = config.layerConfig.mutable_reshape_conf();
reshape->add_height_axis(0);
reshape->add_height_axis(1);
reshape->add_height_axis(2);
reshape->add_width_axis(3);
  // config switch_order layer
config.layerConfig.set_type("switch_order");
config.layerConfig.set_name("switchOrderLayer");
for (auto useGpu : {false, true}) {
testLayerGrad(config, "switch_order", 100, false, useGpu, true);
}
}
vector<real> randSampling(real range, int n) { vector<real> randSampling(real range, int n) {
CHECK_GE(range, n); CHECK_GE(range, n);
vector<real> num(range); vector<real> num(range);
......
...@@ -84,6 +84,7 @@ LAPACK_ROUTINE_EACH(DYNAMIC_LOAD_LAPACK_WRAP) ...@@ -84,6 +84,7 @@ LAPACK_ROUTINE_EACH(DYNAMIC_LOAD_LAPACK_WRAP)
namespace paddle { namespace paddle {
#ifndef PADDLE_USE_EIGEN_FOR_BLAS
template <> template <>
void gemm<float>(const CBLAS_TRANSPOSE transA, void gemm<float>(const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const CBLAS_TRANSPOSE transB,
...@@ -143,6 +144,7 @@ void gemm<double>(const CBLAS_TRANSPOSE transA, ...@@ -143,6 +144,7 @@ void gemm<double>(const CBLAS_TRANSPOSE transA,
C, C,
ldc); ldc);
} }
#endif
template <> template <>
int getrf<float>(const CBLAS_ORDER order, int getrf<float>(const CBLAS_ORDER order,
...@@ -182,6 +184,7 @@ int getri<double>(const CBLAS_ORDER order, ...@@ -182,6 +184,7 @@ int getri<double>(const CBLAS_ORDER order,
return dynload::PADDLE_DGETRI(order, N, A, lda, ipiv); return dynload::PADDLE_DGETRI(order, N, A, lda, ipiv);
} }
#ifndef PADDLE_USE_EIGEN_FOR_BLAS
template <> template <>
void axpy<float>(const int n, const float alpha, const float* x, float* y) { void axpy<float>(const int n, const float alpha, const float* x, float* y) {
cblas_saxpy(n, alpha, x, 1, y, 1); cblas_saxpy(n, alpha, x, 1, y, 1);
...@@ -201,6 +204,7 @@ template <> ...@@ -201,6 +204,7 @@ template <>
double dotProduct<double>(const int n, const double* x, const double* y) { double dotProduct<double>(const int n, const double* x, const double* y) {
return cblas_ddot(n, x, 1, y, 1); return cblas_ddot(n, x, 1, y, 1);
} }
#endif
#if defined(PADDLE_USE_MKL) || defined(PADDLE_USE_MKLML) #if defined(PADDLE_USE_MKL) || defined(PADDLE_USE_MKLML)
......
...@@ -40,7 +40,14 @@ extern "C" { ...@@ -40,7 +40,14 @@ extern "C" {
#ifndef LAPACK_FOUND #ifndef LAPACK_FOUND
extern "C" { extern "C" {
#ifndef PADDLE_USE_EIGEN_FOR_BLAS
#include <cblas.h> #include <cblas.h>
#else
typedef enum CBLAS_ORDER {
CblasRowMajor = 101,
CblasColMajor = 102
} CBLAS_ORDER;
#endif
int LAPACKE_sgetrf( int LAPACKE_sgetrf(
int matrix_layout, int m, int n, float* a, int lda, int* ipiv); int matrix_layout, int m, int n, float* a, int lda, int* ipiv);
int LAPACKE_dgetrf( int LAPACKE_dgetrf(
...@@ -56,6 +63,7 @@ int LAPACKE_dgetri( ...@@ -56,6 +63,7 @@ int LAPACKE_dgetri(
namespace paddle { namespace paddle {
#ifndef PADDLE_USE_EIGEN_FOR_BLAS
template <class T> template <class T>
void gemm(const CBLAS_TRANSPOSE transA, void gemm(const CBLAS_TRANSPOSE transA,
const CBLAS_TRANSPOSE transB, const CBLAS_TRANSPOSE transB,
...@@ -70,6 +78,7 @@ void gemm(const CBLAS_TRANSPOSE transA, ...@@ -70,6 +78,7 @@ void gemm(const CBLAS_TRANSPOSE transA,
const T beta, const T beta,
T* C, T* C,
const int ldc); const int ldc);
#endif
template <class T> template <class T>
int getrf(const CBLAS_ORDER Order, int getrf(const CBLAS_ORDER Order,
...@@ -84,10 +93,21 @@ int getri( ...@@ -84,10 +93,21 @@ int getri(
const CBLAS_ORDER Order, const int N, T* A, const int lda, const int* ipiv); const CBLAS_ORDER Order, const int N, T* A, const int lda, const int* ipiv);
template <class T> template <class T>
void axpy(const int n, const T alpha, const T* x, T* y); void axpy(const int n, const T alpha, const T* x, T* y) {
/// y = y + alpha * x
for (int i = 0; i < n; i++) {
y[i] = y[i] + alpha * x[i];
}
}
template <class T> template <class T>
T dotProduct(const int n, const T* x, const T* y); T dotProduct(const int n, const T* x, const T* y) {
T result = static_cast<T>(0);
for (int i = 0; i < n; i++) {
result += x[i] * y[i];
}
return result;
}
template <class T> template <class T>
void vExp(const int n, const T* a, T* r); void vExp(const int n, const T* a, T* r);
......
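When PADDLE_USE_EIGEN_FOR_BLAS is defined, axpy and dotProduct use the portable loop definitions added above rather than cblas_saxpy / cblas_sdot. A quick numeric check of those semantics, with the same loop bodies copied into a stand-alone program:

    // y = y + alpha * x, then dot(x, y). Stand-alone check of the fallback math.
    #include <cstdio>

    template <class T>
    void axpy(const int n, const T alpha, const T* x, T* y) {
      for (int i = 0; i < n; i++) y[i] = y[i] + alpha * x[i];
    }

    template <class T>
    T dotProduct(const int n, const T* x, const T* y) {
      T result = static_cast<T>(0);
      for (int i = 0; i < n; i++) result += x[i] * y[i];
      return result;
    }

    int main() {
      float x[3] = {1, 2, 3}, y[3] = {10, 10, 10};
      axpy(3, 2.0f, x, y);                             // y becomes {12, 14, 16}
      std::printf("dot = %g\n", dotProduct(3, x, y));  // 1*12 + 2*14 + 3*16 = 88
      return 0;
    }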
...@@ -28,6 +28,7 @@ limitations under the License. */ ...@@ -28,6 +28,7 @@ limitations under the License. */
#include "hl_top_k.h" #include "hl_top_k.h"
#include "paddle/utils/Logging.h" #include "paddle/utils/Logging.h"
#include "paddle/function/GemmFunctor.h"
#include "paddle/utils/ThreadLocal.h" #include "paddle/utils/ThreadLocal.h"
#include "SIMDFunctions.h" #include "SIMDFunctions.h"
...@@ -2773,24 +2774,24 @@ void CpuMatrix::mul(CpuMatrix* a, CpuMatrix* b, real scaleAB, real scaleT) { ...@@ -2773,24 +2774,24 @@ void CpuMatrix::mul(CpuMatrix* a, CpuMatrix* b, real scaleAB, real scaleT) {
CHECK(!isTransposed()) << "Not supported"; CHECK(!isTransposed()) << "Not supported";
size_t a_col, b_col, a_row, b_row; size_t a_col, b_col, a_row, b_row;
CBLAS_TRANSPOSE a_trans, b_trans; bool a_trans, b_trans;
if (!a->isTransposed()) { if (!a->isTransposed()) {
a_col = a->getWidth(); a_col = a->getWidth();
a_row = a->getHeight(); a_row = a->getHeight();
a_trans = CblasNoTrans; a_trans = false;
} else { } else {
a_col = a->getHeight(); a_col = a->getHeight();
a_row = a->getWidth(); a_row = a->getWidth();
a_trans = CblasTrans; a_trans = true;
} }
if (!b->isTransposed()) { if (!b->isTransposed()) {
b_col = b->getWidth(); b_col = b->getWidth();
b_row = b->getHeight(); b_row = b->getHeight();
b_trans = CblasNoTrans; b_trans = false;
} else { } else {
b_col = b->getHeight(); b_col = b->getHeight();
b_row = b->getWidth(); b_row = b->getWidth();
b_trans = CblasTrans; b_trans = true;
} }
CHECK_EQ(a_col, b_row); CHECK_EQ(a_col, b_row);
...@@ -2807,7 +2808,7 @@ void CpuMatrix::mul(CpuMatrix* a, CpuMatrix* b, real scaleAB, real scaleT) { ...@@ -2807,7 +2808,7 @@ void CpuMatrix::mul(CpuMatrix* a, CpuMatrix* b, real scaleAB, real scaleT) {
int lda = a->getStride(); int lda = a->getStride();
int ldb = b->getStride(); int ldb = b->getStride();
int ldc = getStride(); int ldc = getStride();
gemm<real>( BlasGemm<DEVICE_TYPE_CPU, real>::compute(
a_trans, b_trans, M, N, K, scaleAB, A, lda, B, ldb, scaleT, C, ldc); a_trans, b_trans, M, N, K, scaleAB, A, lda, B, ldb, scaleT, C, ldc);
} }
......
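CpuMatrix::mul now passes plain bool transpose flags to BlasGemm<DEVICE_TYPE_CPU, real>::compute instead of CBLAS enums to gemm<real>. The call follows the usual BLAS convention, C = scaleT * C + scaleAB * op(A) * op(B) on row-major buffers with lda/ldb/ldc as row strides; the naive reference below pins down those semantics (illustrative only, not the GemmFunctor implementation):

    // Naive row-major GEMM with the same argument order as the call site above.
    #include <cstdio>

    void naive_gemm(bool transA, bool transB, int M, int N, int K,
                    float scaleAB, const float* A, int lda,
                    const float* B, int ldb,
                    float scaleT, float* C, int ldc) {
      for (int m = 0; m < M; ++m) {
        for (int n = 0; n < N; ++n) {
          float acc = 0.f;
          for (int k = 0; k < K; ++k) {
            float a = transA ? A[k * lda + m] : A[m * lda + k];  // op(A)[m][k]
            float b = transB ? B[n * ldb + k] : B[k * ldb + n];  // op(B)[k][n]
            acc += a * b;
          }
          C[m * ldc + n] = scaleT * C[m * ldc + n] + scaleAB * acc;
        }
      }
    }

    int main() {
      // C = A * B with A = [[1,2],[3,4]], B = [[5,6],[7,8]]
      float A[4] = {1, 2, 3, 4}, B[4] = {5, 6, 7, 8}, C[4] = {0, 0, 0, 0};
      naive_gemm(false, false, 2, 2, 2, 1.0f, A, 2, B, 2, 0.0f, C, 2);
      std::printf("%g %g\n%g %g\n", C[0], C[1], C[2], C[3]);  // 19 22 / 43 50
      return 0;
    }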
...@@ -1616,6 +1616,10 @@ public: ...@@ -1616,6 +1616,10 @@ public:
}; };
class CpuMatrix : public Matrix { class CpuMatrix : public Matrix {
private:
MatrixPtr sftmaxSum_;
MatrixPtr sftmaxDot_;
public: public:
CpuMatrix(size_t height, size_t width, bool trans = false); CpuMatrix(size_t height, size_t width, bool trans = false);
CpuMatrix(real* data, size_t height, size_t width, bool trans = false) CpuMatrix(real* data, size_t height, size_t width, bool trans = false)
......
...@@ -57,7 +57,6 @@ class AddOpGrad : public framework::OperatorWithKernel { ...@@ -57,7 +57,6 @@ class AddOpGrad : public framework::OperatorWithKernel {
} // namespace paddle } // namespace paddle
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP(add_two, ops::AddOp, ops::AddOpMaker, add_two_grad, ops::AddOpGrad); REGISTER_OP(add, ops::AddOp, ops::AddOpMaker, add_grad, ops::AddOpGrad);
REGISTER_OP_CPU_KERNEL(add_two, REGISTER_OP_CPU_KERNEL(add, ops::AddKernel<paddle::platform::CPUPlace, float>);
ops::AddKernel<paddle::platform::CPUPlace, float>);
...@@ -12,10 +12,7 @@ ...@@ -12,10 +12,7 @@
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#define EIGEN_USE_GPU
#include "paddle/framework/op_registry.h"
#include "paddle/operators/add_op.h" #include "paddle/operators/add_op.h"
namespace ops = paddle::operators; namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(add_two, REGISTER_OP_GPU_KERNEL(add, ops::AddKernel<paddle::platform::GPUPlace, float>);
ops::AddKernel<paddle::platform::GPUPlace, float>);