diff --git a/.travis.yml b/.travis.yml index d8f3c0be9bf1b2641ee559bcdee73bfd3124ed60..83f0f45ad10772184b80f000e5abb9d4dc6a36b1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,7 +4,6 @@ cache: - $HOME/.ccache - $HOME/.cache/pip - $TRAVIS_BUILD_DIR/build/third_party - - $TRAVIS_BUILD_DIR/build_android/third_party - $TRAVIS_BUILD_DIR/build_ios/third_party sudo: required dist: trusty @@ -13,7 +12,6 @@ os: env: - JOB=build_doc - JOB=check_style - - JOB=build_android matrix: include: - env: JOB=build_ios @@ -30,7 +28,6 @@ addons: - python - python-pip - python2.7-dev - - python-numpy - python-wheel - libboost-dev - curl diff --git a/CMakeLists.txt b/CMakeLists.txt index ba1febe5e4f2c8a4a21da6a0069652724a94e438..52f9b641095f5fd7b1f30a7163acdb47fdb3ca07 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -65,8 +65,8 @@ if(NOT CMAKE_BUILD_TYPE) endif() if(ANDROID OR IOS) - if(ANDROID AND ${CMAKE_SYSTEM_VERSION} VERSION_LESS "21") - message(FATAL_ERROR "Unsupport standalone toolchains with Android API level lower than 21") + if(ANDROID AND ${CMAKE_SYSTEM_VERSION} VERSION_LESS "16") + message(FATAL_ERROR "Unsupported standalone toolchains with Android API level lower than 16") endif() set(WITH_GPU OFF CACHE STRING diff --git a/Dockerfile.android b/Dockerfile.android index c0fa58c384f9ebcae60477ffce49ea4ffa929db9..452aa1574550627c2cce6375e12e154a9763254d 100644 --- a/Dockerfile.android +++ b/Dockerfile.android @@ -4,9 +4,15 @@ MAINTAINER PaddlePaddle Authors ARG UBUNTU_MIRROR RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ubuntu.com/ubuntu#${UBUNTU_MIRROR}#g' /etc/apt/sources.list; fi' +# ENV variables +ARG ANDROID_ABI + +ENV ANDROID_ABI=${ANDROID_ABI:-"armeabi-v7a"} + ENV HOME=/root \ ANDROID_NDK_HOME=/opt/android-ndk-linux \ - ANDROID_STANDALONE_TOOLCHAIN=/opt/android-toolchain-gcc + ANDROID_ARM_STANDALONE_TOOLCHAIN=/opt/arm-toolchain \ + ANDROID_ARM64_STANDALONE_TOOLCHAIN=/opt/arm64-toolchain RUN apt-get update && \ apt-get install -y \ @@ -15,12 +21,11 @@ RUN apt-get update && \ apt-get clean -y # Install Go and glide -RUN wget -O go.tgz https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz && \ - tar -C /usr/local -xzf go.tgz && \ +RUN wget -qO- https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz | \ + tar -xz -C /usr/local && \ mkdir /root/gopath && \ mkdir /root/gopath/bin && \ - mkdir /root/gopath/src && \ - rm go.tgz + mkdir /root/gopath/src ENV GOROOT=/usr/local/go GOPATH=/root/gopath # should not be in the same line with GOROOT definition, otherwise docker build could not find GOROOT.
ENV PATH=${PATH}:${GOROOT}/bin:${GOPATH}/bin @@ -42,7 +47,8 @@ RUN mkdir /opt/android-ndk-tmp && \ wget -q https://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip && \ unzip -q android-ndk-r14b-linux-x86_64.zip && \ mv android-ndk-r14b ${ANDROID_NDK_HOME} && \ - ${ANDROID_NDK_HOME}/build/tools/make-standalone-toolchain.sh --arch=arm --platform=android-21 --install-dir=${ANDROID_STANDALONE_TOOLCHAIN} && \ + ${ANDROID_NDK_HOME}/build/tools/make-standalone-toolchain.sh --arch=arm --platform=android-23 --install-dir=${ANDROID_ARM_STANDALONE_TOOLCHAIN} && \ + ${ANDROID_NDK_HOME}/build/tools/make-standalone-toolchain.sh --arch=arm64 --platform=android-23 --install-dir=${ANDROID_ARM64_STANDALONE_TOOLCHAIN} && \ rm -rf /opt/android-ndk-tmp && \ rm -rf ${ANDROID_NDK_HOME} diff --git a/cmake/cross_compiling/android.cmake b/cmake/cross_compiling/android.cmake index 5e3e437a8da9624df35a5c754fe77be73f20361d..84219cfa5587f5b765b2e8f35180797d7053169f 100644 --- a/cmake/cross_compiling/android.cmake +++ b/cmake/cross_compiling/android.cmake @@ -20,6 +20,7 @@ # The supported variables are listed belows: # # ANDROID_STANDALONE_TOOLCHAIN +# ANDROID_TOOLCHAIN # ANDROID_ABI # ANDROID_NATIVE_API_LEVEL # ANDROID_ARM_MODE @@ -57,6 +58,10 @@ IF(NOT DEFINED CMAKE_SYSTEM_VERSION AND ANDROID_NATIVE_API_LEVEL) ENDIF() ENDIF() +IF(NOT DEFINED ANDROID_TOOLCHAIN) + SET(ANDROID_TOOLCHAIN clang) +ENDIF() + IF(NOT DEFINED ANDROID_ABI) SET(ANDROID_ABI "armeabi-v7a") ENDIF() @@ -82,6 +87,7 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0") "${CMAKE_VERSION}), when cross-compiling for Android.") IF(ANDROID_STANDALONE_TOOLCHAIN) + # Use standalone toolchain SET(CMAKE_SYSROOT "${ANDROID_STANDALONE_TOOLCHAIN}/sysroot") IF(NOT CMAKE_SYSTEM_VERSION) @@ -96,26 +102,44 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0") ENDIF() # Toolchain - SET(ANDROID_TOOLCHAIN "gcc") SET(ANDROID_TOOLCHAIN_ROOT ${ANDROID_STANDALONE_TOOLCHAIN}) - IF(ANDROID_ABI MATCHES "^armeabi(-v7a)?$") - SET(ANDROID_TOOLCHAIN_NAME arm-linux-androideabi) - IF(ANDROID_ABI STREQUAL "armeabi") - SET(CMAKE_SYSTEM_PROCESSOR armv5te) - ELSEIF(ANDROID_ABI STREQUAL "armeabi-v7a") - SET(CMAKE_SYSTEM_PROCESSOR armv7-a) - ENDIF() - ENDIF() - IF(ANDROID_ABI STREQUAL "arm64-v8a") - SET(ANDROID_TOOLCHAIN_NAME aarch64-linux-android) - SET(CMAKE_SYSTEM_PROCESSOR aarch64) + ELSE(ANDROID_NDK) + # TODO: use android ndk + ENDIF() + + IF(ANDROID_ABI MATCHES "^armeabi(-v7a)?$") + SET(ANDROID_TOOLCHAIN_NAME arm-linux-androideabi) + IF(ANDROID_ABI STREQUAL "armeabi") + SET(CMAKE_SYSTEM_PROCESSOR armv5te) + SET(ANDROID_CLANG_TRIPLE armv5te-none-linux-androideabi) + ELSEIF(ANDROID_ABI STREQUAL "armeabi-v7a") + SET(CMAKE_SYSTEM_PROCESSOR armv7-a) + SET(ANDROID_CLANG_TRIPLE armv7-none-linux-androideabi) ENDIF() - SET(ANDROID_TOOLCHAIN_PREFIX "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_NAME}-") + ELSEIF(ANDROID_ABI STREQUAL "arm64-v8a") + SET(ANDROID_TOOLCHAIN_NAME aarch64-linux-android) + SET(CMAKE_SYSTEM_PROCESSOR aarch64) + SET(ANDROID_CLANG_TRIPLE aarch64-none-linux-android) + ELSE() + MESSAGE(FATAL_ERROR "Invalid Android ABI: ${ANDROID_ABI}.") + ENDIF() + SET(ANDROID_TOOLCHAIN_PREFIX "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_NAME}-") + + IF(ANDROID_TOOLCHAIN STREQUAL clang) + SET(ANDROID_C_COMPILER_NAME clang) + SET(ANDROID_CXX_COMPILER_NAME clang++) + SET(CMAKE_C_COMPILER_TARGET ${ANDROID_CLANG_TRIPLE}) + SET(CMAKE_CXX_COMPILER_TARGET ${ANDROID_CLANG_TRIPLE}) + ELSEIF(ANDROID_TOOLCHAIN STREQUAL gcc) + SET(ANDROID_C_COMPILER_NAME gcc) + 
SET(ANDROID_CXX_COMPILER_NAME g++) + ELSE() + MESSAGE(FATAL_ERROR "Invalid Android toolchain: ${ANDROID_TOOLCHAIN}") ENDIF() # C compiler IF(NOT CMAKE_C_COMPILER) - SET(ANDROID_C_COMPILER "${ANDROID_TOOLCHAIN_PREFIX}gcc") + SET(ANDROID_C_COMPILER "${ANDROID_TOOLCHAIN_PREFIX}${ANDROID_C_COMPILER_NAME}") ELSE() GET_FILENAME_COMPONENT(ANDROID_C_COMPILER ${CMAKE_C_COMPILER} PROGRAM) ENDIF() @@ -125,7 +149,7 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0") # CXX compiler IF(NOT CMAKE_CXX_COMPILER) - SET(ANDROID_CXX_COMPILER "${ANDROID_TOOLCHAIN_PREFIX}g++") + SET(ANDROID_CXX_COMPILER "${ANDROID_TOOLCHAIN_PREFIX}${ANDROID_CXX_COMPILER_NAME}") ELSE() GET_FILENAME_COMPONENT(ANDROID_CXX_COMPILER ${CMAKE_CXX_COMPILER} PROGRAM) ENDIF() @@ -137,7 +161,7 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0") SET(CMAKE_CXX_COMPILER ${ANDROID_CXX_COMPILER} CACHE PATH "CXX compiler" FORCE) # Toolchain and ABI specific flags. - SET(ANDROID_COMPILER_FLAGS "-ffunction-sections -fdata-sections -finline-limit=64") + SET(ANDROID_COMPILER_FLAGS "-ffunction-sections -fdata-sections") SET(ANDROID_LINKER_FLAGS "-Wl,--gc-sections") IF(ANDROID_ABI STREQUAL "armeabi") @@ -145,8 +169,7 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0") -march=armv5te -mtune=xscale -msoft-float) - ENDIF() - IF(ANDROID_ABI STREQUAL "armeabi-v7a") + ELSEIF(ANDROID_ABI STREQUAL "armeabi-v7a") LIST(APPEND ANDROID_COMPILER_FLAGS -march=armv7-a -mfloat-abi=softfp) @@ -156,6 +179,8 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0") LIST(APPEND ANDROID_COMPILER_FLAGS -mfpu=vfpv3-d16) ENDIF() LIST(APPEND ANDROID_LINKER_FLAGS -Wl,--fix-cortex-a8) + ELSEIF(ANDROID_ABI STREQUAL "arm64-v8a") + LIST(APPEND ANDROID_COMPILER_FLAGS -march=armv8-a) ENDIF() IF(ANDROID_ABI MATCHES "^armeabi(-v7a)?$") @@ -164,10 +189,18 @@ IF("${CMAKE_VERSION}" VERSION_LESS "3.7.0") ELSE() LIST(APPEND ANDROID_COMPILER_FLAGS -mthumb) ENDIF() + IF(ANDROID_TOOLCHAIN STREQUAL clang) + # Disable integrated-as for better compatibility. + LIST(APPEND ANDROID_COMPILER_FLAGS -fno-integrated-as) + ENDIF() ENDIF() - IF(ANDROID_ABI STREQUAL "arm64-v8a") - LIST(APPEND ANDROID_COMPILER_FLAGS -march=armv8-a) + IF(ANDROID_TOOLCHAIN STREQUAL clang) + # CMake automatically forwards all compiler flags to the linker, + # and clang doesn't like having -Wa flags being used for linking. + # To prevent CMake from doing this would require meddling with + # the CMAKE_<LANG>_COMPILE_OBJECT rules, which would get quite messy. + LIST(APPEND ANDROID_LINKER_FLAGS -Qunused-arguments) ENDIF() STRING(REPLACE ";" " " ANDROID_COMPILER_FLAGS "${ANDROID_COMPILER_FLAGS}") diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake index 563633c517fe717c67644252729643fef4f4c217..1cecf443ae356f7fe41ad2fe839238564c5eb0c4 100644 --- a/cmake/external/openblas.cmake +++ b/cmake/external/openblas.cmake @@ -12,6 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +IF(USE_EIGEN_FOR_BLAS) + return() +ENDIF(USE_EIGEN_FOR_BLAS) + INCLUDE(cblas) IF(NOT ${CBLAS_FOUND}) diff --git a/doc/api/v2/config/layer.rst b/doc/api/v2/config/layer.rst index 1329b77bb44f52c66a703740715b890c47234e72..c94627a72806fa2eca77c79da24f7f3ca18f0259 100644 --- a/doc/api/v2/config/layer.rst +++ b/doc/api/v2/config/layer.rst @@ -434,9 +434,9 @@ lambda_cost .. autoclass:: paddle.v2.layer.lambda_cost :noindex: -mse_cost --------- +square_error_cost +----------------- -.. autoclass:: paddle.v2.layer.mse_cost +..
autoclass:: paddle.v2.layer.square_error_cost :noindex: rank_cost diff --git a/doc/design/functions_operators_layers.md b/doc/design/functions_operators_layers.md index 7a2e8fd0ace2e3f4462b15215de22c31e944b7cb..d23ba56b5773a36d448a99e4abdebc1475ed789c 100644 --- a/doc/design/functions_operators_layers.md +++ b/doc/design/functions_operators_layers.md @@ -86,12 +86,13 @@ def layer.fc(X): We'd like to have Python bindings to operators in package `paddle.operator`, and Python compositions of operators in package `paddle.layer`. So we have the following concepts in above illustrative example: -``` + | C++ functions/functors | mul | add | | | +|------------------------|--------------|--------------|-------------|----------| | C++ operator class | mulOp | addOp | FCOp | | | Python binding | operator.mul | operator.add | operator.fc | | | Python function | | | | layer.fc | -``` + This is how we differentiate layer and operators in PaddlePaddle: diff --git a/doc/design/graph.md b/doc/design/graph.md new file mode 100644 index 0000000000000000000000000000000000000000..51b7f87638f8ddff752328a562fe0dd0fe56cfd1 --- /dev/null +++ b/doc/design/graph.md @@ -0,0 +1,70 @@ +# Design Doc: Computations as a Graph + +A primary goal of the refactorization of PaddlePaddle is a more flexible representation of deep learning computation, in particular, a graph of operators and variables, instead of sequences of layers as before. + +This document explains the construction of a graph in three steps: + +- construct the forward part +- construct the backward part +- construct the optimization part + +## The Construction of a Graph + +Let us take the problem of image classification as a simple example. The application program that trains the model looks like: + +```python +x = layer.data("images") +l = layer.data("label") +y = layer.fc(x) +cost = layer.mse(y, l) +optimize(cost) +train(cost, reader=mnist.train()) +``` + +### Forward Part + +The first four lines of the above program build the forward part of the graph. + +![](images/graph_construction_example_forward_only.png) + +In particular, the first line `x = layer.data("images")` creates the variable x and a Feed operator that copies a column from the minibatch to x. `y = layer.fc(x)` creates not only the FC operator and output variable y, but also two parameters, W and b, and the initialization operators. + +Initialization operators are a kind of "run-once" operator -- the `Run` method increments a class data member counter so as to run at most once. By doing so, a parameter wouldn't be initialized repeatedly, say, in every minibatch. + +In this example, all operators are created as `OpDesc` protobuf messages, and all variables are `VarDesc`. These protobuf messages are saved in a `BlockDesc` protobuf message. + +### Backward Part + +The fifth line `optimize(cost)` calls two functions, `ConstructBackwardGraph` and `ConstructOptimizationGraph`. + +`ConstructBackwardGraph` traverses the forward graph in the `BlockDesc` protobuf message and builds the backward part, as sketched in the code below. + +![](images/graph_construction_example_forward_backward.png) + +According to the chain rule of gradient computation, `ConstructBackwardGraph` would + +1. create a gradient operator G for each operator F, +1. make all inputs, outputs, and outputs' gradients of F inputs of G, +1. create gradients for all inputs of F, except for those that don't have gradients, like x and l, and +1. make all these gradients outputs of G.
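+A minimal C++ sketch of these four steps follows, under simplifying assumptions: the `OpDesc`/`BlockDesc` structs and the `GradOpType`/`NoGradient` helpers below are illustrative stand-ins for this document, not the actual PaddlePaddle API.
+
+```cpp
+#include <string>
+#include <vector>
+
+// Simplified stand-ins for the OpDesc/BlockDesc protobuf messages (assumption).
+struct OpDesc {
+  std::string type;
+  std::vector<std::string> inputs;
+  std::vector<std::string> outputs;
+};
+struct BlockDesc {
+  std::vector<OpDesc> ops;
+};
+
+// Hypothetical helpers: map a forward op type to its gradient op type, and
+// test whether a variable needs no gradient (like x and l in the example).
+std::string GradOpType(const std::string& forward_type) {
+  return forward_type + "_grad";
+}
+bool NoGradient(const std::string& var) { return var == "x" || var == "l"; }
+
+void ConstructBackwardGraph(BlockDesc* block) {
+  const size_t num_forward_ops = block->ops.size();
+  // Traverse the forward operators in reverse order.
+  for (size_t i = num_forward_ops; i-- > 0;) {
+    const OpDesc f = block->ops[i];  // copy: push_back below may reallocate
+    OpDesc g;
+    g.type = GradOpType(f.type);  // step 1: one gradient operator G per F
+    // step 2: all inputs, outputs, and outputs' gradients of F become inputs of G
+    g.inputs = f.inputs;
+    g.inputs.insert(g.inputs.end(), f.outputs.begin(), f.outputs.end());
+    for (const auto& out : f.outputs) g.inputs.push_back("d " + out);
+    // steps 3 and 4: create gradients of F's inputs and make them outputs of G
+    for (const auto& in : f.inputs) {
+      if (!NoGradient(in)) g.outputs.push_back("d " + in);
+    }
+    block->ops.push_back(g);
+  }
+}
+```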
+ +### Optimization Part + +For each parameter, like W and b created by `layer.fc`, marked as double circles in the above graphs, `ConstructOptimizationGraph` creates an optimization operator to apply its gradient. This results in the complete graph: + +![](images/graph_construction_example_all.png) + +## Block and Graph + +The words block and graph are interchangeable in the design of PaddlePaddle. A [Block](https://github.com/PaddlePaddle/Paddle/pull/3708) is a metaphor for the code and local variables in a pair of curly braces in programming languages, where operators are like statements or instructions. A graph of operators and variables is a representation of the block. + +A Block keeps operators in an array `BlockDesc::ops` + +```protobuf +message BlockDesc { + repeated OpDesc ops = 1; + repeated VarDesc vars = 2; +} +``` + +in the order in which they appear in user programs, like the Python program at the beginning of this article. We can imagine that in `ops`, we have some forward operators, followed by some gradient operators, and then some optimization operators. diff --git a/doc/design/images/graph_construction_example.bash b/doc/design/images/graph_construction_example.bash new file mode 100755 index 0000000000000000000000000000000000000000..35e6997abd17588e17a82d448918fc1b3bd7220e --- /dev/null +++ b/doc/design/images/graph_construction_example.bash @@ -0,0 +1,11 @@ +cat ./graph_construction_example.dot | \ + sed 's/color=red/color=red, style=invis/g' | \ + sed 's/color=green/color=green, style=invis/g' | \ + dot -Tpng > graph_construction_example_forward_only.png + +cat ./graph_construction_example.dot | \ + sed 's/color=green/color=green, style=invis/g' | \ + dot -Tpng > graph_construction_example_forward_backward.png + +cat ./graph_construction_example.dot | \ + dot -Tpng > graph_construction_example_all.png diff --git a/doc/design/images/graph_construction_example.dot b/doc/design/images/graph_construction_example.dot new file mode 100644 index 0000000000000000000000000000000000000000..8d1b673abf6b78c851676fa379dc850c4818f0e5 --- /dev/null +++ b/doc/design/images/graph_construction_example.dot @@ -0,0 +1,69 @@ +digraph ImageClassificationGraph { + ///////// The forward part ///////// + FeedX [label="Feed", color=blue, shape=box]; + FeedY [label="Feed", color=blue, shape=box]; + InitW [label="Init", color=blue, shape=diamond]; + Initb [label="Init", color=blue, shape=diamond]; + FC [label="FC", color=blue, shape=box]; + MSE [label="MSE", color=blue, shape=box]; + + x [label="x", color=blue, shape=oval]; + l [label="l", color=blue, shape=oval]; + y [label="y", color=blue, shape=oval]; + W [label="W", color=blue, shape=doublecircle]; + b [label="b", color=blue, shape=doublecircle]; + cost [label="cost", color=blue, shape=oval]; + + FeedX -> x -> FC -> y -> MSE -> cost [color=blue]; + FeedY -> l [color=blue]; + InitW -> W [color=blue]; + Initb -> b [color=blue]; + W -> FC [color=blue]; + b -> FC [color=blue]; + l -> MSE [color=blue]; + + ////////// The backward part ///////// + MSE_Grad [label="MSE_grad", color=red, shape=box]; + FC_Grad [label="FC_grad", color=red, shape=box]; + + d_cost [label="d cost", color=red, shape=oval]; + d_y [label="d y", color=red, shape=oval]; + d_b [label="d b", color=red, shape=oval]; + d_W [label="d W", color=red, shape=oval]; + + cost -> MSE_Grad [color=red]; + d_cost -> MSE_Grad [color=red]; + x -> MSE_Grad [color=red]; + l -> MSE_Grad [color=red]; + y -> MSE_Grad -> d_y [color=red]; + + x -> FC_Grad [color=red]; + y -> FC_Grad [color=red]; + d_y ->
FC_Grad [color=red]; + W -> FC_Grad -> d_W [color=red]; + b -> FC_Grad -> d_b [color=red]; + + ////////// The optimization part ////////// + + OPT_W [label="SGD", color=green, shape=box]; + OPT_b [label="SGD", color=green, shape=box]; + + W -> OPT_W [color=green]; + b -> OPT_b [color=green]; + d_W -> OPT_W -> W [color=green]; + d_b -> OPT_b -> b [color=green]; + + ////////// Groupings ////////// + + subgraph clusterMSE { + style=invis; + MSE; + MSE_Grad; + } + + subgraph clusterFC { + style=invis; + FC; + FC_Grad; + } +} diff --git a/doc/design/images/graph_construction_example_all.png b/doc/design/images/graph_construction_example_all.png new file mode 100644 index 0000000000000000000000000000000000000000..181187503472d15779b87284105841168b3945c4 Binary files /dev/null and b/doc/design/images/graph_construction_example_all.png differ diff --git a/doc/design/images/graph_construction_example_forward_backward.png b/doc/design/images/graph_construction_example_forward_backward.png new file mode 100644 index 0000000000000000000000000000000000000000..3049a9315fd616464dec54e33064cb75598ca536 Binary files /dev/null and b/doc/design/images/graph_construction_example_forward_backward.png differ diff --git a/doc/design/images/graph_construction_example_forward_only.png b/doc/design/images/graph_construction_example_forward_only.png new file mode 100644 index 0000000000000000000000000000000000000000..25d19088cbf0b5f68cf734f2ff21eba8af4a2860 Binary files /dev/null and b/doc/design/images/graph_construction_example_forward_only.png differ diff --git a/doc/design/ops/dist_train.md b/doc/design/ops/dist_train.md new file mode 100644 index 0000000000000000000000000000000000000000..fa3c5d7990213cf2b0d236e66e592dd2699da876 --- /dev/null +++ b/doc/design/ops/dist_train.md @@ -0,0 +1,106 @@ +# Design Doc: Operation Graph Based Parameter Server + +## Abstract + +We propose an approach to implement the parameter server. In this +approach, there is no fundamental difference between the trainer and +the parameter server: they both run subgraphs, but subgraphs of +different purposes. + +## Background + +The previous implementations of the parameter server do not run a +subgraph. Parameter initialization, optimizer computation, network +communication and checkpointing are implemented twice on both the +trainer and the parameter server. + +It would be great if we could write code once and use it on both the +trainer and the parameter server: this reduces code duplication and +improves extensibility. Given that, after the current refactoring, we +represent everything as a computing graph on the +trainer, representing everything as a computing graph on the parameter +server becomes a natural extension. + +## Design + +### Graph Converter + +The *graph converter* converts the user-defined operation (OP) graph +into subgraphs to be scheduled on different nodes with the following +steps: + +1. OP placement: the OPs will be placed on different nodes according + to a heuristic that minimizes the estimated total computation + time. Currently we will use a simple heuristic that puts parameter + variables on parameter server workers and everything else on trainer + workers. + +1. Add communication OPs to enable the communication between nodes. + +We will need these OPs: *Send*, *Recv*, *Enqueue*, *Dequeue*. + +Below is an example of converting the user-defined graph to the +subgraphs for the trainer and the parameter server: + + + +After converting: + + + +1.
The parameter variable W and its optimizer subgraph are placed on the parameter server. +1. Operators are added to the subgraphs. + - *Send* sends data to the connected *Recv* operator. The + scheduler on the receive node will only schedule the *Recv* operator + to run when the *Send* operator has run (the *Send* OP will mark + the *Recv* OP runnable automatically). + - *Enqueue* enqueues the input variable; it can block until space + becomes available in the queue. + - *Dequeue* outputs configurable numbers of tensors from the + queue. It will block until the queue has the required number of + tensors. + + +### Benefits + +- Model parallelism becomes easier to implement: it's an extension to + the trainer - parameter server approach. We already have the + communication OPs, but need to extend the graph converter's + placement functionality. + +- A user-defined optimizer is easier to add - the user can now express it as + a subgraph. + +- No more duplicated logic inside the trainer and the parameter + server mentioned in the background section. + +### Challenges + +- It might be hard for the graph converter to cut a general graph + (without any hint for which subgraph is the optimizer). We may need + to label which subgraph inside the OP graph is the optimizer. + +- It's important to balance the parameter shards across multiple + parameter servers. If a single parameter is very big (some + word-embedding, fully connected, softmax layers), we need to + automatically partition the single parameter onto different + parameter servers when possible (only element-wise optimizers depend + on the parameter variable). + +### Discussion + +- In the "Async SGD" figure, the "W" variable on the parameter server + could be read and written concurrently; what is our locking strategy? + E.g., does each variable have a lock cpp method to be invoked by every + OP, or do we have a lock OP? + +- Can the Enqueue OP be implemented under our current tensor design + (puts the input tensor into the queue tensor)? + +- The *Dequeue* OP will have variable numbers of outputs (depending on the + `min_count` attribute); does our current design support it?
(similar + question for the *Add* OP) + + +### References: +[1] [TensorFlow: Large-Scale Machine Learning on Heterogeneous Distributed Systems](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/45166.pdf) diff --git a/doc/design/ops/src/dist-graph.graffle b/doc/design/ops/src/dist-graph.graffle new file mode 100644 index 0000000000000000000000000000000000000000..941399c6ced8d5f65b6c595522b770c88259df4b Binary files /dev/null and b/doc/design/ops/src/dist-graph.graffle differ diff --git a/doc/design/ops/src/dist-graph.png b/doc/design/ops/src/dist-graph.png new file mode 100644 index 0000000000000000000000000000000000000000..3546b09f1c2ee3e4f60f519d5e47f823f08051a7 Binary files /dev/null and b/doc/design/ops/src/dist-graph.png differ diff --git a/doc/design/ops/src/local-graph.graffle b/doc/design/ops/src/local-graph.graffle new file mode 100644 index 0000000000000000000000000000000000000000..19e509bd9af3c1e9a3f5e0f16ddd281457a339c5 Binary files /dev/null and b/doc/design/ops/src/local-graph.graffle differ diff --git a/doc/design/ops/src/local-graph.png b/doc/design/ops/src/local-graph.png new file mode 100644 index 0000000000000000000000000000000000000000..ada51200f793a9bb18911e7d63cfdb3244b967d7 Binary files /dev/null and b/doc/design/ops/src/local-graph.png differ diff --git a/doc/design/simple_op_design.md b/doc/design/simple_op_design.md index 5e07c29c56d21728599195d420d3222213d77e7c..fded4a68612396a262121a5a886a8ae573dfa662 100644 --- a/doc/design/simple_op_design.md +++ b/doc/design/simple_op_design.md @@ -147,7 +147,7 @@ class CosineOp { struct CosineOpProtoMaker : public OpProtoMaker { CosineOpProtoMaker(OpProto* proto) : OpProtoMaker(proto) { AddInput("input", "input of cosine op"); - AddAttr("scale", "scale of cosine op", float).Default(1.0).LargerThan(0.0); + AddAttr("scale", "scale of cosine op", float).Default(1.0).GreaterThan(0.0); AddType("cos"); AddComment("This is cos op"); } diff --git a/doc/design/var_desc.md b/doc/design/var_desc.md new file mode 100644 index 0000000000000000000000000000000000000000..86a95c10d5729704f86c285c9fe92db0cf2158be --- /dev/null +++ b/doc/design/var_desc.md @@ -0,0 +1,124 @@ +## Background +PaddlePaddle divides the description of neural network computation graph into two stages: compile time and runtime. + +PaddlePaddle uses proto messages to describe the compile time graph because + +1. The computation graph should be able to be saved to a file. +1. In distributed training, the graph will be serialized and sent to multiple workers. + +The computation graph is constructed from Data Nodes and Operation Nodes. The concepts that represent them are in the table below. + +| |compile time|runtime| +|---|---|---| +|Data|VarDesc(proto)|Variable(cpp)| +|Operation|OpDesc(proto)|Operator(cpp)| + + +## Definition of VarDesc + +A VarDesc should have a name and a value; in PaddlePaddle, the value will always be a tensor. Since we use LoDTensor most of the time, we add a LoDTensorDesc to represent it.
+ +```proto +message VarDesc { + required string name = 1; + optional LoDTensorDesc lod_tensor = 2; +} +``` + +## Definition of LoDTensorDesc + +```proto +enum DataType { + BOOL = 0; + INT16 = 1; + INT32 = 2; + INT64 = 3; + FP16 = 4; + FP32 = 5; + FP64 = 6; +} + +message LoDTensorDesc { + required DataType data_type = 1; + repeated int32 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480] + optional int32 lod_level = 3 [default=0]; +} +``` + +## Definition of Variable in Python + +In the Python API, layers take Variables as input and return Variables as output. There should be a class `Variable` in Python to help create and manage Variables. + +```python +image = Variable(dims=[-1, 640, 480]) +# fc1 and fc2 are both Variable +fc1 = layer.fc(input=image, output_size=10) +fc2 = layer.fc(input=fc1, output_size=20) +``` +### What should class `Variable` have +1. `name`. A name of string type is used to mark the value of the Variable. +1. `initializer`. Since our Tensor does not have a value, we will always use some Operator to fill it at run time. So we should have an initialize method to help add the init operator. +1. `operator`. A Variable should record which operator produces it. The reason is: + - we use pd.eval(targets=[var1, var2]) to run the related ops to get the value of var1 and var2. var.op is used to trace the dependency of the current variable. + +In PaddlePaddle, we use a Block to describe the computation graph, so in the code we will use Block rather than Graph. + +```python +import VarDesc +import LoDTensorDesc +import framework + +def AddInitialOperator(variable, initializer): + # add an initialize Operator to block to init this Variable + +class Variable(object): + def __init__(self, name, dims, type, initializer): + self._block = get_default_block() + self._name = name + self.op = None + + tensor_desc = LoDTensorDesc(data_type=type, dims=dims) + _var_desc = VarDesc(name=name, lod_tensor=tensor_desc) + self._var = framework.CreateVar(_var_desc) + self._block.add_var(self) + + # add initial op according to initializer + if initializer is not None: + AddInitialOperator(self, initializer) + + def dims(self): + return self._var.dims() + + def data_type(self): + return self._var.data_type() + + def to_proto(self): + pass +``` + +Then we can use this Variable to create a fc layer in Python. + +```python +import paddle as pd + +def flatten_size(X, num_flatten_dims): + prod = 1 # of last num_flatten_dims + for i in xrange(num_flatten_dims): + prod = prod * X.dims[-i-1] + return prod + +def layer.fc(X, output_size, num_flatten_dims): + W = Variable(pd.random_uniform(), type=FP32, dims=[flatten_size(X, num_flatten_dims), output_size]) + b = Variable(pd.random_uniform(), type=FP32, dims=[output_size]) + out = Variable(type=FP32) + y = operator.fc(X, W, b, output=out) # fc will put fc op input into out + pd.InferShape(y) + return out + +x = Variable(dims=[-1, 640, 480]) +y = layer.fc(x, output_size=100) +z = layer.fc(y, output_size=200) + +paddle.eval(targets=[z], ...)
+print(z) +``` diff --git a/doc/getstarted/basic_usage/index_cn.rst b/doc/getstarted/basic_usage/index_cn.rst index 428f58830e0b10c024f31238b7404c6df193eecd..b473944fc7fb89d3e0a0b330933f2226734bb5bd 100644 --- a/doc/getstarted/basic_usage/index_cn.rst +++ b/doc/getstarted/basic_usage/index_cn.rst @@ -55,7 +55,7 @@ PaddlePaddle是源于百度的一个深度学习平台。这份简短的介绍 # 线性计算网络层: ȳ = wx + b ȳ = fc_layer(input=x, param_attr=ParamAttr(name='w'), size=1, act=LinearActivation(), bias_attr=ParamAttr(name='b')) # 计算误差函数,即 ȳ 和真实 y 之间的距离 - cost = mse_cost(input= ȳ, label=y) + cost = square_error_cost(input= ȳ, label=y) outputs(cost) @@ -69,7 +69,7 @@ PaddlePaddle是源于百度的一个深度学习平台。这份简短的介绍 - **数据层**:数据层 `data_layer` 是神经网络的入口,它读入数据并将它们传输到接下来的网络层。这里数据层有两个,分别对应于变量 `x` 和 `y`。 - **全连接层**:全连接层 `fc_layer` 是基础的计算单元,这里利用它建模变量之间的线性关系。计算单元是神经网络的核心,PaddlePaddle支持大量的计算单元和任意深度的网络连接,从而可以拟合任意的函数来学习复杂的数据关系。 - - **回归误差代价层**:回归误差代价层 `mse_cost` 是众多误差代价函数层的一种,它们在训练过程作为网络的出口,用来计算模型的误差,是模型参数优化的目标函数。 + - **回归误差代价层**:回归误差代价层 `square_error_cost` 是众多误差代价函数层的一种,它们在训练过程作为网络的出口,用来计算模型的误差,是模型参数优化的目标函数。 定义了网络结构并保存为 `trainer_config.py` 之后,运行以下训练命令: diff --git a/doc/getstarted/basic_usage/index_en.rst b/doc/getstarted/basic_usage/index_en.rst index 6775da20c2f51000f305b095d40abd27b8fa6c0e..2cc438ebbe0f97345d25354b93b4ebbd43502415 100644 --- a/doc/getstarted/basic_usage/index_en.rst +++ b/doc/getstarted/basic_usage/index_en.rst @@ -49,7 +49,7 @@ To recover this relationship between ``X`` and ``Y``, we use a neural network wi x = data_layer(name='x', size=1) y = data_layer(name='y', size=1) y_predict = fc_layer(input=x, param_attr=ParamAttr(name='w'), size=1, act=LinearActivation(), bias_attr=ParamAttr(name='b')) - cost = mse_cost(input=y_predict, label=y) + cost = square_error_cost(input=y_predict, label=y) outputs(cost) Some of the most fundamental usages of PaddlePaddle are demonstrated: diff --git a/doc/getstarted/concepts/src/train.py b/doc/getstarted/concepts/src/train.py index 7e604f23de38543a00f305d508af0791193f78ba..8aceb23406a476f08639cc6223cdf730b728a705 100644 --- a/doc/getstarted/concepts/src/train.py +++ b/doc/getstarted/concepts/src/train.py @@ -8,7 +8,7 @@ paddle.init(use_gpu=False) x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(2)) y_predict = paddle.layer.fc(input=x, size=1, act=paddle.activation.Linear()) y = paddle.layer.data(name='y', type=paddle.data_type.dense_vector(1)) -cost = paddle.layer.mse_cost(input=y_predict, label=y) +cost = paddle.layer.square_error_cost(input=y_predict, label=y) # create parameters parameters = paddle.parameters.create(cost) diff --git a/doc/getstarted/concepts/use_concepts_cn.rst b/doc/getstarted/concepts/use_concepts_cn.rst index f15b11bd780402a3ec1755900e8c648f5d2a7bc5..c243083794bb3c4659242de99b3b2715af9d7c24 100644 --- a/doc/getstarted/concepts/use_concepts_cn.rst +++ b/doc/getstarted/concepts/use_concepts_cn.rst @@ -81,9 +81,9 @@ PaddlePaddle支持不同类型的输入数据,主要包括四种类型,和 .. code-block:: bash y_predict = paddle.layer.fc(input=x, size=1, act=paddle.activation.Linear()) - cost = paddle.layer.mse_cost(input=y_predict, label=y) + cost = paddle.layer.square_error_cost(input=y_predict, label=y) -其中,x与y为之前描述的输入层;而y_predict是接收x作为输入,接上一个全连接层;cost接收y_predict与y作为输入,接上均方误差层。 +其中,x与y为之前描述的输入层;而y_predict是接收x作为输入,接上一个全连接层;cost接收y_predict与y作为输入,接上平方误差层。 最后一层cost中记录了神经网络的所有拓扑结构,通过组合不同的layer,我们即可完成神经网络的搭建。 @@ -147,4 +147,4 @@ PaddlePaddle支持不同类型的输入数据,主要包括四种类型,和 .. 
literalinclude:: src/train.py :linenos: -有关线性回归的实际应用,可以参考PaddlePaddle book的 `第一章节 `_。 \ No newline at end of file +有关线性回归的实际应用,可以参考PaddlePaddle book的 `第一章节 `_。 diff --git a/doc/howto/dev/new_op_cn.md b/doc/howto/dev/new_op_cn.md index ec79b7f42b2d70df8fcb25faca5bc3a4759e177c..e3892849abe21fc207d2fcbe4adc65184ba771f4 100644 --- a/doc/howto/dev/new_op_cn.md +++ b/doc/howto/dev/new_op_cn.md @@ -23,17 +23,20 @@ - `framework::OperatorWithKernel`:继承自OperatorBase,Op有计算函数,称作有Kernel。 - `class OpProtoAndCheckerMaker`:描述该Op的输入、输出、属性、注释,主要用于Python API接口生成 -依据是否包含kernel,将Op分为两种:包含Kernel的Op和不包含kernel的Op,前者Op的定义继承自`OperatorBase`,后者继承自`OperatorWithKernel`。本教程主要介绍带Kernel的Op如何写,简单总结Op需要包含的内容如下: +依据是否包含kernel,可以将Op分为两种:包含Kernel的Op和不包含kernel的Op,前者Op的定义继承自`OperatorBase`,后者继承自`OperatorWithKernel`。本教程主要介绍带Kernel的Op如何写,简单总结Op需要包含的内容如下: - - 内容 | 定义位置 --------------- | :---------------------- + + 内容 | 定义位置 +-------------- | :---------------------- OpProtoMake定义 | `.cc`文件,Backward Op不需要定义OpProtoMake -Op定义 | `.cc`文件 -Kernel实现 | CPU、GPU共享Kernel在`.h`文件,否则,CPU可以在`.cc`文件,GPU可在`.cu`文件。 -注册Op | Op注册在`.cc`文件;Kernel注册CPU在`.cc`文件,GPU在`.cu`文件 - - +Op定义 | `.cc`文件 +Kernel实现 | CPU、GPU共享Kernel实现在`.h`文件中,否则,CPU 实现在`.cc`文件中,GPU 实现在`.cu`文件中。 +注册Op | Op注册实现在`.cc`文件;Kernel注册CPU实现在`.cc`文件中,GPU实现在`.cu`文件中 + + +实现新的op都添加至目录[paddle/operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators)下,文件命名以`*_op.h`(如有) 、 `*_op.cc` 、`*_op.cu`(如有)结尾。 + + 下面以矩阵乘操作,即[MulOp](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc)为例来介绍如何写带Kernel的Operator。 @@ -42,9 +45,11 @@ ### 1. 定义ProtoMaker类 -矩阵乘的公式:$Out = X * Y$, 可见该计算由两个输入,一个输出组成。首先定义`ProtoMaker`来描述该Op的输入、输出及注释: - -``` +矩阵乘法的公式:$Out = X * Y$, 可见该计算由两个输入,一个输出组成。 + +首先定义`ProtoMaker`来描述该Op的输入、输出,并添加注释: + +```cpp class MulOpMaker : public framework::OpProtoAndCheckerMaker { public: MulOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) @@ -59,20 +64,20 @@ The equation is: Out = X * Y } }; ``` - -[`MulOpMaker`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc#L43)继承自`framework::OpProtoAndCheckerMaker`,构造函数包括2个: + +[`MulOpMaker`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc#L43)继承自`framework::OpProtoAndCheckerMaker`,构造函数含有2个参数: - `framework::OpProto` : 前者存储Op的输入输出和参数属性,将用于Python API接口的生成。 - `framework::OpAttrChecker` :后者用于检查参数属性的合法性。 - -构造函数里通过`AddInput`添加输入参数,通过`AddOutput`添加输出参数,通过`AddComment`添加该Op的注释,这些函数会将对应内容添加到`OpProto`中。 -在`MulOp`中添加两个输入`X`和`Y`,添加了一个输出`Out`,并解释了各自含义,该命名尽可能的规范。 +构造函数里通过`AddInput`添加输入参数,通过`AddOutput`添加输出参数,通过`AddComment`添加Op的注释。这些函数会将对应内容添加到`OpProto`中。 - -再举个[`ScaleOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/scale_op.cc#L37)的例子: - -``` +上面的代码在`MulOp`中添加两个输入`X`和`Y`,添加了一个输出`Out`,并解释了各自含义,命名请遵守命名规范。 + + +再以[`ScaleOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/scale_op.cc#L37)为例: + +```cpp template <typename AttrType> class ScaleOpMaker : public framework::OpProtoAndCheckerMaker { public: @@ -87,17 +92,19 @@ The equation is: Out = scale*X } }; ``` - - 在这个例子里,两处不同: - - - `AddInput("X","...").NotInGradient()` : 表示`X`这个输入不参与`ScaleOp`对应的梯度Op计算之中。 - - `AddAttr<AttrType>("scale", "...").SetDefault(1.0);` : 增加`scale`系数,作为参数属性,并且设置默认值为1.0。 - + +这个例子有两处不同: + +- `AddInput("X","...").NotInGradient()` : 表示`X`这个输入不参与`ScaleOp`对应的梯度Op计算之中,如果Op的某个输入不参与反向梯度的计算,请显式地调用`.NotInGradient()`进行设置。 + +- `AddAttr<AttrType>("scale", "...").SetDefault(1.0);` : 增加`scale`系数,作为参数属性,并且设置默认值为1.0。 + ### 2.
定义Operator类 +下面的代码实现了MulOp的定义: -```c++ +```cpp class MulOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; @@ -121,33 +128,46 @@ class MulOp : public framework::OperatorWithKernel { ``` [`MulOp`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc#L22)继承自`OperatorWithKernel`。`public`成员: - -```c++ + +```cpp using framework::OperatorWithKernel::OperatorWithKernel; ``` 这句表示使用基类`OperatorWithKernel`的构造函数,也可写成: - -```c++ + +```cpp MulOp(const std::string &type, const framework::VariableNameMap &inputs, const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : OperatorWithKernel(type, inputs, outputs, attrs) {} -``` - +``` + 还需要重写`InferShape`接口。`InferShape`为const函数,不能修改Op的成员变量,参数为`const framework::InferShapeContext &ctx`,通过该参数可获取到输入输出以及属性。它的功能是: - 1). 做检查, 尽早报错:检查输入数据维度、类型等是否合法。 - 2). 设置输出Tensor的形状。 -通常`OpProtoMaker`和`Op`类的定义写在`.cc`文件中,和要讲到的注册函数一起放在`.cc`中 +通常`OpProtoMaker`和`Op`类的定义写在`.cc`文件中,和下面将要介绍的注册函数一起放在`.cc`中 ### 3. 定义OpKernel类 -```C++ -template <typename Place, typename T> -class MulKernel : public framework::OpKernel { - public: +`MulKernel`继承自`framework::OpKernel`,带有下面两个模板参数: + +- `typename Place`: 表示设备类型,不同设备(CPU、GPU)共享同一个Kernel时,需加该模板参数,不共享则不加,一个不共享的例子是[`OnehotCrossEntropyOpKernel`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43)。 + +- `typename T` : 表示数据类型,如`float`, `double`等。 + +需要为`MulKernel`类重写`Compute`接口。 +- `Compute`接受一个输入参数:`const framework::ExecutionContext& context`。 +- 与`InferShapeContext`相比,`ExecutionContext`增加了设备类型,同样可获取到输入输出和属性参数。 +- `Compute`函数里实现`OpKernel`的具体计算逻辑。 + +下面是 `MulKernel` `Compute`的实现: + + ```cpp + template <typename Place, typename T> + class MulKernel : public framework::OpKernel { + public: void Compute(const framework::ExecutionContext& context) const override { auto* X = context.Input<Tensor>("X"); auto* Y = context.Input<Tensor>("Y"); @@ -157,168 +177,193 @@ class MulKernel : public framework::OpKernel { const_cast<platform::DeviceContext*>(context.device_context_); math::matmul<Place, T>(*X, false, *Y, false, 1, Z, 0, device_context); } -}; -``` + }; + ``` + +需要注意:**不同设备(CPU、GPU)共享一个Op定义,是否共享同一个`OpKernel`,取决于`Compute`调用的函数是否支持不同设备。** + +`MulOp`的CPU、GPU实现共享同一个`Kernel`。`OpKernel`不共享的例子可以参考:[`OnehotCrossEntropyOpKernel`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43)。 + +为了使`OpKernel`的计算过程书写更加简单,并且CPU、GPU的代码可以复用,我们通常借助 Eigen unsupported Tensor模块来实现`Compute`接口。关于在PaddlePaddle中如何使用Eigen库,请参考[使用文档](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/dev/use_eigen_cn.md)。 + + +到此,前向Op实现完成。接下来,需要在`.cc`文件中注册该op和kernel。 +反向Op类的定义,反向OpKernel的定义与前向Op类似,这里不再赘述。**但需注意反向Op没有`ProtoMaker`**。 -`MulKernel`继承自`framework::OpKernel`,带有模板参数: - - - `typename Place`: 表示设备类型,不同设备(CPU、GPU)共享同一个Kernel时,需加该模板参数,不共享则不加,一个不共享的例子是[`OnehotCrossEntropyOpKernel`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43)。 - - - `typename T` : 表示数据类型,如`float`, `double`等。 - -`MulKernel`需要重写`Compute`接口,该接口参数为`const framework::ExecutionContext& context`, `ExecutionContext`相比`InferShapeContext`增加了设备类型,同样可获取到输入输出和属性参数,`Compute`函数里写具体实现时。 - -注意,不同设备(CPU、GPU)共享一个Op定义,是否则共享同一个`OpKernel`,取决于`Compute`调用的函数是否支持不同设备。`MulOp`的CPU、GPU实现共享同一个`Kernel`,`OpKernel`不共享的例子可以参考[`OnehotCrossEntropyOpKernel`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43)。 - -到此前向Op实现完成,需要在`.cc`文件中注册该op和kernel。反向Op类的定义和Kernel定义与前向Op类似,这里不再重复。但注意,反向Op没有`ProtoMaker`。 - ### 4.
注册Operator -在`.cc`文件中注册前向、反向Op类,注册CPU Kernel。 +- 在`.cc`文件中注册前向、反向Op类,注册CPU Kernel。 -```c++ -namespace ops = paddle::operators; -REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker, ops::MulOpGrad); -REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel<paddle::platform::CPUPlace, float>); -REGISTER_OP_CPU_KERNEL(mul_grad, - ops::MulGradKernel<paddle::platform::CPUPlace, float>); -``` - - - `REGISTER_OP` : 注册`ops::MulOp`类,类型名为`mul`,该类的`ProtoMaker`为`ops::MulOpMaker`,并且注册`ops::MulOpGrad`为其反向Op。 - - `REGISTER_OP_WITHOUT_GRADIENT` : 用于注册没有反向的Op。 - - `REGISTER_OP_CPU_KERNEL` :注册`ops::MulKernel`类,并特化模板参数为`paddle::platform::CPUPlace`和`float`类型,同理,注册`ops::MulKernel`类。 - -在 `.cu`文件中注册GPU Kernel。 - -```c++ -namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(mul, ops::MulKernel<paddle::platform::GPUPlace, float>); -REGISTER_OP_GPU_KERNEL(mul_grad, - ops::MulGradKernel<paddle::platform::GPUPlace, float>); -``` + ```cpp + namespace ops = paddle::operators; + REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker, mul_grad, ops::MulOpGrad); + REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel<paddle::platform::CPUPlace, float>); + REGISTER_OP_CPU_KERNEL(mul_grad, + ops::MulGradKernel<paddle::platform::CPUPlace, float>); + ``` + + 在上面的代码中: + + - `REGISTER_OP` : 注册`ops::MulOp`类,类型名为`mul`,该类的`ProtoMaker`为`ops::MulOpMaker`,注册`ops::MulOpGrad`,类型名为`mul_grad`。 + - `REGISTER_OP_WITHOUT_GRADIENT` : 用于注册没有反向的Op。 + - `REGISTER_OP_CPU_KERNEL` :注册`ops::MulKernel`类,并特化模板参数为`paddle::platform::CPUPlace`和`float`类型,同理,注册`ops::MulGradKernel`类。 + + +- 在 `.cu`文件中注册GPU Kernel。 + - 请注意,如果GPU Kernel的实现基于Eigen unsupported模块,那么在 `.cu`的开始请加上宏定义 `#define EIGEN_USE_GPU`,代码示例如下: + + ```cpp + // if use Eigen unsupported module before include head files + #define EIGEN_USE_GPU + + namespace ops = paddle::operators; + REGISTER_OP_GPU_KERNEL(mul, ops::MulKernel<paddle::platform::GPUPlace, float>); + REGISTER_OP_GPU_KERNEL(mul_grad, + ops::MulGradKernel<paddle::platform::GPUPlace, float>); + ``` ### 5. 编译 -在[paddle/operators/CMakeLists.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/CMakeLists.txt)文件中添加编译。 - -``` -op_library(mul_op SRCS mul_op.cc mul_op.cu DEPS math_function) -``` - -下面命令可以编译: - -``` -make mul_op -``` +- 简单**无特殊依赖**的OP无需修改CMakeList.txt文件。[paddle/operators/CMakeLists.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/CMakeLists.txt) 会自动将 `paddle/operators` 目录下新增的 `*_op.cc` 文件加入编译。 +- 较为复杂、**有额外依赖** 的operator仍需要修改[paddle/operators/CMakeLists.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/CMakeLists.txt)。如,`mul_op` 依赖 `math_function`,需要在`CMakeLists.txt`中添加如下内容: + + ``` + op_library(mul_op SRCS mul_op.cc mul_op.cu DEPS math_function) + + ``` + +- 运行下面命令可以进行编译: + + ``` + make mul_op + ``` ## 绑定Python -- 绑定Python - - 在 [`paddle/pybind/pybind.cc -`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/pybind/pybind.cc)文件中添加该类: +- 绑定Python + + 在 [`paddle/pybind/pybind.cc +`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/pybind/pybind.cc) 使用`USE_OP`告知编译器需要链接的Op,具体解释参考[代码注释](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/op_registry.h#L81)。 ``` USE_OP(mul); ``` 如果只实现了CPU版本,则使用`USE_CPU_ONLY_OP`: - + ``` USE_CPU_ONLY_OP(gather); ``` - - 使用`USE_OP`告知编译器需要链接该Op的目标文件,具体解释参考[代码注释](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/op_registry.h#L81)。 - - + + 如果OP不带Kernel,则使用`USE_NO_KERNEL_OP`: + + ``` + USE_NO_KERNEL_OP(recurrent); + ``` + + - 生成库 - 在 [`paddle/pybind/CMakeLists.txt`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/pybind/CMakeLists.txt)文件添加类到`DEPS`中,使得该Op可以链接到生成的lib库中。 - - ``` - if(WITH_PYTHON) - cc_library(paddle_pybind SHARED - SRCS pybind.cc - DEPS pybind python backward - mul_op - minus_op) - endif(WITH_PYTHON) - ``` + `paddle/operators` 目录下新增的 `*_op.cc` 文件会被自动添加链接到生成的lib库中。 ##
实现单元测试 -单测包括对比前向Op不同设备(CPU、GPU)的实现、对比反向OP不同设备(CPU、GPU)的实现、反向Op的梯度测试。下面介绍介绍[`MulOp`的单测](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/tests/test_mul_op.py)。 +单测包括对比前向Op不同设备(CPU、GPU)的实现、对比反向OP不同设备(CPU、GPU)的实现、反向Op的梯度测试。下面介绍[`MulOp`的单元测试](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/tests/test_mul_op.py)。 -### 前向Operator单测 +### 前向Operator单元测试 -前向Op单测继承自`unittest.TestCase`,并定义元类`__metaclass__ = OpTestMeta`,具体单测流程在`OpTestMeta`里完成。需在`setUp`函数定义输入输出和属性参数,以及Python对比的输出值。 +前向Op单元测试继承自`unittest.TestCase`,并定义元类`__metaclass__ = OpTestMeta`。各项更加具体的单元测试在`OpTestMeta`里完成。测试前向Operator,需要: -``` -import unittest -import numpy as np -from gradient_checker import GradientChecker, create_op -from op_test_util import OpTestMeta +1. 在`setUp`函数定义输入、输出,以及相关的属性参数。 +2. 生成随机的输入数据。 +3. 在Python脚本中实现与前向operator相同的计算逻辑,得到输出值,与operator前向计算的输出进行对比。 + + + ```python + import unittest + import numpy as np + from gradient_checker import GradientChecker, create_op + from op_test_util import OpTestMeta -class TestMulOp(unittest.TestCase): - __metaclass__ = OpTestMeta + class TestMulOp(unittest.TestCase): + __metaclass__ = OpTestMeta + def setUp(self): + self.type = "mul" + self.inputs = { + 'X': np.random.random((32, 84)).astype("float32"), + 'Y': np.random.random((84, 100)).astype("float32") + } + self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])} + ``` + +上面的代码首先导入依赖的包,下面是对`setUp`函数中操作的重要变量的详细解释: + +- `self.type = "mul" ` : 定义类型,与operator注册时注册的类型一致。 +- `self.inputs` : 定义输入,类型为`numpy.array`,并初始化。 +- `self.outputs` : 定义输出,并在Python脚本中完成与operator同样的计算逻辑,返回Python端的计算结果。 + + +### 反向Operator单元测试 + +反向Op单元测试继承自`GradientChecker`,而`GradientChecker`继承自`unittest.TestCase`,因此,**反向单元测试函数需要以`test_`开头**。 + +```python +class TestMulGradOp(GradientChecker): def setUp(self): - self.type = "mul" + self.op = create_op("mul") self.inputs = { 'X': np.random.random((32, 84)).astype("float32"), 'Y': np.random.random((84, 100)).astype("float32") } - self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])} -``` - 首先需要`import`必要的包,下面详细解释其他值: - - - `self.type = "mul" ` : 定义类型,和注册的类型一致。 - - `self.inputs` : 定义输入,类型为Numpy.array,并初始化。 - - `self.outputs` : 定义输出,并得到Python结算结果。 - - -### 反向Operator单测 - -反向Op单测继承自`GradientChecker`,而`GradientChecker`集成自`unittest.TestCase`,所以反向单测函数需要`test_`开头。 - - ``` - class MulGradOpTest(GradientChecker): - def test_mul(self): - op = create_op("mul") - inputs = { - 'X': np.random.random((32, 84)).astype("float32"), - 'Y': np.random.random((84, 100)).astype("float32") - } - self.compare_grad(op, inputs) + + def test_cpu_gpu_compare(self): + self.compare_grad(self.op, self.inputs) + + def test_normal(self): # mul op will enlarge the relative error self.check_grad( - op, inputs, set(["X", "Y"]), "Out", max_relative_error=0.5) - ``` + self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.5) - - 调用`create_op("mul")`创建反向Op对应的前向Op。 - - 定义输入`inputs`。 - - 调用`compare_grad`函数对比CPU、GPU计算结果。 - - 调用`check_grad`检查梯度稳定性,这里采用数值法检测梯度正确性。 - - 第一个参数`op` : 前向op。 - - 第二个参数`inputs` : 输入词典,词典的Key和`ProtoMaker`定义保持一致。 - - 第三个参数`set(["X", "Y"])` : 指定对输入变量`X`、`Y`做梯度检测。 - - 第四个参数`"Out"` : 指定前向网络最终的输出目标变量`Out` + def test_ignore_x(self): + self.check_grad( + self.op, + self.inputs, ["Y"], + "Out", + max_relative_error=0.5, + no_grad_set={"X"}) + def test_ignore_y(self): + self.check_grad( + self.op, + self.inputs, ["X"], + "Out", + max_relative_error=0.5, + no_grad_set={"Y"}) +``` +下面解释代码中一些关键的地方:
-单测完成之后,在[`python/paddle/v2/framework/tests/CMakeLists.txt`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/tests/CMakeLists.txt)里添加编译: +- 调用`create_op("mul")`创建反向Op对应的前向Op。 +- 调用`compare_grad`函数对比CPU、GPU计算结果。 +- `test_normal`中调用`check_grad`使用数值法检测梯度正确性和稳定性。 + - 第一个参数`self.op` : 前向Op。 + - 第二个参数`self.inputs` : 输入词典,词典的Key和`ProtoMaker`定义保持一致。 + - 第三个参数`["X", "Y"]` : 指定对输入变量`X`、`Y`做梯度检测。 + - 第四个参数`"Out"` : 指定前向网络最终的输出目标变量`Out` +- `test_ignore_x`和`test_ignore_y`分支用来测试只需要计算一个输入梯度的情况。 -``` -py_test(test_mul_op SRCS test_mul_op.py) -``` -编译时需要打开`WITH_TESTING`, 即 `cmake paddle_dir -DWITH_TESTING=ON`,编译成功之后执行单测命令为: -``` +### 编译和执行单元测试 + +`python/paddle/v2/framework/tests` 目录下新增的 `test_*.py` 单元测试会被自动加入工程进行编译。 + +请注意,**不同于Op的编译测试,运行单元测试时需要编译整个工程**,并且编译时需要打开`WITH_TESTING`, 即`cmake paddle_dir -DWITH_TESTING=ON`。编译成功后,执行下面的命令来运行单元测试: + +```bash make test ARGS="-R test_mul_op -V" ``` + 或者: -``` +```bash ctest -R test_mul_op ``` diff --git a/doc/howto/dev/use_eigen_cn.md b/doc/howto/dev/use_eigen_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..1367323b71277984834d9d4f0d9bea0f69478479 --- /dev/null +++ b/doc/howto/dev/use_eigen_cn.md @@ -0,0 +1,146 @@ +## 在Paddle中如何使用Eigen + +神经网络本质上是一个计算图,计算需要的数据存放在`Tensor`中,而计算过程是由`Operator`来描述的。在执行时,`Operator`调用对应`OpKernel`中的`Compute`接口,实现对`Tensor`的操作。 + + +### Eigen Tensor模块 + +Eigen Tensor模块对element-wise计算提供了强大的支持,并且书写一份代码,可以同时在CPU、GPU执行。但Eigen Tensor是一个正在开发中的模块,因此可能测试不够完备,文档较少。 + +关于Eigen Tensor模块的详细介绍请参考[文档1](https://github.com/RLovelett/eigen/blob/master/unsupported/Eigen/CXX11/src/Tensor/README.md) 和[文档2](https://bitbucket.org/eigen/eigen/src/default/unsupported/Eigen/CXX11/src/Tensor/README.md) + + +### paddle::framework::Tensor + +Paddle Tensor定义在framework目录下,其主要接口如下: + +```cpp +class Tensor { + public: + /*! Return a pointer to mutable memory block. */ + template <typename T> + inline T* data(); + + /** + * @brief Return a pointer to mutable memory block. + * @note If not exist, then allocation. + */ + template <typename T> + inline T* mutable_data(platform::Place place); + + /** + * @brief Return a pointer to mutable memory block. + * + * @param[in] dims The dimensions of the memory block. + * @param[in] place The place of the memory block. + * + * @note If not exist, then allocation. + */ + template <typename T> + inline T* mutable_data(DDim dims, platform::Place place); + + /*! Resize the dimensions of the memory block. */ + inline Tensor& Resize(const DDim& dims); + + /*! Return the dimensions of the memory block. */ + inline const DDim& dims() const; + + private: + /*! holds the memory block if allocated. */ + std::shared_ptr<Placeholder> holder_; + + /*! points to dimensions of memory block.
*/ + DDim dim_; +}; +``` + +`Placeholder`的作用是延迟分配内存,即我们可以先定义一个Tensor,然后使用Resize接口设置Tensor的大小,最后再调用mutable_data接口分配实际的内存。 + +```cpp +paddle::framework::Tensor t; +paddle::platform::CPUPlace place; +// set size first +t.Resize({2, 3}); +// allocate memory on CPU later +t.mutable_data<float>(place); +``` + +### paddle::framework::Tensor使用样例 +下面以AddOp为例说明Tensor的使用过程: + +- InferShape + +在运行神经网络计算图时,我们先调用每个`Operator`的`InferShape`接口,根据输入Tensor的大小来设置输出Tensor的大小,`Resize`接口会被调用。 + +```cpp +void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims(), + ctx.Input<Tensor>("Y")->dims(), + "Two input of Add Op's dimension must be same."); + ctx.Output<Tensor>("Out")->Resize(ctx.Input<Tensor>("X")->dims()); +} +``` + + +- Run + +`Operator`的`Run`接口最终会调用对应`OpKernel`的`Compute`接口,在这时真正的分配内存,`mutable_data`接口会被调用。 + +```cpp +void Compute(const framework::ExecutionContext& context) const override { + auto* input0 = context.Input<Tensor>("X"); + auto* input1 = context.Input<Tensor>("Y"); + auto* output = context.Output<Tensor>("Out"); + + output->mutable_data<T>(context.GetPlace()); + + auto x = EigenVector<T>::Flatten(*input0); + auto y = EigenVector<T>::Flatten(*input1); + auto z = EigenVector<T>::Flatten(*output); + + auto place = context.GetEigenDevice<Place>(); + + z.device(place) = x + y; +} +``` + + +### paddle::framework::Tensor到EigenTensor的转换 + +如上一小节所示,在具体的计算中,我们需要先把输入Tensor和输出Tensor转换为Eigen支持的格式。我们在[eigen.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/eigen.h)中提供了一些全局函数用来实现paddle::framework::Tensor到EigenTensor/EigenMatrix/EigenVector/EigenScalar的转换。 + +以EigenTensor为例,做一个介绍 + +```cpp +Tensor t; +float* p = t.mutable_data<float>(make_ddim({1, 2, 3}), platform::CPUPlace()); +for (int i = 0; i < 1 * 2 * 3; i++) { + p[i] = static_cast<float>(i); +} + +EigenTensor<float, 3>::Type et = EigenTensor<float, 3>::From(t); +``` + +From是EigenTensor模板提供的一个接口,可以实现从paddle::framework::Tensor到EigenTensor的转换。由于Tensor的rank是模板参数,因此在转换时需要显式地指定。 + +在Eigen中,不同rank的Tensor是不同类型,Vector是rank为1的Tensor。需要额外注意的是,EigenVector::From方法是把paddle中的一维Tensor转为Eigen的一维Tensor,在这里用EigenVector来表示;而EigenVector::Flatten方法是把paddle中的一个Tensor进行reshape操作,压扁成为Eigen的一维Tensor,类型仍然为EigenVector。 + +更多的转换方法请参考eigen_test.cc中的[单元测试](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/eigen_test.cc)。 + + + +### 实现计算 + +当需要完成计算时,我们需要等式左边的EigenTensor调用device接口。在这里需要注意的是,这里的EigenTensor之间的运算只是改变了原有Tensor中的数据,而不会改变原有Tensor的shape信息。 + +```cpp +auto x = EigenVector<T>::Flatten(*input0); +auto y = EigenVector<T>::Flatten(*input1); +auto z = EigenVector<T>::Flatten(*output); +auto place = context.GetEigenDevice<Place>(); +z.device(place) = x + y; +``` + +在这段代码中,input0/input1/output可以是任意维度的Tensor。我们调用了EigenVector的Flatten接口,把任意维度的Tensor转为了一维的EigenVector。而在计算结束之后,input0/input1/output的原有shape信息不变。如果想改变原有Tensor的shape信息,可以调用Resize接口进行改变。 + +由于Eigen Tensor模块的文档较少,我们可以参考TensorFlow的[kernels](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/kernels)模块下的相关`OpKernel`的计算代码。 diff --git a/doc/howto/dev/write_docs_cn.rst b/doc/howto/dev/write_docs_cn.rst index 36e5d420c986fc8d88eefee4aa221dba0a0480f2..731a63f945c29ba78538b3d71289b234e569354d 100644 --- a/doc/howto/dev/write_docs_cn.rst +++ b/doc/howto/dev/write_docs_cn.rst @@ -5,15 +5,13 @@ PaddlePaddle的文档包括英文文档 ``doc`` 和中文文档 ``doc_cn`` 两个部分。文档都是通过 `cmake`_ 驱动 `sphinx`_ 编译生成,生成后的文档分别存储在编译目录的 ``doc`` 和 ``doc_cn`` 两个子目录下。 -如何构建PaddlePaddle的文档 -========================== +如何构建文档 +============ -PaddlePaddle的文档构建有直接构建和基于Docker构建两种方式,我们提供了一个构建脚本build_docs.sh来进行构建。 -PaddlePaddle文档需要准备的环境相对较复杂,所以我们推荐使用基于Docker来构建PaddlePaddle的文档。
+PaddlePaddle的文档构建有两种方式。 - -使用Docker构建PaddlePaddle的文档 --------------------------------- +使用Docker构建 +-------------- 使用Docker构建PaddlePaddle的文档,需要在系统里先安装好Docker工具包。Docker安装请参考 `Docker的官网 `_ 。安装好Docker之后可以使用源码目录下的脚本构建文档,即 @@ -21,58 +19,46 @@ PaddlePaddle文档需要准备的环境相对较复杂,所以我们推荐使 cd TO_YOUR_PADDLE_CLONE_PATH cd paddle/scripts/tools/build_docs - bash build_docs.sh with_docker - -编译完成后,会在当前目录生成两个子目录\: - -* doc 英文文档目录 -* doc_cn 中文文档目录 + sh build_docs.sh +编译完成之后,会在当前目录生成两个子目录\: doc(英文文档目录)和 doc_cn(中文文档目录)。 打开浏览器访问对应目录下的index.html即可访问本地文档。 - - -直接构建PaddlePaddle的文档 --------------------------- - -因为PaddlePaddle的v2 api文档生成过程依赖于py_paddle Python包,用户需要首先确认py_paddle包已经安装。 - -.. code-block:: bash - - python -c "import py_paddle" - -如果提示错误,那么用户需要在本地编译安装PaddlePaddle,请参考 `源码编译文档 `_ 。 -注意,用户在首次编译安装PaddlePaddle时,请将WITH_DOC选项关闭。在编译安装正确之后,请再次确认py_paddle包已经安装,即可进行下一步操作。 +直接构建 +-------- 如果提示正确,可以执行以下命令编译生成文档,即 .. code-block:: bash cd TO_YOUR_PADDLE_CLONE_PATH - cd paddle/scripts/tools/build_docs - bash build_docs.sh local - -编译完成之后,会在当前目录生成两个子目录\: - -* doc 英文文档目录 -* doc_cn 中文文档目录 + mkdir -p build + cd build + cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_MKLDNN=OFF -DWITH_MKLML=OFF -DWITH_DOC=ON + make gen_proto_py + make paddle_docs paddle_docs_cn +编译完成之后,会在当前目录生成两个子目录\: doc(英文文档目录)和 doc_cn(中文文档目录)。 打开浏览器访问对应目录下的index.html即可访问本地文档。 -如何书写PaddlePaddle的文档 -========================== +如何书写文档 +============ PaddlePaddle文档使用 `sphinx`_ 自动生成,用户可以参考sphinx教程进行书写。 -如何更新www.paddlepaddle.org文档 -================================ +如何更新文档主题 +================ + +PaddlePaddle文档主题在 `TO_YOUR_PADDLE_CLONE_PATH/doc_theme` 文件夹下,包含所有和前端网页设计相关的文件。 -开发者给PaddlePaddle代码增加的注释以PR的形式提交到github中,提交方式可参见 `贡献文档 `_ 。 +如何更新doc.paddlepaddle.org +============================ + +更新的文档以PR的形式提交到github中,提交方式参见 `贡献文档 `_ 。 目前PaddlePaddle的develop分支的文档是自动触发更新的,用户可以分别查看最新的 `中文文档 `_ 和 `英文文档 `_ 。 - .. _cmake: https://cmake.org/ .. _sphinx: http://www.sphinx-doc.org/en/1.4.8/ diff --git a/doc/howto/usage/k8s/k8s_distributed_cn.md b/doc/howto/usage/k8s/k8s_distributed_cn.md index 3121b3f59df650c0a22d0bd305a6f793b202d30e..a9bebf09558b06993119803458977abedbbfbdd0 100644 --- a/doc/howto/usage/k8s/k8s_distributed_cn.md +++ b/doc/howto/usage/k8s/k8s_distributed_cn.md @@ -213,7 +213,7 @@ I1116 09:10:17.123440 50 Util.cpp:130] Calling runInitFunctions I1116 09:10:17.123764 50 Util.cpp:143] Call runInitFunctions done. [WARNING 2016-11-16 09:10:17,227 default_decorators.py:40] please use keyword arguments in paddle config. [INFO 2016-11-16 09:10:17,239 networks.py:1282] The input order is [movie_id, title, genres, user_id, gender, age, occupation, rating] -[INFO 2016-11-16 09:10:17,239 networks.py:1289] The output order is [__mse_cost_0__] +[INFO 2016-11-16 09:10:17,239 networks.py:1289] The output order is [__square_error_cost_0__] I1116 09:10:17.392917 50 Trainer.cpp:170] trainer mode: Normal I1116 09:10:17.613910 50 PyDataProvider2.cpp:257] loading dataprovider dataprovider::process I1116 09:10:17.680917 50 PyDataProvider2.cpp:257] loading dataprovider dataprovider::process diff --git a/paddle/cuda/include/hl_cpu_gru.cuh b/paddle/cuda/include/hl_cpu_gru.cuh index c0a37ced2a72a1ab410025e2aa45313c23f1349a..e4f6bf42c61694e9826a127c9628730cfd43ada7 100644 --- a/paddle/cuda/include/hl_cpu_gru.cuh +++ b/paddle/cuda/include/hl_cpu_gru.cuh @@ -18,14 +18,6 @@ limitations under the License. 
*/ #ifndef __NVCC__ -#include "paddle/math/MathFunctions.h" - -#ifndef PADDLE_TYPE_DOUBLE -#define CBLAS_GEMM paddle::gemm<float> -#else -#define CBLAS_GEMM paddle::gemm<double> -#endif - template <class OpResetOutput> void hl_naive_gru_forward_reset_output(OpResetOutput opResetOutput, real *gateValue, @@ -210,51 +202,6 @@ inline void forward_final_output(OpFinalOutput opFinalOutput, } } -template <class OpResetOutput, class OpFinalOutput> -void hl_cpu_gru_forward(OpResetOutput opResetOutput, - OpFinalOutput opFinalOutput, - hl_gru_value value, - int frameSize, - int batchSize, - hl_activation_mode_t active_node, - hl_activation_mode_t active_gate) { - if (value.prevOutValue) { - CBLAS_GEMM(CblasNoTrans, - CblasNoTrans, - batchSize, - 2 * frameSize, - frameSize, - 1, - value.prevOutValue, - frameSize, - value.gateWeight, - frameSize * 2, - 1, - value.gateValue, - frameSize * 3); - } - - forward_reset_output(opResetOutput, value, frameSize, batchSize, active_gate); - - if (value.prevOutValue) { - CBLAS_GEMM(CblasNoTrans, - CblasNoTrans, - batchSize, - frameSize, - frameSize, - 1, - value.resetOutputValue, - frameSize, - value.stateWeight, - frameSize, - 1, - value.gateValue + frameSize * 2, - frameSize * 3); - } - - forward_final_output(opFinalOutput, value, frameSize, batchSize, active_node); -} - template <class OpStateGrad> void hl_naive_gru_backward_state_grad(OpStateGrad opStateGrad, real *gateValue, @@ -525,86 +472,6 @@ inline void backward_reset_grad(OpResetGrad opResetGrad, } } -template <class OpStateGrad, class OpResetGrad> -void hl_cpu_gru_backward(OpStateGrad opStateGrad, - OpResetGrad opResetGrad, - hl_gru_value value, - hl_gru_grad grad, - int frameSize, - int batchSize, - hl_activation_mode_t active_node, - hl_activation_mode_t active_gate) { - backward_state_grad(opStateGrad, value, grad, - frameSize, batchSize, active_node); - - if (value.prevOutValue && grad.prevOutGrad) { - CBLAS_GEMM(CblasNoTrans, - CblasTrans, - batchSize, - frameSize, - frameSize, - 1, - grad.gateGrad + frameSize * 2, - frameSize * 3, - value.stateWeight, - frameSize, - 0, - grad.resetOutputGrad, - frameSize); - - if (grad.stateWeightGrad) { - CBLAS_GEMM(CblasTrans, - CblasNoTrans, - frameSize, - frameSize, - batchSize, - 1, - value.resetOutputValue, - frameSize, - grad.gateGrad + frameSize * 2, - frameSize * 3, - 1, - grad.stateWeightGrad, - frameSize); - } - } - - backward_reset_grad(opResetGrad, value, grad, - frameSize, batchSize, active_gate); - - if (grad.prevOutGrad && value.prevOutValue) { - CBLAS_GEMM(CblasNoTrans, - CblasTrans, - batchSize, - frameSize, - frameSize * 2, - 1, - grad.gateGrad, - frameSize * 3, - value.gateWeight, - frameSize * 2, - 1, - grad.prevOutGrad, - frameSize); - - if (grad.gateWeightGrad) { - CBLAS_GEMM(CblasTrans, - CblasNoTrans, - frameSize, - frameSize * 2, - batchSize, - 1, - value.prevOutValue, - frameSize, - grad.gateGrad, - frameSize * 3, - 1, - grad.gateWeightGrad, - frameSize * 2); - } - } -} - #endif #endif // HL_CPU_GRU_CUH_ diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index c0838d9b759110fd706577386d2c81bda6876223..3371962c635c3731f00a6af2a6e287ece33397cd 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -9,6 +9,7 @@ cc_test(eigen_test SRCS eigen_test.cc DEPS tensor) cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor) cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor) +nv_test(lod_tensor_gpu_test SRCS lod_tensor_test.cu DEPS lod_tensor) cc_test(variable_test SRCS variable_test.cc) diff --git a/paddle/framework/attribute.cc b/paddle/framework/attribute.cc index
9eb07acdff1d00dd926f1cee9c24f9f151006d7e..27132eaa0b3b0666fc042faf052dac2e169ba9e7 100644 --- a/paddle/framework/attribute.cc +++ b/paddle/framework/attribute.cc @@ -43,6 +43,10 @@ template <> AttrType AttrTypeID>() { return STRINGS; } +template <> +AttrType AttrTypeID>>() { + return INT_PAIRS; +} Attribute GetAttrValue(const OpDesc::Attr& attr_desc) { switch (attr_desc.type()) { @@ -76,6 +80,14 @@ Attribute GetAttrValue(const OpDesc::Attr& attr_desc) { } return val; } + case paddle::framework::AttrType::INT_PAIRS: { + std::vector> val(attr_desc.int_pairs_size()); + for (int i = 0; i < attr_desc.int_pairs_size(); ++i) { + val[i].first = attr_desc.int_pairs(i).first(); + val[i].second = attr_desc.int_pairs(i).second(); + } + return val; + } } PADDLE_ENFORCE(false, "Unknown OpDesc::AttrDesc::type !"); return boost::blank(); diff --git a/paddle/framework/attribute.h b/paddle/framework/attribute.h index 08b47cabd4c2225c50022bd35734dcc2663324d6..2b788a76cafe198abb9aed8ba842e37cc6ff73a6 100644 --- a/paddle/framework/attribute.h +++ b/paddle/framework/attribute.h @@ -28,7 +28,8 @@ namespace paddle { namespace framework { typedef boost::variant, - std::vector, std::vector> + std::vector, std::vector, + std::vector>> Attribute; typedef std::unordered_map AttributeMap; @@ -40,11 +41,23 @@ Attribute GetAttrValue(const OpDesc::Attr& attr_desc); // check whether a value(attribute) fit a certain limit template -class LargerThanChecker { +class GreaterThanChecker { public: - explicit LargerThanChecker(T lower_bound) : lower_bound_(lower_bound) {} + explicit GreaterThanChecker(T lower_bound) : lower_bound_(lower_bound) {} void operator()(T& value) const { - PADDLE_ENFORCE(value > lower_bound_, "larger_than check fail"); + PADDLE_ENFORCE(value > lower_bound_, "larger_than check fails."); + } + + private: + T lower_bound_; +}; + +template +class EqualGreaterThanChecker { + public: + explicit EqualGreaterThanChecker(T lower_bound) : lower_bound_(lower_bound) {} + void operator()(T& value) const { + PADDLE_ENFORCE_GE(value, lower_bound_, "equal_larger_than check fails."); } private: @@ -109,8 +122,13 @@ class TypedAttrChecker { return *this; } - TypedAttrChecker& LargerThan(const T& lower_bound) { - value_checkers_.push_back(LargerThanChecker(lower_bound)); + TypedAttrChecker& GreaterThan(const T& lower_bound) { + value_checkers_.push_back(GreaterThanChecker(lower_bound)); + return *this; + } + + TypedAttrChecker& EqualGreaterThan(const T& lower_bound) { + value_checkers_.push_back(EqualGreaterThanChecker(lower_bound)); return *this; } diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 6b4c612cd8d9263258e3987914c44002e7bca92c..c5d46622156c56acb98fb77e7db5ee7bca8c937a 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -182,7 +182,7 @@ static std::unique_ptr BackwardRecursive( }); // process recurrent gradient op as a special operator. - if (forwardOp.Type() == "recurrent_op") { + if (forwardOp.Type() == "recurrent") { // NOTE clean up cycle call somewhere (RNN's stepnet constains itself), or // this will result in infinite loop. 
const auto& rnnop =
diff --git a/paddle/framework/backward.md b/paddle/framework/backward.md index 9500c92a265d60a696e1e2c422d0f2bd1621ef71..0a6d762bc8be5201ac196b4bc6107c06d07a31d7 100644 --- a/paddle/framework/backward.md +++ b/paddle/framework/backward.md @@ -2,81 +2,99 @@
 ## Motivation

-In Neural Network, the backpropagation algorithm follows the chain rule, so we need to compound the fundmental gradient operators/expressions together with chain rule . Every forward network need a backward network to construct the full computation graph, the operator/expression's backward pass will be generated respect to forward pass.
-
-## Backward Operator Registry
+At present, many models in neural networks are solved by the backpropagation algorithm (known as BP). Technically, it calculates the gradient of the loss function, which is then distributed back through the network. Since BP follows the chain rule, we need a module that chains the gradient operators/expressions together to construct the backward pass. Every forward network needs a backward network to construct the full computation graph; the operator/expression's backward pass will be generated with respect to the forward pass.

-A backward network is built up with several backward operators. Backward operators take forward operators' inputs, outputs and output gradients and then calculate its input gradients.
+## Implementation
+
+In this design doc, we export only one API for generating the backward pass.
+
+```c++
+std::unique_ptr<OperatorBase> Backward(
+    const OperatorBase& forwardOp,
+    const std::unordered_set<std::string>& no_grad_vars);
+```
+
+The implementation behind it can be divided into two parts, **Backward Operator Creating** and **Backward Network Building**.
+
+### Backward Operator Registry
+
+A backward network is built up with several backward operators. Backward operators take the forward operators' inputs, outputs, and output gradients, and then calculate the input gradients.

| | forward operator | backward operator |
| ---------------------- | ---------------- | -------------------------------- |
| **Operator::inputs_** | Inputs | Inputs, Outputs, OutputGradients |
| **Operator::outputs_** | Outputs | InputGradients |

- In most cases, there is a one-to-one correspondence between forward and backward operators. These correspondences are recorded by a global hash map(`OpInfoMap`). To follow the philosophy of minimum core and make operators pluggable, the registry mechanism is introduced.
+ In most cases, there is a one-to-one correspondence between the forward and backward operators. These correspondences are recorded by a global hash map (`OpInfoMap`). To follow the philosophy of a minimum core and make operators pluggable, the registry mechanism is introduced.

-For example, we have got a `mul_op`, and we can register it's information and corresponding backward operator by the following macro:
+For example, we have got a `mul_op`, and we can register its information and corresponding backward operator by the following macro:

```cpp
-REGISTER_OP(mul, MulOp, MulOpMaker, MulOpGrad);
+REGISTER_OP(mul, MulOp, MulOpMaker, mul_grad, MulOpGrad);
```

`mul` is the operator's type. `MulOp` and `MulOpMaker` are the operator class and the operator maker class respectively. `mul_grad` is the type of the backward operator, and `MulOpGrad` is its class name.
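As a usage sketch (an editor's illustration, not part of this patch): `MulOp`, `MulOpMaker`, and `MulOpGrad` are the classes named above, while the forward net and the `"bias"` variable below are hypothetical.

```cpp
#include <memory>
#include <string>
#include <unordered_set>

#include "paddle/framework/backward.h"
#include "paddle/framework/op_registry.h"

// Register the forward op `mul` together with its backward op `mul_grad`.
REGISTER_OP(mul, MulOp, MulOpMaker, mul_grad, MulOpGrad);

std::unique_ptr<paddle::framework::OperatorBase> BuildBackwardPass(
    const paddle::framework::OperatorBase& forward_net) {
  // Variables listed here get no gradient operators generated for them.
  std::unordered_set<std::string> no_grad_vars{"bias"};
  // Returns an operator (a NetOp for networks) that chains the registered
  // gradient operators in the inverted order of the forward pass.
  return paddle::framework::Backward(forward_net, no_grad_vars);
}
```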
-## Backward Opeartor Creating
+### Backward Operator Creating

-Given a certain forward operator, we can get its corresponding backward opeartor by calling:
+Given a certain forward operator, we can get its corresponding backward operator by calling:

```cpp
OperatorBase* bwd_op = BuildGradOp(const OperatorBase* fwd_op);
-```
+```

The function `BuildGradOp` will sequentially execute the following processes:

1. Get the `type_` of the given forward operator, and then get the corresponding backward operator's type by looking up the `OpInfoMap`.

-2. Build two maps named `inputs` and `outputs` to temporary storage backward operator's inputs and outputs. Copy forward operator's `inputs_` and `outputs_` to map `inputs`, except these are not necessary for gradient computing.
+2. Build two maps named `inputs` and `outputs` to temporarily store the backward operator's inputs and outputs. Copy the forward operator's `inputs_` and `outputs_` to the map `inputs`, except those that are not necessary for gradient computing.

3. Add the forward inputs' gradient variables into the map `outputs`, and the forward outputs' gradient variables into the map `inputs`.

4. Build the backward operator with `inputs`, `outputs`, and the forward operator's attributes.

-## Backward Network Building
+### Backward Network Building

-A backward network is a series of backward operators. The main idea of building a backward network is creating backward operators in the inverted sequence and put them together.
-
-In our design, the network itself is also a kind of operator. So the operators contained by a big network may be some small network.
-
-given a forward network, it generates the backward network. We only care about the Gradients—`OutputGradients`,`InputGradients`.
+A backward network is a series of backward operators. The main idea of building a backward network is to create the backward operators in the inverted sequence and append them together one by one. There are some corner cases that need special processing.

1. Op

-   when the input forward network is a Op, return its gradient Operator Immediately.
+   When the input forward network is an Op, return its gradient operator immediately. If all of its outputs are in the no-gradient set, then return a special `NOP`.

2. NetOp

-   when the input forward network is a NetOp, it need to call the sub NetOp/Operators backward function recursively. During the process, we need to collect the `OutputGradients` name according to forward NetOp.
+   In our design, the network itself is also a kind of operator (**NetOp**). So the operators contained by a big network may themselves be small networks. When the input forward network is a NetOp, it needs to call the backward functions of its sub NetOps/Operators recursively. During the process, we need to collect the `OutputGradients` names according to the forward NetOp.
+
+3. RnnOp
+
+   RnnOp is a nested stepnet operator. The backward module needs to recursively call `Backward` for every stepnet.
+
+4. Sharing Variables
+
+   **Sharing variables**. As illustrated in the pictures below, two operators share the same variable name `W@GRAD`, which will overwrite their shared input variable.
+
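   The de-duplication shown in the pictures below, renaming the duplicated outputs and merging them with a generic `add` operator, can be sketched as follows (the variable and helper names here are hypothetical, not taken from the patch):

   ```cpp
   // op1 and op2 both write "W@GRAD": rename each output with a positional
   // suffix, then append a generic add operator that merges the duplicates
   // back into "W@GRAD".
   op1->Rename("W@GRAD", "W@GRAD@RENAME@0");
   op2->Rename("W@GRAD", "W@GRAD@RENAME@1");
   net->AppendOp(OpRegistry::CreateOp(
       "add", {{"X", {"W@GRAD@RENAME@0", "W@GRAD@RENAME@1"}}},
       {{"Out", {"W@GRAD"}}}, {}));
   ```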

+<p align="center"><img … ></p>
+
-   **shared variable**. As illustrated in the pictures, two operator's `Output` `Gradient` will overwirte their shared input variable.
+​	pic 1. Sharing variables in operators.
+
-<p align="center"><img … ></p>
+<p align="center"><img … ></p>
+
- 1. shared variable in two operators.
+​	Sharing a variable between operators, or using the same input variable in multiple operators, leads to a duplicate gradient variable. As the demo above shows, we need to rename the gradient names recursively and add a generic add operator to replace the overwrite links.
+
-<p align="center"><img … ></p>
+<p align="center"><img … ></p>
+
- Share variable between operators or same input variable used in multiple operators lead to a duplicate gradient variable. As demo show above, we need to rename gradient name recursively, and add a generic add operator replace the overwirte links.
+​	pic 2. Replace sharing variable's gradient with `Add` operator.
+
-<p align="center"><img … ></p>
+<p align="center"><img … ></p>
+
- 2. replace shared variable gradient with `Add` Operator
+​	Because our framework finds variables according to their names, we need to rename the output links. We add a numeric suffix to represent each output's position, counted clockwise (the renaming is sketched above).
-<p align="center"><img … ></p>
+5. Part of Gradient is Zero.
+
+   In the whole graph, there are cases where one operator's gradient is not needed, but the gradient of one of its inputs is a dependency link of another operator; in that position we need to fill in a gradient matrix of the same shape. In our implementation, we insert a special `fillZeroLike` operator.

-​	Then collect the sub graph `OutputGradients`/`InputGradients` as the NetOp's and return it.
+Following the rules above, we then collect the sub-graph's `OutputGradients`/`InputGradients` as the NetOp's and return it.
diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index bf8b11e5f5ae801621f84bdbeffb5c4cf2dd8905..ad8003420dc14538d0dae9a1cb19d6459b154576 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -148,14 +148,16 @@ class AddOpMaker : public OpProtoAndCheckerMaker { namespace f = paddle::framework; namespace ops = paddle::operators; using EnforceNotMet = paddle::platform::EnforceNotMet; -REGISTER_OP(rowwise_add, f::NOP, f::RowWiseAddOpMaker, f::NOP); -REGISTER_OP(mul, f::NOP, f::MulOpMaker, f::NOP); -REGISTER_OP(sigmoid, f::NOP, f::SigmoidOpMaker, f::NOP); +REGISTER_OP(rowwise_add, f::NOP, f::RowWiseAddOpMaker, rowwise_add_grad, + f::NOP); +REGISTER_OP(mul, f::NOP, f::MulOpMaker, mul_grad, f::NOP); +REGISTER_OP(sigmoid, f::NOP, f::SigmoidOpMaker, sigmoid_grad, f::NOP); REGISTER_OP_WITHOUT_GRADIENT(nograd, f::NOP, f::NoGradOpMaker); REGISTER_OP_WITHOUT_GRADIENT(fill_zeros_like, f::NOP, f::FillZeroOpMaker); -REGISTER_OP(add, f::NOP, f::AddOpMaker, f::NOP); +REGISTER_OP(add, f::NOP, f::AddOpMaker, add_grad, f::NOP); REGISTER_OP_WITHOUT_GRADIENT(fc, f::FcOp, f::FcOpMaker); -REGISTER_OP(many_output_op, f::NOP, f::ManyOutputOpMaker, f::NOP); +REGISTER_OP(many_output_op, f::NOP, f::ManyOutputOpMaker, many_output_op_grad, + f::NOP); TEST(Backward, simple_op_grad) { auto fwd = f::OpRegistry::CreateOp( diff --git a/paddle/framework/ddim.cc b/paddle/framework/ddim.cc index cfd3e8dfdec0e92620aef5cd246b4622b779ce19..fc3d508553c0e966978b28d58127bdbff10d45f1 100644 --- a/paddle/framework/ddim.cc +++ b/paddle/framework/ddim.cc @@ -21,16 +21,16 @@ namespace framework { /// @cond HIDDEN template -Dim make_dim(const int* d) { +Dim make_dim(const int64_t* d) { return Dim(*d, make_dim(d + 1)); } template <> -Dim<1> make_dim<1>(const int* d) { +Dim<1> make_dim<1>(const int64_t* d) { return Dim<1>(*d); } -void make_ddim(DDim& ddim, const int* dims, int n) { +void make_ddim(DDim& ddim, const int64_t* dims, int n) { switch (n) { case 1: ddim = make_dim<1>(dims); @@ -67,13 +67,13 @@ void make_ddim(DDim& ddim, const int* dims, int n) { /// @endcond -DDim make_ddim(std::initializer_list dims) { +DDim make_ddim(std::initializer_list dims) { DDim result(make_dim(0)); make_ddim(result, dims.begin(), dims.size()); return result; } -DDim make_ddim(const std::vector& dims) { +DDim make_ddim(const std::vector& dims) { DDim result(make_dim(0)); make_ddim(result, &dims[0], dims.size()); return result; @@ -81,12 +81,12 @@ DDim make_ddim(const std::vector& dims) { /// @cond HIDDEN // XXX For some reason, putting this in an anonymous namespace causes errors -class DynamicMutableIndexer : public boost::static_visitor { +class DynamicMutableIndexer : public boost::static_visitor { public: explicit DynamicMutableIndexer(int idx) : idx_(idx) {} template - int& operator()(Dim& dim) const { + int64_t& operator()(Dim& dim) const { return dim[idx_]; } @@ -94,12 +94,12 @@ class DynamicMutableIndexer : public boost::static_visitor { int idx_; }; -class 
DynamicConstIndexer : public boost::static_visitor { +class DynamicConstIndexer : public boost::static_visitor { public: explicit DynamicConstIndexer(int idx) : idx_(idx) {} template - int operator()(const Dim& dim) const { + int64_t operator()(const Dim& dim) const { return dim[idx_]; } @@ -109,22 +109,22 @@ class DynamicConstIndexer : public boost::static_visitor { /// @endcond -int& DDim::operator[](int idx) { +int64_t& DDim::operator[](int idx) { return boost::apply_visitor(DynamicMutableIndexer(idx), var); } -int DDim::operator[](int idx) const { +int64_t DDim::operator[](int idx) const { return boost::apply_visitor(DynamicConstIndexer(idx), var); } -ssize_t DDim::size() const { return arity(*this); } +int64_t DDim::size() const { return arity(*this); } bool DDim::operator==(DDim d) const { if (var.which() != d.getVar().which()) { return false; } else { - std::vector v1 = vectorize(*this); - std::vector v2 = vectorize(d); + std::vector v1 = vectorize(*this); + std::vector v2 = vectorize(d); for (unsigned int i = 0; i < v1.size(); i++) { if (v1[i] != v2[i]) { @@ -139,10 +139,10 @@ bool DDim::operator==(DDim d) const { bool DDim::operator!=(DDim d) const { return !(*this == d); } DDim DDim::operator+(DDim d) const { - std::vector v1 = vectorize(*this); - std::vector v2 = vectorize(d); + std::vector v1 = vectorize(*this); + std::vector v2 = vectorize(d); - std::vector v3; + std::vector v3; assert(v1.size() == v2.size()); @@ -154,10 +154,10 @@ DDim DDim::operator+(DDim d) const { } DDim DDim::operator*(DDim d) const { - std::vector v1 = vectorize(*this); - std::vector v2 = vectorize(d); + std::vector v1 = vectorize(*this); + std::vector v2 = vectorize(d); - std::vector v3; + std::vector v3; assert(v1.size() == v2.size()); @@ -168,15 +168,15 @@ DDim DDim::operator*(DDim d) const { return make_ddim(v3); } -int get(const DDim& ddim, int idx) { return ddim[idx]; } +int64_t get(const DDim& ddim, int idx) { return ddim[idx]; } void set(DDim& ddim, int idx, int value) { ddim[idx] = value; } /// @cond HIDDEN struct VectorizeVisitor : public boost::static_visitor<> { - std::vector& vector; + std::vector& vector; - explicit VectorizeVisitor(std::vector& v) : vector(v) {} + explicit VectorizeVisitor(std::vector& v) : vector(v) {} template void operator()(const T& t) { @@ -188,31 +188,31 @@ struct VectorizeVisitor : public boost::static_visitor<> { }; /// @endcond -std::vector vectorize(const DDim& ddim) { - std::vector result; +std::vector vectorize(const DDim& ddim) { + std::vector result; VectorizeVisitor visitor(result); boost::apply_visitor(visitor, ddim); return result; } -struct ProductVisitor : public boost::static_visitor { +struct ProductVisitor : public boost::static_visitor { template - ssize_t operator()(const Dim& dim) { + int64_t operator()(const Dim& dim) { return product(dim); } }; -ssize_t product(const DDim& ddim) { +int64_t product(const DDim& ddim) { ProductVisitor visitor; return boost::apply_visitor(visitor, ddim); } struct SliceVectorizeVisitor : public boost::static_visitor<> { - std::vector& vector; + std::vector& vector; int begin; int end; - SliceVectorizeVisitor(std::vector& v, int b, int e) + SliceVectorizeVisitor(std::vector& v, int b, int e) : vector(v), begin(b), end(e) { PADDLE_ENFORCE(begin < end, "Begin index must be less than end index in ddim slice."); @@ -240,7 +240,7 @@ struct SliceVectorizeVisitor : public boost::static_visitor<> { }; DDim slice_ddim(const DDim& dim, int begin, int end) { - std::vector vec; + std::vector vec; vec.reserve(end - begin); 
SliceVectorizeVisitor visitor(vec, begin, end); boost::apply_visitor(visitor, dim); @@ -280,8 +280,17 @@ std::ostream& operator<<(std::ostream& os, const DDim& ddim) { return os; } -DDim::DDim(std::initializer_list init_list) { +DDim::DDim(std::initializer_list init_list) { *this = make_ddim(init_list); } + +DDim flatten_to_2d(const DDim& src, int num_col_dims) { + int rank = src.size(); + return make_ddim({product(slice_ddim(src, 0, num_col_dims)), + product(slice_ddim(src, num_col_dims, rank))}); +} + +DDim flatten_to_1d(const DDim& src) { return make_ddim({product(src)}); } + } // namespace framework } // namespace paddle diff --git a/paddle/framework/ddim.h b/paddle/framework/ddim.h index 95f294b62737be5c3eac39303148ac35da29fe7d..ca29e7e8c7776de6adf3e3b0e8f11f0d4d8487c3 100644 --- a/paddle/framework/ddim.h +++ b/paddle/framework/ddim.h @@ -40,7 +40,7 @@ struct DDim { template explicit DDim(const Dim& in) : var(in) {} - /*implicit*/ DDim(std::initializer_list init_list); + /*implicit*/ DDim(std::initializer_list init_list); template DDim& operator=(const Dim& in) { @@ -48,8 +48,8 @@ struct DDim { return *this; } - int& operator[](int idx); - int operator[](int idx) const; + int64_t& operator[](int idx); + int64_t operator[](int idx) const; template typename Visitor::result_type apply_visitor(Visitor& visitor) { @@ -71,15 +71,15 @@ struct DDim { DDim operator*(DDim d) const; - ssize_t size() const; + int64_t size() const; }; /** - * \brief Make a DDim from std::vector + * \brief Make a DDim from std::vector * * \param dims An vector of ints. Must be sized between [1, 9] */ -DDim make_ddim(const std::vector& dims); +DDim make_ddim(const std::vector& dims); /** * \brief Make a DDim from an initializer list @@ -87,14 +87,14 @@ DDim make_ddim(const std::vector& dims); * \param dims An initializer list of ints. Must be sized between [1, 9] * */ -DDim make_ddim(std::initializer_list dims); +DDim make_ddim(std::initializer_list dims); -int get(const DDim& dim, int idx); +int64_t get(const DDim& dim, int idx); void set(DDim& dim, int idx, int val); -std::vector vectorize(const DDim& ddim); +std::vector vectorize(const DDim& ddim); -ssize_t product(const DDim& ddim); +int64_t product(const DDim& ddim); /** * \brief Slice a ddim @@ -115,6 +115,12 @@ int arity(const DDim& ddim); std::ostream& operator<<(std::ostream&, const DDim&); +// Reshape a tensor to a matrix. The matrix's first dimension(column length) +// will be the product of tensor's first `num_col_dims` dimensions. 
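+// For example (illustrative): flatten_to_2d(make_ddim({2, 3, 6, 4}), 2)
+// produces the 2-D DDim [2 * 3, 6 * 4] = [6, 24].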
+DDim flatten_to_2d(const DDim& src, int num_col_dims); + +DDim flatten_to_1d(const DDim& src); + } // namespace framework } // namespace paddle diff --git a/paddle/framework/ddim_test.cc b/paddle/framework/ddim_test.cc index 9d18a2972ce62139430b240b4599854b14290a32..756232b1b56a49d2c91cc2cac950ca508c54fb3f 100644 --- a/paddle/framework/ddim_test.cc +++ b/paddle/framework/ddim_test.cc @@ -12,7 +12,7 @@ TEST(DDim, Equality) { EXPECT_EQ(ddim[2], 5); // construct a DDim from a vector - std::vector vec({9, 1, 5}); + std::vector vec({9, 1, 5}); paddle::framework::DDim vddim = paddle::framework::make_ddim(vec); EXPECT_EQ(ddim[0], 9); EXPECT_EQ(ddim[1], 1); @@ -25,7 +25,7 @@ TEST(DDim, Equality) { EXPECT_EQ(paddle::framework::get(ddim, 0), 6); // vectorize a DDim - std::vector res_vec = paddle::framework::vectorize(vddim); + std::vector res_vec = paddle::framework::vectorize(vddim); EXPECT_EQ(res_vec[0], 9); EXPECT_EQ(res_vec[1], 1); EXPECT_EQ(res_vec[2], 5); diff --git a/paddle/framework/dim.h b/paddle/framework/dim.h index 883fdc55eb929ebc51e8ae05938e9d07374406ce..04d4b0e604e6f73ad94e0ca79d6b69f663bd4076 100644 --- a/paddle/framework/dim.h +++ b/paddle/framework/dim.h @@ -17,13 +17,13 @@ struct Dim { static constexpr int dimensions = i; template - HOSTDEVICE Dim(int _head, Args... _tail) : head(_head), tail(_tail...) { + HOSTDEVICE Dim(int64_t _head, Args... _tail) : head(_head), tail(_tail...) { static_assert(sizeof...(_tail) == i - 1, "Dim initialized with the wrong number of parameters"); } HOSTDEVICE - Dim(int _head, const Dim& _tail) : head(_head), tail(_tail) {} + Dim(int64_t _head, const Dim& _tail) : head(_head), tail(_tail) {} HOSTDEVICE Dim() : head(0), tail() {} @@ -31,12 +31,12 @@ struct Dim { /** Construct a Dim from a linear index and size. Uses Fortran order * indexing. 
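 * For example (an illustrative case): with size = (2, 3) and linear index
 * idx = 4, head = 4 % 2 = 0 and the tail is constructed from idx = 4 / 2 = 2,
 * so the resulting index is (0, 2).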
*/ HOSTDEVICE - Dim(int idx, const Dim& size) + Dim(int64_t idx, const Dim& size) : head(idx % size.head), tail(idx / size.head, size.tail) {} /** Construct a Dim with each dimension set to the given index */ HOSTDEVICE - Dim(int idx) : head(idx), tail(idx) {} + Dim(int64_t idx) : head(idx), tail(idx) {} HOSTDEVICE bool operator==(const Dim& o) const { @@ -47,13 +47,13 @@ struct Dim { bool operator!=(const Dim& o) const { return !(*this == o); } HOSTDEVICE - int& operator[](int idx); + int64_t& operator[](int idx); HOSTDEVICE - int operator[](int idx) const; + int64_t operator[](int idx) const; HOST std::string to_string() const; - int head; + int64_t head; Dim tail; }; @@ -63,7 +63,7 @@ struct Dim<1> { static constexpr int dimensions = 1; HOSTDEVICE - Dim(int _head) : head(_head) {} + Dim(int64_t _head) : head(_head) {} HOSTDEVICE Dim() : head(0) {} @@ -86,11 +86,11 @@ struct Dim<1> { bool operator!=(const Dim<1>& o) const { return !(*this == o); } HOSTDEVICE - int& operator[](int idx); + int64_t& operator[](int idx); HOSTDEVICE - int operator[](int idx) const; + int64_t operator[](int idx) const; - int head; + int64_t head; }; namespace { @@ -100,12 +100,12 @@ template struct DimGetter { // Return a copy if Dim is const template - HOSTDEVICE static int impl(const D& d) { + HOSTDEVICE static int64_t impl(const D& d) { return DimGetter::impl(d.tail); } // Return a reference if Dim is mutable template - HOSTDEVICE static int& impl(D& d) { + HOSTDEVICE static int64_t& impl(D& d) { return DimGetter::impl(d.tail); } }; @@ -115,18 +115,18 @@ template <> struct DimGetter<0> { // Return a copy if Dim is const template - HOSTDEVICE static int impl(const D& d) { + HOSTDEVICE static int64_t impl(const D& d) { return d.head; } // Return a reference if Dim is mutable template - HOSTDEVICE static int& impl(D& d) { + HOSTDEVICE static int64_t& impl(D& d) { return d.head; } }; template -HOSTDEVICE int& indexer(Dim& dim, int idx) { +HOSTDEVICE int64_t& indexer(Dim& dim, int idx) { #ifndef __CUDA_ARCH__ if (idx < 0) { throw std::invalid_argument("Tried to access a negative dimension"); @@ -141,7 +141,7 @@ HOSTDEVICE int& indexer(Dim& dim, int idx) { } template <> -HOSTDEVICE int& indexer<1>(Dim<1>& dim, int idx) { +HOSTDEVICE int64_t& indexer<1>(Dim<1>& dim, int idx) { #ifndef __CUDA_ARCH__ if (idx != 0) { throw std::invalid_argument("Invalid index"); @@ -153,7 +153,7 @@ HOSTDEVICE int& indexer<1>(Dim<1>& dim, int idx) { } template -HOSTDEVICE int indexer(const Dim& dim, int idx) { +HOSTDEVICE int64_t indexer(const Dim& dim, int idx) { #ifndef __CUDA_ARCH__ if (idx < 0) { throw std::invalid_argument("Tried to access a negative dimension"); @@ -168,7 +168,7 @@ HOSTDEVICE int indexer(const Dim& dim, int idx) { } template <> -HOSTDEVICE int indexer<1>(const Dim<1>& dim, int idx) { +HOSTDEVICE int64_t indexer<1>(const Dim<1>& dim, int idx) { #ifndef __CUDA_ARCH__ if (idx != 0) { throw std::invalid_argument("Invalid index"); @@ -182,73 +182,76 @@ HOSTDEVICE int indexer<1>(const Dim<1>& dim, int idx) { } // namespace // Static access to constant Dim template -HOSTDEVICE int get(const Dim& d) { +HOSTDEVICE int64_t get(const Dim& d) { return DimGetter::impl(d); } // Static access to mutable Dim template -HOSTDEVICE int& get(Dim& d) { +HOSTDEVICE int64_t& get(Dim& d) { return DimGetter::impl(d); } // Dynamic access to constant Dim template -HOSTDEVICE int Dim::operator[](int i) const { +HOSTDEVICE int64_t Dim::operator[](int i) const { return indexer(*this, i); } // Dynamic access to mutable Dim template 
-HOSTDEVICE int& Dim::operator[](int i) { +HOSTDEVICE int64_t& Dim::operator[](int i) { return indexer(*this, i); } // Dynamic access to constant Dim -inline HOSTDEVICE int Dim<1>::operator[](int i) const { +inline HOSTDEVICE int64_t Dim<1>::operator[](int i) const { return indexer(*this, i); } // Dynamic access to mutable Dim -inline HOSTDEVICE int& Dim<1>::operator[](int i) { return indexer(*this, i); } +inline HOSTDEVICE int64_t& Dim<1>::operator[](int i) { + return indexer(*this, i); +} // Dynamic access to constant Dim // without std::enable_if will try to instantiate this on get<0>(d) template -HOSTDEVICE typename std::enable_if<(l > 0), int>::type get(const Dim& d, - int i) { +HOSTDEVICE typename std::enable_if<(l > 0), int64_t>::type get(const Dim& d, + int i) { return d[i]; } // Dynamic access to mutable Dim template -HOSTDEVICE typename std::enable_if<(l > 0), int&>::type get(Dim& d, int i) { +HOSTDEVICE typename std::enable_if<(l > 0), int64_t&>::type get(Dim& d, + int i) { return d[i]; } // Dot product of two dims template -HOSTDEVICE int linearize(const Dim& a, const Dim& b) { +HOSTDEVICE int64_t linearize(const Dim& a, const Dim& b) { return a.head * b.head + linearize(a.tail, b.tail); } // Base case dot product of two Dims // Notice it is inline because it is no longer a template template <> -HOSTDEVICE inline int linearize(const Dim<1>& a, const Dim<1>& b) { +HOSTDEVICE inline int64_t linearize(const Dim<1>& a, const Dim<1>& b) { return a.head * b.head; } // Product of a Dim template -HOSTDEVICE int product(const Dim& a, int prod = 1) { +HOSTDEVICE int64_t product(const Dim& a, int prod = 1) { return prod * a.head * product(a.tail); } // Base case product of a Dim // Notice it is inline because it is no longer a template template <> -HOSTDEVICE inline int product(const Dim<1>& a, int prod) { +HOSTDEVICE inline int64_t product(const Dim<1>& a, int prod) { return prod * a.head; } diff --git a/paddle/framework/dim_test.cu b/paddle/framework/dim_test.cu index 3898d0a447aa502813b3cb5e86c29eebb814ff84..0a6a87669c900de6cb507dd48f0cfc871defe279 100644 --- a/paddle/framework/dim_test.cu +++ b/paddle/framework/dim_test.cu @@ -8,7 +8,7 @@ __global__ void test(paddle::framework::Dim<2>* o) { o[0] = paddle::framework::make_dim(5, 6); } -__global__ void dyn_idx_gpu(int* o) { +__global__ void dyn_idx_gpu(int64_t* o) { auto d = paddle::framework::make_dim(5, 6); o[0] = d[1]; } @@ -47,9 +47,9 @@ TEST(Dim, Equality) { EXPECT_EQ(b[1], 11); // dynamic access on GPU - thrust::device_vector r(1); + thrust::device_vector r(1); dyn_idx_gpu<<<1, 1>>>(thrust::raw_pointer_cast(r.data())); - int res = r[0]; + int64_t res = r[0]; EXPECT_EQ(res, 6); // ex_prefix_mul diff --git a/paddle/framework/eigen.h b/paddle/framework/eigen.h index a4667cc51fadfc020d3211b7a82356db386fced1..54bbeafcabdeeb1e2c1017c156b3512c83dada3a 100644 --- a/paddle/framework/eigen.h +++ b/paddle/framework/eigen.h @@ -28,7 +28,7 @@ struct EigenDim { static Type From(const DDim& dims) { PADDLE_ENFORCE(arity(dims) == D, "D must match arity(DDim)"); Type ret; - for (int d = 0; d < arity(dims); d++) { + for (int64_t d = 0; d < arity(dims); d++) { ret[d] = dims[d]; } return ret; @@ -63,20 +63,35 @@ struct EigenTensor { template -struct EigenMatrix : public EigenTensor {}; +struct EigenMatrix : public EigenTensor { + static typename EigenMatrix::Type Reshape(Tensor& tensor, int num_col_dims) { + int rank = tensor.dims_.size(); + PADDLE_ENFORCE(num_col_dims > 0 && num_col_dims < rank, + "`num_col_dims` must be between (0, 
rank_of_tensor)."); + return EigenMatrix::From(tensor, + flatten_to_2d(tensor.dims(), num_col_dims)); + } + + static typename EigenMatrix::ConstType Reshape(const Tensor& tensor, + int num_col_dims) { + int rank = tensor.dims_.size(); + PADDLE_ENFORCE(num_col_dims > 0 && num_col_dims < rank, + "`num_col_dims` must be between (0, rank_of_tensor)."); + return EigenMatrix::From(tensor, + flatten_to_2d(tensor.dims(), num_col_dims)); + } +}; template struct EigenVector : public EigenTensor { // Flatten reshapes a Tensor into an EigenVector. static typename EigenVector::Type Flatten(Tensor& tensor) { - return EigenVector::From( - tensor, make_ddim({static_cast(product(tensor.dims_))})); + return EigenVector::From(tensor, {product(tensor.dims_)}); } static typename EigenVector::ConstType Flatten(const Tensor& tensor) { - return EigenVector::From( - tensor, make_ddim({static_cast(product(tensor.dims_))})); + return EigenVector::From(tensor, {product(tensor.dims_)}); } }; diff --git a/paddle/framework/eigen_test.cc b/paddle/framework/eigen_test.cc index dc1957691b1a202826e10e84c21ac8874df9e378..bc4a2db32cfba66bef2c444e1f822e0d2a57b91e 100644 --- a/paddle/framework/eigen_test.cc +++ b/paddle/framework/eigen_test.cc @@ -108,5 +108,24 @@ TEST(Eigen, Matrix) { } } +TEST(Eigen, MatrixReshape) { + Tensor t; + float* p = t.mutable_data({2, 3, 6, 4}, platform::CPUPlace()); + for (int i = 0; i < 2 * 3 * 6 * 4; ++i) { + p[i] = static_cast(i); + } + + EigenMatrix::Type em = EigenMatrix::Reshape(t, 2); + + ASSERT_EQ(2 * 3, em.dimension(0)); + ASSERT_EQ(6 * 4, em.dimension(1)); + + for (int i = 0; i < 2 * 3; i++) { + for (int j = 0; j < 6 * 4; j++) { + ASSERT_NEAR(i * 6 * 4 + j, em(i, j), 1e-6f); + } + } +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/framework.proto b/paddle/framework/framework.proto index ae44a1ffd45dacdc44a72edc630e771e7a2f2990..dfcb5fb6210a08f35193b83e3b5f7cee92f618d7 100644 --- a/paddle/framework/framework.proto +++ b/paddle/framework/framework.proto @@ -22,8 +22,14 @@ enum AttrType { INTS = 3; FLOATS = 4; STRINGS = 5; + INT_PAIRS = 6; } +message IntPair { + required int32 first = 1; + required int32 second = 2; +}; + // OpDesc describes an instance of a C++ framework::OperatorBase // derived class type. 
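// Illustrative note (not part of the patch): an INT_PAIRS attribute travels in
// the repeated `int_pairs` field added below; in proto text format it could
// look like (with "paddings" a hypothetical attribute name):
//   attrs { name: "paddings" type: INT_PAIRS int_pairs { first: 0 second: 4 } }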
message OpDesc { @@ -37,6 +43,7 @@ message OpDesc { repeated int32 ints = 6; repeated float floats = 7; repeated string strings = 8; + repeated IntPair int_pairs = 9; }; message Var { @@ -80,3 +87,24 @@ message OpProto { repeated Attr attrs = 4; required string comment = 5; } + +enum DataType { + BOOL = 0; + INT16 = 1; + INT32 = 2; + INT64 = 3; + FP16 = 4; + FP32 = 5; + FP64 = 6; +} + +message LoDTensorDesc { + required DataType data_type = 1; + repeated int32 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480] + optional int32 lod_level = 3 [ default = 0 ]; +} + +message VarDesc { + required string name = 1; + optional LoDTensorDesc lod_tensor = 2; +} diff --git a/paddle/framework/grad_op_builder_test.cc b/paddle/framework/grad_op_builder_test.cc index 8a817a3e13ca64d6f8df566891a1059995e041ae..9e3ca563c6765637f8471d142d32cec447f0b977 100644 --- a/paddle/framework/grad_op_builder_test.cc +++ b/paddle/framework/grad_op_builder_test.cc @@ -3,7 +3,7 @@ #include "paddle/framework/op_registry.h" #include "paddle/framework/operator.h" -USE_OP(add_two); +USE_OP(add); namespace paddle { namespace framework { @@ -41,7 +41,7 @@ namespace f = paddle::framework; TEST(GradOpBuilder, AddTwo) { std::shared_ptr add_op(f::OpRegistry::CreateOp( - "add_two", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {})); + "add", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {})); std::shared_ptr grad_add_op = f::OpRegistry::CreateGradOp(*add_op); EXPECT_EQ(grad_add_op->Inputs().size(), 4UL); @@ -54,8 +54,8 @@ TEST(GradOpBuilder, AddTwo) { EXPECT_EQ(grad_add_op->Output(f::GradVarName("Y")), f::GradVarName("y")); } -REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker, f::NOP); -REGISTER_OP(io_ignored, f::NOP, f::IOIgnoredOpMaker, f::NOP); +REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker, mult_io_grad, f::NOP); +REGISTER_OP(io_ignored, f::NOP, f::IOIgnoredOpMaker, io_ignored_grad, f::NOP); TEST(GradOpBuilder, MutiInOut) { std::shared_ptr test_op(f::OpRegistry::CreateOp( diff --git a/paddle/framework/images/duplicate_op2.graffle b/paddle/framework/images/duplicate_op2.graffle index 2b658085d6a55d368c320051ba7f94ec2900f13c..5cec3bc64dbd44dc99e348485969f29bd128ceb1 100644 Binary files a/paddle/framework/images/duplicate_op2.graffle and b/paddle/framework/images/duplicate_op2.graffle differ diff --git a/paddle/framework/images/duplicate_op2.png b/paddle/framework/images/duplicate_op2.png index c5588015d1450fd8c1bda3580680d884494868bb..21cdd5cabf1b5203e1435a75b57770d2f702fa92 100644 Binary files a/paddle/framework/images/duplicate_op2.png and b/paddle/framework/images/duplicate_op2.png differ diff --git a/paddle/framework/lod_tensor.cc b/paddle/framework/lod_tensor.cc index 71eac4a10b34c3010a2758120c25754af58f669d..908a1f2fd0abe0aa4016c72dbcbc18dcc144232c 100644 --- a/paddle/framework/lod_tensor.cc +++ b/paddle/framework/lod_tensor.cc @@ -19,8 +19,8 @@ namespace paddle { namespace framework { -LOD SliceLevels(const LOD& in, size_t level_begin, size_t level_end) { - LOD new_lod; +LoD SliceLevels(const LoD& in, size_t level_begin, size_t level_end) { + LoD new_lod; new_lod.reserve(level_end - level_begin); for (size_t i = level_begin; i < level_end; i++) { new_lod.emplace_back(in.at(i)); @@ -28,10 +28,10 @@ LOD SliceLevels(const LOD& in, size_t level_begin, size_t level_end) { return new_lod; } -LOD SliceInLevel(const LOD& in, size_t level, size_t elem_begin, +LoD SliceInLevel(const LoD& in, size_t level, size_t elem_begin, size_t elem_end) { // slice the lod. 
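  // Illustrative note (not part of the patch): the slice keeps the offsets of
  // every lower level that fall inside [elem_begin, elem_end) at `level`, and
  // rebases them so that the first offset of the new LoD is 0.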
- LOD new_lod; + LoD new_lod; new_lod.reserve(in.size() - level); auto start = in.at(level)[elem_begin]; auto end = in.at(level)[elem_end]; @@ -46,13 +46,13 @@ LOD SliceInLevel(const LOD& in, size_t level, size_t elem_begin, std::transform(new_lod.back().begin(), new_lod.back().end(), new_lod.back().begin(), [start](int v) { return v - start; }); - PADDLE_ENFORCE_EQ(new_lod.back().front(), 0, "error in slice LOD"); + PADDLE_ENFORCE_EQ(new_lod.back().front(), 0, "error in slice LoD"); } PADDLE_ENFORCE_LE(new_lod.size(), in.size()); return new_lod; } -bool operator==(const LOD& a, const LOD& b) { +bool operator==(const LoD& a, const LoD& b) { if (a.size() != b.size()) { return false; } @@ -72,12 +72,12 @@ bool operator==(const LOD& a, const LOD& b) { return true; } -void LODTensor::SliceLevels(size_t level_begin, size_t level_end) { +void LoDTensor::SliceLevels(size_t level_begin, size_t level_end) { auto new_lod = framework::SliceLevels(lod_, level_begin, level_end); lod_ = new_lod; } -void LODTensor::SliceInLevel(size_t level, size_t elem_begin, size_t elem_end) { +void LoDTensor::SliceInLevel(size_t level, size_t elem_begin, size_t elem_end) { PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level, NumLevels()); PADDLE_ENFORCE(elem_begin < NumElements(level), diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h index 9e6b6b4aca41ed464292b56bf6f2d27514f874f7..568f4e89819c8345d8908634f6fa56f09483a763 100644 --- a/paddle/framework/lod_tensor.h +++ b/paddle/framework/lod_tensor.h @@ -18,8 +18,10 @@ #ifndef PADDLE_ONLY_CPU #include #include +#include #endif +#include #include "paddle/framework/ddim.h" #include "paddle/framework/tensor.h" #include "paddle/platform/enforce.h" @@ -32,37 +34,38 @@ template using Vector = std::vector; #else template -using Vector = thrust::host_vector; +using Vector = thrust::host_vector< + T, thrust::system::cuda::experimental::pinned_allocator>; #endif -using LOD = std::vector>; +using LoD = std::vector>; -LOD SliceLevels(const LOD& in, size_t level_begin, size_t level_end); +LoD SliceLevels(const LoD& in, size_t level_begin, size_t level_end); -LOD SliceInLevel(const LOD& in, size_t level, size_t elem_begin, +LoD SliceInLevel(const LoD& in, size_t level, size_t elem_begin, size_t elem_end); -bool operator==(const LOD& a, const LOD& b); +bool operator==(const LoD& a, const LoD& b); /* - * LODTensor (Level of details Tensor) + * LoDTensor (Level of details Tensor) * see https://en.wikipedia.org/wiki/Level_of_details for reference. */ -class LODTensor { +class LoDTensor { public: - LODTensor() {} - LODTensor(const LOD& lod, Tensor* t) : lod_(lod), tensor_(t) {} + LoDTensor() {} + LoDTensor(const LoD& lod, Tensor* t) : lod_(lod), tensor_(t) {} - void set_lod(const LOD& lod) { lod_ = lod; } + void set_lod(const LoD& lod) { lod_ = lod; } void set_tensor(Tensor* tensor) { tensor_ = tensor; } Tensor& tensor() { return *tensor_; } - LOD lod() { return lod_; } + LoD lod() { return lod_; } /* - * Get a element from LOD. + * Get a element from LoD. */ size_t lod_element(size_t level, size_t elem) const { PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level, @@ -74,7 +77,7 @@ class LODTensor { } /* - * Number of LODTensor's levels, each level has units of data, for example, + * Number of LoDTensor's levels, each level has units of data, for example, * in the sentence's view, article, paragraph, sentence are 3 levels. 
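   * For example (an illustrative two-level layout): two sentences containing
   * 2 and 3 words respectively can be stored as the offsets
   * lod_ = {{0, 2}, {0, 2, 5}} over the tensor that holds all 5 words.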
*/ size_t NumLevels() const { return lod_.size(); } @@ -100,7 +103,7 @@ class LODTensor { void SliceInLevel(size_t level, size_t elem_begin, size_t elem_end); private: - LOD lod_; + LoD lod_; Tensor* tensor_; // not owned }; } // namespace framework } // namespace paddle diff --git a/paddle/framework/lod_tensor.md b/paddle/framework/lod_tensor.md index 8dfe3ee823084cb8c38550a82e761a741eabe135..769b61f175a2f462258c1242d027c04c0abd12a9 100644 --- a/paddle/framework/lod_tensor.md +++ b/paddle/framework/lod_tensor.md @@ -94,7 +94,7 @@ Let's go on slicing this slice. Its <1,1>-slice is ||| ```
-### The General Slicing Algorithm
+### The Slicing Algorithm

The algorithm, with over-simplified data structure, is defined as

@@ -106,17 +106,41 @@
struct LoDTensor {
  float* tensor_;
};

-LoDTensor Slice(const LoDTensor& lodt, int level, int sequence) {
+LoDTensor Slice(const LoDTensor& lodt, int level, int sequence);
+```
+
+Let us revisit the example above
-
-}
+```
+ 3
+3 1 2
+3 2 4 1 2 3
+||| || |||| | || |||
```

-### Slicing the Top Level
-
-Please be aware that an RNN operator only slices the top level of a LoD Tensor to get the step inputs.
+Suppose that we want to retrieve the <1,2>-slice

-```c++
-LoDTensor Slice(const LoDTensor& lodt, int sequence) {
+```
+2
+2 3
+|| |||
+```

+we will need to find out the starting position of this slice by summing over the lengths of all leaf nodes in the `LoD` to the left of the slice, i.e., 3 + 2 + 4 + 1 = 10.
+
+To avoid traversing the LoD tree at slicing time, we can do this work at construction time -- instead of saving the lengths of the next level in the LoD tree, we can save the starting offsets of the next level. For example, the above LoD Tensor can be transformed into
+
+```
+ 0
+0 9 10
+0 3 5 9 10 12
+||| || |||| | || |||
+```
+
+We don't really need the 0 on top, so the LoD Tensor could be
-
-}
+```
+0 9 10
+0 3 5 9 10 12
+||| || |||| | || |||
```
diff --git a/paddle/framework/lod_tensor_test.cc b/paddle/framework/lod_tensor_test.cc index 9a351605edb5013bdab2c6193bdd9ce401acc937..1da8553134f377f7a4fbe8008d12fe8d4a0e47f4 100644 --- a/paddle/framework/lod_tensor_test.cc +++ b/paddle/framework/lod_tensor_test.cc @@ -21,7 +21,7 @@ namespace paddle { namespace framework { -class LODTensorTester : public ::testing::Test { +class LoDTensorTester : public ::testing::Test { public: virtual void SetUp() override { // tensor's batch_size: 30 @@ -29,7 +29,7 @@ class LODTensorTester : public ::testing::Test { // 0 10 20 // 0 5 10 15 20 // 0 2 5 7 10 12 15 20 - LOD lod; + LoD lod; lod.push_back(std::vector{0, 10, 20}); lod.push_back(std::vector{0, 5, 10, 15, 20}); lod.push_back(std::vector{0, 2, 5, 7, 10, 12, 15, 17, 20}); @@ -47,21 +47,21 @@ class LODTensorTester : public ::testing::Test { protected: platform::CPUPlace place; Tensor tensor; - LODTensor lod_tensor; + LoDTensor lod_tensor; }; -TEST_F(LODTensorTester, NumLevels) { ASSERT_EQ(lod_tensor.NumLevels(), 3UL); } +TEST_F(LoDTensorTester, NumLevels) { ASSERT_EQ(lod_tensor.NumLevels(), 3UL); } -TEST_F(LODTensorTester, NumElements) { +TEST_F(LoDTensorTester, NumElements) { ASSERT_EQ(lod_tensor.NumElements(0), 2UL); ASSERT_EQ(lod_tensor.NumElements(1), 4UL); ASSERT_EQ(lod_tensor.NumElements(2), 8UL); } -TEST_F(LODTensorTester, SliceLevels) { +TEST_F(LoDTensorTester, SliceLevels) { // slice 1 level for (size_t level = 0; level < 3UL; ++level) { - LODTensor new_lod_tensor = lod_tensor; + LoDTensor new_lod_tensor = lod_tensor; new_lod_tensor.SliceLevels(level, level + 1); ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL); ASSERT_EQ(new_lod_tensor.NumElements(0),
lod_tensor.NumElements(level)); @@ -70,7 +70,7 @@ TEST_F(LODTensorTester, SliceLevels) { } // slice 2 level for (size_t level = 0; level < 2UL; ++level) { - LODTensor new_lod_tensor = lod_tensor; + LoDTensor new_lod_tensor = lod_tensor; new_lod_tensor.SliceLevels(level, level + 2); ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL); ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor.NumElements(level)); @@ -80,9 +80,9 @@ TEST_F(LODTensorTester, SliceLevels) { } } -TEST_F(LODTensorTester, SliceInLevel) { +TEST_F(LoDTensorTester, SliceInLevel) { size_t level = 0; - LODTensor new_lod_tensor = lod_tensor; + LoDTensor new_lod_tensor = lod_tensor; new_lod_tensor.SliceInLevel(level, 0, 2); EXPECT_EQ(new_lod_tensor.NumLevels(), 3UL); EXPECT_EQ(new_lod_tensor.NumElements(0), 2UL); diff --git a/paddle/framework/lod_tensor_test.cu b/paddle/framework/lod_tensor_test.cu new file mode 100644 index 0000000000000000000000000000000000000000..1079a36a2e7b24f6f8a5bcbb296355567305a765 --- /dev/null +++ b/paddle/framework/lod_tensor_test.cu @@ -0,0 +1,52 @@ +/* + Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include +#include +#include "paddle/framework/lod_tensor.h" +#include "paddle/platform/assert.h" + +#include + +__global__ void test(size_t* a, int size) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; + i += blockDim.x * gridDim.x) { + a[i] *= 2; + } +} + +TEST(LoDTensor, LoDInGPU) { + paddle::framework::Tensor tensor; + paddle::framework::LoDTensor lod_tensor; + paddle::platform::GPUPlace place(0); + + paddle::framework::LoD src_lod; + src_lod.push_back(std::vector{0, 2, 4, 6, 8, 10, 12, 14}); + + tensor.Resize({14, 16}); + tensor.mutable_data(place); + + lod_tensor.set_lod(src_lod); + lod_tensor.set_tensor(&tensor); + CHECK_EQ(lod_tensor.lod_element(0, 2), 4); + CHECK_EQ(lod_tensor.lod_element(0, 4), 8); + + auto lod = lod_tensor.lod(); + + test<<<1, 8>>>(lod[0].data(), lod[0].size()); + cudaDeviceSynchronize(); + + for (size_t i = 0; i < src_lod[0].size(); ++i) { + CHECK_EQ(lod[0].data()[i], src_lod[0].data()[i] * 2); + } +} diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 64c7f23ab6b79bad9533f566ca39db3cfd5ac5c5..572dff860a306bb03ba9e6702fec85e4a2ea1b54 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -33,7 +33,8 @@ namespace framework { class OpRegistry { public: template - static void RegisterOp(const std::string& op_type) { + static void RegisterOp(const std::string& op_type, + const std::string& grad_op_type) { PADDLE_ENFORCE(!OpInfoMap::Instance().Has(op_type), "'%s' is registered more than once.", op_type); OpInfo op_info; @@ -42,9 +43,9 @@ class OpRegistry { const VariableNameMap& outputs, const AttributeMap& attrs) { return new OpType(type, inputs, outputs, attrs); }; + op_info.grad_op_type_ = grad_op_type; if (std::type_index(typeid(ProtoMakerType)) != std::type_index(typeid(NOPMaker))) { - op_info.grad_op_type_ = op_type + "_grad"; op_info.proto_ = new OpProto; 
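    // Note (editor's gloss, not part of the patch): proto_ and checker_ are
    // only materialized for ops registered with a real proto maker; gradient
    // ops, registered via the recursive call below with the NOPMaker, leave
    // both null.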
op_info.checker_ = new OpAttrChecker; auto maker = ProtoMakerType(op_info.proto_, op_info.checker_); @@ -54,14 +55,15 @@ class OpRegistry { op_info.proto_->IsInitialized(), "Fail to initialize %s's OpProto, because %s is not initialized", op_type, op_info.proto_->InitializationErrorString()); - // register gradient op - RegisterOp(op_info.grad_op_type_); } else { - op_info.grad_op_type_ = ""; op_info.proto_ = nullptr; op_info.checker_ = nullptr; } OpInfoMap::Instance().Insert(op_type, op_info); + // register gradient op + if (!grad_op_type.empty()) { + RegisterOp(grad_op_type, ""); + } } static std::unique_ptr CreateOp(const std::string& type, @@ -90,8 +92,10 @@ class Registrar { template class OpRegistrar : public Registrar { public: - explicit OpRegistrar(const char* op_type) { - OpRegistry::RegisterOp(op_type); + explicit OpRegistrar(const char* op_type) { OpRegistrar(op_type, ""); } + OpRegistrar(const char* op_type, const char* grad_op_type) { + OpRegistry::RegisterOp(op_type, + grad_op_type); } }; @@ -117,7 +121,8 @@ class OpKernelRegistrar : public Registrar { /** * Macro to register Operator. */ -#define REGISTER_OP(op_type, op_class, op_maker_class, grad_op_class) \ +#define REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type, \ + grad_op_class) \ STATIC_ASSERT_GLOBAL_NAMESPACE( \ __reg_op__##op_type, "REGISTER_OP must be called in global namespace"); \ class _OpClass_##op_type##_ : public op_class { \ @@ -132,14 +137,14 @@ class OpKernelRegistrar : public Registrar { }; \ static ::paddle::framework::OpRegistrar< \ _OpClass_##op_type##_, op_maker_class, _OpGradClass_##op_type##_> \ - __op_registrar_##op_type##__(#op_type); \ + __op_registrar_##op_type##__(#op_type, #grad_op_type); \ int TouchOpRegistrar_##op_type() { \ __op_registrar_##op_type##__.Touch(); \ return 0; \ } #define REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class) \ - REGISTER_OP(op_type, op_class, op_maker_class, ::paddle::framework::NOP) + REGISTER_OP(op_type, op_class, op_maker_class, , ::paddle::framework::NOP) /** * Macro to register OperatorKernel. 
@@ -194,6 +199,8 @@ class OpKernelRegistrar : public Registrar { USE_OP_DEVICE_KERNEL(op_type, GPU) #endif +#define USE_NO_KERNEL_OP(op_type) USE_OP_ITSELF(op_type); + #define USE_CPU_ONLY_OP(op_type) \ USE_OP_ITSELF(op_type); \ USE_OP_DEVICE_KERNEL(op_type, CPU); diff --git a/paddle/framework/op_registry_test.cc b/paddle/framework/op_registry_test.cc index 50c45919c53af22665feeeebe753da283ded2b0c..e00c6e8d904508ec9985537fc703c7c61a14e0de 100644 --- a/paddle/framework/op_registry_test.cc +++ b/paddle/framework/op_registry_test.cc @@ -21,7 +21,7 @@ class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { AddOutput("output", "output of cosine op"); AddAttr("scale", "scale of cosine op") .SetDefault(1.0) - .LargerThan(0.0); + .GreaterThan(0.0); AddComment("This is cos op"); } }; @@ -80,7 +80,7 @@ TEST(OpRegistry, CreateOp) { paddle::framework::Scope scope; paddle::platform::CPUDeviceContext dev_ctx; op->Run(scope, dev_ctx); - float scale_get = op->GetAttr("scale"); + float scale_get = op->Attr("scale"); ASSERT_EQ(scale_get, scale); } @@ -121,7 +121,7 @@ TEST(OpRegistry, DefaultValue) { paddle::framework::Scope scope; paddle::platform::CPUDeviceContext dev_ctx; op->Run(scope, dev_ctx); - ASSERT_EQ(op->GetAttr("scale"), 1.0); + ASSERT_EQ(op->Attr("scale"), 1.0); } TEST(OpRegistry, CustomChecker) { @@ -172,38 +172,6 @@ TEST(OpRegistry, CustomChecker) { paddle::platform::CPUDeviceContext dev_ctx; paddle::framework::Scope scope; op->Run(scope, dev_ctx); - int test_attr = op->GetAttr("test_attr"); + int test_attr = op->Attr("test_attr"); ASSERT_EQ(test_attr, 4); -} - -class TestAttrProtoMaker : public pd::OpProtoAndCheckerMaker { - public: - TestAttrProtoMaker(pd::OpProto* proto, pd::OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddAttr("scale", "scale of test op"); - AddAttr("scale", "scale of test op"); - } -}; - -TEST(ProtoMaker, DuplicatedAttr) { - pd::OpProto op_proto; - pd::OpAttrChecker op_checker; - auto proto_maker = TestAttrProtoMaker(&op_proto, &op_checker); - ASSERT_THROW(proto_maker.Validate(), paddle::platform::EnforceNotMet); -} - -class TestInOutProtoMaker : public pd::OpProtoAndCheckerMaker { - public: - TestInOutProtoMaker(pd::OpProto* proto, pd::OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("input", "input of test op"); - AddInput("input", "input of test op"); - } -}; - -TEST(ProtoMaker, DuplicatedInOut) { - pd::OpProto op_proto; - pd::OpAttrChecker op_checker; - auto proto_maker = TestInOutProtoMaker(&op_proto, &op_checker); - ASSERT_THROW(proto_maker.Validate(), paddle::platform::EnforceNotMet); -} +} \ No newline at end of file diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index 790cfc4746b1d34da413fa3c29a266f962c6dde6..e1e122091f7759b1a68f1f982bc2a35e8241f9f0 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -123,6 +123,15 @@ OperatorBase::OperatorBase(const std::string& type, CheckAllInputOutputSet(); } +std::vector OperatorBase::InputVars() const { + std::vector ret_val; + for (auto& o : outputs_) { + ret_val.reserve(ret_val.size() + o.second.size()); + ret_val.insert(ret_val.end(), o.second.begin(), o.second.end()); + } + return ret_val; +} + std::vector OperatorBase::OutputVars(bool has_intermediate) const { std::vector ret_val; if (has_intermediate) { diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 590e335fdc8843ed9edd01a09605163de93f52d9..4600b06009bcef7d0774d25b816aac4733f30795 100644 --- 
a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -69,7 +69,7 @@ class OperatorBase { virtual ~OperatorBase() {} template - inline const T& GetAttr(const std::string& name) const { + inline const T& Attr(const std::string& name) const { PADDLE_ENFORCE(attrs_.count(name) != 0, "%s should be in AttributeMap", name); return boost::get(attrs_.at(name)); @@ -94,11 +94,14 @@ class OperatorBase { const VariableNameMap& Inputs() const { return inputs_; } const VariableNameMap& Outputs() const { return outputs_; } + //! Get a input with argument's name described in `op_proto` std::string Input(const std::string& name) const; //! Get a input which has multiple variables. const std::vector& Inputs(const std::string& name) const; + std::vector InputVars() const; + //! Get a output with argument's name described in `op_proto` std::string Output(const std::string& name) const; //! Get an output which has multiple variables. @@ -233,6 +236,15 @@ class InferShapeContext { InferShapeContext(const OperatorBase& op, const Scope& scope) : op_(op), scope_(scope) {} + const OperatorBase& op() const { return op_; } + + const Scope& scope() const { return scope_; } + + template + inline const T& Attr(const std::string& name) const { + return op_.Attr(name); + } + size_t InputSize(const std::string& name) const { return op_.Inputs(name).size(); } @@ -302,9 +314,9 @@ class InferShapeContext { } template - std::vector MultiOutput(const std::string& name) const { + std::vector MultiOutput(const std::string& name) const { auto names = op_.Outputs(name); - std::vector res; + std::vector res; res.reserve(names.size()); std::transform(names.begin(), names.end(), std::back_inserter(res), [&](const std::string& sub_name) { @@ -314,6 +326,7 @@ class InferShapeContext { return res; } + private: const OperatorBase& op_; const Scope& scope_; }; diff --git a/paddle/framework/operator_test.cc b/paddle/framework/operator_test.cc index 1d7efb7b9403f7c1c6bdbb27a0258f79ae032f43..20bbb11896a4c6f11079669f0b25773f6460594d 100644 --- a/paddle/framework/operator_test.cc +++ b/paddle/framework/operator_test.cc @@ -102,7 +102,7 @@ class OpKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker { AddOutput("y", "output of test op"); AddAttr("scale", "scale of cosine op") .SetDefault(1.0) - .LargerThan(0.0); + .GreaterThan(0.0); AddComment("This is test op"); } }; @@ -122,10 +122,10 @@ class CPUKernelTest : public OpKernel { public: void Compute(const ExecutionContext& ctx) const { std::cout << "this is cpu kernel" << std::endl; - std::cout << ctx.op_.DebugString() << std::endl; + std::cout << ctx.op().DebugString() << std::endl; cpu_kernel_run_num++; - ASSERT_EQ(ctx.op_.Input("x"), "IN1"); - ASSERT_EQ(ctx.op_.Output("y"), "OUT1"); + ASSERT_EQ(ctx.op().Input("x"), "IN1"); + ASSERT_EQ(ctx.op().Output("y"), "OUT1"); } }; @@ -140,7 +140,7 @@ class OpKernelTestMultiInputsProtoAndCheckerMaker AddOutput("ys", "outputs of test op").AsDuplicable(); AddAttr("scale", "scale of cosine op") .SetDefault(1.0) - .LargerThan(0.0); + .GreaterThan(0.0); AddComment("This is test op"); } }; @@ -148,7 +148,7 @@ class OpKernelTestMultiInputsProtoAndCheckerMaker class CPUKernalMultiInputsTest : public OpKernel { public: void Compute(const ExecutionContext& ctx) const { - auto xs = ctx.op_.Inputs("xs"); + auto xs = ctx.op().Inputs("xs"); ASSERT_EQ(xs.size(), 3UL); ASSERT_EQ(xs[0], "x0"); ASSERT_EQ(xs[1], "x1"); @@ -172,10 +172,10 @@ class CPUKernalMultiInputsTest : public OpKernel { auto outTensor0 = ctx.MultiOutput("ys"); 
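    // "ys" is a duplicable output bound to {"y0", "y1"} in this test, so two
    // output tensors come back.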
ASSERT_EQ(outTensor0.size(), 2U); - auto k = ctx.op_.Input("k"); + auto k = ctx.op().Input("k"); ASSERT_EQ(k, "k0"); - auto ys = ctx.op_.Outputs("ys"); + auto ys = ctx.op().Outputs("ys"); ASSERT_EQ(ys.size(), 2UL); ASSERT_EQ(ys[0], "y0"); ASSERT_EQ(ys[1], "y1"); @@ -263,4 +263,38 @@ TEST(Operator, Clone) { OperatorClone a("ABC", {}, {}, {}); auto b = a.Clone(); ASSERT_EQ(a.Type(), b->Type()); +} + +class TestAttrProtoMaker : public paddle::framework::OpProtoAndCheckerMaker { + public: + TestAttrProtoMaker(paddle::framework::OpProto* proto, + paddle::framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddAttr("scale", "scale of test op"); + AddAttr("scale", "scale of test op"); + } +}; + +TEST(ProtoMaker, DuplicatedAttr) { + paddle::framework::OpProto op_proto; + paddle::framework::OpAttrChecker op_checker; + auto proto_maker = TestAttrProtoMaker(&op_proto, &op_checker); + ASSERT_THROW(proto_maker.Validate(), paddle::platform::EnforceNotMet); +} + +class TestInOutProtoMaker : public paddle::framework::OpProtoAndCheckerMaker { + public: + TestInOutProtoMaker(paddle::framework::OpProto* proto, + paddle::framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("input", "input of test op"); + AddInput("input", "input of test op"); + } +}; + +TEST(ProtoMaker, DuplicatedInOut) { + paddle::framework::OpProto op_proto; + paddle::framework::OpAttrChecker op_checker; + auto proto_maker = TestInOutProtoMaker(&op_proto, &op_checker); + ASSERT_THROW(proto_maker.Validate(), paddle::platform::EnforceNotMet); } \ No newline at end of file diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h index 643f875491724bf443bd7727391734377ee6180c..4b5a2ae523f2f7fde5445f0534cd99969ad9d59e 100644 --- a/paddle/framework/tensor.h +++ b/paddle/framework/tensor.h @@ -43,6 +43,9 @@ class Tensor { template friend struct EigenTensor; + template + friend struct EigenMatrix; + template friend struct EigenVector; @@ -78,6 +81,9 @@ class Tensor { /*! Return the dimensions of the memory block. */ inline const DDim& dims() const; + /*! Return the numel of the memory block. */ + inline int64_t numel() const; + /*! Resize the dimensions of the memory block. */ inline Tensor& Resize(const DDim& dims); @@ -159,6 +165,12 @@ class Tensor { /*! points to dimensions of memory block. */ DDim dims_; + /** + * A cache of the number of elements in a tensor. + * Would be 0 for an uninitialized tensor. + */ + int64_t numel_; + /** * @brief A PlaceHolder may be shared by more than one tensor. * diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index 7893e233b776425a61d9e3edd43d944a27743188..642b53efc7095d25712ca324638f5fe9b8316c0c 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -24,7 +24,7 @@ inline void Tensor::check_memory_size() const { PADDLE_ENFORCE_NOT_NULL( holder_, "Tenosr holds no memory. Call Tensor::mutable_data first."); PADDLE_ENFORCE_GE( - holder_->size(), product(dims_) * sizeof(T) + offset_, + holder_->size(), numel() * sizeof(T) + offset_, "Tensor's dims_ is out of bound. 
Call Tensor::mutable_data " "first to re-allocate memory.\n" "or maybe the required data-type mismatches the data already stored."); @@ -54,11 +54,11 @@ inline T* Tensor::mutable_data(DDim dims, platform::Place place) { template inline T* Tensor::mutable_data(platform::Place place) { static_assert(std::is_pod::value, "T must be POD"); - PADDLE_ENFORCE_GT(product(dims_), 0, + PADDLE_ENFORCE_GT(numel(), 0, "Tensor's numel must be larger than zero to call " "Tensor::mutable_data. Call Tensor::set_dim first."); /* some versions of boost::variant don't have operator!= */ - size_t size = product(dims_) * sizeof(T); + int64_t size = numel() * sizeof(T); if (holder_ == nullptr || !(holder_->place() == place) || holder_->size() < size + offset_) { if (platform::is_cpu_place(place)) { @@ -97,7 +97,7 @@ inline void Tensor::CopyFrom(const Tensor& src, auto dst_ptr = static_cast(mutable_data(dst_place)); - auto size = product(src.dims_) * sizeof(T); + auto size = src.numel() * sizeof(T); if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) { memory::Copy(boost::get(dst_place), dst_ptr, @@ -131,7 +131,7 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const { PADDLE_ENFORCE_LT(begin_idx, end_idx, "Begin index must be less than end index."); PADDLE_ENFORCE_NE(dims_[0], 1, "Can not slice a tensor with dims_[0] = 1."); - int base = product(dims_) / dims_[0]; + size_t base = numel() / dims_[0]; Tensor dst; dst.holder_ = holder_; DDim dst_dims = dims_; @@ -143,10 +143,21 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const { inline Tensor& Tensor::Resize(const DDim& dims) { dims_ = dims; + numel_ = product(dims_); return *this; } inline const DDim& Tensor::dims() const { return dims_; } +inline int64_t Tensor::numel() const { return numel_; } + +template +inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) { + Tensor res; + res.ShareDataWith(src); + res.Resize(flatten_to_2d(src.dims(), num_col_dims)); + return res; +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/tensor_test.cc b/paddle/framework/tensor_test.cc index 7db38d5caeebccf710334e854faf785ef0f64063..55302ea47120f420e952b26830c8ea4cbcce6435 100644 --- a/paddle/framework/tensor_test.cc +++ b/paddle/framework/tensor_test.cc @@ -262,3 +262,16 @@ TEST(Tensor, CopyFrom) { } #endif } + +TEST(Tensor, ReshapeToMatrix) { + using namespace paddle::framework; + using namespace paddle::platform; + Tensor src; + int* src_ptr = src.mutable_data({2, 3, 4, 9}, CPUPlace()); + for (int i = 0; i < 2 * 3 * 4 * 9; ++i) { + src_ptr[i] = i; + } + Tensor res = ReshapeToMatrix(src, 2); + ASSERT_EQ(res.dims()[0], 2 * 3); + ASSERT_EQ(res.dims()[1], 4 * 9); +} \ No newline at end of file diff --git a/paddle/function/CMakeLists.txt b/paddle/function/CMakeLists.txt index f43f15e5cacb70b625d7791e1e02ce7780286200..4fd72d64a90ae6f16dd1499ceb7fba6e40fe4cea 100644 --- a/paddle/function/CMakeLists.txt +++ b/paddle/function/CMakeLists.txt @@ -44,6 +44,7 @@ if(WITH_GPU) add_simple_unittest(RowConvOpTest) add_simple_unittest(BlockExpandOpTest) add_simple_unittest(CropOpTest) + add_simple_unittest(SwitchOpTest) endif() add_simple_unittest(Im2ColTest) diff --git a/paddle/function/EigenGemm.cpp b/paddle/function/EigenGemm.cpp index 674141ed39b7f5573948348e3ba3bb526ae43c66..b3e666e860d29d89650d48a23cf44917035a02d7 100644 --- a/paddle/function/EigenGemm.cpp +++ b/paddle/function/EigenGemm.cpp @@ -83,9 +83,9 @@ struct EigenBlasGemm { }; #ifdef PADDLE_TYPE_DOUBLE -template 
class EigenBlasGemm; +template struct EigenBlasGemm; #else -template class EigenBlasGemm; +template struct EigenBlasGemm; #endif } // namespace paddle diff --git a/paddle/function/GruFunctor.h b/paddle/function/GruFunctor.h new file mode 100644 index 0000000000000000000000000000000000000000..9f6392198ea360502f313cbe15dfae46ece69758 --- /dev/null +++ b/paddle/function/GruFunctor.h @@ -0,0 +1,159 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "GemmFunctor.h" +#include "hl_cpu_gru.cuh" + +namespace paddle { + +template +struct GruFunctor { + template + static void compute(OpResetOutput opResetOutput, + OpFinalOutput opFinalOutput, + hl_gru_value value, + int frameSize, + int batchSize, + hl_activation_mode_t active_node, + hl_activation_mode_t active_gate) { +#ifndef __NVCC__ + if (value.prevOutValue) { + BlasGemm::compute(false, + false, + batchSize, + 2 * frameSize, + frameSize, + 1, + value.prevOutValue, + frameSize, + value.gateWeight, + frameSize * 2, + 1, + value.gateValue, + frameSize * 3); + } + + forward_reset_output( + opResetOutput, value, frameSize, batchSize, active_gate); + + if (value.prevOutValue) { + BlasGemm::compute(false, + false, + batchSize, + frameSize, + frameSize, + 1, + value.resetOutputValue, + frameSize, + value.stateWeight, + frameSize, + 1, + value.gateValue + frameSize * 2, + frameSize * 3); + } + + forward_final_output( + opFinalOutput, value, frameSize, batchSize, active_node); +#endif + } +}; + +template +struct GruGradFunctor { + template + static void compute(OpStateGrad opStateGrad, + OpResetGrad opResetGrad, + hl_gru_value value, + hl_gru_grad grad, + int frameSize, + int batchSize, + hl_activation_mode_t active_node, + hl_activation_mode_t active_gate) { +#ifndef __NVCC__ + backward_state_grad( + opStateGrad, value, grad, frameSize, batchSize, active_node); + + if (value.prevOutValue && grad.prevOutGrad) { + BlasGemm::compute(false, + true, + batchSize, + frameSize, + frameSize, + 1, + grad.gateGrad + frameSize * 2, + frameSize * 3, + value.stateWeight, + frameSize, + 0, + grad.resetOutputGrad, + frameSize); + + if (grad.stateWeightGrad) { + BlasGemm::compute(true, + false, + frameSize, + frameSize, + batchSize, + 1, + value.resetOutputValue, + frameSize, + grad.gateGrad + frameSize * 2, + frameSize * 3, + 1, + grad.stateWeightGrad, + frameSize); + } + } + + backward_reset_grad( + opResetGrad, value, grad, frameSize, batchSize, active_gate); + + if (grad.prevOutGrad && value.prevOutValue) { + BlasGemm::compute(false, + true, + batchSize, + frameSize, + frameSize * 2, + 1, + grad.gateGrad, + frameSize * 3, + value.gateWeight, + frameSize * 2, + 1, + grad.prevOutGrad, + frameSize); + + if (grad.gateWeightGrad) { + BlasGemm::compute(true, + false, + frameSize, + frameSize * 2, + batchSize, + 1, + value.prevOutValue, + frameSize, + grad.gateGrad, + frameSize * 3, + 1, + grad.gateWeightGrad, + frameSize * 2); + } + } +#endif + } +}; + +} // namespace paddle diff --git 
a/paddle/function/Im2Col.h b/paddle/function/Im2Col.h index 9b91e223a6a28586b11fe7ed4a44421e029a67bb..1e0cff436ff60d5a029e89657d00af2b0bf8b454 100644 --- a/paddle/function/Im2Col.h +++ b/paddle/function/Im2Col.h @@ -94,95 +94,4 @@ public: int paddingWidth); }; -template -struct Padding { - static void run(const T* src, - T* dest, - int channels, - int inputHeight, - int inputWidth, - int paddingHeight, - int paddingWidth) { - const int destWidth = inputWidth + 2 * paddingWidth; - for (int c = 0; c < channels; c++) { - if (paddingHeight > 0) { - memset(dest, 0, destWidth * paddingHeight * sizeof(T)); - dest += destWidth * paddingHeight; - } - - for (int i = 0; i < inputHeight; i++) { - // padding head - for (int j = 0; j < paddingWidth; j++) { - *dest++ = T(0); - } - - memcpy(dest, src, inputWidth * sizeof(T)); - dest += inputWidth; - src += inputWidth; - - // padding tail - for (int j = 0; j < paddingWidth; j++) { - *dest++ = T(0); - } - } - - if (paddingHeight > 0) { - memset(dest, 0, destWidth * paddingHeight * sizeof(T)); - dest += destWidth * paddingHeight; - } - } - } -}; - -#if defined(__ARM_NEON__) || defined(__ARM_NEON) -template <> -struct Padding { - static void run(const float* src, - float* dest, - int channels, - int inputHeight, - int inputWidth, - int paddingHeight, - int paddingWidth) { - const int destWidth = inputWidth + 2 * paddingWidth; - for (int c = 0; c < channels; c++) { - if (paddingHeight > 0) { - memset(dest, 0, destWidth * paddingHeight * sizeof(float)); - dest += destWidth * paddingHeight; - } - - for (int i = 0; i < inputHeight; i++) { - // padding head - for (int j = 0; j < paddingWidth; j++) { - *dest++ = float(0); - } - - int step = inputWidth >> 2; - int remain = inputWidth & 3; - for (int s = 0; s < step; s++) { - float32x4_t s0 = vld1q_f32(src); - vst1q_f32(dest, s0); - src += 4; - dest += 4; - } - for (int r = 0; r < remain; r++) { - *dest++ = *src++; - } - - // padding tail - for (int j = 0; j < paddingWidth; j++) { - *dest++ = float(0); - } - } - - if (paddingHeight > 0) { - memset(dest, 0, destWidth * paddingHeight * sizeof(float)); - dest += destWidth * paddingHeight; - } - } - } -}; - -#endif - } // namespace paddle diff --git a/paddle/function/MulOp.cpp b/paddle/function/MulOp.cpp index 91b4b8ed91b6055babcfbab8f7adb2c55e2747d0..25e41edad54bec0f76a3de4799fab14241407272 100644 --- a/paddle/function/MulOp.cpp +++ b/paddle/function/MulOp.cpp @@ -13,18 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "MulOp.h" -/// todo(tianbing), delete it -#include -#include "paddle/math/MathFunctions.h" +#include "GemmFunctor.h" #include "paddle/math/SIMDFunctions.h" #include "paddle/utils/ThreadLocal.h" -#ifndef PADDLE_TYPE_DOUBLE -#define GEMM paddle::gemm -#else -#define GEMM paddle::gemm -#endif - namespace { inline void vecAddTo(real* a, const real* b, real scaleB, size_t len) { for (unsigned int i = 0; i < len; ++i) { @@ -114,19 +106,20 @@ void MulOp(CpuMatrix& out, real scaleT, bool aTrans, bool bTrans) { - GEMM(aTrans ? CblasTrans : CblasNoTrans, - bTrans ? CblasTrans : CblasNoTrans, - out.getHeight(), - out.getWidth(), - !aTrans ? a.getWidth() : a.getHeight(), - scaleAB, - a.getData(), - a.getStride(), - b.getData(), - b.getStride(), - scaleT, - out.getData(), - out.getStride()); + BlasGemm::compute( + aTrans, + bTrans, + out.getHeight(), + out.getWidth(), + !aTrans ? 
a.getWidth() : a.getHeight(), + scaleAB, + a.getData(), + a.getStride(), + b.getData(), + b.getStride(), + scaleT, + out.getData(), + out.getStride()); } /// dense matrix (+)= sparse matrix * dense matrix diff --git a/paddle/function/SwitchOp.cpp b/paddle/function/SwitchOp.cpp new file mode 100644 index 0000000000000000000000000000000000000000..01e252a8dc0cd5fa1e964efa01d04cf282b3dfe7 --- /dev/null +++ b/paddle/function/SwitchOp.cpp @@ -0,0 +1,140 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "SwitchOp.h" +#include "paddle/math/Vector.h" + +namespace paddle { + +template <> +void NCHW2NHWC(real* outputs, + const real* inputs, + const int num, + const int inC, + const int inH, + const int inW, + const int argType) { + for (int n = 0; n < num; ++n) { + for (int c = 0; c < inC; ++c) { + for (int h = 0; h < inH; ++h) { + for (int w = 0; w < inW; ++w) { + if (argType == ADD_TO) { + outputs[((n * inH + h) * inW + w) * inC + c] += *(inputs++); + } else { + outputs[((n * inH + h) * inW + w) * inC + c] = *(inputs++); + } + } + } + } + } +} + +template <> +void NHWC2NCHW(real* outputs, + const real* inputs, + const int num, + const int inH, + const int inW, + const int inC, + const int argType) { + for (int n = 0; n < num; ++n) { + for (int h = 0; h < inH; ++h) { + for (int w = 0; w < inW; ++w) { + for (int c = 0; c < inC; ++c) { + if (argType == ADD_TO) { + outputs[((n * inC + c) * inH + h) * inW + w] += *(inputs++); + } else { + outputs[((n * inC + c) * inH + h) * inW + w] = *(inputs++); + } + } + } + } + } +} + +/** + * \brief Switch dimension order of image input. + * The input and output is a 4D tensor. Switch order + * 'batch_size,channels, height, width' to + * order 'batch_size, height, width, channels'. + * + * Argument in this Function: + * \param inputs input data with order 'batch_size,channels, height, width'. + * \param outputs output data with order 'batch_size, height, width, channels'. + */ +template +class NCHW2NHWCFunc : public FunctionBase { +public: + void init(const FuncConfig& config) override {} + + void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { + CHECK_EQ(1UL, inputs.size()); + CHECK_EQ(1UL, outputs.size()); + + size_t num = inputs[0].shape()[0]; + size_t inC = inputs[0].shape()[1]; + size_t inH = inputs[0].shape()[2]; + size_t inW = inputs[0].shape()[3]; + NCHW2NHWC(outputs[0].data(), + inputs[0].data(), + num, + inC, + inH, + inW, + outputs[0].getArgType()); + } +}; + +/** + * \brief Switch dimension order of image input. + * The input and output is a 4D tensor. Switch order + * 'batch_size, height, width, channels' to + * order 'batch_size, channels, height, width'. + * + * Argument in this Function: + * \param inputs input data with order 'batch_size, height, width, channels'. + * \param outputs output data with order 'batch_size, channels, height, width'. 
+ */ +template <DeviceType Device> +class NHWC2NCHWFunc : public FunctionBase { +public: + void init(const FuncConfig& config) override {} + + void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { + CHECK_EQ(1UL, inputs.size()); + CHECK_EQ(1UL, outputs.size()); + + size_t num = inputs[0].shape()[0]; + size_t inH = inputs[0].shape()[1]; + size_t inW = inputs[0].shape()[2]; + size_t inC = inputs[0].shape()[3]; + + NHWC2NCHW<Device>(outputs[0].data<real>(), + inputs[0].data<real>(), + num, + inH, + inW, + inC, + outputs[0].getArgType()); + } +}; + +REGISTER_TYPED_FUNC(NCHW2NHWC, CPU, NCHW2NHWCFunc); +REGISTER_TYPED_FUNC(NHWC2NCHW, CPU, NHWC2NCHWFunc); +#ifndef PADDLE_ONLY_CPU +REGISTER_TYPED_FUNC(NCHW2NHWC, GPU, NCHW2NHWCFunc); +REGISTER_TYPED_FUNC(NHWC2NCHW, GPU, NHWC2NCHWFunc); +#endif + +} // namespace paddle diff --git a/paddle/function/SwitchOp.h b/paddle/function/SwitchOp.h new file mode 100644 index 0000000000000000000000000000000000000000..e4c1c3ac922f88c3e5424b5943082810aabfacdb --- /dev/null +++ b/paddle/function/SwitchOp.h @@ -0,0 +1,66 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "Function.h" + +namespace paddle { + +/** + * \brief This function switches the dimension order of an image input. + * The input and output are 4D tensors. It switches the order + * 'batch_size, channels, height, width' to + * 'batch_size, height, width, channels'. + * + * \param[out] outputs save results. + * \param[in] inputs input data. + * \param[in] num batch size of input data. + * \param[in] inC channel number of input data. + * \param[in] inH height of input data. + * \param[in] inW width of input data. + * \param[in] argType type of output argument. + */ +template <DeviceType Device> +void NCHW2NHWC(real* outputs, + const real* inputs, + const int num, + const int inC, + const int inH, + const int inW, + const int argType); + +/** + * \brief This function switches the dimension order of an image input. + * The input and output are 4D tensors. It switches the order + * 'batch_size, height, width, channels' to + * 'batch_size, channels, height, width'. + * + * \param[out] inGrad gradients of previous layer. + * \param[in] outGrad output gradients. + * \param[in] num batch size of input data. + * \param[in] inH height of input data. + * \param[in] inW width of input data. + * \param[in] inC channel number of input data. + * \param[in] argType type of output argument. + */ +template <DeviceType Device> +void NHWC2NCHW(real* inGrad, + const real* outGrad, + const int num, + const int inH, + const int inW, + const int inC, + const int argType); +} // namespace paddle diff --git a/paddle/function/SwitchOpGpu.cu b/paddle/function/SwitchOpGpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..45390a56c3f776ec18a65a6ba2f7149a7a6ef6c3 --- /dev/null +++ b/paddle/function/SwitchOpGpu.cu @@ -0,0 +1,98 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
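// The SwitchOp declarations above are pure index permutations: element
// (n, c, h, w) of an NCHW tensor lands at offset ((n*H + h)*W + w)*C + c in
// NHWC. A standalone scalar reference of the forward mapping, offered only as
// a sketch for checking either device implementation:
void NCHW2NHWCReference(float* dst, const float* src,
                        int N, int C, int H, int W) {
  for (int n = 0; n < N; ++n)
    for (int c = 0; c < C; ++c)
      for (int h = 0; h < H; ++h)
        for (int w = 0; w < W; ++w)
          dst[((n * H + h) * W + w) * C + c] =     // NHWC offset
              src[((n * C + c) * H + h) * W + w];  // NCHW offset
}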
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "SwitchOp.h" +#include "hl_base.h" + +namespace paddle { + +__global__ void KeNCHW2NHWC(real* outputs, + const real* inputs, + int inC, + int inH, + int inW, + int nthreads, + int argType) { + const int idx = threadIdx.x + blockIdx.x * blockDim.x; + if (idx < nthreads) { + const int w = idx % inW; + const int h = (idx / inW) % inH; + const int c = (idx / inW / inH) % inC; + const int n = idx / inW / inH / inC; + + const int off = ((n * inH + h) * inW + w) * inC + c; + if (argType == ADD_TO) { + outputs[off] += inputs[idx]; + } else { + outputs[off] = inputs[idx]; + } + } +} + +template <> +void NCHW2NHWC(real* outputs, + const real* inputs, + const int num, + const int inC, + const int inH, + const int inW, + const int argType) { + size_t nth = num * inC * inH * inW; + int blockSize = 1024; + int gridSize = (nth + 1024 - 1) / 1024; + KeNCHW2NHWC<<>>( + outputs, inputs, inC, inH, inW, nth, argType); + CHECK_SYNC("NCHW2NHWC"); +} + +__global__ void KeNHWC2NCHW(real* outputs, + const real* inputs, + int inH, + int inW, + int inC, + int nthreads, + int argType) { + const int idx = threadIdx.x + blockIdx.x * blockDim.x; + if (idx < nthreads) { + const int c = idx % inC; + const int w = (idx / inC) % inW; + const int h = (idx / inC / inW) % inH; + const int n = idx / inW / inH / inC; + + const int off = ((n * inC + c) * inH + h) * inW + w; + if (argType == ADD_TO) { + outputs[off] += inputs[idx]; + } else { + outputs[off] = inputs[idx]; + } + } +} + +template <> +void NHWC2NCHW(real* outputs, + const real* inputs, + const int num, + const int inH, + const int inW, + const int inC, + const int argType) { + int nth = num * inC * inH * inW; + int blockSize = 1024; + int gridSize = (nth + 1024 - 1) / 1024; + KeNHWC2NCHW<<>>( + outputs, inputs, inH, inW, inC, nth, argType); + CHECK_SYNC("NHWC2NCHW"); +} + +} // namespace paddle diff --git a/paddle/function/SwitchOpTest.cpp b/paddle/function/SwitchOpTest.cpp new file mode 100644 index 0000000000000000000000000000000000000000..03b0dd66ddcbab713969ed747601ecb1b2eb7955 --- /dev/null +++ b/paddle/function/SwitchOpTest.cpp @@ -0,0 +1,44 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
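// Both GPU kernels above assign one thread per element and round the grid
// size up, so the `idx < nthreads` guard masks the partial last block. The
// launch arithmetic in isolation (the commented launch line shows shape only;
// the stream argument is an assumption):
int nthreads = num * inC * inH * inW;
int blockSize = 1024;
int gridSize = (nthreads + blockSize - 1) / blockSize;  // ceiling division
// KeNCHW2NHWC<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>(
//     outputs, inputs, inC, inH, inW, nthreads, argType);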
*/ + +#include <gtest/gtest.h> +#include "FunctionTest.h" + +namespace paddle { + +TEST(Switch, real) { + for (size_t numSamples : {1, 4, 8, 16}) { + for (size_t channels : {1, 4, 8, 16}) { + for (size_t imgSizeH : {1, 4, 8, 16}) { + for (size_t imgSizeW : {1, 4, 8, 16}) { + VLOG(3) << " numSamples=" << numSamples << " channels=" << channels + << " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW; + for (bool test_grad : {true, false}) { + CpuGpuFuncCompare compare(test_grad ? "NHWC2NCHW" : "NCHW2NHWC", + FuncConfig()); + TensorShape inDims{numSamples, channels, imgSizeH, imgSizeW}; + TensorShape outDims{numSamples, imgSizeH, imgSizeW, channels}; + compare.addInputs( + BufferArg(VALUE_TYPE_FLOAT, test_grad ? outDims : inDims)); + compare.addOutputs(BufferArg( + VALUE_TYPE_FLOAT, test_grad ? inDims : outDims, ASSIGN_TO)); + compare.run(); + } + } + } + } + } +} + +} // namespace paddle diff --git a/paddle/function/neon/NeonDepthwiseConv.cpp b/paddle/function/neon/NeonDepthwiseConv.cpp index f09e98587d1681d29a79a9cb0303c2d4356c6935..18126152ea0b4ebfe4ec5c8084479787814ed173 100644 --- a/paddle/function/neon/NeonDepthwiseConv.cpp +++ b/paddle/function/neon/NeonDepthwiseConv.cpp @@ -12,468 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "neon_util.h" +#include "NeonDepthwiseConv.h" #include "paddle/function/ConvOp.h" -#include "paddle/function/Im2Col.h" namespace paddle { -namespace neon { - #if defined(__ARM_NEON__) || defined(__ARM_NEON) -template <int filterSize, int stride> -struct DepthwiseConvKernel {}; - -inline float32_t conv3x3(float32x4_t r0, - float32x4_t r1, - float32x4_t r2, - float32x4_t k0, - float32x4_t k1, - float32x4_t k2) { - float32x4_t tmp; - tmp = vmulq_f32(r0, k0); - tmp = vmlaq_f32(tmp, r1, k1); - tmp = vmlaq_f32(tmp, r2, k2); - return vaddvq_f32(tmp); -} - -inline float32_t conv4x4(float32x4_t r0, - float32x4_t r1, - float32x4_t r2, - float32x4_t r3, - float32x4_t k0, - float32x4_t k1, - float32x4_t k2, - float32x4_t k3) { - float32x4_t tmp; - tmp = vmulq_f32(r0, k0); - tmp = vmlaq_f32(tmp, r1, k1); - tmp = vmlaq_f32(tmp, r2, k2); - tmp = vmlaq_f32(tmp, r3, k3); - return vaddvq_f32(tmp); -} - -/** - * Each step calculates four elements of the output. - * First step: - * R0[0, 1, 2, 3...] * K[0][0] - * R0[1, 2, 3, 4...] * K[0][1] - * R0[2, 3, 4, 5...] * K[0][2] - * R1[0, 1, 2, 3...] * K[1][0] - * R1[1, 2, 3, 4...] * K[1][1] - * R1[2, 3, 4, 5...] * K[1][2] - * R2[0, 1, 2, 3...] * K[2][0] - * R2[1, 2, 3, 4...] * K[2][1] - * + R2[2, 3, 4, 5...]
* K[2][2] - * ------------------------------ - * Output[0, 1, 2, 3] - */ -template <> -struct DepthwiseConvKernel<3, 1> { - static void run(const float* inputData, - const float* filterData, - int inputHeight, - int inputWidth, - int outputChannels, - int outputHeight, - int outputWidth, - int filterMultiplier, - float* outputData) { - const int steps = outputWidth >> 2; - const int remain = outputWidth & 3; - for (int c = 0; c < outputChannels; c++, filterData += 9) { - // Load the filters - float32x4_t k[3]; - k[0] = vld1q_f32(filterData); - k[1] = vld1q_f32(filterData + 3); - k[2] = vld1q_f32(filterData + 6); - k[0] = vsetq_lane_f32(0.f, k[0], 3); - k[1] = vsetq_lane_f32(0.f, k[1], 3); - k[2] = vsetq_lane_f32(0.f, k[2], 3); - - const float* r0 = - inputData + (c / filterMultiplier) * (inputHeight * inputWidth); - const float* r1 = r0 + inputWidth; - const float* r2 = r0 + inputWidth * 2; - float32x4_t input[3][3]; - for (int h = 0; h < outputHeight; h++) { - for (int s = 0; s < steps; s++) { - // Load the inputs - float32x4_t tmp; - input[0][0] = vld1q_f32(r0); - tmp = vld1q_f32(r0 + 4); - input[0][1] = vextq_f32(input[0][0], tmp, 1); - input[0][2] = vextq_f32(input[0][0], tmp, 2); - input[1][0] = vld1q_f32(r1); - tmp = vld1q_f32(r1 + 4); - input[1][1] = vextq_f32(input[1][0], tmp, 1); - input[1][2] = vextq_f32(input[1][0], tmp, 2); - input[2][0] = vld1q_f32(r2); - tmp = vld1q_f32(r2 + 4); - input[2][1] = vextq_f32(input[2][0], tmp, 1); - input[2][2] = vextq_f32(input[2][0], tmp, 2); - - float32x4_t tmp1 = vdupq_n_f32(0.f); - float32x4_t tmp2 = vdupq_n_f32(0.f); - tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0); - tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1); - tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2); - tmp2 = vmlaq_laneq_f32(tmp2, input[1][0], k[1], 0); - tmp1 = vmlaq_laneq_f32(tmp1, input[1][1], k[1], 1); - tmp2 = vmlaq_laneq_f32(tmp2, input[1][2], k[1], 2); - tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0); - tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1); - tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2); - tmp1 = vaddq_f32(tmp1, tmp2); - - vst1q_f32(outputData, tmp1); - r0 += 4; - r1 += 4; - r2 += 4; - outputData += 4; - } - - for (int r = 0; r < remain; r++) { - float32x4_t i0 = vld1q_f32(r0); - float32x4_t i1 = vld1q_f32(r1); - float32x4_t i2 = vld1q_f32(r2); - *outputData = conv3x3(i0, i1, i2, k[0], k[1], k[2]); - r0++; - r1++; - r2++; - outputData++; - } - - r0 += 2; - r1 += 2; - r2 += 2; - } - } - } -}; - -/** - * Each step calculates four elements of the output. - * First step: - * R0[0, 2, 4, 6...] * K[0][0] - * R0[1, 3, 5, 7...] * K[0][1] - * R0[2, 4, 6, 8...] * K[0][2] - * R1[0, 2, 4, 6...] * K[1][0] - * R1[1, 3, 5, 7...] * K[1][1] - * R1[2, 4, 6, 8...] * K[1][2] - * R2[0, 2, 4, 6...] * K[2][0] - * R2[1, 3, 5, 7...] * K[2][1] - * R2[2, 4, 6, 8...] 
* K[2][2] - * ------------------------------ - * Output[0, 1, 2, 3] - */ -template <> -struct DepthwiseConvKernel<3, 2> { - static void run(const float* inputData, - const float* filterData, - int inputHeight, - int inputWidth, - int outputChannels, - int outputHeight, - int outputWidth, - int filterMultiplier, - float* outputData) { - const int steps = outputWidth >> 2; - const int remain = outputWidth & 3; - for (int c = 0; c < outputChannels; c++, filterData += 9) { - // Load the filters - float32x4_t k[3]; - k[0] = vld1q_f32(filterData); - k[1] = vld1q_f32(filterData + 3); - k[2] = vld1q_f32(filterData + 6); - k[0] = vsetq_lane_f32(0.f, k[0], 3); - k[1] = vsetq_lane_f32(0.f, k[1], 3); - k[2] = vsetq_lane_f32(0.f, k[2], 3); - - const float* start = - inputData + (c / filterMultiplier) * (inputHeight * inputWidth); - float32x4_t input[3][3]; - for (int h = 0; h < outputHeight; h++) { - const float* r0 = start + 2 * h * inputWidth; - const float* r1 = start + (2 * h + 1) * inputWidth; - const float* r2 = start + (2 * h + 2) * inputWidth; - for (int s = 0; s < steps; s++) { - // Load the inputs - float32x4_t data1; - float32x4x2_t data2; - - data2 = vld2q_f32(r0); - input[0][0] = data2.val[0]; - input[0][1] = data2.val[1]; - data1 = vld1q_f32(r0 + 8); - input[0][2] = vextq_f32(data2.val[0], data1, 1); - - data2 = vld2q_f32(r1); - input[1][0] = data2.val[0]; - input[1][1] = data2.val[1]; - data1 = vld1q_f32(r1 + 8); - input[1][2] = vextq_f32(data2.val[0], data1, 1); - - data2 = vld2q_f32(r2); - input[2][0] = data2.val[0]; - input[2][1] = data2.val[1]; - data1 = vld1q_f32(r2 + 8); - input[2][2] = vextq_f32(data2.val[0], data1, 1); - - float32x4_t tmp1 = vdupq_n_f32(0.f); - float32x4_t tmp2 = vdupq_n_f32(0.f); - tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0); - tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1); - tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2); - tmp2 = vmlaq_laneq_f32(tmp2, input[1][0], k[1], 0); - tmp1 = vmlaq_laneq_f32(tmp1, input[1][1], k[1], 1); - tmp2 = vmlaq_laneq_f32(tmp2, input[1][2], k[1], 2); - tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0); - tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1); - tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2); - tmp1 = vaddq_f32(tmp1, tmp2); - - vst1q_f32(outputData, tmp1); - r0 += 8; - r1 += 8; - r2 += 8; - outputData += 4; - } - - for (int r = 0; r < remain; r++) { - float32x4_t i0 = vld1q_f32(r0); - float32x4_t i1 = vld1q_f32(r1); - float32x4_t i2 = vld1q_f32(r2); - *outputData = conv3x3(i0, i1, i2, k[0], k[1], k[2]); - r0 += 2; - r1 += 2; - r2 += 2; - outputData++; - } - } - } - } -}; - -/** - * Each step calculates four elements of the output. 
- */ -template <> -struct DepthwiseConvKernel<4, 1> { - static void run(const float* inputData, - const float* filterData, - int inputHeight, - int inputWidth, - int outputChannels, - int outputHeight, - int outputWidth, - int filterMultiplier, - float* outputData) { - const int steps = outputWidth >> 2; - const int remain = outputWidth & 3; - for (int c = 0; c < outputChannels; c++, filterData += 16) { - // Load the filters - float32x4_t k[4]; - k[0] = vld1q_f32(filterData); - k[1] = vld1q_f32(filterData + 4); - k[2] = vld1q_f32(filterData + 8); - k[3] = vld1q_f32(filterData + 12); - - const float* r0 = - inputData + (c / filterMultiplier) * (inputHeight * inputWidth); - const float* r1 = r0 + inputWidth; - const float* r2 = r0 + inputWidth * 2; - const float* r3 = r0 + inputWidth * 3; - float32x4_t input[4][4]; - for (int h = 0; h < outputHeight; h++) { - for (int s = 0; s < steps; s++) { - // Load the inputs - float32x4_t tmp; - input[0][0] = vld1q_f32(r0); - tmp = vld1q_f32(r0 + 4); - input[0][1] = vextq_f32(input[0][0], tmp, 1); - input[0][2] = vextq_f32(input[0][0], tmp, 2); - input[0][3] = vextq_f32(input[0][0], tmp, 3); - - input[1][0] = vld1q_f32(r1); - tmp = vld1q_f32(r1 + 4); - input[1][1] = vextq_f32(input[1][0], tmp, 1); - input[1][2] = vextq_f32(input[1][0], tmp, 2); - input[1][3] = vextq_f32(input[1][0], tmp, 3); - - input[2][0] = vld1q_f32(r2); - tmp = vld1q_f32(r2 + 4); - input[2][1] = vextq_f32(input[2][0], tmp, 1); - input[2][2] = vextq_f32(input[2][0], tmp, 2); - input[2][3] = vextq_f32(input[2][0], tmp, 3); - - input[3][0] = vld1q_f32(r3); - tmp = vld1q_f32(r3 + 4); - input[3][1] = vextq_f32(input[3][0], tmp, 1); - input[3][2] = vextq_f32(input[3][0], tmp, 2); - input[3][3] = vextq_f32(input[3][0], tmp, 3); - - float32x4_t tmp1 = vdupq_n_f32(0.f); - float32x4_t tmp2 = vdupq_n_f32(0.f); - tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0); - tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1); - tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2); - tmp2 = vmlaq_laneq_f32(tmp2, input[0][3], k[0], 3); - tmp1 = vmlaq_laneq_f32(tmp1, input[1][0], k[1], 0); - tmp2 = vmlaq_laneq_f32(tmp2, input[1][1], k[1], 1); - tmp1 = vmlaq_laneq_f32(tmp1, input[1][2], k[1], 2); - tmp2 = vmlaq_laneq_f32(tmp2, input[1][3], k[1], 3); - tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0); - tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1); - tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2); - tmp2 = vmlaq_laneq_f32(tmp2, input[2][3], k[2], 3); - tmp1 = vmlaq_laneq_f32(tmp1, input[3][0], k[3], 0); - tmp2 = vmlaq_laneq_f32(tmp2, input[3][1], k[3], 1); - tmp1 = vmlaq_laneq_f32(tmp1, input[3][2], k[3], 2); - tmp2 = vmlaq_laneq_f32(tmp2, input[3][3], k[3], 3); - tmp1 = vaddq_f32(tmp1, tmp2); - - vst1q_f32(outputData, tmp1); - r0 += 4; - r1 += 4; - r2 += 4; - r3 += 4; - outputData += 4; - } - - for (int r = 0; r < remain; r++) { - float32x4_t i0 = vld1q_f32(r0); - float32x4_t i1 = vld1q_f32(r1); - float32x4_t i2 = vld1q_f32(r2); - float32x4_t i3 = vld1q_f32(r3); - *outputData = conv4x4(i0, i1, i2, i3, k[0], k[1], k[2], k[3]); - r0++; - r1++; - r2++; - r3++; - outputData++; - } - - r0 += 3; - r1 += 3; - r2 += 3; - r3 += 3; - } - } - } -}; - -/** - * Each step calculates four elements of the output. 
- */ -template <> -struct DepthwiseConvKernel<4, 2> { - static void run(const float* inputData, - const float* filterData, - int inputHeight, - int inputWidth, - int outputChannels, - int outputHeight, - int outputWidth, - int filterMultiplier, - float* outputData) { - const int steps = outputWidth >> 2; - const int remain = outputWidth & 3; - for (int c = 0; c < outputChannels; c++, filterData += 16) { - // Load the filters - float32x4_t k[4]; - k[0] = vld1q_f32(filterData); - k[1] = vld1q_f32(filterData + 4); - k[2] = vld1q_f32(filterData + 8); - k[3] = vld1q_f32(filterData + 12); - - const float* start = - inputData + (c / filterMultiplier) * (inputHeight * inputWidth); - float32x4_t input[4][4]; - for (int h = 0; h < outputHeight; h++) { - const float* r0 = start + 2 * h * inputWidth; - const float* r1 = start + (2 * h + 1) * inputWidth; - const float* r2 = start + (2 * h + 2) * inputWidth; - const float* r3 = start + (2 * h + 3) * inputWidth; - for (int s = 0; s < steps; s++) { - // Load the inputs - float32x4x2_t data1; - float32x4x2_t data2; - - data1 = vld2q_f32(r0); - data2 = vld2q_f32(r0 + 8); - input[0][0] = data1.val[0]; - input[0][1] = data1.val[1]; - input[0][2] = vextq_f32(data1.val[0], data2.val[0], 1); - input[0][3] = vextq_f32(data1.val[1], data2.val[1], 1); - - data1 = vld2q_f32(r1); - data2 = vld2q_f32(r1 + 8); - input[1][0] = data1.val[0]; - input[1][1] = data1.val[1]; - input[1][2] = vextq_f32(data1.val[0], data2.val[0], 1); - input[1][3] = vextq_f32(data1.val[1], data2.val[1], 1); - - data1 = vld2q_f32(r2); - data2 = vld2q_f32(r2 + 8); - input[2][0] = data1.val[0]; - input[2][1] = data1.val[1]; - input[2][2] = vextq_f32(data1.val[0], data2.val[0], 1); - input[2][3] = vextq_f32(data1.val[1], data2.val[1], 1); - - data1 = vld2q_f32(r3); - data2 = vld2q_f32(r3 + 8); - input[3][0] = data1.val[0]; - input[3][1] = data1.val[1]; - input[3][2] = vextq_f32(data1.val[0], data2.val[0], 1); - input[3][3] = vextq_f32(data1.val[1], data2.val[1], 1); - - float32x4_t tmp1 = vdupq_n_f32(0.f); - float32x4_t tmp2 = vdupq_n_f32(0.f); - tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0); - tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1); - tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2); - tmp2 = vmlaq_laneq_f32(tmp2, input[0][3], k[0], 3); - tmp1 = vmlaq_laneq_f32(tmp1, input[1][0], k[1], 0); - tmp2 = vmlaq_laneq_f32(tmp2, input[1][1], k[1], 1); - tmp1 = vmlaq_laneq_f32(tmp1, input[1][2], k[1], 2); - tmp2 = vmlaq_laneq_f32(tmp2, input[1][3], k[1], 3); - tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0); - tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1); - tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2); - tmp2 = vmlaq_laneq_f32(tmp2, input[2][3], k[2], 3); - tmp1 = vmlaq_laneq_f32(tmp1, input[3][0], k[3], 0); - tmp2 = vmlaq_laneq_f32(tmp2, input[3][1], k[3], 1); - tmp1 = vmlaq_laneq_f32(tmp1, input[3][2], k[3], 2); - tmp2 = vmlaq_laneq_f32(tmp2, input[3][3], k[3], 3); - tmp1 = vaddq_f32(tmp1, tmp2); - - vst1q_f32(outputData, tmp1); - r0 += 8; - r1 += 8; - r2 += 8; - r3 += 8; - outputData += 4; - } - - for (int r = 0; r < remain; r++) { - float32x4_t i0 = vld1q_f32(r0); - float32x4_t i1 = vld1q_f32(r1); - float32x4_t i2 = vld1q_f32(r2); - float32x4_t i3 = vld1q_f32(r3); - *outputData = conv4x4(i0, i1, i2, i3, k[0], k[1], k[2], k[3]); - r0 += 2; - r1 += 2; - r2 += 2; - r3 += 2; - outputData++; - } - } - } - } -}; - template class NeonDepthwiseConvFunction : public ConvFunctionBase { public: @@ -497,16 +42,16 @@ public: const TensorShape& filter = inputs[1].shape(); const 
TensorShape& output = outputs[0].shape(); - size_t batchSize = input[0]; - size_t inputChannels = input[1]; - size_t inputHeight = input[2]; - size_t inputWidth = input[3]; - size_t filterHeight = getFilterHeight(filter); - size_t filterWidth = getFilterWidth(filter); - size_t outputChannels = output[1]; - size_t outputHeight = output[2]; - size_t outputWidth = output[3]; - size_t filterMultiplier = outputChannels / groups_; + int batchSize = input[0]; + int inputChannels = input[1]; + int inputHeight = input[2]; + int inputWidth = input[3]; + int filterHeight = getFilterHeight(filter); + int filterWidth = getFilterWidth(filter); + int outputChannels = output[1]; + int outputHeight = output[2]; + int outputWidth = output[3]; + int filterMultiplier = outputChannels / groups_; CHECK_EQ(inputChannels, groups_); // only support strideH() == strideW() and filterHeight == filterWidth. @@ -519,22 +64,19 @@ public: // padding the input float* inputPadding = inputData; + int padInputHeight = inputHeight + 2 * paddingH(); + int padInputWidth = inputWidth + 2 * paddingW(); if (paddingH() > 0 || paddingW() > 0) { - int newSize = batchSize * inputChannels * (inputHeight + 2 * paddingH()) * - (inputWidth + 2 * paddingW()); + int newSize = batchSize * inputChannels * padInputHeight * padInputWidth; resizeBuffer(newSize); inputPadding = reinterpret_cast(memory_->getBuf()); - Padding::run(inputData, - inputPadding, - batchSize * inputChannels, - inputHeight, - inputWidth, - paddingH(), - paddingW()); - - // height and width of padding data - inputHeight += 2 * paddingH(); - inputWidth += 2 * paddingW(); + neon::Padding::run(inputData, + inputPadding, + batchSize * inputChannels, + inputHeight, + inputWidth, + padInputHeight, + padInputWidth); } std::function::run; + DepthWiseConv = neon::DepthwiseConvKernel<3, 1>::run; } else if (filterWidth == 3 && strideW() == 2) { - DepthWiseConv = DepthwiseConvKernel<3, 2>::run; + DepthWiseConv = neon::DepthwiseConvKernel<3, 2>::run; } else if (filterWidth == 4 && strideW() == 1) { - DepthWiseConv = DepthwiseConvKernel<4, 1>::run; + DepthWiseConv = neon::DepthwiseConvKernel<4, 1>::run; } else if (filterWidth == 4 && strideW() == 2) { - DepthWiseConv = DepthwiseConvKernel<4, 2>::run; + DepthWiseConv = neon::DepthwiseConvKernel<4, 2>::run; } else { LOG(FATAL) << "Not supported"; } - for (size_t i = 0; i < batchSize; i++) { + for (int i = 0; i < batchSize; i++) { DepthWiseConv(inputPadding, filterData, - inputHeight, - inputWidth, + padInputHeight, + padInputWidth, outputChannels, outputHeight, outputWidth, filterMultiplier, outputData); - inputPadding += inputChannels * inputHeight * inputWidth; + inputPadding += inputChannels * padInputHeight * padInputWidth; outputData += outputChannels * outputHeight * outputWidth; } } }; +#ifndef PADDLE_TYPE_DOUBLE REGISTER_TYPED_FUNC(NeonDepthwiseConv, CPU, NeonDepthwiseConvFunction); +#endif #endif -} // namespace neon } // namespace paddle diff --git a/paddle/function/neon/NeonDepthwiseConv.h b/paddle/function/neon/NeonDepthwiseConv.h new file mode 100644 index 0000000000000000000000000000000000000000..aefeea78badbca3d0d09e292e4e1e148618f8ac6 --- /dev/null +++ b/paddle/function/neon/NeonDepthwiseConv.h @@ -0,0 +1,631 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
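// The refactored NeonDepthwiseConvFunction above keeps inputHeight/inputWidth
// immutable and threads the padded extents through the kernels instead; the
// per-sample stride then uses the padded sizes. The arithmetic, with
// illustrative numbers:
int padInputHeight = inputHeight + 2 * paddingH();  // e.g. 32 + 2 * 1 = 34
int padInputWidth = inputWidth + 2 * paddingW();    // e.g. 32 + 2 * 1 = 34
int sampleStride = inputChannels * padInputHeight * padInputWidth;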
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include "neon_util.h" + +namespace paddle { + +namespace neon { + +#if defined(__ARM_NEON__) || defined(__ARM_NEON) + +template +struct DepthwiseConvKernel {}; + +inline float32_t conv3x3(float32x4_t r0, + float32x4_t r1, + float32x4_t r2, + float32x4_t k0, + float32x4_t k1, + float32x4_t k2) { + float32x4_t tmp; + tmp = vmulq_f32(r0, k0); + tmp = vmlaq_f32(tmp, r1, k1); + tmp = vmlaq_f32(tmp, r2, k2); + return vaddvq_f32(tmp); +} + +inline float32_t conv4x4(float32x4_t r0, + float32x4_t r1, + float32x4_t r2, + float32x4_t r3, + float32x4_t k0, + float32x4_t k1, + float32x4_t k2, + float32x4_t k3) { + float32x4_t tmp; + tmp = vmulq_f32(r0, k0); + tmp = vmlaq_f32(tmp, r1, k1); + tmp = vmlaq_f32(tmp, r2, k2); + tmp = vmlaq_f32(tmp, r3, k3); + return vaddvq_f32(tmp); +} + +/** + * Each step calculates four elements of the output. + * First step: + * R0[0, 1, 2, 3...] * K[0][0] + * R0[1, 2, 3, 4...] * K[0][1] + * R0[2, 3, 4, 5...] * K[0][2] + * R1[0, 1, 2, 3...] * K[1][0] + * R1[1, 2, 3, 4...] * K[1][1] + * R1[2, 3, 4, 5...] * K[1][2] + * R2[0, 1, 2, 3...] * K[2][0] + * R2[1, 2, 3, 4...] * K[2][1] + * + R2[2, 3, 4, 5...] * K[2][2] + * ------------------------------ + * Output[0, 1, 2, 3] + */ +template <> +struct DepthwiseConvKernel<3, 1> { + static void run(const float* inputData, + const float* filterData, + int inputHeight, + int inputWidth, + int outputChannels, + int outputHeight, + int outputWidth, + int filterMultiplier, + float* outputData) { + const int steps = outputWidth >> 2; + const int remain = outputWidth & 3; + for (int c = 0; c < outputChannels; c++, filterData += 9) { + // Load the filters + float32x4_t k[3]; + k[0] = vld1q_f32(filterData); + k[1] = vld1q_f32(filterData + 3); + k[2] = vld1q_f32(filterData + 6); + k[0] = vsetq_lane_f32(0.f, k[0], 3); + k[1] = vsetq_lane_f32(0.f, k[1], 3); + k[2] = vsetq_lane_f32(0.f, k[2], 3); + + const float* r0 = + inputData + (c / filterMultiplier) * (inputHeight * inputWidth); + const float* r1 = r0 + inputWidth; + const float* r2 = r0 + inputWidth * 2; + float32x4_t input[3][3]; + for (int h = 0; h < outputHeight; h++) { + for (int s = 0; s < steps; s++) { + // Load the inputs + float32x4_t tmp; + input[0][0] = vld1q_f32(r0); + tmp = vld1q_f32(r0 + 4); + input[0][1] = vextq_f32(input[0][0], tmp, 1); + input[0][2] = vextq_f32(input[0][0], tmp, 2); + input[1][0] = vld1q_f32(r1); + tmp = vld1q_f32(r1 + 4); + input[1][1] = vextq_f32(input[1][0], tmp, 1); + input[1][2] = vextq_f32(input[1][0], tmp, 2); + input[2][0] = vld1q_f32(r2); + tmp = vld1q_f32(r2 + 4); + input[2][1] = vextq_f32(input[2][0], tmp, 1); + input[2][2] = vextq_f32(input[2][0], tmp, 2); + + float32x4_t tmp1 = vdupq_n_f32(0.f); + float32x4_t tmp2 = vdupq_n_f32(0.f); + tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0); + tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1); + tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2); + tmp2 = vmlaq_laneq_f32(tmp2, input[1][0], k[1], 0); + tmp1 = vmlaq_laneq_f32(tmp1, input[1][1], k[1], 1); + tmp2 = vmlaq_laneq_f32(tmp2, input[1][2], k[1], 2); + tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0); + tmp2 = 
vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1); + tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2); + tmp1 = vaddq_f32(tmp1, tmp2); + + vst1q_f32(outputData, tmp1); + r0 += 4; + r1 += 4; + r2 += 4; + outputData += 4; + } + + for (int r = 0; r < remain; r++) { + float32x4_t i0 = vld1q_f32(r0); + float32x4_t i1 = vld1q_f32(r1); + float32x4_t i2 = vld1q_f32(r2); + *outputData = conv3x3(i0, i1, i2, k[0], k[1], k[2]); + r0++; + r1++; + r2++; + outputData++; + } + + r0 += 2; + r1 += 2; + r2 += 2; + } + } + } +}; + +/** + * Each step calculates four elements of the output. + * First step: + * R0[0, 2, 4, 6...] * K[0][0] + * R0[1, 3, 5, 7...] * K[0][1] + * R0[2, 4, 6, 8...] * K[0][2] + * R1[0, 2, 4, 6...] * K[1][0] + * R1[1, 3, 5, 7...] * K[1][1] + * R1[2, 4, 6, 8...] * K[1][2] + * R2[0, 2, 4, 6...] * K[2][0] + * R2[1, 3, 5, 7...] * K[2][1] + * R2[2, 4, 6, 8...] * K[2][2] + * ------------------------------ + * Output[0, 1, 2, 3] + */ +template <> +struct DepthwiseConvKernel<3, 2> { + static void run(const float* inputData, + const float* filterData, + int inputHeight, + int inputWidth, + int outputChannels, + int outputHeight, + int outputWidth, + int filterMultiplier, + float* outputData) { + const int steps = outputWidth >> 2; + const int remain = outputWidth & 3; + for (int c = 0; c < outputChannels; c++, filterData += 9) { + // Load the filters + float32x4_t k[3]; + k[0] = vld1q_f32(filterData); + k[1] = vld1q_f32(filterData + 3); + k[2] = vld1q_f32(filterData + 6); + k[0] = vsetq_lane_f32(0.f, k[0], 3); + k[1] = vsetq_lane_f32(0.f, k[1], 3); + k[2] = vsetq_lane_f32(0.f, k[2], 3); + + const float* start = + inputData + (c / filterMultiplier) * (inputHeight * inputWidth); + float32x4_t input[3][3]; + for (int h = 0; h < outputHeight; h++) { + const float* r0 = start + 2 * h * inputWidth; + const float* r1 = start + (2 * h + 1) * inputWidth; + const float* r2 = start + (2 * h + 2) * inputWidth; + for (int s = 0; s < steps; s++) { + // Load the inputs + float32x4_t data1; + float32x4x2_t data2; + + data2 = vld2q_f32(r0); + input[0][0] = data2.val[0]; + input[0][1] = data2.val[1]; + data1 = vld1q_f32(r0 + 8); + input[0][2] = vextq_f32(data2.val[0], data1, 1); + + data2 = vld2q_f32(r1); + input[1][0] = data2.val[0]; + input[1][1] = data2.val[1]; + data1 = vld1q_f32(r1 + 8); + input[1][2] = vextq_f32(data2.val[0], data1, 1); + + data2 = vld2q_f32(r2); + input[2][0] = data2.val[0]; + input[2][1] = data2.val[1]; + data1 = vld1q_f32(r2 + 8); + input[2][2] = vextq_f32(data2.val[0], data1, 1); + + float32x4_t tmp1 = vdupq_n_f32(0.f); + float32x4_t tmp2 = vdupq_n_f32(0.f); + tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0); + tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1); + tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2); + tmp2 = vmlaq_laneq_f32(tmp2, input[1][0], k[1], 0); + tmp1 = vmlaq_laneq_f32(tmp1, input[1][1], k[1], 1); + tmp2 = vmlaq_laneq_f32(tmp2, input[1][2], k[1], 2); + tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0); + tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1); + tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2); + tmp1 = vaddq_f32(tmp1, tmp2); + + vst1q_f32(outputData, tmp1); + r0 += 8; + r1 += 8; + r2 += 8; + outputData += 4; + } + + for (int r = 0; r < remain; r++) { + float32x4_t i0 = vld1q_f32(r0); + float32x4_t i1 = vld1q_f32(r1); + float32x4_t i2 = vld1q_f32(r2); + *outputData = conv3x3(i0, i1, i2, k[0], k[1], k[2]); + r0 += 2; + r1 += 2; + r2 += 2; + outputData++; + } + } + } + } +}; + +/** + * Each step calculates four elements of the output. 
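// The stride-2 kernel above swaps shifted loads for vld2q_f32, which
// deinterleaves eight consecutive floats into even and odd lanes in a single
// load, exactly the R[0, 2, 4, 6...] / R[1, 3, 5, 7...] rows sketched in the
// comments. The intrinsic in isolation, assuming <arm_neon.h>:
inline float32x4x2_t evenOddLanes(const float* p) {
  // .val[0] = {p[0], p[2], p[4], p[6]}, .val[1] = {p[1], p[3], p[5], p[7]}
  return vld2q_f32(p);
}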
+ */ +template <> +struct DepthwiseConvKernel<4, 1> { + static void run(const float* inputData, + const float* filterData, + int inputHeight, + int inputWidth, + int outputChannels, + int outputHeight, + int outputWidth, + int filterMultiplier, + float* outputData) { + const int steps = outputWidth >> 2; + const int remain = outputWidth & 3; + for (int c = 0; c < outputChannels; c++, filterData += 16) { + // Load the filters + float32x4_t k[4]; + k[0] = vld1q_f32(filterData); + k[1] = vld1q_f32(filterData + 4); + k[2] = vld1q_f32(filterData + 8); + k[3] = vld1q_f32(filterData + 12); + + const float* r0 = + inputData + (c / filterMultiplier) * (inputHeight * inputWidth); + const float* r1 = r0 + inputWidth; + const float* r2 = r0 + inputWidth * 2; + const float* r3 = r0 + inputWidth * 3; + float32x4_t input[4][4]; + for (int h = 0; h < outputHeight; h++) { + for (int s = 0; s < steps; s++) { + // Load the inputs + float32x4_t tmp; + input[0][0] = vld1q_f32(r0); + tmp = vld1q_f32(r0 + 4); + input[0][1] = vextq_f32(input[0][0], tmp, 1); + input[0][2] = vextq_f32(input[0][0], tmp, 2); + input[0][3] = vextq_f32(input[0][0], tmp, 3); + + input[1][0] = vld1q_f32(r1); + tmp = vld1q_f32(r1 + 4); + input[1][1] = vextq_f32(input[1][0], tmp, 1); + input[1][2] = vextq_f32(input[1][0], tmp, 2); + input[1][3] = vextq_f32(input[1][0], tmp, 3); + + input[2][0] = vld1q_f32(r2); + tmp = vld1q_f32(r2 + 4); + input[2][1] = vextq_f32(input[2][0], tmp, 1); + input[2][2] = vextq_f32(input[2][0], tmp, 2); + input[2][3] = vextq_f32(input[2][0], tmp, 3); + + input[3][0] = vld1q_f32(r3); + tmp = vld1q_f32(r3 + 4); + input[3][1] = vextq_f32(input[3][0], tmp, 1); + input[3][2] = vextq_f32(input[3][0], tmp, 2); + input[3][3] = vextq_f32(input[3][0], tmp, 3); + + float32x4_t tmp1 = vdupq_n_f32(0.f); + float32x4_t tmp2 = vdupq_n_f32(0.f); + tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0); + tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1); + tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2); + tmp2 = vmlaq_laneq_f32(tmp2, input[0][3], k[0], 3); + tmp1 = vmlaq_laneq_f32(tmp1, input[1][0], k[1], 0); + tmp2 = vmlaq_laneq_f32(tmp2, input[1][1], k[1], 1); + tmp1 = vmlaq_laneq_f32(tmp1, input[1][2], k[1], 2); + tmp2 = vmlaq_laneq_f32(tmp2, input[1][3], k[1], 3); + tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0); + tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1); + tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2); + tmp2 = vmlaq_laneq_f32(tmp2, input[2][3], k[2], 3); + tmp1 = vmlaq_laneq_f32(tmp1, input[3][0], k[3], 0); + tmp2 = vmlaq_laneq_f32(tmp2, input[3][1], k[3], 1); + tmp1 = vmlaq_laneq_f32(tmp1, input[3][2], k[3], 2); + tmp2 = vmlaq_laneq_f32(tmp2, input[3][3], k[3], 3); + tmp1 = vaddq_f32(tmp1, tmp2); + + vst1q_f32(outputData, tmp1); + r0 += 4; + r1 += 4; + r2 += 4; + r3 += 4; + outputData += 4; + } + + for (int r = 0; r < remain; r++) { + float32x4_t i0 = vld1q_f32(r0); + float32x4_t i1 = vld1q_f32(r1); + float32x4_t i2 = vld1q_f32(r2); + float32x4_t i3 = vld1q_f32(r3); + *outputData = conv4x4(i0, i1, i2, i3, k[0], k[1], k[2], k[3]); + r0++; + r1++; + r2++; + r3++; + outputData++; + } + + r0 += 3; + r1 += 3; + r2 += 3; + r3 += 3; + } + } + } +}; + +/** + * Each step calculates four elements of the output. 
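// The stride-1 kernels above build their shifted input windows with
// vextq_f32 instead of overlapping loads: two loads feed every shift. The
// trick in isolation, assuming <arm_neon.h>:
inline void shiftedWindows(const float* p, float32x4_t out[4]) {
  float32x4_t lo = vld1q_f32(p);      // {p[0], p[1], p[2], p[3]}
  float32x4_t hi = vld1q_f32(p + 4);  // {p[4], p[5], p[6], p[7]}
  out[0] = lo;
  out[1] = vextq_f32(lo, hi, 1);      // {p[1], p[2], p[3], p[4]}
  out[2] = vextq_f32(lo, hi, 2);      // {p[2], p[3], p[4], p[5]}
  out[3] = vextq_f32(lo, hi, 3);      // {p[3], p[4], p[5], p[6]}
}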
+ */ +template <> +struct DepthwiseConvKernel<4, 2> { + static void run(const float* inputData, + const float* filterData, + int inputHeight, + int inputWidth, + int outputChannels, + int outputHeight, + int outputWidth, + int filterMultiplier, + float* outputData) { + const int steps = outputWidth >> 2; + const int remain = outputWidth & 3; + for (int c = 0; c < outputChannels; c++, filterData += 16) { + // Load the filters + float32x4_t k[4]; + k[0] = vld1q_f32(filterData); + k[1] = vld1q_f32(filterData + 4); + k[2] = vld1q_f32(filterData + 8); + k[3] = vld1q_f32(filterData + 12); + + const float* start = + inputData + (c / filterMultiplier) * (inputHeight * inputWidth); + float32x4_t input[4][4]; + for (int h = 0; h < outputHeight; h++) { + const float* r0 = start + 2 * h * inputWidth; + const float* r1 = start + (2 * h + 1) * inputWidth; + const float* r2 = start + (2 * h + 2) * inputWidth; + const float* r3 = start + (2 * h + 3) * inputWidth; + for (int s = 0; s < steps; s++) { + // Load the inputs + float32x4x2_t data1; + float32x4x2_t data2; + + data1 = vld2q_f32(r0); + data2 = vld2q_f32(r0 + 8); + input[0][0] = data1.val[0]; + input[0][1] = data1.val[1]; + input[0][2] = vextq_f32(data1.val[0], data2.val[0], 1); + input[0][3] = vextq_f32(data1.val[1], data2.val[1], 1); + + data1 = vld2q_f32(r1); + data2 = vld2q_f32(r1 + 8); + input[1][0] = data1.val[0]; + input[1][1] = data1.val[1]; + input[1][2] = vextq_f32(data1.val[0], data2.val[0], 1); + input[1][3] = vextq_f32(data1.val[1], data2.val[1], 1); + + data1 = vld2q_f32(r2); + data2 = vld2q_f32(r2 + 8); + input[2][0] = data1.val[0]; + input[2][1] = data1.val[1]; + input[2][2] = vextq_f32(data1.val[0], data2.val[0], 1); + input[2][3] = vextq_f32(data1.val[1], data2.val[1], 1); + + data1 = vld2q_f32(r3); + data2 = vld2q_f32(r3 + 8); + input[3][0] = data1.val[0]; + input[3][1] = data1.val[1]; + input[3][2] = vextq_f32(data1.val[0], data2.val[0], 1); + input[3][3] = vextq_f32(data1.val[1], data2.val[1], 1); + + float32x4_t tmp1 = vdupq_n_f32(0.f); + float32x4_t tmp2 = vdupq_n_f32(0.f); + tmp1 = vmlaq_laneq_f32(tmp1, input[0][0], k[0], 0); + tmp2 = vmlaq_laneq_f32(tmp2, input[0][1], k[0], 1); + tmp1 = vmlaq_laneq_f32(tmp1, input[0][2], k[0], 2); + tmp2 = vmlaq_laneq_f32(tmp2, input[0][3], k[0], 3); + tmp1 = vmlaq_laneq_f32(tmp1, input[1][0], k[1], 0); + tmp2 = vmlaq_laneq_f32(tmp2, input[1][1], k[1], 1); + tmp1 = vmlaq_laneq_f32(tmp1, input[1][2], k[1], 2); + tmp2 = vmlaq_laneq_f32(tmp2, input[1][3], k[1], 3); + tmp1 = vmlaq_laneq_f32(tmp1, input[2][0], k[2], 0); + tmp2 = vmlaq_laneq_f32(tmp2, input[2][1], k[2], 1); + tmp1 = vmlaq_laneq_f32(tmp1, input[2][2], k[2], 2); + tmp2 = vmlaq_laneq_f32(tmp2, input[2][3], k[2], 3); + tmp1 = vmlaq_laneq_f32(tmp1, input[3][0], k[3], 0); + tmp2 = vmlaq_laneq_f32(tmp2, input[3][1], k[3], 1); + tmp1 = vmlaq_laneq_f32(tmp1, input[3][2], k[3], 2); + tmp2 = vmlaq_laneq_f32(tmp2, input[3][3], k[3], 3); + tmp1 = vaddq_f32(tmp1, tmp2); + + vst1q_f32(outputData, tmp1); + r0 += 8; + r1 += 8; + r2 += 8; + r3 += 8; + outputData += 4; + } + + for (int r = 0; r < remain; r++) { + float32x4_t i0 = vld1q_f32(r0); + float32x4_t i1 = vld1q_f32(r1); + float32x4_t i2 = vld1q_f32(r2); + float32x4_t i3 = vld1q_f32(r3); + *outputData = conv4x4(i0, i1, i2, i3, k[0], k[1], k[2], k[3]); + r0 += 2; + r1 += 2; + r2 += 2; + r3 += 2; + outputData++; + } + } + } + } +}; + +template +struct Padding { + static void run(const T* input, + T* inputPadding, + int channels, + int inputHeight, + int inputWidth, + int padInputHeight, + int 
padInputWidth) { + const int paddingHeight = (padInputHeight - inputHeight) / 2; + const int paddingWidth = (padInputWidth - inputWidth) / 2; + for (int c = 0; c < channels; c++) { + if (paddingHeight > 0) { + memset(inputPadding, 0, padInputWidth * paddingHeight * sizeof(T)); + inputPadding += padInputWidth * paddingHeight; + } + + for (int i = 0; i < inputHeight; i++) { + // padding head + for (int j = 0; j < paddingWidth; j++) { + *inputPadding++ = T(0); + } + + memcpy(inputPadding, input, inputWidth * sizeof(T)); + inputPadding += inputWidth; + input += inputWidth; + + // padding tail + for (int j = 0; j < paddingWidth; j++) { + *inputPadding++ = T(0); + } + } + + if (paddingHeight > 0) { + memset(inputPadding, 0, padInputWidth * paddingHeight * sizeof(T)); + inputPadding += padInputWidth * paddingHeight; + } + } + } +}; + +#if defined(__ARM_NEON__) || defined(__ARM_NEON) +template <> +struct Padding { + static void run(const float* input, + float* inputPadding, + int channels, + int inputHeight, + int inputWidth, + int padInputHeight, + int padInputWidth) { + const int paddingHeight = (padInputHeight - inputHeight) / 2; + const int paddingWidth = (padInputWidth - inputWidth) / 2; + for (int c = 0; c < channels; c++) { + if (paddingHeight > 0) { + memset(inputPadding, 0, padInputWidth * paddingHeight * sizeof(float)); + inputPadding += padInputWidth * paddingHeight; + } + + for (int i = 0; i < inputHeight; i++) { + // padding head + for (int j = 0; j < paddingWidth; j++) { + *inputPadding++ = float(0); + } + + int step = inputWidth >> 2; + int remain = inputWidth & 3; + for (int s = 0; s < step; s++) { + float32x4_t s0 = vld1q_f32(input); + vst1q_f32(inputPadding, s0); + input += 4; + inputPadding += 4; + } + for (int r = 0; r < remain; r++) { + *inputPadding++ = *input++; + } + + // padding tail + for (int j = 0; j < paddingWidth; j++) { + *inputPadding++ = float(0); + } + } + + if (paddingHeight > 0) { + memset(inputPadding, 0, padInputWidth * paddingHeight * sizeof(float)); + inputPadding += padInputWidth * paddingHeight; + } + } + } +}; + +// for stride is 2 +struct StridePadding { + static void run(const float* input, + float* inputPadding, + int channels, + int inputHeight, + int inputWidth, + int padInputHeight, + int padInputWidth) { + const int paddingHeight = (padInputHeight - (inputHeight * 2 - 1)) / 2; + const int paddingWidth = (padInputWidth - (inputWidth * 2 - 1)) / 2; + for (int c = 0; c < channels; c++) { + if (paddingHeight > 0) { + memset(inputPadding, 0, padInputWidth * paddingHeight * sizeof(float)); + inputPadding += padInputWidth * paddingHeight; + } + + for (int i = 0; i < inputHeight; i++) { + // padding head + for (int j = 0; j < paddingWidth; j++) { + *inputPadding++ = float(0); + } + + int step = inputWidth >> 2; + int remain = inputWidth & 3; + float32x4_t s1 = vdupq_n_f32(0.f); + for (int s = 0; s < step; s++) { + float32x4_t s0 = vld1q_f32(input); + float32x4x2_t v = {s0, s1}; + vst2q_f32(inputPadding, v); + input += 4; + inputPadding += 8; + } + for (int r = 0; r < remain; r++) { + *inputPadding++ = *input++; + *inputPadding++ = float(0); + } + inputPadding--; + + // padding tail + for (int j = 0; j < paddingWidth; j++) { + *inputPadding++ = float(0); + } + if (i != inputHeight - 1) { + memset(inputPadding, 0, padInputWidth * sizeof(float)); + inputPadding += padInputWidth; + } + } + + if (paddingHeight > 0) { + memset(inputPadding, 0, padInputWidth * paddingHeight * sizeof(float)); + inputPadding += padInputWidth * paddingHeight; + } + } + } +}; + 
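// StridePadding above dilates the input before adding the border: a row of
// inputWidth samples becomes 2 * inputWidth - 1 entries with zeros
// interleaved, and a zero row is inserted between input rows, so a stride-2
// transposed convolution can later run as a plain stride-1 forward
// convolution. A scalar sketch of the row dilation, ignoring the border:
inline void dilateRowX2(float* dst, const float* src, int inputWidth) {
  for (int i = 0; i < inputWidth; ++i) {
    dst[2 * i] = src[i];                           // original sample
    if (i + 1 < inputWidth) dst[2 * i + 1] = 0.f;  // interleaved zero
  }  // [a b c] -> [a 0 b 0 c]
}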
+#endif + +#endif + +} // namespace neon +} // namespace paddle diff --git a/paddle/function/neon/NeonDepthwiseConvTranspose.cpp b/paddle/function/neon/NeonDepthwiseConvTranspose.cpp new file mode 100644 index 0000000000000000000000000000000000000000..49ca4bc8a0947ba329bd991e9f7d001623901a67 --- /dev/null +++ b/paddle/function/neon/NeonDepthwiseConvTranspose.cpp @@ -0,0 +1,136 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "NeonDepthwiseConv.h" +#include "paddle/function/ConvOp.h" + +namespace paddle { + +#if defined(__ARM_NEON__) || defined(__ARM_NEON) + +template +class NeonDepthwiseConvTransposeFunction : public ConvFunctionBase { +public: + void init(const FuncConfig& config) override { + ConvFunctionBase::init(config); + } + + void check(const BufferArgs& inputs, const BufferArgs& outputs) override { + const TensorShape& input = inputs[0].shape(); + const TensorShape& filter = inputs[1].shape(); + const TensorShape& output = outputs[0].shape(); + checkShape(input, filter, output); + } + + void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { + CHECK_EQ(numInputs_, inputs.size()); + CHECK_EQ(numOutputs_, outputs.size()); + check(inputs, outputs); + + const TensorShape& input = inputs[0].shape(); + const TensorShape& filter = inputs[1].shape(); + const TensorShape& output = outputs[0].shape(); + + int batchSize = input[0]; + int inputChannels = input[1]; + int inputHeight = input[2]; + int inputWidth = input[3]; + int filterHeight = getFilterHeight(filter); + int filterWidth = getFilterWidth(filter); + int outputChannels = output[1]; + int outputHeight = output[2]; + int outputWidth = output[3]; + int filterMultiplier = outputChannels / groups_; + CHECK_EQ(inputChannels, groups_); + + // only support strideH() == strideW() and filterHeight == filterWidth. 
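// The checks above pin down the depthwise contract: groups_ equals
// inputChannels and filterMultiplier = outputChannels / groups_, so each
// input channel feeds a contiguous block of filterMultiplier output channels
// and the kernels recover the source channel by integer division. Sketch:
inline int sourceChannel(int outputChannel, int filterMultiplier) {
  // e.g. filterMultiplier = 2: output channels {2k, 2k+1} both read input k
  return outputChannel / filterMultiplier;
}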
+ CHECK_EQ(strideH(), strideW()); + CHECK_EQ(paddingH(), paddingW()); + CHECK_EQ(filterHeight, filterWidth); + + float* inputData = inputs[0].data(); + float* filterData = inputs[1].data(); + float* outputData = outputs[0].data(); + + // padding the input, input -> inputPadding + float* inputPadding = inputData; + int padInputHeight = + (inputHeight - 1) * strideH() + 2 * filterHeight - 1 - 2 * paddingH(); + int padInputWidth = + (inputWidth - 1) * strideW() + 2 * filterWidth - 1 - 2 * paddingW(); + + if (padInputHeight > inputHeight || padInputWidth > inputWidth) { + int newSize = batchSize * inputChannels * padInputHeight * padInputWidth; + resizeBuffer(newSize); + inputPadding = reinterpret_cast(memory_->getBuf()); + if (strideH() == 1) { + neon::Padding::run(inputData, + inputPadding, + batchSize * inputChannels, + inputHeight, + inputWidth, + padInputHeight, + padInputWidth); + } else if (strideH() == 2) { + neon::StridePadding::run(inputData, + inputPadding, + batchSize * inputChannels, + inputHeight, + inputWidth, + padInputHeight, + padInputWidth); + } else { + LOG(FATAL) << "Not supported"; + } + } + + std::function + DepthWiseConv; + + if (filterWidth == 3) { + DepthWiseConv = neon::DepthwiseConvKernel<3, 1>::run; + } else if (filterWidth == 4) { + DepthWiseConv = neon::DepthwiseConvKernel<4, 1>::run; + } else { + LOG(FATAL) << "Not supported"; + } + + for (int i = 0; i < batchSize; i++) { + DepthWiseConv(inputPadding, + filterData, + padInputHeight, + padInputWidth, + outputChannels, + outputHeight, + outputWidth, + filterMultiplier, + outputData); + inputPadding += inputChannels * padInputHeight * padInputWidth; + outputData += outputChannels * outputHeight * outputWidth; + } + } +}; + +#ifndef PADDLE_TYPE_DOUBLE + +REGISTER_TYPED_FUNC(NeonDepthwiseConvTranspose, + CPU, + NeonDepthwiseConvTransposeFunction); + +#endif + +#endif + +} // namespace paddle diff --git a/paddle/function/neon/neon_util.h b/paddle/function/neon/neon_util.h index 56b3febe2d27bb4fbf57e49079b3ad071d556914..e2db0450675084345ad55559d8988c5375801cc9 100644 --- a/paddle/function/neon/neon_util.h +++ b/paddle/function/neon/neon_util.h @@ -33,12 +33,8 @@ inline float32_t vaddvq_f32(float32x4_t a) { return vget_lane_f32(vpadd_f32(v, v), 0); } -inline float32x4_t vmlaq_laneq_f32(float32x4_t a, - float32x4_t b, - float32x4_t v, - const int lane) { - return vmlaq_n_f32(a, b, vgetq_lane_f32(v, lane)); -} +#define vmlaq_laneq_f32(a, b, v, lane) \ + vmlaq_n_f32(a, b, vgetq_lane_f32(v, lane)) #endif } // namespace neon diff --git a/paddle/gserver/evaluators/CTCErrorEvaluator.cpp b/paddle/gserver/evaluators/CTCErrorEvaluator.cpp index 132119015f967c6e8d055792de8afe8450df5ec6..92087fa32b1e48b50fbf447ec6f3c43e2a510220 100644 --- a/paddle/gserver/evaluators/CTCErrorEvaluator.cpp +++ b/paddle/gserver/evaluators/CTCErrorEvaluator.cpp @@ -14,18 +14,20 @@ limitations under the License. 
*/ #include "Evaluator.h" #include "paddle/gserver/gradientmachines/NeuralNetwork.h" +#include "paddle/utils/StringUtil.h" namespace paddle { /** * calculate sequence-to-sequence edit distance */ -class CTCErrorEvaluator : public NotGetableEvaluator { +class CTCErrorEvaluator : public Evaluator { private: MatrixPtr outActivations_; int numTimes_, numClasses_, numSequences_, blank_; real deletions_, insertions_, substitutions_; int seqClassficationError_; + mutable std::unordered_map evalResults_; std::vector path2String(const std::vector& path) { std::vector str; @@ -183,6 +185,18 @@ private: return stringAlignment(gtStr, recogStr); } + void storeLocalValues() const { + evalResults_["error"] = numSequences_ ? totalScore_ / numSequences_ : 0; + evalResults_["deletion_error"] = + numSequences_ ? deletions_ / numSequences_ : 0; + evalResults_["insertion_error"] = + numSequences_ ? insertions_ / numSequences_ : 0; + evalResults_["substitution_error"] = + numSequences_ ? substitutions_ / numSequences_ : 0; + evalResults_["sequence_error"] = + (real)seqClassficationError_ / numSequences_; + } + public: CTCErrorEvaluator() : numTimes_(0), @@ -245,16 +259,12 @@ public: } virtual void printStats(std::ostream& os) const { - os << config_.name() << "=" - << (numSequences_ ? totalScore_ / numSequences_ : 0); - os << " deletions error" - << "=" << (numSequences_ ? deletions_ / numSequences_ : 0); - os << " insertions error" - << "=" << (numSequences_ ? insertions_ / numSequences_ : 0); - os << " substitutions error" - << "=" << (numSequences_ ? substitutions_ / numSequences_ : 0); - os << " sequences error" - << "=" << (real)seqClassficationError_ / numSequences_; + storeLocalValues(); + os << config_.name() << " error = " << evalResults_["error"]; + os << " deletions error = " << evalResults_["deletion_error"]; + os << " insertions error = " << evalResults_["insertion_error"]; + os << " substitution error = " << evalResults_["substitution_error"]; + os << " sequence error = " << evalResults_["sequence_error"]; } virtual void distributeEval(ParameterClient2* client) { @@ -272,6 +282,37 @@ public: seqClassficationError_ = (int)buf[4]; numSequences_ = (int)buf[5]; } + + void getNames(std::vector* names) { + storeLocalValues(); + names->reserve(names->size() + evalResults_.size()); + for (auto it = evalResults_.begin(); it != evalResults_.end(); ++it) { + names->push_back(config_.name() + "." 
+ it->first); + } + } + + real getValue(const std::string& name, Error* err) const { + storeLocalValues(); + + std::vector buffers; + paddle::str::split(name, '.', &buffers); + auto it = evalResults_.find(buffers[buffers.size() - 1]); + + if (it == evalResults_.end()) { + *err = Error("Evaluator does not have the key %s", name.c_str()); + return 0.0f; + } + + return it->second; + } + + std::string getType(const std::string& name, Error* err) const { + this->getValue(name, err); + if (!err->isOK()) { + return ""; + } + return "ctc_edit_distance"; + } }; REGISTER_EVALUATOR(ctc_edit_distance, CTCErrorEvaluator); diff --git a/paddle/gserver/evaluators/ChunkEvaluator.cpp b/paddle/gserver/evaluators/ChunkEvaluator.cpp index 1658282f3a5f79b128ce8685e92fd5cf9db2e41a..a2ab15eedee4aaa7b47504d50e25300359f18173 100644 --- a/paddle/gserver/evaluators/ChunkEvaluator.cpp +++ b/paddle/gserver/evaluators/ChunkEvaluator.cpp @@ -268,7 +268,13 @@ public: } // get type of evaluator - std::string getTypeImpl() const { return "chunk"; } + std::string getType(const std::string& name, Error* err) const { + this->getValue(name, err); + if (!err->isOK()) { + return ""; + } + return "chunk"; + } private: void storeLocalValues() const { diff --git a/paddle/gserver/evaluators/Evaluator.h b/paddle/gserver/evaluators/Evaluator.h index b114500e2b7c1e460a02c78b99b5f1a8fb63b8c3..90203553e0a5fe8cc8183274f374da178bae30d0 100644 --- a/paddle/gserver/evaluators/Evaluator.h +++ b/paddle/gserver/evaluators/Evaluator.h @@ -211,6 +211,7 @@ public: *err = Error("Not implemented"); return .0f; } + std::string getType(const std::string& name, Error* err) const { *err = Error("Not implemented"); return ""; @@ -331,6 +332,7 @@ private: protected: std::string getTypeImpl() const; }; + /** * @brief precision, recall and f1 score Evaluator * \f[ @@ -358,6 +360,12 @@ public: virtual void distributeEval(ParameterClient2* client); + void getNames(std::vector* names); + + real getValue(const std::string& name, Error* err) const; + + std::string getType(const std::string& name, Error* err) const; + struct StatsInfo { /// numbers of true positives double TP; @@ -428,11 +436,6 @@ private: mutable std::unordered_map values_; void storeLocalValues() const; - // Evaluator interface -public: - void getNames(std::vector* names); - real getValue(const std::string& name, Error* err) const; - std::string getType(const std::string& name, Error* err) const; }; /* diff --git a/paddle/gserver/layers/BatchNormBaseLayer.cpp b/paddle/gserver/layers/BatchNormBaseLayer.cpp index 1ceaaaa206ee3cbc5421238574c7f310011ccaa5..f7a80e23e1bd49549bec57b360587adc6b423794 100644 --- a/paddle/gserver/layers/BatchNormBaseLayer.cpp +++ b/paddle/gserver/layers/BatchNormBaseLayer.cpp @@ -62,14 +62,18 @@ void BatchNormBaseLayer::calFeatureMapSize() { const ImageConfig& conf = config_.inputs(0).image_conf(); imageH_ = inputLayers_[0]->getOutput().getFrameHeight(); imageW_ = inputLayers_[0]->getOutput().getFrameWidth(); + imageD_ = inputLayers_[0]->getOutput().getFrameDepth(); + + if (0 == imageD_) imageD_ = conf.img_size_z(); if (imageH_ == 0 && imageW_ == 0) { imageH_ = conf.has_img_size_y() ? 
conf.img_size_y() : conf.img_size(); imageW_ = conf.img_size(); } else { getOutput().setFrameHeight(imageH_); getOutput().setFrameWidth(imageW_); + getOutput().setFrameDepth(imageD_); } - imgPixels_ = imageH_ * imageW_; + imgPixels_ = imageH_ * imageW_ * imageD_; } } // namespace paddle diff --git a/paddle/gserver/layers/BatchNormBaseLayer.h b/paddle/gserver/layers/BatchNormBaseLayer.h index 230bafc31d96bbd49481a7ed135be6888688627e..e721d2d267a31cae46407673b8b1281e87055608 100644 --- a/paddle/gserver/layers/BatchNormBaseLayer.h +++ b/paddle/gserver/layers/BatchNormBaseLayer.h @@ -80,6 +80,7 @@ protected: /// Height or width of input image feature. /// Both of them are 1 if the input is fully-connected layer. + int imageD_; int imageH_; int imageW_; /// Height * Width. diff --git a/paddle/gserver/layers/Conv3DLayer.cpp b/paddle/gserver/layers/Conv3DLayer.cpp index 7cc9937cce37cbbc4640fbb88312841c23b757c0..9deda2de989a55d34510560c49b213ea1a52fd07 100644 --- a/paddle/gserver/layers/Conv3DLayer.cpp +++ b/paddle/gserver/layers/Conv3DLayer.cpp @@ -42,10 +42,10 @@ bool Conv3DLayer::init(const LayerMap &layerMap, if (sharedBiases_) { CHECK_EQ((size_t)numFilters_, biasParameter_->getSize()); biases_ = - std::unique_ptr(new Weight(1, numFilters_, biasParameter_)); + std::unique_ptr(new Weight(numFilters_, 1, biasParameter_)); } else { biases_ = - std::unique_ptr(new Weight(1, getSize(), biasParameter_)); + std::unique_ptr(new Weight(getSize(), 1, biasParameter_)); } } return true; @@ -83,8 +83,8 @@ void Conv3DLayer::forward(PassType passType) { int outWidth = getSize(); resetOutput(batchSize, outWidth); + REGISTER_TIMER_INFO("FwdConv3D", getName().c_str()); for (size_t i = 0; i != inputLayers_.size(); ++i) { - REGISTER_TIMER_INFO("FwdConv3D", getName().c_str()); const MatrixPtr &inMat = getInputValue(i); const MatrixPtr &outMat = getOutputValue(); int M = M_[i]; @@ -120,7 +120,6 @@ void Conv3DLayer::forward(PassType passType) { } } if (nullptr != this->biasParameter_) { - REGISTER_TIMER_INFO("FwBiasTimer", getName().c_str()); this->addBias(); } forwardActivation(); @@ -134,15 +133,14 @@ void Conv3DLayer::backward(const UpdateCallback &callback) { biases_->getParameterPtr()->incUpdate(callback); } + REGISTER_TIMER_INFO("BwdConv3D", getName().c_str()); for (size_t i = 0; i != inputLayers_.size(); ++i) { - REGISTER_TIMER_INFO("BwdConv3D", getName().c_str()); if (weights_[i]->getWGrad()) { bpropWeights(i); } if (getInputGrad(i)) { bpropData(i); } - REGISTER_TIMER_INFO("WeightUpdate", getName().c_str()); weights_[i]->getParameterPtr()->incUpdate(callback); } } @@ -224,20 +222,31 @@ void Conv3DLayer::bpropData(int i) { } void Conv3DLayer::bpropBiases() { + MatrixPtr biases = Matrix::create(biases_->getWGrad()->getData(), + 1, + biases_->getWGrad()->getElementCnt(), + false, + useGpu_); MatrixPtr outGradMat = getOutputGrad(); + if (this->sharedBiases_) { - biases_->getWGrad()->collectSharedBias(*outGradMat, 1.0f); + biases->collectSharedBias(*outGradMat, 1.0f); } else { - biases_->getWGrad()->collectBias(*outGradMat, 1.0f); + biases->collectBias(*outGradMat, 1.0f); } } void Conv3DLayer::addBias() { MatrixPtr outMat = getOutputValue(); + MatrixPtr bias = Matrix::create(biases_->getW()->getData(), + 1, + biases_->getW()->getElementCnt(), + false, + useGpu_); if (this->sharedBiases_) { - outMat->addSharedBias(*(biases_->getW()), 1.0f); + outMat->addSharedBias(*(bias), 1.0f); } else { - outMat->addBias(*(biases_->getW()), 1.0f); + outMat->addBias(*(bias), 1.0f); } } diff --git 
a/paddle/gserver/layers/CudnnBatchNormLayer.cpp b/paddle/gserver/layers/CudnnBatchNormLayer.cpp index 44ba2c4b7d1562d2ce839b5f4b4de1af35e6925f..49a9540c0b6e36b59ed786287ff5c4569b69a6a5 100644 --- a/paddle/gserver/layers/CudnnBatchNormLayer.cpp +++ b/paddle/gserver/layers/CudnnBatchNormLayer.cpp @@ -37,7 +37,7 @@ bool CudnnBatchNormLayer::init(const LayerMap& layerMap, } void CudnnBatchNormLayer::reshape(int batchSize) { - hl_tensor_reshape(ioDesc_, batchSize, channels_, imageH_, imageW_); + hl_tensor_reshape(ioDesc_, batchSize, channels_, imageH_ * imageD_, imageW_); } void CudnnBatchNormLayer::forward(PassType passType) { @@ -104,7 +104,7 @@ void CudnnBatchNormLayer::forward(PassType passType) { EPS, batchSize, channels_, - imageH_, + imageH_ * imageD_, imageW_); } } diff --git a/paddle/gserver/layers/DeConv3DLayer.cpp b/paddle/gserver/layers/DeConv3DLayer.cpp index 7d5c772c89d260264a59f4cc4439bb8a44c605a4..3eea638649e8ebfdd7efa18615977a9e1344c695 100644 --- a/paddle/gserver/layers/DeConv3DLayer.cpp +++ b/paddle/gserver/layers/DeConv3DLayer.cpp @@ -42,10 +42,10 @@ bool DeConv3DLayer::init(const LayerMap &layerMap, if (sharedBiases_) { CHECK_EQ((size_t)numFilters_, biasParameter_->getSize()); biases_ = - std::unique_ptr(new Weight(1, numFilters_, biasParameter_)); + std::unique_ptr(new Weight(numFilters_, 1, biasParameter_)); } else { biases_ = - std::unique_ptr(new Weight(1, getSize(), biasParameter_)); + std::unique_ptr(new Weight(getSize(), 1, biasParameter_)); } } return true; @@ -53,27 +53,27 @@ bool DeConv3DLayer::init(const LayerMap &layerMap, size_t DeConv3DLayer::getSize() { CHECK_NE(inputLayers_.size(), 0UL); - outputH_.clear(); - outputW_.clear(); - outputD_.clear(); + imgSizeW_.clear(); + imgSizeH_.clear(); + imgSizeD_.clear(); N_.clear(); NOut_.clear(); size_t layerSize = 0; for (size_t i = 0; i < inputLayers_.size(); ++i) { - outputW_.push_back( - imageSize(imgSizeW_[i], filterSize_[i], padding_[i], stride_[i], true)); - outputH_.push_back(imageSize( - imgSizeH_[i], filterSizeY_[i], paddingY_[i], strideY_[i], true)); - outputD_.push_back(imageSize( - imgSizeD_[i], filterSizeZ_[i], paddingZ_[i], strideZ_[i], true)); - NOut_.push_back(outputD_[i] * outputH_[i] * outputW_[i]); - N_.push_back(imgSizeD_[i] * imgSizeH_[i] * imgSizeW_[i]); + imgSizeW_.push_back( + imageSize(outputW_[i], filterSize_[i], padding_[i], stride_[i], true)); + imgSizeH_.push_back(imageSize( + outputH_[i], filterSizeY_[i], paddingY_[i], strideY_[i], true)); + imgSizeD_.push_back(imageSize( + outputD_[i], filterSizeZ_[i], paddingZ_[i], strideZ_[i], true)); + NOut_.push_back(imgSizeD_[i] * imgSizeH_[i] * imgSizeW_[i]); + N_.push_back(outputD_[i] * outputH_[i] * outputW_[i]); CHECK(layerSize == 0 || N_[i] * size_t(numFilters_) == layerSize); layerSize += NOut_[i] * numFilters_; } - getOutput().setFrameHeight(outputH_[0]); - getOutput().setFrameWidth(outputW_[0]); - getOutput().setFrameDepth(outputD_[0]); + getOutput().setFrameHeight(imgSizeH_[0]); + getOutput().setFrameWidth(imgSizeW_[0]); + getOutput().setFrameDepth(imgSizeD_[0]); return layerSize; } @@ -84,8 +84,8 @@ void DeConv3DLayer::forward(PassType passType) { resetOutput(batchSize, outWidth); const MatrixPtr outMat = getOutputValue(); + REGISTER_TIMER_INFO("FwdDeConv3D", getName().c_str()); for (size_t i = 0; i != inputLayers_.size(); ++i) { - REGISTER_TIMER_INFO("FwdDeConv3D", getName().c_str()); const MatrixPtr &inMat = getInputValue(i); int M = M_[i]; int N = N_[i]; @@ -103,9 +103,9 @@ void DeConv3DLayer::forward(PassType passType) { } 
colBuf_->col2Vol(outMat->getData() + n * outMat->getStride(), numFilters_, - outputD_[i], - outputH_[i], - outputW_[i], + imgSizeD_[i], + imgSizeH_[i], + imgSizeW_[i], filterSizeZ_[i], filterSizeY_[i], filterSize_[i], @@ -120,7 +120,6 @@ void DeConv3DLayer::forward(PassType passType) { } } if (nullptr != this->biasParameter_) { - REGISTER_TIMER_INFO("FwBiasTimer", getName().c_str()); this->addBias(); } forwardActivation(); @@ -133,21 +132,21 @@ void DeConv3DLayer::backward(const UpdateCallback &callback) { bpropBiases(); biases_->getParameterPtr()->incUpdate(callback); } + REGISTER_TIMER_INFO("BwdDeConv3D", getName().c_str()); for (size_t i = 0; i < inputLayers_.size(); ++i) { if (weights_[i]->getWGrad() || this->needGradient_) { int M = M_[i]; int N = N_[i]; int K = K_[i]; - REGISTER_TIMER_INFO("BwdDeConv3D", getName().c_str()); Matrix::resizeOrCreate(colBuf_, K * groups_[i], N, false, useGpu_); const MatrixPtr &inMat = getInputValue(i); for (int n = 0; n < batchSize; ++n) { colBuf_->vol2Col( getOutputGrad()->getData() + n * getOutputGrad()->getStride(), numFilters_, - outputD_[i], - outputH_[i], - outputW_[i], + imgSizeD_[i], + imgSizeH_[i], + imgSizeW_[i], filterSizeZ_[i], filterSizeY_[i], filterSize_[i], @@ -182,7 +181,6 @@ void DeConv3DLayer::backward(const UpdateCallback &callback) { } } } - REGISTER_TIMER_INFO("WeightUpdate", getName().c_str()); weights_[i]->getParameterPtr()->incUpdate(callback); } } @@ -191,21 +189,31 @@ void DeConv3DLayer::bpropWeights(int i) {} void DeConv3DLayer::bpropData(int i) {} void DeConv3DLayer::bpropBiases() { + MatrixPtr biases = Matrix::create(biases_->getWGrad()->getData(), + 1, + biases_->getWGrad()->getElementCnt(), + false, + useGpu_); const MatrixPtr &outGradMat = getOutputGrad(); if (this->sharedBiases_) { - biases_->getWGrad()->collectSharedBias(*outGradMat, 1.0f); + biases->collectSharedBias(*outGradMat, 1.0f); } else { - biases_->getWGrad()->collectBias(*outGradMat, 1.0f); + biases->collectBias(*outGradMat, 1.0f); } } void DeConv3DLayer::addBias() { MatrixPtr outMat = getOutputValue(); + MatrixPtr bias = Matrix::create(biases_->getW()->getData(), + 1, + biases_->getW()->getElementCnt(), + false, + useGpu_); if (this->sharedBiases_) { - outMat->addSharedBias(*(biases_->getW()), 1.0f); + outMat->addSharedBias(*(bias), 1.0f); } else { - outMat->addBias(*(biases_->getW()), 1.0f); + outMat->addBias(*(bias), 1.0f); } } diff --git a/paddle/gserver/layers/DetectionOutputLayer.cpp b/paddle/gserver/layers/DetectionOutputLayer.cpp index 8ab838e191314ab25469631626c0b0564d7fffda..0cf0a92bf4bd8f9b8eba2016b2377d9dfb18c70a 100644 --- a/paddle/gserver/layers/DetectionOutputLayer.cpp +++ b/paddle/gserver/layers/DetectionOutputLayer.cpp @@ -139,7 +139,13 @@ void DetectionOutputLayer::forward(PassType passType) { allDecodedBBoxes, &allIndices); - resetOutput(numKept, 7); + if (numKept > 0) { + resetOutput(numKept, 7); + } else { + MatrixPtr outV = getOutputValue(); + outV = NULL; + return; + } MatrixPtr outV = getOutputValue(); getDetectionOutput(confBuffer_->getData(), numKept, diff --git a/paddle/gserver/layers/DetectionUtil.cpp b/paddle/gserver/layers/DetectionUtil.cpp index 3e61adc66e60c54250e4f323452aa13045310879..d83674f45a70212a8adc94a31ff58eb0e01baa00 100644 --- a/paddle/gserver/layers/DetectionUtil.cpp +++ b/paddle/gserver/layers/DetectionUtil.cpp @@ -469,7 +469,7 @@ size_t getDetectionIndices( const size_t numClasses, const size_t backgroundId, const size_t batchSize, - const size_t confThreshold, + const real confThreshold, const size_t nmsTopK, const 
real nmsThreshold, const size_t keepTopK, diff --git a/paddle/gserver/layers/DetectionUtil.h b/paddle/gserver/layers/DetectionUtil.h index fe4f9f075e4cf011c97f68f49598a828d62327b3..641ed873b4c8645b6455e5ef5e63593e3005b770 100644 --- a/paddle/gserver/layers/DetectionUtil.h +++ b/paddle/gserver/layers/DetectionUtil.h @@ -275,7 +275,7 @@ size_t getDetectionIndices( const size_t numClasses, const size_t backgroundId, const size_t batchSize, - const size_t confThreshold, + const real confThreshold, const size_t nmsTopK, const real nmsThreshold, const size_t keepTopK, diff --git a/paddle/gserver/layers/GruCompute.cpp b/paddle/gserver/layers/GruCompute.cpp index 06907768e98f4bad952706cffbbd65d1f86cc6df..148516391c6cad8feff34b9bd1c10c27d1a8a0e6 100644 --- a/paddle/gserver/layers/GruCompute.cpp +++ b/paddle/gserver/layers/GruCompute.cpp @@ -14,6 +14,7 @@ limitations under the License. */ #include "GruCompute.h" #include "hl_recurrent_apply.cuh" +#include "paddle/function/GruFunctor.h" #include "paddle/utils/Util.h" namespace paddle { @@ -25,13 +26,13 @@ void GruCompute::init(LayerConfig &config) { template <> void GruCompute::forward<0>(hl_gru_value value, int frameSize, int batchSize) { - hl_cpu_gru_forward(hppl::forward::gru_resetOutput(), - hppl::forward::gru_finalOutput(), - value, - frameSize, - batchSize, - activeNode_, - activeGate_); + GruFunctor::compute(hppl::forward::gru_resetOutput(), + hppl::forward::gru_finalOutput(), + value, + frameSize, + batchSize, + activeNode_, + activeGate_); } template <> @@ -39,14 +40,15 @@ void GruCompute::backward<0>(hl_gru_value value, hl_gru_grad grad, int frameSize, int batchSize) { - hl_cpu_gru_backward(hppl::backward::gru_stateGrad(), - hppl::backward::gru_resetGrad(), - value, - grad, - frameSize, - batchSize, - activeNode_, - activeGate_); + GruGradFunctor::compute( + hppl::backward::gru_stateGrad(), + hppl::backward::gru_resetGrad(), + value, + grad, + frameSize, + batchSize, + activeNode_, + activeGate_); } } // namespace paddle diff --git a/paddle/gserver/layers/Layer.h b/paddle/gserver/layers/Layer.h index edef36194aabdb9c122ec3423deb036169a34d7c..4002a3d0747a86ab7b495ffe52247521831b71b8 100644 --- a/paddle/gserver/layers/Layer.h +++ b/paddle/gserver/layers/Layer.h @@ -49,6 +49,12 @@ struct LayerState { }; typedef std::shared_ptr LayerStatePtr; +/// Paddle device ID, MKLDNN is -2, CPU is -1 +enum PADDLE_DEVICE_ID { + MKLDNN_DEVICE = -2, + CPU_DEVICE = -1, +}; + /** * @brief Base class for layer. * Define necessary variables and functions for every layer. @@ -59,11 +65,6 @@ protected: LayerConfig config_; /// whether to use GPU bool useGpu_; - /// Paddle device ID, MKLDNN is -2, CPU is -1 - enum PADDLE_DEVICE_ID { - MKLDNN_DEVICE = -2, - CPU_DEVICE = -1, - }; /// Device Id. MKLDNN is -2, CPU is -1, and GPU is 0, 1, 2 ... int deviceId_; /// Input layers diff --git a/paddle/gserver/layers/MKLDNNFcLayer.cpp b/paddle/gserver/layers/MKLDNNFcLayer.cpp index 8318c8c519a4cec1610eadd28320ee5ce0b4147d..f70343251ad4fbb99f9614618f6d1bff1174f15e 100644 --- a/paddle/gserver/layers/MKLDNNFcLayer.cpp +++ b/paddle/gserver/layers/MKLDNNFcLayer.cpp @@ -14,7 +14,6 @@ limitations under the License. 
*/ #include "MKLDNNFcLayer.h" #include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" using namespace mkldnn; // NOLINT typedef memory::format format; @@ -40,6 +39,8 @@ bool MKLDNNFcLayer::init(const LayerMap& layerMap, oc_ = getSize(); oh_ = 1; ow_ = 1; + ih_ = 1; + iw_ = 1; // input size can not change in FC iLayerSize_ = inputLayers_[0]->getSize(); @@ -77,111 +78,86 @@ void MKLDNNFcLayer::convertWeightsToPaddle() { wgtVal_->reorderDataTo(wgtVal_, dstFmt, targetDim); } -void MKLDNNFcLayer::convertOutputToOtherDevice() { - copyOutputInfoToOtherDevice(); - // find other cpu device and reorder output to cpu device - int cnt = 0; - for (size_t i = 0; i < outputOtherDevice_.size(); i++) { - if (outputOtherDevice_[i].deviceId == CPU_DEVICE) { - // fc cpu output value do not need convert - // just share point - outputOtherDevice_[i].value = output_.value; - ++cnt; - } - } - - if (cnt > 1) { - LOG(WARNING) << "should not have more than one CPU devie"; - } -} +void MKLDNNFcLayer::reshape( + int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) { + reshapeInput(bs, ih, iw); -void MKLDNNFcLayer::reshape() { - const Argument& input = getInput(0, getPrev(0)->getDeviceId()); - int batchSize = input.getBatchSize(); - if (bs_ == batchSize) { - return; - } - bs_ = batchSize; - ih_ = input.getFrameHeight(); - iw_ = input.getFrameWidth(); - if (ih_ == 0) { - ih_ = 1; - } - if (iw_ == 0) { - iw_ = 1; - } CHECK_EQ(iLayerSize_, inputLayers_[0]->getSize()); - ic_ = iLayerSize_ / (ih_ * iw_); - CHECK_EQ(size_t(ic_ * ih_ * iw_), iLayerSize_) << "not divisible"; - CHECK_EQ(size_t(oc_), getSize()); - printSizeInfo(); + ic = iLayerSize_ / (ih * iw); + CHECK_EQ(size_t(ic * ih * iw), iLayerSize_) << "not divisible"; + CHECK_EQ(size_t(oc), getSize()); - // reset output - output_.setFrameHeight(oh_); - output_.setFrameWidth(ow_); - resetOutput(bs_, oc_); + reshapeOutput(oh, ow); + resizeOutput(bs, oc); - // reset mkldnn forward - resetFwd(); - needResetBwd_ = true; - - convertWeightsFromPaddle(); + printSizeInfo(); } -void MKLDNNFcLayer::resetFwd() { +void MKLDNNFcLayer::resetFwd(std::vector& pipeline, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) { + pipeline.clear(); bool hasBias = biases_ && biases_->getW(); - const MatrixPtr& wgt = weight_->getW(); - const MatrixPtr& bias = hasBias ? biases_->getW() : nullptr; - const MatrixPtr& out = output_.value; + const MatrixPtr& wgtVal = weight_->getW(); + const MatrixPtr& biasVal = hasBias ? biases_->getW() : nullptr; + const MatrixPtr& outVal = output_.value; if (inputIsOnlyMKLDNN()) { - const MatrixPtr& in = getInputValue(0); - inVal_ = std::dynamic_pointer_cast(in); - CHECK(inVal_) << "Input should be MKLDNNMatrix"; + const MatrixPtr& inVal = getInputValue(0); + in = std::dynamic_pointer_cast(inVal); + CHECK(in) << "Input should be MKLDNNMatrix"; } else { CHECK_EQ(getPrev(0)->getDeviceId(), CPU_DEVICE) << "Only support CPU yet"; - const MatrixPtr& in = getInputValue(0, CPU_DEVICE); - inVal_ = MKLDNNMatrix::create( - in, memory::dims{bs_, ic_, ih_, iw_}, format::nchw, engine_); - } - inVal_->downSpatial(); - wgtVal_ = MKLDNNMatrix::create( - wgt, memory::dims{oc_, ic_, ih_, iw_}, format::oihw, engine_); - wgtVal_->downSpatial(); - biasVal_ = - hasBias ? 
MKLDNNMatrix::create(bias, {oc_}, format::x, engine_) : nullptr;
-  outVal_ = MKLDNNMatrix::create(out, {bs_, oc_}, format::nc, engine_);
+    const MatrixPtr& inVal = getInputValue(0, CPU_DEVICE);
+    in = MKLDNNMatrix::create(
+        inVal, memory::dims{bs_, ic_, ih_, iw_}, format::nchw, engine_);
+  }
+  in->downSpatial();
+  wgt = MKLDNNMatrix::create(
+      wgtVal, memory::dims{oc_, ic_, ih_, iw_}, format::oihw, engine_);
+  wgt->downSpatial();
+  bias = hasBias ? MKLDNNMatrix::create(biasVal, {oc_}, format::x, engine_)
+                 : nullptr;
+  out = MKLDNNMatrix::create(outVal, {bs_, oc_}, format::nc, engine_);

   // change original output value to mkldnn output value
-  output_.value = std::dynamic_pointer_cast<Matrix>(outVal_);
+  output_.value = std::dynamic_pointer_cast<Matrix>(out);

   if (!outputIsOnlyMKLDNN()) {
-    convertOutputToOtherDevice();
+    // the FC CPU output value does not need a conversion here,
+    // just share the data pointer
+    getOutput(CPU_DEVICE).value->setData(output_.value->getData());
   }

   // create forward handle
   prop_kind pk = prop_kind::forward;
   fc_fwd::desc fwdDesc = hasBias ? fc_fwd::desc(pk,
-                                                inVal_->getMemoryDesc(),
-                                                wgtVal_->getMemoryDesc(),
-                                                biasVal_->getMemoryDesc(),
-                                                outVal_->getMemoryDesc())
+                                                in->getMemoryDesc(),
+                                                wgt->getMemoryDesc(),
+                                                bias->getMemoryDesc(),
+                                                out->getMemoryDesc())
                                  : fc_fwd::desc(pk,
-                                                inVal_->getMemoryDesc(),
-                                                wgtVal_->getMemoryDesc(),
-                                                outVal_->getMemoryDesc());
+                                                in->getMemoryDesc(),
+                                                wgt->getMemoryDesc(),
+                                                out->getMemoryDesc());
   fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_);

   if (hasBias) {
-    fwd_.reset(new fc_fwd(fwdPD, *inVal_, *wgtVal_, *biasVal_, *outVal_));
+    fwd_.reset(new fc_fwd(fwdPD, *in, *wgt, *bias, *out));
   } else {
-    fwd_.reset(new fc_fwd(fwdPD, *inVal_, *wgtVal_, *outVal_));
+    fwd_.reset(new fc_fwd(fwdPD, *in, *wgt, *out));
   }
   printValueFormatFlow();

-  pipelineFwd_.clear();
-  pipelineFwd_.push_back(*fwd_);
+  pipeline.push_back(*fwd_);
 }

-void MKLDNNFcLayer::resetBwd() {
+void MKLDNNFcLayer::resetBwd(std::vector<mkldnn::primitive>& pipeline,
+                             MKLDNNMatrixPtr& in,
+                             MKLDNNMatrixPtr& wgt,
+                             MKLDNNMatrixPtr& bias,
+                             MKLDNNMatrixPtr& out) {
+  pipeline.clear();
   if (!needResetBwd_) {
     return;
   }
@@ -190,8 +166,8 @@ void MKLDNNFcLayer::resetBwd() {

   /// backward weight
   CHECK(inVal_) << "Should have input value";
-  const MatrixPtr& wgt = weight_->getWGrad();
-  const MatrixPtr& bias = hasBias ? biases_->getWGrad() : nullptr;
+  const MatrixPtr& wgtGrad = weight_->getWGrad();
+  const MatrixPtr& biasGrad = hasBias ? biases_->getWGrad() : nullptr;

   // TODO(TJ): merge outgrad
   int device = outputIsOnlyMKLDNN() ? MKLDNN_DEVICE : CPU_DEVICE;
@@ -202,101 +178,66 @@
   // for CPU device:
   // fc does not need to convert from cpu device since output is always nc format
   // only needs to be created from cpu device
-  const MatrixPtr& out = getOutput(device).grad;
-  outGrad_ = MKLDNNMatrix::create(out, outVal_->getPrimitiveDesc());
-  wgtGrad_ = MKLDNNMatrix::create(wgt, wgtVal_->getPrimitiveDesc());
-  biasGrad_ = hasBias ? MKLDNNMatrix::create(bias, biasVal_->getPrimitiveDesc())
-                      : nullptr;
+  const MatrixPtr& outGrad = getOutput(device).grad;
+  out = MKLDNNMatrix::create(outGrad, outVal_->getPrimitiveDesc());
+  wgt = MKLDNNMatrix::create(wgtGrad, wgtVal_->getPrimitiveDesc());
+  bias = hasBias ?
MKLDNNMatrix::create(biasGrad, biasVal_->getPrimitiveDesc()) + : nullptr; // create memory primitive desc fc_fwd::desc fwdDesc = fc_fwd::desc(prop_kind::forward, inVal_->getMemoryDesc(), - wgtGrad_->getMemoryDesc(), - outGrad_->getMemoryDesc()); + wgt->getMemoryDesc(), + out->getMemoryDesc()); fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_); fc_bwdWgt::desc bwdWgtDesc = hasBias ? fc_bwdWgt::desc(inVal_->getMemoryDesc(), - wgtGrad_->getMemoryDesc(), - biasGrad_->getMemoryDesc(), - outGrad_->getMemoryDesc()) + wgt->getMemoryDesc(), + bias->getMemoryDesc(), + out->getMemoryDesc()) : fc_bwdWgt::desc(inVal_->getMemoryDesc(), - wgtGrad_->getMemoryDesc(), - outGrad_->getMemoryDesc()); + wgt->getMemoryDesc(), + out->getMemoryDesc()); fc_bwdWgt::primitive_desc bwdWgtPD = fc_bwdWgt::primitive_desc(bwdWgtDesc, engine_, fwdPD); if (hasBias) { - bwdWgt_.reset( - new fc_bwdWgt(bwdWgtPD, *inVal_, *outGrad_, *wgtGrad_, *biasGrad_)); + bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt, *bias)); } else { - bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *outGrad_, *wgtGrad_)); + bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt)); } - pipelineBwd_.clear(); - pipelineBwd_.push_back(*bwdWgt_); + pipeline.push_back(*bwdWgt_); /// backward data - device = inputIsOnlyMKLDNN() ? MKLDNN_DEVICE : CPU_DEVICE; - const MatrixPtr& in = getInputGrad(0, device); - if (in == nullptr) { + const MatrixPtr& inGrad = inputLayers_[0]->getOutput().grad; + if (inGrad == nullptr) { return; } - if (getInput(0, device).getAllCount() > 1) { - // TODO(TJ): use outputMaps_ ways when merge outgrad done + if (getInput(0, MKLDNN_DEVICE).getAllCount() > 1) { + // TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done } else { - inGrad_ = MKLDNNMatrix::create(in, inVal_->getPrimitiveDesc()); + in = MKLDNNMatrix::create(inGrad, inVal_->getPrimitiveDesc()); } - fc_bwdData::desc bwdDataDesc = fc_bwdData::desc(inVal_->getMemoryDesc(), - wgtGrad_->getMemoryDesc(), - outGrad_->getMemoryDesc()); + fc_bwdData::desc bwdDataDesc = fc_bwdData::desc( + inVal_->getMemoryDesc(), wgt->getMemoryDesc(), out->getMemoryDesc()); fc_bwdData::primitive_desc bwdDataPD = fc_bwdData::primitive_desc(bwdDataDesc, engine_, fwdPD); CHECK(wgtVal_) << "Should have weight memory"; - bwdData_.reset(new fc_bwdData(bwdDataPD, *outGrad_, *wgtVal_, *inGrad_)); + bwdData_.reset(new fc_bwdData(bwdDataPD, *out, *wgtVal_, *in)); printGradFormatFlow(); - pipelineBwd_.push_back(*bwdData_); + pipeline.push_back(*bwdData_); } -void MKLDNNFcLayer::forward(PassType passType) { - Layer::forward(passType); - reshape(); - - { - REGISTER_TIMER_INFO("mkldnn_FwdTimer", getName().c_str()); - syncInputValue(); - - // just submit forward pipeline - stream_->submit(pipelineFwd_); - } - - /* activation */ { - REGISTER_TIMER_INFO("FwActTimer", getName().c_str()); - forwardActivation(); - } +void MKLDNNFcLayer::updateInputData() { + inVal_->setData(getInputValue(0, CPU_DEVICE)->getData()); } -void MKLDNNFcLayer::backward(const UpdateCallback& callback) { - /* Do derivation */ { - REGISTER_TIMER_INFO("BpActTimer", getName().c_str()); - backwardActivation(); - } - - { - REGISTER_TIMER_INFO("mkldnn_bwdTimer", getName().c_str()); - resetBwd(); - - syncOutputGrad(); - // just sumbmit backward pipeline - stream_->submit(pipelineBwd_); - } - - { - REGISTER_TIMER_INFO("WeightUpdate", getName().c_str()); - weight_->getParameterPtr()->incUpdate(callback); - if (biases_ && biases_->getWGrad()) { - biases_->getParameterPtr()->incUpdate(callback); - } 
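Editor's aside: the `forward`/`backward` bodies removed above are not lost; this patch hoists that control flow into the `MKLDNNLayer` base class (shown later in the diff), leaving `MKLDNNFcLayer` with only the layer-specific hooks `reshape`, `resetFwd`, `resetBwd`, `updateInputData`, and `updateWeights`. A minimal sketch of that template-method split, using stand-in types rather than the real Paddle classes:

```cpp
#include <vector>

struct Primitive {};  // stand-in for mkldnn::primitive

class Base {
public:
  virtual ~Base() = default;

  // the skeleton formerly duplicated in each subclass's forward()
  void forward() {
    if (sizesChanged_) {
      resetFwd(pipeline_);  // rebuild primitives only when shapes change
      sizesChanged_ = false;
    }
    submit(pipeline_);
  }

protected:
  virtual void resetFwd(std::vector<Primitive>& pipeline) = 0;
  void submit(const std::vector<Primitive>&) {}  // placeholder for stream submit

  std::vector<Primitive> pipeline_;
  bool sizesChanged_ = true;
};

class FcLikeLayer : public Base {
protected:
  void resetFwd(std::vector<Primitive>& pipeline) override {
    pipeline.clear();
    pipeline.push_back(Primitive{});  // the fully-connected forward primitive
  }
};
```

This keeps the expensive primitive construction out of the per-batch path: `resetFwd` runs only when the input size changes, which is exactly what the real `MKLDNNLayer::forward` guards with `inputElemenCnt_`.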
+void MKLDNNFcLayer::updateWeights(const UpdateCallback& callback) { + weight_->getParameterPtr()->incUpdate(callback); + if (biases_ && biases_->getWGrad()) { + biases_->getParameterPtr()->incUpdate(callback); } } } // namespace paddle diff --git a/paddle/gserver/layers/MKLDNNFcLayer.h b/paddle/gserver/layers/MKLDNNFcLayer.h index e138a6faf181c412949218458e7ecf800a0d6a07..3119f863496df092da13c08bf733f13c42e53780 100644 --- a/paddle/gserver/layers/MKLDNNFcLayer.h +++ b/paddle/gserver/layers/MKLDNNFcLayer.h @@ -45,35 +45,28 @@ public: bool init(const LayerMap& layerMap, const ParameterMap& parameterMap) override; - void convertWeightsFromPaddle() override; + void reshape( + int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) override; - void convertWeightsToPaddle() override; + void resetFwd(std::vector& pipeline, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) override; - void forward(PassType passType) override; + void resetBwd(std::vector& pipeline, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) override; - void backward(const UpdateCallback& callback) override; + void updateInputData() override; -protected: - /** - * reshape the input image sizes - * and reset output buffer size - * and reset mkldnn forward - */ - void reshape(); - - /** - * reset the forward primitve and memory - * only would be called when input size changes - */ - void resetFwd(); - - /** - * reset the backward primitve and memory for mkldnn fc - * only would be called when needed - */ - void resetBwd(); - - void convertOutputToOtherDevice() override; + void updateWeights(const UpdateCallback& callback) override; + + void convertWeightsFromPaddle() override; + + void convertWeightsToPaddle() override; }; } // namespace paddle diff --git a/paddle/gserver/layers/MKLDNNLayer.h b/paddle/gserver/layers/MKLDNNLayer.h index b983b833d510b823c5d4cff0b9390173e4cefc89..169679c8297542cac4a43f5a8e1af311ad9282df 100644 --- a/paddle/gserver/layers/MKLDNNLayer.h +++ b/paddle/gserver/layers/MKLDNNLayer.h @@ -19,6 +19,7 @@ limitations under the License. 
*/ #include "MKLDNNBase.h" #include "mkldnn.hpp" #include "paddle/math/MKLDNNMatrix.h" +#include "paddle/utils/Stat.h" DECLARE_bool(use_mkldnn); @@ -33,6 +34,8 @@ typedef std::shared_ptr MKLDNNLayerPtr; */ class MKLDNNLayer : public Layer { protected: + // input value element count + size_t inputElemenCnt_; // batch size int bs_; // input image channel, height and width @@ -52,7 +55,7 @@ protected: std::vector pipelineFwd_; std::vector pipelineBwd_; - // MKLDNNMatrixPtr + // MKLDNNMatrixPtr with internal format MKLDNNMatrixPtr inVal_; MKLDNNMatrixPtr inGrad_; MKLDNNMatrixPtr outVal_; @@ -65,6 +68,7 @@ protected: public: explicit MKLDNNLayer(const LayerConfig& config) : Layer(config), + inputElemenCnt_(0), bs_(0), ic_(0), ih_(0), @@ -95,12 +99,104 @@ public: if (!Layer::init(layerMap, parameterMap)) { return false; } + checkCPUOutputsNumber(); stream_.reset(new MKLDNNStream()); engine_ = CPUEngine::Instance().getEngine(); return true; } + void forward(PassType passType) override { + passType_ = passType; + + { + REGISTER_TIMER_INFO("mkldnn_FwdTimer", getName().c_str()); + CHECK(!inputLayers_.empty()); + copySeqInfoToOutputs(); + size_t elemenCnt = inputLayers_[0]->getOutput().value->getElementCnt(); + if (inputElemenCnt_ != elemenCnt) { + // reset when input total sizes changed, not only the batchsize + inputElemenCnt_ = elemenCnt; + reshape(bs_, ic_, ih_, iw_, oc_, oh_, ow_); + resetFwd(pipelineFwd_, inVal_, wgtVal_, biasVal_, outVal_); + convertWeightsFromPaddle(); + needResetBwd_ = true; + } + + if (inputLayers_[0]->getType() == "data") { + updateInputData(); + } + + stream_->submit(pipelineFwd_); + } + + /* activation */ { + REGISTER_TIMER_INFO("FwActTimer", getName().c_str()); + forwardActivation(); + } + } + + void backward(const UpdateCallback& callback) override { + /* Do derivation */ { + REGISTER_TIMER_INFO("BpActTimer", getName().c_str()); + backwardActivation(); + } + + { + REGISTER_TIMER_INFO("mkldnn_bwdTimer", getName().c_str()); + if (needResetBwd_) { + resetBwd(pipelineBwd_, inGrad_, wgtGrad_, biasGrad_, outGrad_); + needResetBwd_ = false; + } + + stream_->submit(pipelineBwd_); + } + + { + REGISTER_TIMER_INFO("WeightUpdate", getName().c_str()); + updateWeights(callback); + } + } + + /** + * reshape the input image sizes + * and reset output image and buffer size + * output channel can not be changed + */ + virtual void reshape( + int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) = 0; + + /** + * reset the mkldnn forward primitve and memory + * only would be called when input size changes + */ + virtual void resetFwd(std::vector& pipeline, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) = 0; + + /** + * reset the mkldnn backward primitve and memory for mkldnn fc + * only would be called when needed + */ + virtual void resetBwd(std::vector& pipeline, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) = 0; + + /** + * Update input value data when input layer is "data" type. + * Since the input value data address might be changed. + */ + virtual void updateInputData() {} + + /** + * Update weights and biases if necessary. + */ + virtual void updateWeights(const UpdateCallback& callback) {} + /** * convert weight from paddle format to mkldnn format * weight_ will be override @@ -114,10 +210,38 @@ public: virtual void convertWeightsToPaddle() {} /** - * convert MKLDNN output to other device. 
- * only support CPU device yet + * add this interface as public for unit test + */ + void addOutputArgument(int deviceId) { Layer::addOutputArgument(deviceId); } + +protected: + /** + * reshape the input image sizes and input batchsize */ - virtual void convertOutputToOtherDevice() {} + virtual void reshapeInput(int& batchsize, int& height, int& width) { + const Argument& input = inputLayers_[0]->getOutput(); + batchsize = input.getBatchSize(); + int h = input.getFrameHeight(); + int w = input.getFrameWidth(); + if (h != 0) { + height = h; + } + if (w != 0) { + width = w; + } + } + + /** + * reshape output image sizes + */ + virtual void reshapeOutput(size_t height, size_t width) { + output_.setFrameHeight(height); + output_.setFrameWidth(width); + for (size_t i = 0; i < outputOtherDevice_.size(); i++) { + outputOtherDevice_[i].setFrameHeight(height); + outputOtherDevice_[i].setFrameWidth(width); + } + } /** * print info about sizes @@ -133,8 +257,8 @@ public: */ virtual void printValueFormatFlow() { if (inVal_ && outVal_) { - VLOG(MKLDNN_FMTS) << "value format flow --- " << inVal_->getFormat() - << " >>> " << outVal_->getFormat(); + VLOG(MKLDNN_FMTS) << inVal_->getFormat() << " >>> " + << outVal_->getFormat(); } } @@ -143,29 +267,12 @@ public: */ virtual void printGradFormatFlow() { if (inGrad_ && outGrad_) { - VLOG(MKLDNN_FMTS) << "grad format flow --- " << inGrad_->getFormat() - << " <<< " << outGrad_->getFormat(); + VLOG(MKLDNN_FMTS) << inGrad_->getFormat() << " <<< " + << outGrad_->getFormat(); } } protected: - /** - * copy image size and sequence info to other device - * @note: can not directly use Layer::copyOutputToOtherDevice since here only - * copy base info and do not copy data value - */ - void copyOutputInfoToOtherDevice() { - for (size_t i = 0; i < outputOtherDevice_.size(); i++) { - outputOtherDevice_[i].setFrameHeight(output_.getFrameHeight()); - outputOtherDevice_[i].setFrameWidth(output_.getFrameWidth()); - outputOtherDevice_[i].sequenceStartPositions = - output_.sequenceStartPositions; - outputOtherDevice_[i].subSequenceStartPositions = - output_.subSequenceStartPositions; - outputOtherDevice_[i].cpuSequenceDims = output_.cpuSequenceDims; - } - } - /** * If input only has MKLDNN device. * Otherwise, only support the previous layer using CPU device. @@ -193,37 +300,12 @@ protected: return outputOtherDevice_.size() == 0; } - /** - * Sync input value data - */ - void syncInputValue() { - if (inputIsOnlyMKLDNN()) { - return; - } - real* iData = getInputValue(0, CPU_DEVICE)->getData(); - // update input data - // since it might be changed if this is after data layer - inVal_->updateData(iData); - } - - /** - * Sync output grad data - */ - void syncOutputGrad() { - if (outputIsOnlyMKLDNN()) { - return; - } - - // update diff - real* oDiff = getOutput(CPU_DEVICE).grad->getData(); - outGrad_->updateData(oDiff); - } - /** * Set deviceId of this layer. */ void setDevice(int id) { deviceId_ = id; } +private: /** * Set deviceId of the params used in this layer. */ @@ -247,6 +329,42 @@ protected: parameter->setDevice(id); } } + + /** + * Check the cpu device number of outputOtherDevice_. + * should have only one at most. + */ + void checkCPUOutputsNumber(int max = 1) { + int cnt = 0; + for (size_t i = 0; i < outputOtherDevice_.size(); i++) { + if (outputOtherDevice_[i].deviceId == CPU_DEVICE) { + ++cnt; + } + } + CHECK_LE(cnt, max) << "too much CPU devies"; + } + + /** + * copy SeqInfo from input layer to this output and other output devices. 
+ * @note: do not use getInput(0) since it used this deviceId_, + * use "inputLayers_[0]->getOutput()" instead. + */ + void copySeqInfoToOutputs() { + if (inputLayers_.empty() || !needSequenceInfo_) { + return; + } + const Argument& input = inputLayers_[0]->getOutput(); + output_.sequenceStartPositions = input.sequenceStartPositions; + output_.subSequenceStartPositions = input.subSequenceStartPositions; + output_.cpuSequenceDims = input.cpuSequenceDims; + for (size_t i = 0; i < outputOtherDevice_.size(); i++) { + outputOtherDevice_[i].sequenceStartPositions = + output_.sequenceStartPositions; + outputOtherDevice_[i].subSequenceStartPositions = + output_.subSequenceStartPositions; + outputOtherDevice_[i].cpuSequenceDims = output_.cpuSequenceDims; + } + } }; } // namespace paddle diff --git a/paddle/gserver/layers/SwitchOrderLayer.cpp b/paddle/gserver/layers/SwitchOrderLayer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e97809141a93106f9e6ebaf40c7e8aa9c6010557 --- /dev/null +++ b/paddle/gserver/layers/SwitchOrderLayer.cpp @@ -0,0 +1,109 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "SwitchOrderLayer.h" +#include "paddle/utils/Stat.h" + +namespace paddle { + +REGISTER_LAYER(switch_order, SwitchOrderLayer); + +bool SwitchOrderLayer::init(const LayerMap& layerMap, + const ParameterMap& parameterMap) { + /* Initialize the basic parent class */ + Layer::init(layerMap, parameterMap); + auto& img_conf = config_.inputs(0).image_conf(); + size_t inD = img_conf.img_size_z(); + size_t inH = + img_conf.has_img_size_y() ? img_conf.img_size_y() : img_conf.img_size(); + size_t inW = img_conf.img_size(); + size_t inC = img_conf.channels(); + inH = inH * inD; + inDims_ = TensorShape({0, inC, inH, inW}); + outDims_ = TensorShape(4); + + auto& reshape_conf = config_.reshape_conf(); + for (int i = 0; i < reshape_conf.height_axis_size(); i++) { + heightAxis_.push_back(reshape_conf.height_axis(i)); + } + for (int i = 0; i < reshape_conf.width_axis_size(); i++) { + widthAxis_.push_back(reshape_conf.width_axis(i)); + } + createFunction(nchw2nhwc_, "NCHW2NHWC", FuncConfig()); + createFunction(nhwc2nchw_, "NHWC2NCHW", FuncConfig()); + return true; +} + +void SwitchOrderLayer::setOutDims() { + outDims_.setDim(0, inDims_[0]); + outDims_.setDim(1, inDims_[2]); + outDims_.setDim(2, inDims_[3]); + outDims_.setDim(3, inDims_[1]); + reshapeHeight_ = 1; + for (size_t i = 0; i < heightAxis_.size(); i++) { + reshapeHeight_ *= outDims_[heightAxis_[i]]; + } + output_.setFrameHeight(reshapeHeight_); + reshapeWidth_ = 1; + for (size_t i = 0; i < widthAxis_.size(); i++) { + reshapeWidth_ *= outDims_[widthAxis_[i]]; + } + output_.setFrameWidth(reshapeWidth_); +} + +void SwitchOrderLayer::setInDims() { + MatrixPtr input = inputLayers_[0]->getOutputValue(); + size_t batchSize = input->getHeight(); + inDims_.setDim(0, batchSize); + int d = inputLayers_[0]->getOutput().getFrameDepth(); + d = (d == 0 ? 
1 : d);
+  int h = inputLayers_[0]->getOutput().getFrameHeight();
+  if (h != 0) inDims_.setDim(2, h * d);
+  int w = inputLayers_[0]->getOutput().getFrameWidth();
+  if (w != 0) inDims_.setDim(3, w);
+  int totalCount = input->getElementCnt();
+  int channels = totalCount / (inDims_[0] * inDims_[2] * inDims_[3]);
+  if (channels != 0) inDims_.setDim(1, channels);
+}
+
+void SwitchOrderLayer::forward(PassType passType) {
+  Layer::forward(passType);
+  setInDims();
+  setOutDims();
+  resetOutput(outDims_[0], outDims_[1] * outDims_[2] * outDims_[3]);
+  if (heightAxis_.size() > 0) {
+    resetOutput(reshapeHeight_, reshapeWidth_);
+  }
+
+  // switch NCHW to NHWC
+  BufferArgs inputs;
+  BufferArgs outputs;
+  inputs.addArg(*getInputValue(0), inDims_);
+  outputs.addArg(*getOutputValue(), outDims_);
+  nchw2nhwc_[0]->calc(inputs, outputs);
+  forwardActivation();
+}
+
+void SwitchOrderLayer::backward(const UpdateCallback& callback) {
+  (void)callback;
+  backwardActivation();
+
+  // switch NHWC to NCHW
+  BufferArgs inputs;
+  BufferArgs outputs;
+  inputs.addArg(*getOutputGrad(), outDims_);
+  outputs.addArg(*getInputGrad(0), inDims_, ADD_TO);
+  nhwc2nchw_[0]->calc(inputs, outputs);
+}
+} // namespace paddle
diff --git a/paddle/gserver/layers/SwitchOrderLayer.h b/paddle/gserver/layers/SwitchOrderLayer.h
new file mode 100644
index 0000000000000000000000000000000000000000..47b1f7f73ee783b3eae3c9cfe08b1459cef16a71
--- /dev/null
+++ b/paddle/gserver/layers/SwitchOrderLayer.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "Layer.h"
+
+namespace paddle {
+
+/**
+ * \brief This layer switches the input data layout from NCHW to NHWC, with an optional reshape of the output height and width axes.
+ */ +class SwitchOrderLayer : public Layer { +public: + explicit SwitchOrderLayer(const LayerConfig& config) : Layer(config) {} + + ~SwitchOrderLayer() {} + + bool init(const LayerMap& layerMap, + const ParameterMap& parameterMap) override; + void forward(PassType passType) override; + void backward(const UpdateCallback& callback = nullptr) override; + void setInDims(); + void setOutDims(); + +protected: + std::vector> nchw2nhwc_; + std::vector> nhwc2nchw_; + TensorShape inDims_; + TensorShape outDims_; + std::vector heightAxis_; + std::vector widthAxis_; + size_t reshapeHeight_; + size_t reshapeWidth_; +}; +} // namespace paddle diff --git a/paddle/gserver/tests/MKLDNNTester.cpp b/paddle/gserver/tests/MKLDNNTester.cpp index de1635be2af37cd0ba49010199a417090865b0e4..2f48e5b2d3ffc9337ed1314f6db6549e56263fdd 100644 --- a/paddle/gserver/tests/MKLDNNTester.cpp +++ b/paddle/gserver/tests/MKLDNNTester.cpp @@ -63,8 +63,12 @@ void MKLDNNTester::reset(const TestConfig& dnn, initTestLayer( configs_[i], &(layerMaps_[i]), &(parameters_[i]), &(testLayers_[i])); } - dnnLayer_ = testLayers_[DNN]; refLayer_ = testLayers_[REF]; + dnnLayer_ = std::dynamic_pointer_cast(testLayers_[DNN]); + CHECK(dnnLayer_); + // for comparison with Paddle reference results, + // need manually add cpu device output for test + dnnLayer_->addOutputArgument(CPU_DEVICE); EXPECT_EQ(dataLayers_[DNN].size(), dataLayers_[REF].size()); EXPECT_EQ(parameters_[DNN].size(), parameters_[REF].size()); @@ -109,20 +113,22 @@ void MKLDNNTester::randomBotDatas() { void MKLDNNTester::randomTopDiffs() { refLayer_->getOutputGrad()->randomizeUniform(); - dnnLayer_->getOutputGrad()->copyFrom(*(refLayer_->getOutputGrad())); - VLOG(lvl_) << "Random dom Backward Input, TopDiff: "; + dnnLayer_->getOutput(CPU_DEVICE) + .grad->copyFrom(*(refLayer_->getOutputGrad())); + VLOG(lvl_) << "Random Backward Input, TopDiff: "; printMatrix(refLayer_->getOutputGrad()); } void MKLDNNTester::checkForward() { - printTopDatas(); - double delta = compareMatrix(testLayers_[DNN]->getOutputValue(), - testLayers_[REF]->getOutputValue()); VLOG(MKLDNN_ALL) << "Check Forward"; + printTopDatas(); + double delta = compareMatrix(dnnLayer_->getOutput(-1).value, + refLayer_->getOutputValue()); EXPECT_LE(fabs(delta), eps_); } void MKLDNNTester::checkBackwardData() { + VLOG(MKLDNN_ALL) << "Check Backward Data"; // TODO(TJ): uncomment me when batch norm ready // const bool isBN = dnnLayer_->getType() == "mkldnn_batch_norm"; for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) { @@ -144,14 +150,12 @@ void MKLDNNTester::checkBackwardData() { } void MKLDNNTester::checkBackwardWgts() { + VLOG(MKLDNN_ALL) << "Check Backward Weight"; CHECK_EQ(parameters_[DNN].size(), parameters_[REF].size()); vector dnnWgts; // used to temply save mkldnn weights saveWgt(parameters_[DNN], dnnWgts); - const MKLDNNLayerPtr dnnlayer = - std::dynamic_pointer_cast(dnnLayer_); - CHECK(dnnlayer); - dnnlayer->convertWeightsToPaddle(); + dnnLayer_->convertWeightsToPaddle(); for (size_t i = 0; i < parameters_[DNN].size(); ++i) { const VectorPtr& dnn = parameters_[DNN][i]->getBuf(PARAMETER_VALUE); const VectorPtr& ref = parameters_[REF][i]->getBuf(PARAMETER_VALUE); @@ -189,38 +193,38 @@ void MKLDNNTester::restoreWgt(const vector& from, } // clear parameters grad -void MKLDNNTester::clearWgtDiffs() { +void MKLDNNTester::clearWgtDiffs(size_t id) { + CHECK_LE(id, parameters_.size()); for (size_t n = 0; n < parameters_.size(); ++n) { - for (size_t i = 0; i < parameters_[n].size(); ++i) { - const VectorPtr& grad = 
parameters_[n][i]->getBuf(PARAMETER_GRADIENT);
-      if (grad) {
-        grad->zeroMem();
+    if (id == n || id == parameters_.size()) {
+      for (size_t i = 0; i < parameters_[n].size(); ++i) {
+        const VectorPtr& grad = parameters_[n][i]->getBuf(PARAMETER_GRADIENT);
+        if (grad) {
+          grad->zeroMem();
+        }
       }
     }
   }
 }

-void MKLDNNTester::clearBotDiffs() {
-  // dnn and ref
+void MKLDNNTester::clearBotDiffs(size_t id) {
+  CHECK_LE(id, dataLayers_.size());
   for (size_t n = 0; n < dataLayers_.size(); ++n) {
-    // all inputs layers
-    for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
-      dataLayers_[n][i]->getOutputGrad()->zeroMem();
+    if (id == n || id == dataLayers_.size()) {
+      // clear inputs layers of this specific layer
+      for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
+        dataLayers_[n][i]->getOutputGrad()->zeroMem();
+      }
     }
   }
 }

-void MKLDNNTester::clearBotDiffs(int n) {
-  CHECK_LT(n, NUM);
-  // all inputs layers
-  for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
-    dataLayers_[n][i]->getOutputGrad()->zeroMem();
-  }
-}
-
-void MKLDNNTester::clearTopDatas() {
+void MKLDNNTester::clearTopDatas(size_t id) {
+  CHECK_LE(id, testLayers_.size());
   for (size_t i = 0; i < testLayers_.size(); ++i) {
-    testLayers_[i]->getOutputValue()->zeroMem();
+    if (id == i || id == testLayers_.size()) {
+      testLayers_[i]->getOutputValue()->zeroMem();
+    }
   }
 }

@@ -300,16 +304,24 @@ void MKLDNNTester::runOnce() {
   checkForward();

   // test backward
+  // simple updater
+  UpdateCallback updateCallback = [](Parameter* para) {
+    auto& grad = para->getBuf(PARAMETER_GRADIENT);
+    auto& value = para->getBuf(PARAMETER_VALUE);
+    real lr = 1e-3;
+    value->add(*grad, lr);
+  };
   randomTopDiffs();
-  dnnLayer_->backward(nullptr);
-  refLayer_->backward(nullptr);
+  dnnLayer_->backward(updateCallback);
+  refLayer_->backward(updateCallback);
   checkBackwardData();
   checkBackwardWgts();

   // clear buffers
   // ref code will add to the diff, dnn code will write to it
-  // and clearTopDatas() and clearWgtDiffs() should be coverd by test layers
+  // and clearTopDatas(REF) should be covered by the ref layers
   clearBotDiffs(REF);
+  clearWgtDiffs(REF);
 }

 void MKLDNNTester::run(const TestConfig& dnn,
diff --git a/paddle/gserver/tests/MKLDNNTester.h b/paddle/gserver/tests/MKLDNNTester.h
index e55e4493ffdfe45b8cfdee423febd1878b8b3d8a..5ac885638cde7693a0c847733e7a6149c1b7e6c2 100644
--- a/paddle/gserver/tests/MKLDNNTester.h
+++ b/paddle/gserver/tests/MKLDNNTester.h
@@ -18,6 +18,7 @@ limitations under the License.
 */
 #include <vector>
 #include "LayerGradUtil.h"
 #include "paddle/gserver/layers/MKLDNNBase.h"
+#include "paddle/gserver/layers/MKLDNNLayer.h"

 namespace paddle {

@@ -40,7 +41,8 @@ protected:
   vector<LayerMap> layerMaps_;
   vector<vector<ParameterPtr>> parameters_;
   vector<LayerPtr> testLayers_;
-  LayerPtr dnnLayer_, refLayer_;
+  LayerPtr refLayer_;
+  MKLDNNLayerPtr dnnLayer_;

   /// run some iterations; all the results should pass
   size_t iter_;
@@ -88,10 +90,10 @@ private:
   void checkBackwardData();
   void checkBackwardWgts();

-  void clearWgtDiffs();
-  void clearBotDiffs();
-  void clearBotDiffs(int n);  // clear specific layer
-  void clearTopDatas();
+  // clear specific layer, clear all when id equals NUM
+  void clearWgtDiffs(size_t id = NUM);
+  void clearBotDiffs(size_t id = NUM);
+  void clearTopDatas(size_t id = NUM);

   void printTopDatas();
   void printMatrix(const MatrixPtr& m);
diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp
index a831ffbc73fbd6ad42fa31b2d6d583718474e59b..090bde7b203652e3ffb1662b8f5b8937885d2608 100644
--- a/paddle/gserver/tests/test_LayerGrad.cpp
+++ b/paddle/gserver/tests/test_LayerGrad.cpp
@@ -1703,6 +1703,55 @@ TEST(Layer, BatchNormalizationLayer) {
 #endif
 }

+void testBatchNorm3DLayer(const string& type, bool trans, bool useGpu) {
+  TestConfig config;
+  const int CHANNELS = 10;
+  const int IMG_SIZE = 16;
+  const int IMG_SIZE_Y = 8;
+  const int IMG_SIZE_Z = 8;
+  size_t size = CHANNELS * IMG_SIZE * IMG_SIZE_Y * IMG_SIZE_Z;
+  config.layerConfig.set_type(type);
+  config.layerConfig.set_size(size);
+  config.layerConfig.set_active_type("sigmoid");
+  config.biasSize = CHANNELS;
+  config.inputDefs.push_back({INPUT_DATA,
+                              "layer_0",
+                              /* dim= */ size,
+                              /* paraSize= */ CHANNELS});
+
+  config.inputDefs.push_back({INPUT_DATA, "layer_1_running_mean", 1, CHANNELS});
+  config.inputDefs.back().isStatic = true;
+  config.inputDefs.push_back({INPUT_DATA, "layer_2_running_var", 1, CHANNELS});
+  config.inputDefs.back().isStatic = true;
+
+  LayerInputConfig* input = config.layerConfig.add_inputs();
+  config.layerConfig.add_inputs();
+  config.layerConfig.add_inputs();
+
+  ImageConfig* img_conf = input->mutable_image_conf();
+  img_conf->set_channels(CHANNELS);
+  img_conf->set_img_size(IMG_SIZE);
+  img_conf->set_img_size_y(IMG_SIZE_Y);
+  img_conf->set_img_size_z(IMG_SIZE_Z);
+
+  testLayerGrad(config,
+                "batch_norm",
+                64,
+                /* trans= */ trans,
+                useGpu,
+                /* useWeight */ true);
+}
+
+TEST(Layer, testBatchNorm3DLayer) {
+  testBatchNorm3DLayer("batch_norm", false, false);
+#ifndef PADDLE_ONLY_CPU
+  testBatchNorm3DLayer("batch_norm", false, true);
+  if (hl_get_cudnn_lib_version() >= int(4000)) {
+    testBatchNorm3DLayer("cudnn_batch_norm", false, true);
+  }
+#endif
+}
+
 void testConvOperator(bool isDeconv) {
   TestConfig config;
   const int NUM_FILTERS = 16;
@@ -2008,6 +2057,31 @@ TEST(Layer, CropLayer) {
   }
 }

+TEST(Layer, SwitchOrderLayer) {
+  TestConfig config;
+  // config input_0
+  config.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 0});
+  LayerInputConfig* input = config.layerConfig.add_inputs();
+  ImageConfig* img = input->mutable_image_conf();
+  img->set_channels(4);
+  img->set_img_size(16);
+  img->set_img_size_y(16);
+
+  ReshapeConfig* reshape = config.layerConfig.mutable_reshape_conf();
+  reshape->add_height_axis(0);
+  reshape->add_height_axis(1);
+  reshape->add_height_axis(2);
+  reshape->add_width_axis(3);
+
+  // config the switch_order layer
+  config.layerConfig.set_type("switch_order");
+  config.layerConfig.set_name("switchOrderLayer");
+
+  for (auto useGpu : {false, true}) {
+    testLayerGrad(config,
"switch_order", 100, false, useGpu, true); + } +} + vector randSampling(real range, int n) { CHECK_GE(range, n); vector num(range); @@ -2228,26 +2302,27 @@ void test3DDeConvLayer(const string& type, bool trans, bool useGpu) { conv->set_stride(2); conv->set_stride_y(2); conv->set_stride_z(2); - conv->set_img_size(IMAGE_SIZE); - conv->set_img_size_y(IMAGE_SIZE_Y); - conv->set_img_size_z(IMAGE_SIZE_Z); - conv->set_output_x(imageSize(conv->img_size(), + conv->set_output_x(IMAGE_SIZE); + conv->set_output_y(IMAGE_SIZE_Y); + conv->set_output_z(IMAGE_SIZE_Z); + + conv->set_img_size(imageSize(conv->output_x(), conv->filter_size(), conv->padding(), conv->stride(), true)); - conv->set_output_y(imageSize(conv->img_size_y(), - conv->filter_size_y(), - conv->padding_y(), - conv->stride_y(), - true)); - conv->set_output_z(imageSize(conv->img_size_z(), - conv->filter_size_z(), - conv->padding_z(), - conv->stride_z(), - true)); - config.layerConfig.set_size(conv->output_x() * conv->output_y() * - conv->output_z() * NUM_FILTERS); + conv->set_img_size_y(imageSize(conv->output_y(), + conv->filter_size_y(), + conv->padding_y(), + conv->stride_y(), + true)); + conv->set_img_size_z(imageSize(conv->output_z(), + conv->filter_size_z(), + conv->padding_z(), + conv->stride_z(), + true)); + config.layerConfig.set_size(conv->img_size() * conv->img_size_y() * + conv->img_size_z() * NUM_FILTERS); conv->set_groups(1); conv->set_filter_channels(conv->channels() / conv->groups()); config.inputDefs.push_back( diff --git a/paddle/math/MKLDNNMatrix.cpp b/paddle/math/MKLDNNMatrix.cpp index 0a355e2644cce572ce90ecf5c9d2a5b7b395bc61..c4063e5069854242d9f93886b66580385557ca73 100644 --- a/paddle/math/MKLDNNMatrix.cpp +++ b/paddle/math/MKLDNNMatrix.cpp @@ -33,14 +33,12 @@ MKLDNNMatrixPtr MKLDNNMatrix::create(MatrixPtr m, memory::primitive_desc pd) { size_t width = cnts / dims[0]; m = Matrix::create(height, width, false, false); } - CHECK(m) << " Matrix should not be empty"; + CpuMatrixPtr cpuMatrix = std::dynamic_pointer_cast(m); CHECK(cpuMatrix) << "Only support create from CPU matrix yet"; - - CHECK_EQ(cnts, m->getElementCnt()) << "Count size does not match"; - return std::make_shared( - m->getData(), m->getHeight(), m->getWidth(), pd); + CHECK_EQ(cpuMatrix->getElementCnt(), cnts) << "Count size does not match"; + return std::make_shared(cpuMatrix, pd); } MKLDNNMatrixPtr MKLDNNMatrix::create(MatrixPtr m, @@ -138,7 +136,7 @@ void MKLDNNMatrix::downSpatial() { mkldnn_primitive_create(&result, pd.get(), nullptr, nullptr), "could not create a memory primitive"); reset(result); - set_data_handle(getData()); + set_data_handle(data_); } } // namespace paddle diff --git a/paddle/math/MKLDNNMatrix.h b/paddle/math/MKLDNNMatrix.h index e50f698b495713e6f15ab7a12a7ee7487662040f..eef3b429e6fa0087aeac3f5aed9dff983b06e826 100644 --- a/paddle/math/MKLDNNMatrix.h +++ b/paddle/math/MKLDNNMatrix.h @@ -30,11 +30,10 @@ typedef std::shared_ptr MKLDNNMatrixPtr; */ class MKLDNNMatrix : public CpuMatrix, public mkldnn::memory { public: - MKLDNNMatrix(real* data, - size_t height, - size_t width, - mkldnn::memory::primitive_desc pd) - : CpuMatrix(data, height, width, false), mkldnn::memory(pd, data) {} + MKLDNNMatrix(CpuMatrixPtr m, mkldnn::memory::primitive_desc pd) + : CpuMatrix(m->getData(), m->getHeight(), m->getWidth(), false), + mkldnn::memory(pd, m->getData()), + m_(m) {} ~MKLDNNMatrix() {} @@ -81,11 +80,29 @@ public: void downSpatial(); /** - * Update the memory data handle. + * set the memory data handle. 
* Caution: This will not check the buffer size of the data, * it should be covered by the user. */ - void updateData(void* data) { set_data_handle(data); } + void setData(real* data) { + set_data_handle(data); + CpuMatrix::setData(data); + m_.reset(); + } + + /** + * override Matrix::getData + * check data before return + */ + real* getData() override { + CHECK_EQ((void*)data_, get_data_handle()); + return data_; + } + + const real* getData() const override { + CHECK_EQ((void*)data_, get_data_handle()); + return data_; + } /** * Get primitive descriptor. @@ -143,6 +160,10 @@ protected: memory::format srcFmt, memory::format dstFmt, memory::dims dm); + +private: + // save the CpuMatrixPtr in case the buffer is released outside + CpuMatrixPtr m_; }; } // namespace paddle diff --git a/paddle/math/MathFunctions.cpp b/paddle/math/MathFunctions.cpp index c8ba1074a1555bbddde7e5f0fb2a046138b27c09..c2f17beeb87942ea681f5d388659c0d280157b26 100644 --- a/paddle/math/MathFunctions.cpp +++ b/paddle/math/MathFunctions.cpp @@ -84,6 +84,7 @@ LAPACK_ROUTINE_EACH(DYNAMIC_LOAD_LAPACK_WRAP) namespace paddle { +#ifndef PADDLE_USE_EIGEN_FOR_BLAS template <> void gemm(const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, @@ -143,6 +144,7 @@ void gemm(const CBLAS_TRANSPOSE transA, C, ldc); } +#endif template <> int getrf(const CBLAS_ORDER order, @@ -182,6 +184,7 @@ int getri(const CBLAS_ORDER order, return dynload::PADDLE_DGETRI(order, N, A, lda, ipiv); } +#ifndef PADDLE_USE_EIGEN_FOR_BLAS template <> void axpy(const int n, const float alpha, const float* x, float* y) { cblas_saxpy(n, alpha, x, 1, y, 1); @@ -201,6 +204,7 @@ template <> double dotProduct(const int n, const double* x, const double* y) { return cblas_ddot(n, x, 1, y, 1); } +#endif #if defined(PADDLE_USE_MKL) || defined(PADDLE_USE_MKLML) diff --git a/paddle/math/MathFunctions.h b/paddle/math/MathFunctions.h index 410851120b16dcb5828dc1bddb496d76a6132a02..8193aa4adffc0409d8ea68417c68fa153a2942d8 100644 --- a/paddle/math/MathFunctions.h +++ b/paddle/math/MathFunctions.h @@ -40,7 +40,14 @@ extern "C" { #ifndef LAPACK_FOUND extern "C" { +#ifndef PADDLE_USE_EIGEN_FOR_BLAS #include +#else +typedef enum CBLAS_ORDER { + CblasRowMajor = 101, + CblasColMajor = 102 +} CBLAS_ORDER; +#endif int LAPACKE_sgetrf( int matrix_layout, int m, int n, float* a, int lda, int* ipiv); int LAPACKE_dgetrf( @@ -56,6 +63,7 @@ int LAPACKE_dgetri( namespace paddle { +#ifndef PADDLE_USE_EIGEN_FOR_BLAS template void gemm(const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, @@ -70,6 +78,7 @@ void gemm(const CBLAS_TRANSPOSE transA, const T beta, T* C, const int ldc); +#endif template int getrf(const CBLAS_ORDER Order, @@ -84,10 +93,21 @@ int getri( const CBLAS_ORDER Order, const int N, T* A, const int lda, const int* ipiv); template -void axpy(const int n, const T alpha, const T* x, T* y); +void axpy(const int n, const T alpha, const T* x, T* y) { + /// y = y + alpha * x + for (int i = 0; i < n; i++) { + y[i] = y[i] + alpha * x[i]; + } +} template -T dotProduct(const int n, const T* x, const T* y); +T dotProduct(const int n, const T* x, const T* y) { + T result = static_cast(0); + for (int i = 0; i < n; i++) { + result += x[i] * y[i]; + } + return result; +} template void vExp(const int n, const T* a, T* r); diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index 8bc42571f7c141aa31e18d0504b95b2ed4f0da77..4a2132c8d1bfa329ced575f9b78052bdbfe3e4d5 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -28,6 +28,7 @@ limitations under the License.
*/ #include "hl_top_k.h" #include "paddle/utils/Logging.h" +#include "paddle/function/GemmFunctor.h" #include "paddle/utils/ThreadLocal.h" #include "SIMDFunctions.h" @@ -2773,24 +2774,24 @@ void CpuMatrix::mul(CpuMatrix* a, CpuMatrix* b, real scaleAB, real scaleT) { CHECK(!isTransposed()) << "Not supported"; size_t a_col, b_col, a_row, b_row; - CBLAS_TRANSPOSE a_trans, b_trans; + bool a_trans, b_trans; if (!a->isTransposed()) { a_col = a->getWidth(); a_row = a->getHeight(); - a_trans = CblasNoTrans; + a_trans = false; } else { a_col = a->getHeight(); a_row = a->getWidth(); - a_trans = CblasTrans; + a_trans = true; } if (!b->isTransposed()) { b_col = b->getWidth(); b_row = b->getHeight(); - b_trans = CblasNoTrans; + b_trans = false; } else { b_col = b->getHeight(); b_row = b->getWidth(); - b_trans = CblasTrans; + b_trans = true; } CHECK_EQ(a_col, b_row); @@ -2807,7 +2808,7 @@ void CpuMatrix::mul(CpuMatrix* a, CpuMatrix* b, real scaleAB, real scaleT) { int lda = a->getStride(); int ldb = b->getStride(); int ldc = getStride(); - gemm( + BlasGemm::compute( a_trans, b_trans, M, N, K, scaleAB, A, lda, B, ldb, scaleT, C, ldc); } diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h index 431d4e071072317c8fdfdc4f0d13e7cd4e3d062b..44180bca8bca53e74d71ce7bed3516399c01c81d 100644 --- a/paddle/math/Matrix.h +++ b/paddle/math/Matrix.h @@ -1616,6 +1616,10 @@ public: }; class CpuMatrix : public Matrix { +private: + MatrixPtr sftmaxSum_; + MatrixPtr sftmaxDot_; + public: CpuMatrix(size_t height, size_t width, bool trans = false); CpuMatrix(real* data, size_t height, size_t width, bool trans = false) diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index e5efcccb0e219a1c9df888cfec7f8902806676d4..f9ea25ab045a02be5ab9ed81ef9c679126d3a188 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -14,27 +14,31 @@ function(op_library TARGET) cmake_parse_arguments(op_library "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - foreach(src ${op_library_SRCS}) - if (${src} MATCHES ".*\\.cu$") - list(APPEND cu_srcs ${src}) - elseif(${src} MATCHES ".*\\.cc$") - list(APPEND cc_srcs ${src}) - else() - message(FATAL_ERROR "${TARGET} Source file ${src} should only be .cc or .cu") + list(LENGTH op_library_SRCS op_library_SRCS_len) + if (${op_library_SRCS_len} EQUAL 0) + if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.cc) + list(APPEND cc_srcs ${TARGET}.cc) endif() - endforeach() + if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.cu) + list(APPEND cu_srcs ${TARGET}.cu) + endif() + else() + foreach(src ${op_library_SRCS}) + if (${src} MATCHES ".*\\.cu$") + list(APPEND cu_srcs ${src}) + elseif(${src} MATCHES ".*\\.cc$") + list(APPEND cc_srcs ${src}) + else() + message(FATAL_ERROR "${TARGET} Source file ${src} should only be .cc or .cu") + endif() + endforeach() + endif() list(LENGTH cc_srcs cc_srcs_len) if (${cc_srcs_len} EQUAL 0) message(FATAL_ERROR "The op library ${TARGET} should contains at least one .cc file") endif() - list(LENGTH cu_srcs cu_srcs_len) - list(LENGTH op_library_DEPS dep_len) - if (${cu_srcs_len} EQUAL 0 AND ${dep_len} EQUAL 0) - message(WARNING "The op library ${TARGET} not support GPU!") - endif() - if (WITH_GPU) nv_library(${TARGET} SRCS ${cc_srcs} ${cu_srcs} DEPS ${op_library_DEPS} ${op_common_deps}) @@ -46,22 +50,22 @@ endfunction() add_subdirectory(math) -list(REMOVE_ITEM GENERAL_OPS - net_op - minus_op - mul_op - recurrent_op - scale_op) - -op_library(net_op SRCS net_op.cc) -op_library(minus_op SRCS minus_op.cc minus_op.cu DEPS 
scale_op) -op_library(mul_op SRCS mul_op.cc mul_op.cu DEPS math_function) -op_library(recurrent_op SRCS recurrent_op.cc rnn/recurrent_op_utils.cc +set(DEPS_OPS + identity_op + minus_op + mul_op + recurrent_op + scale_op) +op_library(identity_op DEPS scale_op) +op_library(minus_op DEPS scale_op) +op_library(mul_op DEPS math_function) +op_library(recurrent_op SRCS recurrent_op.cc rnn/recurrent_op_utils.cc DEPS framework_proto tensor operator net_op) -op_library(scale_op SRCS scale_op.cc scale_op.cu DEPS net_op) +op_library(scale_op DEPS net_op) +list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS}) foreach(src ${GENERAL_OPS}) - op_library(${src} SRCS ${src}.cc ${src}.cu) + op_library(${src}) endforeach() set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library") diff --git a/paddle/operators/add_op.cc b/paddle/operators/add_op.cc index 6384d8c8ce13dae8b58ed1069d496dd8e93eaa8a..8dbd47cf0dfbc265032a9966343eed5c7bd8692e 100644 --- a/paddle/operators/add_op.cc +++ b/paddle/operators/add_op.cc @@ -57,7 +57,6 @@ class AddOpGrad : public framework::OperatorWithKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(add_two, ops::AddOp, ops::AddOpMaker, ops::AddOpGrad); +REGISTER_OP(add, ops::AddOp, ops::AddOpMaker, add_grad, ops::AddOpGrad); -REGISTER_OP_CPU_KERNEL(add_two, - ops::AddKernel); +REGISTER_OP_CPU_KERNEL(add, ops::AddKernel); diff --git a/paddle/operators/add_op.cu b/paddle/operators/add_op.cu index cec5f558cbc161124620ad4241d6bd8a5324277c..d9c6d20a6c320b59e57ed25da3dd8b093833f8c7 100644 --- a/paddle/operators/add_op.cu +++ b/paddle/operators/add_op.cu @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -#define EIGEN_USE_GPU -#include "paddle/framework/op_registry.h" #include "paddle/operators/add_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(add_two, - ops::AddKernel); +REGISTER_OP_GPU_KERNEL(add, ops::AddKernel); diff --git a/paddle/operators/concat_op.cc b/paddle/operators/concat_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..0ebefbab26ec8fdf316f852fbb7f6d9f3bbc48eb --- /dev/null +++ b/paddle/operators/concat_op.cc @@ -0,0 +1,79 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/operators/concat_op.h" +#include + +namespace paddle { +namespace operators { +using framework::Tensor; + +class ConcatOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + auto ins = ctx.MultiInput("X"); + auto *out = ctx.Output("Out"); + size_t axis = static_cast(ctx.Attr("axis")); + size_t n = ins.size(); + + PADDLE_ENFORCE_GT(n, 1, "Input tensors count should > 1."); + + auto out_dims = ins[0]->dims(); + size_t in_zero_dims_size = out_dims.size(); + for (size_t i = 1; i < n; i++) { + for (size_t j = 0; j < in_zero_dims_size; j++) { + if (j == axis) { + out_dims[axis] += ins[i]->dims()[j]; + continue; + } + PADDLE_ENFORCE_EQ(out_dims[j], ins[i]->dims()[j], + "Input tensors should have the same " + "elements except the specify axis.") + } + } + out->Resize(out_dims); + } +}; + +class ConcatOpMaker : public framework::OpProtoAndCheckerMaker { + public: + ConcatOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "the input tensors of concat operator.").AsDuplicable(); + AddOutput("Out", "the output tensor of concat operator."); + AddComment(R"DOC( + Join the input tensors along with the axis. + Examples: + Input[0] = [[1,2],[3,4]] + Input[1] = [[5,6]] + axis = 0 + Output = [[1,2], + [3,4], + [5,6]] + )DOC"); + AddAttr("axis", "The axis which the inputs will be joined with.") + .SetDefault(0); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_WITHOUT_GRADIENT(concat, ops::ConcatOp, ops::ConcatOpMaker) +REGISTER_OP_CPU_KERNEL(concat, + ops::ConcatKernel) diff --git a/paddle/operators/scatter_op.cu b/paddle/operators/concat_op.cu similarity index 75% rename from paddle/operators/scatter_op.cu rename to paddle/operators/concat_op.cu index 6716b478833ff3adb6112cdb1ee25b7f1744ea1f..38fee7473dbb2ba97fe95b6632db7a1749cf3bbe 100644 --- a/paddle/operators/scatter_op.cu +++ b/paddle/operators/concat_op.cu @@ -4,7 +4,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -13,8 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/scatter_op.h" +#include "paddle/operators/concat_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(scatter, - ops::ScatterOpKernel); +// TODO(Yancey1989) Add GPU kernel diff --git a/paddle/operators/concat_op.h b/paddle/operators/concat_op.h new file mode 100644 index 0000000000000000000000000000000000000000..f977054fdf8aa0164db726b94a21c57f770dd674 --- /dev/null +++ b/paddle/operators/concat_op.h @@ -0,0 +1,64 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +template +class ConcatKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto ins = ctx.MultiInput("X"); + auto* out = ctx.Output("Out"); + int64_t axis = static_cast(ctx.Attr("axis")); + size_t n = ins.size(); + size_t output_axis_dim = 0; + size_t before = 1, after = 1; + for (size_t i = 0; i < n; i++) { + output_axis_dim += ins[i]->dims()[axis]; + } + auto& input_zero = ins[0]; + for (int64_t i = 0; i < input_zero->dims().size(); i++) { + if (i == axis) { + continue; + } + if (i < axis) { + before *= input_zero->dims()[i]; + } else { + after *= input_zero->dims()[i]; + } + } + size_t output_offset = 0; + for (size_t i = 0; i < n; i++) { + auto& in = ins[i]; + auto axis_dim = in->dims()[axis]; + for (size_t j = 0; j < before; j++) { + size_t len = axis_dim * after * sizeof(T); + const T* src = in->data() + axis_dim * after * j; + T* out_data = out->mutable_data(platform::CPUPlace()); + T* dest = out_data + output_offset + output_axis_dim * after * j; + memcpy(dest, src, len); + } + output_offset += axis_dim * after; + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/cos_sim_op.cc b/paddle/operators/cos_sim_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..c033af3b741ae26ad9d37b2164f87aa6e8651c6e --- /dev/null +++ b/paddle/operators/cos_sim_op.cc @@ -0,0 +1,107 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
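The before/after arithmetic in ConcatKernel above is easiest to see in a standalone form: every input is viewed as [before, axis_dim * after] and each of its row blocks is copied to a running offset inside the output, whose row stride is output_axis_dim * after. A minimal sketch with raw float arrays standing in for framework::Tensor (an illustrative assumption only):

#include <cstring>
#include <vector>

void ConcatRows(const std::vector<const float*>& ins,
                const std::vector<int>& axis_dims, int before, int after,
                float* out) {
  int output_axis_dim = 0;
  for (int d : axis_dims) output_axis_dim += d;  // total size along the axis
  int output_offset = 0;
  for (size_t i = 0; i < ins.size(); ++i) {
    const int len = axis_dims[i] * after;        // one row block of input i
    for (int j = 0; j < before; ++j) {
      // destination row j starts at output_axis_dim * after * j, plus the
      // offset accumulated from all previously copied inputs
      std::memcpy(out + output_offset + output_axis_dim * after * j,
                  ins[i] + len * j, len * sizeof(float));
    }
    output_offset += len;
  }
}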
*/ + +#include "paddle/operators/cos_sim_op.h" + +namespace paddle { +namespace operators { + +using framework::Tensor; + +class CosSimOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) must not be null."); + PADDLE_ENFORCE_EQ(ctx.Input("X")->dims(), + ctx.Input("Y")->dims(), + "Dimensions of Input(X) and Input(Y) must be the same."); + + auto dims = ctx.Input("X")->dims(); + ctx.Output("Out")->Resize({dims[0], 1}); + ctx.Output("XNorm")->Resize({dims[0], 1}); + ctx.Output("YNorm")->Resize({dims[0], 1}); + } +}; + +class CosSimOpMaker : public framework::OpProtoAndCheckerMaker { + public: + CosSimOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "The first input of cos_sim op."); + AddInput("Y", "The second input of cos_sim op."); + AddOutput("Out", "The output of cos_sim op."); + AddOutput("XNorm", "Row norm of the first input.").AsIntermediate(); + AddOutput("YNorm", "Row norm of the second input.").AsIntermediate(); + + AddComment(R"DOC( +Cosine Similarity Operator. + +The equation is: Out = X^T * Y / (sqrt(X^T * X) * sqrt(Y^T * Y)) +)DOC"); + } +}; + +class CosSimOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) must not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("XNorm"), + "Input(XNorm) must not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("YNorm"), + "Input(YNorm) must not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), + "Input(Out@GRAD) must not be null."); + + auto x_dims = ctx.Input("X")->dims(); + auto y_dims = ctx.Input("Y")->dims(); + auto xnorm_dims = ctx.Input("XNorm")->dims(); + auto ynorm_dims = ctx.Input("YNorm")->dims(); + auto out_dims = ctx.Input(framework::GradVarName("Out"))->dims(); + PADDLE_ENFORCE_EQ(x_dims, y_dims, + "Dimensions of Input(X) and Input(Y) must be the same."); + PADDLE_ENFORCE_EQ(xnorm_dims[0], x_dims[0], + "1st dimension of XNorm must equal that of Input(X)."); + PADDLE_ENFORCE_EQ(xnorm_dims[1], 1, "2st dimension of XNorm must be one."); + PADDLE_ENFORCE_EQ(ynorm_dims[0], y_dims[0], + "1st dimension of YNorm must equal that of Input(Y)."); + PADDLE_ENFORCE_EQ(ynorm_dims[1], 1, "2st dimension of YNorm must be one."); + PADDLE_ENFORCE_EQ(out_dims[0], x_dims[0], + "1st dimension of Out@GRAD must equal that of Input(X)"); + PADDLE_ENFORCE_EQ(out_dims[1], 1, "1st dimension of Out@GRAD must be one."); + + auto *x_grad = ctx.Output(framework::GradVarName("X")); + auto *y_grad = ctx.Output(framework::GradVarName("Y")); + if (x_grad) x_grad->Resize(x_dims); + if (y_grad) y_grad->Resize(y_dims); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(cos_sim, ops::CosSimOp, ops::CosSimOpMaker, cos_sim_grad, + ops::CosSimOpGrad); +REGISTER_OP_CPU_KERNEL(cos_sim, + ops::CosSimKernel); +REGISTER_OP_CPU_KERNEL( + cos_sim_grad, ops::CosSimGradKernel); diff --git a/paddle/operators/gather_op.cu 
b/paddle/operators/cos_sim_op.cu similarity index 72% rename from paddle/operators/gather_op.cu rename to paddle/operators/cos_sim_op.cu index 3f04a7b3f8142106917975cd1e0413fa1633a298..0cb8fd26de47a4a464db98664263544e3e503d63 100644 --- a/paddle/operators/gather_op.cu +++ b/paddle/operators/cos_sim_op.cu @@ -13,8 +13,10 @@ limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/gather_op.h" +#include "paddle/operators/cos_sim_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(gather, - ops::GatherOpKernel); +REGISTER_OP_GPU_KERNEL(cos_sim, + ops::CosSimKernel); +REGISTER_OP_GPU_KERNEL( + cos_sim_grad, ops::CosSimGradKernel); diff --git a/paddle/operators/cos_sim_op.h b/paddle/operators/cos_sim_op.h new file mode 100644 index 0000000000000000000000000000000000000000..0dc509952578497671a128374f77ce616a520909 --- /dev/null +++ b/paddle/operators/cos_sim_op.h @@ -0,0 +1,107 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +template +using EigenMatrix = framework::EigenMatrix; +template +using EigenVector = framework::EigenVector; + +template +class CosSimKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* input_x = context.Input("X"); + auto* input_y = context.Input("Y"); + auto* output_z = context.Output("Out"); + auto* output_x_norm = context.Output("XNorm"); + auto* output_y_norm = context.Output("YNorm"); + + output_z->mutable_data(context.GetPlace()); + output_x_norm->mutable_data(context.GetPlace()); + output_y_norm->mutable_data(context.GetPlace()); + + auto dims = input_x->dims(); + int64_t size = input_x->numel(); + auto new_dims = framework::make_ddim({dims[0], size / dims[0]}); + auto x = EigenMatrix::From(*input_x, new_dims); + auto y = EigenMatrix::From(*input_y, new_dims); + auto z = EigenVector::Flatten(*output_z); + auto x_norm = EigenVector::Flatten(*output_x_norm); + auto y_norm = EigenVector::Flatten(*output_y_norm); + + auto place = context.GetEigenDevice(); + auto xy = (x * y).sum(Eigen::array({{1}})); + x_norm.device(place) = x.square().sum(Eigen::array({{1}})).sqrt(); + y_norm.device(place) = y.square().sum(Eigen::array({{1}})).sqrt(); + z.device(place) = xy / x_norm / y_norm; + } +}; + +template +class CosSimGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* input_x = context.Input("X"); + auto* input_y = context.Input("Y"); + auto* input_z = context.Input("Out"); + auto* input_x_norm = context.Input("XNorm"); + auto* input_y_norm = context.Input("YNorm"); + auto* output_grad_x = context.Output(framework::GradVarName("X")); + auto* output_grad_y = context.Output(framework::GradVarName("Y")); + auto* input_grad_z = 
context.Input(framework::GradVarName("Out")); + + auto dims = input_x->dims(); + int64_t size = input_x->numel(); + auto new_dims = framework::make_ddim({dims[0], size / dims[0]}); + auto x = EigenMatrix::From(*input_x, new_dims); + auto y = EigenMatrix::From(*input_y, new_dims); + auto z = EigenMatrix::From(*input_z); + auto x_norm = EigenMatrix::From(*input_x_norm); + auto y_norm = EigenMatrix::From(*input_y_norm); + auto dz = EigenMatrix::From(*input_grad_z); + + Eigen::DSizes bcast(1, new_dims[1]); + auto z_bcast = z.broadcast(bcast); + auto dz_bcast = dz.broadcast(bcast); + auto place = context.GetEigenDevice(); + auto x_snorm_bcast = x_norm.square().eval().broadcast(bcast); + auto y_snorm_bcast = y_norm.square().eval().broadcast(bcast); + auto norm_prod_bcast = (x_norm * y_norm).eval().broadcast(bcast); + if (output_grad_x) { + output_grad_x->mutable_data(context.GetPlace()); + auto dx = EigenMatrix::From(*output_grad_x, new_dims); + dx.device(place) = + dz_bcast * (y / norm_prod_bcast - z_bcast * x / x_snorm_bcast); + } + if (output_grad_y) { + output_grad_y->mutable_data(context.GetPlace()); + auto dy = EigenMatrix::From(*output_grad_y, new_dims); + dy.device(place) = + dz_bcast * (x / norm_prod_bcast - z_bcast * y / y_snorm_bcast); + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/cross_entropy_op.cc index ac76326262c88e2014cf64f7fb73b5a7338ab3e9..ab1e1c101a10e09a81f7785d2f1514822e3bdf15 100644 --- a/paddle/operators/cross_entropy_op.cc +++ b/paddle/operators/cross_entropy_op.cc @@ -67,7 +67,8 @@ OnehotCrossEntropy Operator. namespace ops = paddle::operators; REGISTER_OP(onehot_cross_entropy, ops::OnehotCrossEntropyOp, - ops::OnehotCrossEntropyOpMaker, ops::OnehotCrossEntropyGradientOp); + ops::OnehotCrossEntropyOpMaker, onehot_cross_entropy_grad, + ops::OnehotCrossEntropyGradientOp); REGISTER_OP_CPU_KERNEL(onehot_cross_entropy, ops::OnehotCrossEntropyOpKernel); REGISTER_OP_CPU_KERNEL(onehot_cross_entropy_grad, diff --git a/paddle/operators/gather_op.cc b/paddle/operators/gather_op.cc index 07fa704824174f939e459093b245036771d9cd4f..123bed296c462c30bddd3bfbd530098fdbfe4856 100644 --- a/paddle/operators/gather_op.cc +++ b/paddle/operators/gather_op.cc @@ -63,7 +63,8 @@ Out = X[Index] } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(gather, ops::GatherOp, ops::GatherOpMaker, ops::GatherGradOp); +REGISTER_OP(gather, ops::GatherOp, ops::GatherOpMaker, gather_grad, + ops::GatherGradOp); REGISTER_OP_CPU_KERNEL(gather, ops::GatherOpKernel); REGISTER_OP_CPU_KERNEL( diff --git a/paddle/operators/gaussian_random_op.cc b/paddle/operators/gaussian_random_op.cc index a85363ad81d2a23e7267026c067f74f8c94c4786..3d76516405960c502a46997108049b2db5cab6bf 100644 --- a/paddle/operators/gaussian_random_op.cc +++ b/paddle/operators/gaussian_random_op.cc @@ -19,21 +19,20 @@ template class CPUGaussianRandomKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - float mean = context.op_.GetAttr("mean"); - float std = context.op_.GetAttr("std"); + float mean = context.Attr("mean"); + float std = context.Attr("std"); auto* tensor = context.Output("Out"); T* data = tensor->mutable_data(context.GetPlace()); - unsigned int seed = - static_cast(context.op_.GetAttr("seed")); + unsigned int seed = static_cast(context.Attr("seed")); std::minstd_rand engine; if (seed == 0) { seed = std::random_device()(); } engine.seed(seed); std::normal_distribution 
dist(mean, std); - ssize_t size = framework::product(tensor->dims()); - for (ssize_t i = 0; i < size; ++i) { + int64_t size = tensor->numel(); + for (int64_t i = 0; i < size; ++i) { data[i] = dist(engine); } } @@ -46,10 +45,15 @@ class GaussianRandomOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext& context) const override { auto* tensor = context.Output("Out"); - auto dims = GetAttr>("dims"); + auto dims = Attr>("dims"); + std::vector temp; + temp.reserve(dims.size()); + for (auto dim : dims) { + temp.push_back(static_cast(dim)); + } PADDLE_ENFORCE(dims.size() > 0UL, "dims can be one int or array. dims must be set."); - tensor->Resize(framework::make_ddim(dims)); + tensor->Resize(framework::make_ddim(temp)); } }; diff --git a/paddle/operators/gaussian_random_op.cu b/paddle/operators/gaussian_random_op.cu index 018a4bfcb26b9008c054000c91edf01e371fd82b..2d63b3049988cfc3135a87a57dad56b970df3eab 100644 --- a/paddle/operators/gaussian_random_op.cu +++ b/paddle/operators/gaussian_random_op.cu @@ -42,17 +42,16 @@ class GPUGaussianRandomKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& context) const override { auto* tensor = context.Output("Out"); T* data = tensor->mutable_data(context.GetPlace()); - unsigned int seed = - static_cast(context.op_.GetAttr("seed")); + unsigned int seed = static_cast(context.Attr("seed")); if (seed == 0) { std::random_device rd; seed = rd(); } - T mean = static_cast(context.op_.GetAttr("mean")); - T std = static_cast(context.op_.GetAttr("std")); + T mean = static_cast(context.Attr("mean")); + T std = static_cast(context.Attr("std")); thrust::counting_iterator index_sequence_begin(0); - ssize_t N = framework::product(tensor->dims()); - thrust::transform(index_sequence_begin, index_sequence_begin + N, + int64_t size = tensor->numel(); + thrust::transform(index_sequence_begin, index_sequence_begin + size, thrust::device_ptr(data), GaussianGenerator(mean, std, seed)); } diff --git a/paddle/operators/identity_op.cc b/paddle/operators/identity_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..7d9d4fa519d1c690feacbadc5175aeab49082282 --- /dev/null +++ b/paddle/operators/identity_op.cc @@ -0,0 +1,57 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/operators/net_op.h" +#include "paddle/operators/scale_op.h" + +namespace paddle { +namespace operators { + +// The identity operator is an alias of the scale operator. This is also an +// example for creating an alias for an existing operator. 
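Since the comment above frames identity purely as scale with a fixed attribute, its numeric contract can be sketched without any framework machinery (the helper names here are hypothetical and exist only for this illustration):

#include <vector>

// out = s * x, element-wise; this is what the scale op computes.
std::vector<float> Scale(const std::vector<float>& x, float s) {
  std::vector<float> out(x.size());
  for (size_t i = 0; i < x.size(); ++i) out[i] = s * x[i];
  return out;
}

// identity(x) is exactly scale(x, 1.0), which is all the appended
// scale op inside IdentityOp below ever does.
std::vector<float> Identity(const std::vector<float>& x) {
  return Scale(x, 1.0f);
}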
+template +class IdentityOpMaker : public framework::OpProtoAndCheckerMaker { + public: + IdentityOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "The input tensor of identity operator."); + AddOutput("Out", "The output tensor of identity operator."); + AddComment(R"DOC( +The identity operator is an alias of the scale operator +with the attribute scale fixed to 1.0. +)DOC"); + } +}; + +template +class IdentityOp : public NetOp { + public: + IdentityOp(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : NetOp(type, inputs, outputs, attrs) { + AppendOp(framework::OpRegistry::CreateOp( + "scale", {{"X", {Input("X")}}}, {{"Out", {Output("Out")}}}, + {{"scale", static_cast(1)}})); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OP_WITHOUT_GRADIENT(identity, ops::IdentityOp, + ops::IdentityOpMaker); diff --git a/paddle/operators/lookup_table_op.cc b/paddle/operators/lookup_table_op.cc index c3108ba8ec7ad85bd3485c135bf03e514bc66cd1..94d40890a765413e88a35a6ad995ca97ac84dcda 100644 --- a/paddle/operators/lookup_table_op.cc +++ b/paddle/operators/lookup_table_op.cc @@ -66,7 +66,7 @@ class LookupTableOpGrad : public framework::OperatorWithKernel { namespace ops = paddle::operators; REGISTER_OP(lookup_table, ops::LookupTableOp, ops::LookupTableOpMaker, - ops::LookupTableOpGrad); + lookup_table_grad, ops::LookupTableOpGrad); REGISTER_OP_CPU_KERNEL(lookup_table, ops::LookupTableKernel); REGISTER_OP_CPU_KERNEL(lookup_table_grad, ops::LookupTableGradKernel); diff --git a/paddle/operators/lookup_table_op.cu b/paddle/operators/lookup_table_op.cu index 27eee3436af8107cef2aa3577ea238be49edf1af..708344046760691aa2da562eb1ee3d8b130c5f18 100644 --- a/paddle/operators/lookup_table_op.cu +++ b/paddle/operators/lookup_table_op.cu @@ -70,7 +70,7 @@ class LookupTableCUDAKernel : public framework::OpKernel { size_t N = table_t->dims()[0]; size_t D = table_t->dims()[1]; - size_t K = product(ids_t->dims()); + size_t K = ids_t->numel(); auto ids = ids_t->data(); auto table = table_t->data(); auto output = output_t->mutable_data(context.GetPlace()); @@ -91,7 +91,7 @@ class LookupTableGradCUDAKernel : public framework::OpKernel { int N = d_table_t->dims()[0]; int D = d_table_t->dims()[1]; - int K = product(ids_t->dims()); + int K = ids_t->numel(); const int32_t* ids = ids_t->data(); const T* d_output = d_output_t->data(); T* d_table = d_table_t->mutable_data(context.GetPlace()); diff --git a/paddle/operators/lookup_table_op.h b/paddle/operators/lookup_table_op.h index 4da8079b91624c3510cae89fd599a7035a4c7477..a1298906dd4b4209644fe06584f70169519de01c 100644 --- a/paddle/operators/lookup_table_op.h +++ b/paddle/operators/lookup_table_op.h @@ -30,12 +30,12 @@ class LookupTableKernel : public framework::OpKernel { auto ids_t = context.Input("Ids"); // int tensor auto output_t = context.Output("Out"); // float tensor - size_t N = table_t->dims()[0]; - size_t D = table_t->dims()[1]; + int N = table_t->dims()[0]; + int D = table_t->dims()[1]; auto ids = ids_t->data(); auto table = table_t->data(); auto output = output_t->mutable_data(context.GetPlace()); - for (size_t i = 0; i < product(ids_t->dims()); ++i) { + for (int64_t i = 0; i < ids_t->numel(); ++i) { PADDLE_ENFORCE_LT(ids[i], N); PADDLE_ENFORCE_GE(ids[i], 0); memcpy(output + i * D, table + ids[i] * D, D * sizeof(T)); @@ 
-51,8 +51,8 @@ class LookupTableGradKernel : public framework::OpKernel { auto d_output_t = context.Input(framework::GradVarName("Out")); auto d_table_t = context.Output(framework::GradVarName("W")); - size_t N = d_table_t->dims()[0]; - size_t D = d_table_t->dims()[1]; + int N = d_table_t->dims()[0]; + int D = d_table_t->dims()[1]; auto ids = ids_t->data(); const T* d_output = d_output_t->data(); T* d_table = d_table_t->mutable_data(context.GetPlace()); @@ -61,10 +61,10 @@ class LookupTableGradKernel : public framework::OpKernel { t.device(context.GetEigenDevice()) = t.constant(static_cast(0)); - for (size_t i = 0; i < product(ids_t->dims()); ++i) { + for (int64_t i = 0; i < ids_t->numel(); ++i) { PADDLE_ENFORCE_LT(ids[i], N); PADDLE_ENFORCE_GE(ids[i], 0); - for (size_t j = 0; j < D; ++j) { + for (int j = 0; j < D; ++j) { d_table[ids[i] * D + j] += d_output[i * D + j]; } } diff --git a/paddle/operators/math/CMakeLists.txt b/paddle/operators/math/CMakeLists.txt index ed51d416ed9497eee45ba826ad672b8fb1ad3678..f8333f34f7b4c7b0f9a0f14a7a33f9d98e1d331c 100644 --- a/paddle/operators/math/CMakeLists.txt +++ b/paddle/operators/math/CMakeLists.txt @@ -1,8 +1,10 @@ if(WITH_GPU) - nv_library(math_function SRCS math_function.cc math_function.cu DEPS cblas device_context) + nv_library(math_function SRCS math_function.cc math_function.cu im2col.cc + im2col.cu DEPS cblas device_context) else() - cc_library(math_function SRCS math_function.cc DEPS cblas device_context) + cc_library(math_function SRCS math_function.cc im2col.cc DEPS cblas device_context) endif() nv_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) +cc_test(im2col_test SRCS im2col_test.cc DEPS math_function tensor) diff --git a/paddle/operators/math/im2col.cc b/paddle/operators/math/im2col.cc new file mode 100644 index 0000000000000000000000000000000000000000..5727c1cab16c1379ffe77f5594c057e93a042785 --- /dev/null +++ b/paddle/operators/math/im2col.cc @@ -0,0 +1,260 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
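For reference, the gather/scatter-add pair that the CPU lookup-table kernels above implement can be written framework-free as follows; raw pointers stand in for tensors and the function names are hypothetical (illustration only):

#include <cassert>
#include <cstdint>
#include <cstring>

// Forward: copy row ids[i] of the N x D table into output row i.
void LookupForward(const float* table, const int32_t* ids, int64_t K, int N,
                   int D, float* out) {
  for (int64_t i = 0; i < K; ++i) {
    assert(ids[i] >= 0 && ids[i] < N);
    std::memcpy(out + i * D, table + ids[i] * D, D * sizeof(float));
  }
}

// Backward: scatter-add each output-gradient row into d_table, which the
// real kernel zeroes first (assumed pre-zeroed here, N x D).
void LookupBackward(const float* d_out, const int32_t* ids, int64_t K, int N,
                    int D, float* d_table) {
  for (int64_t i = 0; i < K; ++i) {
    assert(ids[i] >= 0 && ids[i] < N);
    for (int j = 0; j < D; ++j) d_table[ids[i] * D + j] += d_out[i * D + j];
  }
}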
*/ + +#include "paddle/operators/math/im2col.h" + +namespace paddle { +namespace operators { +namespace math { + +/* + * im = [input_channels, input_height, input_width] + * col = + * [input_channels, filter_height, filter_width, output_height, output_width] + */ +template +class Im2ColFunctor { + public: + void operator()(const framework::Tensor& im, framework::Tensor& col, + int stride_height, int stride_width, int padding_height, + int padding_width, platform::DeviceContext* context) { + PADDLE_ENFORCE(im.dims().size() == 3); + PADDLE_ENFORCE(col.dims().size() == 5); + + int input_channels = im.dims()[0]; + int input_height = im.dims()[1]; + int input_width = im.dims()[2]; + int filter_height = col.dims()[1]; + int filter_width = col.dims()[2]; + int output_height = col.dims()[3]; + int output_width = col.dims()[4]; + int channels_col = input_channels * filter_height * filter_width; + + const T* im_data = im.data(); + T* col_data = col.data(); + + for (int c = 0; c < channels_col; ++c) { + int w_offset = c % filter_width; + int h_offset = (c / filter_width) % filter_height; + int c_im = c / filter_width / filter_height; + for (int h = 0; h < output_height; ++h) { + for (int w = 0; w < output_width; ++w) { + int im_row_idx = h * stride_height + h_offset; + int im_col_idx = w * stride_width + w_offset; + if ((im_row_idx - padding_height) < 0 || + (im_row_idx - padding_height) >= input_height || + (im_col_idx - padding_width) < 0 || + (im_col_idx - padding_width) >= input_width) { + col_data[(c * output_height + h) * output_width + w] = T(0); + } else { + im_row_idx += c_im * input_height - padding_height; + im_col_idx -= padding_width; + col_data[(c * output_height + h) * output_width + w] = + im_data[im_row_idx * input_width + im_col_idx]; + } + } + } + } + } +}; + +/* + * im = [input_channels, input_height, input_width] + * col = + * [input_channels, filter_height, filter_width, output_height, output_width] + */ +template +class Col2ImFunctor { + public: + void operator()(framework::Tensor& im, const framework::Tensor& col, + int stride_height, int stride_width, int padding_height, + int padding_width, platform::DeviceContext* context) { + PADDLE_ENFORCE(im.dims().size() == 3); + PADDLE_ENFORCE(col.dims().size() == 5); + int input_channels = im.dims()[0]; + int input_height = im.dims()[1]; + int input_width = im.dims()[2]; + int filter_height = col.dims()[1]; + int filter_width = col.dims()[2]; + int output_height = col.dims()[3]; + int output_width = col.dims()[4]; + int channels_col = input_channels * filter_height * filter_width; + + T* im_data = im.data(); + const T* col_data = col.data(); + + for (int c = 0; c < channels_col; ++c) { + int w_offset = c % filter_width; + int h_offset = (c / filter_width) % filter_height; + int c_im = c / filter_width / filter_height; + for (int h = 0; h < output_height; ++h) { + for (int w = 0; w < output_width; ++w) { + int im_row_idx = h * stride_height + h_offset; + int im_col_idx = w * stride_width + w_offset; + if ((im_row_idx - padding_height) >= 0 && + (im_row_idx - padding_height) < input_height && + (im_col_idx - padding_width) >= 0 && + (im_col_idx - padding_width) < input_width) { + im_row_idx += c_im * input_height - padding_height; + im_col_idx -= padding_width; + im_data[im_row_idx * input_width + im_col_idx] += + col_data[(c * output_height + h) * output_width + w]; + } + } + } + } + } +}; + +template class Im2ColFunctor; +template class Im2ColFunctor; +template class Col2ImFunctor; +template class Col2ImFunctor; + +/* + * im = 
[input_channels, input_height, input_width] + * col = + * [output_height, output_width, input_channels, filter_height, filter_width] + */ +template +class Im2ColFunctor { + public: + void operator()(const framework::Tensor& im, framework::Tensor& col, + int stride_height, int stride_width, int padding_height, + int padding_width, platform::DeviceContext* context) { + PADDLE_ENFORCE(im.dims().size() == 3); + PADDLE_ENFORCE(col.dims().size() == 5); + int input_channels = im.dims()[0]; + int input_height = im.dims()[1]; + int input_width = im.dims()[2]; + int filter_height = col.dims()[3]; + int filter_width = col.dims()[4]; + int output_height = col.dims()[0]; + int output_width = col.dims()[1]; + + const T* im_data = im.data(); + T* col_data = col.data(); + + for (int col_row_idx = 0; col_row_idx < output_height; ++col_row_idx) { + for (int col_col_idx = 0; col_col_idx < output_width; ++col_col_idx) { + for (int channel = 0; channel < input_channels; ++channel) { + for (int filter_row_idx = 0; filter_row_idx < filter_height; + ++filter_row_idx) { + for (int filter_col_idx = 0; filter_col_idx < filter_width; + ++filter_col_idx) { + int im_row_offset = + col_row_idx * stride_height + filter_row_idx - padding_height; + int im_col_offset = + col_col_idx * stride_width + filter_col_idx - padding_width; + int col_offset = (((col_row_idx * output_width + col_col_idx) * + input_channels + + channel) * + filter_height + + filter_row_idx) * + filter_width + + filter_col_idx; + if (im_row_offset < 0 || im_row_offset >= input_height || + im_col_offset < 0 || im_col_offset >= input_width) { + col_data[col_offset] = T(0); + } else { + int im_offset = + (channel * input_height + im_row_offset) * input_width + + im_col_offset; + col_data[col_offset] = im_data[im_offset]; + } + } + } + } + } + } + } +}; + +/* + * im = [input_channels, input_height, input_width] + * col = + * [output_height, output_width, input_channels, filter_height, filter_width] + */ +template +class Col2ImFunctor { + public: + void operator()(framework::Tensor& im, const framework::Tensor& col, + int stride_height, int stride_width, int padding_height, + int padding_width, platform::DeviceContext* context) { + PADDLE_ENFORCE(im.dims().size() == 3); + PADDLE_ENFORCE(col.dims().size() == 5); + int input_channels = im.dims()[0]; + int input_height = im.dims()[1]; + int input_width = im.dims()[2]; + int filter_height = col.dims()[3]; + int filter_width = col.dims()[4]; + int output_height = col.dims()[0]; + int output_width = col.dims()[1]; + + T* im_data = im.data(); + const T* col_data = col.data(); + + for (int col_row_idx = 0; col_row_idx < output_height; ++col_row_idx) { + for (int col_col_idx = 0; col_col_idx < output_width; ++col_col_idx) { + for (int channel = 0; channel < input_channels; ++channel) { + for (int filter_row_idx = 0; filter_row_idx < filter_height; + ++filter_row_idx) { + for (int filter_col_idx = 0; filter_col_idx < filter_width; + ++filter_col_idx) { + int im_row_offset = + col_row_idx * stride_height + filter_row_idx - padding_height; + int im_col_offset = + col_col_idx * stride_width + filter_col_idx - padding_width; + int col_offset = (((col_row_idx * output_width + col_col_idx) * + input_channels + + channel) * + filter_height + + filter_row_idx) * + filter_width + + filter_col_idx; + if (im_row_offset >= 0 && im_row_offset < input_height && + im_col_offset >= 0 && im_col_offset < input_width) { + int im_offset = + (channel * input_height + im_row_offset) * input_width + + im_col_offset; + im_data[im_offset] += 
col_data[col_offset]; + } + } + } + } + } + } + } +}; + +template class Im2ColFunctor; +template class Im2ColFunctor; +template class Col2ImFunctor; +template class Col2ImFunctor; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/im2col.cu b/paddle/operators/math/im2col.cu new file mode 100644 index 0000000000000000000000000000000000000000..9bff7bee3c95093852305d392af0949b831e5665 --- /dev/null +++ b/paddle/operators/math/im2col.cu @@ -0,0 +1,374 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/im2col.h" +#include "paddle/platform/cuda_helper.h" + +namespace paddle { +namespace operators { +namespace math { + +template +__global__ void im2col(const T* data_im, int num_outs, int height, int width, + int filter_height, int filter_width, int stride_height, + int stride_width, int padding_height, int padding_width, + int output_height, int output_width, T* data_col) { + int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; + if (index < num_outs) { + int w_out = index % output_width; + index /= output_width; + int h_out = index % output_height; + int channel_in = index / output_height; + int channel_out = channel_in * filter_height * filter_width; + int h_in = h_out * stride_height; + int w_in = w_out * stride_width; + + data_col += (channel_out * output_height + h_out) * output_width + w_out; + for (int i = 0; i < filter_height; ++i) { + for (int j = 0; j < filter_width; ++j) { + int rIdx = int(h_in + i); + int cIdx = int(w_in + j); + if ((rIdx - (int)padding_height) >= (int)height || + (rIdx - (int)padding_height) < 0 || + (cIdx - (int)padding_width) >= (int)width || + (cIdx - (int)padding_width) < 0) { + *data_col = 0; + } else { + rIdx = rIdx + channel_in * height - padding_height; + cIdx = cIdx - padding_width; + *data_col = data_im[rIdx * width + cIdx]; + } + data_col += output_height * output_width; + } + } + } +} + +/* + * im = [input_channels, input_height, input_width] + * col = + * [input_channels, filter_height, filter_width, output_height, output_width] + */ +template +class Im2ColFunctor { + public: + void operator()(const framework::Tensor& im, framework::Tensor& col, + int stride_height, int stride_width, int padding_height, + int padding_width, platform::DeviceContext* context) { + PADDLE_ENFORCE(im.dims().size() == 3); + PADDLE_ENFORCE(col.dims().size() == 5); + + int input_channels = im.dims()[0]; + int input_height = im.dims()[1]; + int input_width = im.dims()[2]; + int filter_height = col.dims()[1]; + int filter_width = col.dims()[2]; + int output_height = col.dims()[3]; + int output_width = col.dims()[4]; + + int num_outputs = input_channels * output_height * output_width; + int blocks = (num_outputs + 1024 - 1) / 1024; + int block_x = 512; + int block_y = (blocks + 512 - 1) / 512; + dim3 threads(1024, 1); + dim3 grid(block_x, block_y); + im2col<<< + grid, threads, 0, + reinterpret_cast(context)->stream()>>>( + 
im.data(), num_outputs, input_height, input_width, filter_height, + filter_width, stride_height, stride_width, padding_height, + padding_width, output_height, output_width, col.data()); + } +}; + +template +__global__ void col2im(size_t n, const T* data_col, size_t height, size_t width, + size_t channels, size_t filter_height, + size_t filter_width, size_t stride_height, + size_t stride_width, size_t padding_height, + size_t padding_width, size_t output_height, + size_t output_width, T* data_im) { + size_t index = + (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; + if (index < n) { + T val = 0; + int w = int(index % width); + int h = int((index / width) % height); + int c = int(index / (width * height)); + if ((w - (int)padding_width) >= 0 && + (w - (int)padding_width) < (width - 2 * padding_width) && + (h - (int)padding_height) >= 0 && + (h - padding_height) < (height - 2 * padding_height)) { + // compute the start and end of the output + int w_col_start = (w < (int)filter_width) + ? 0 + : (w - int(filter_width)) / (int)stride_width + 1; + int w_col_end = + min((int)(w / (int)stride_width + 1), (int)(output_width)); + int h_col_start = (h < (int)filter_height) + ? 0 + : (h - (int)filter_height) / (int)stride_height + 1; + int h_col_end = min(int(h / stride_height + 1), int(output_height)); + for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { + for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { + // the col location: [c * width * height + h_out, w_out] + int c_col = int(c * filter_height * filter_width) + + (h - h_col * (int)stride_height) * (int)filter_width + + (w - w_col * (int)stride_width); + val += + data_col[(c_col * output_height + h_col) * output_width + w_col]; + } + } + h -= padding_height; + w -= padding_width; + data_im[c * ((width - 2 * padding_width) * + (height - 2 * padding_height)) + + h * (width - 2 * padding_width) + w] += val; + } + } +} + +/* + * im = [input_channels, input_height, input_width] + * col = + * [input_channels, filter_height, filter_width, output_height, output_width] + */ +template +class Col2ImFunctor { + public: + void operator()(framework::Tensor& im, const framework::Tensor& col, + int stride_height, int stride_width, int padding_height, + int padding_width, platform::DeviceContext* context) { + PADDLE_ENFORCE(im.dims().size() == 3); + PADDLE_ENFORCE(col.dims().size() == 5); + + int input_channels = im.dims()[0]; + int input_height = im.dims()[1]; + int input_width = im.dims()[2]; + int filter_height = col.dims()[1]; + int filter_width = col.dims()[2]; + int output_height = col.dims()[3]; + int output_width = col.dims()[4]; + + size_t num_kernels = input_channels * (input_height + 2 * padding_height) * + (input_width + 2 * padding_width); + + size_t blocks = (num_kernels + 1024 - 1) / 1024; + size_t block_x = 512; + size_t block_y = (blocks + 512 - 1) / 512; + dim3 threads(1024, 1); + dim3 grid(block_x, block_y); + + // To avoid involving atomic operations, we will launch one kernel per + // bottom dimension, and then in the kernel add up the top dimensions. 
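// Grid-size arithmetic for the launch below, spelled out: one thread per
// element of the padded image (num_kernels), 1024 threads per block, and
// blocks = ceil(num_kernels / 1024) thread blocks folded into a 2-D grid
// with gridDim.x = 512 and gridDim.y = ceil(blocks / 512); the kernel then
// linearizes the pair back into a single index as
// (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x.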
+ col2im<<< + grid, threads, 0, + reinterpret_cast(context)->stream()>>>( + num_kernels, col.data(), input_height + 2 * padding_height, + input_width + 2 * padding_width, input_channels, filter_height, + filter_width, stride_height, stride_width, padding_height, + padding_width, output_height, output_width, im.data()); + } +}; + +template class Im2ColFunctor; +template class Im2ColFunctor; +template class Col2ImFunctor; +template class Col2ImFunctor; + +template +__global__ void im2colOCF(const T* im_data, T* col_data, int input_channels, + int input_height, int input_width, int filter_height, + int filter_width, int stride_height, int stride_width, + int padding_height, int padding_width, + int output_height, int output_width) { + int swid = blockIdx.x; + int shid = blockIdx.y; + for (int channelid = threadIdx.z; channelid < input_channels; + channelid += blockDim.z) { + for (int idy = threadIdx.y; idy < filter_height; idy += blockDim.y) { + for (int idx = threadIdx.x; idx < filter_width; idx += blockDim.x) { + int width_offset = idx + swid * stride_width - padding_width; + int height_offset = idy + shid * stride_height - padding_height; + int im_offset = width_offset + height_offset * input_width + + channelid * input_height * input_width; + + int col_offset = idx + idy * filter_width + + channelid * filter_height * filter_width + + (shid * output_width + swid) * + (input_channels * filter_height * filter_width); + + if (height_offset >= input_height || height_offset < 0 || + width_offset >= input_width || width_offset < 0) { + col_data[col_offset] = T(0); + } else { + col_data[col_offset] = im_data[im_offset]; + } + } + } + } +} + +/* + * im = [input_channels, input_height, input_width] + * col = + * [output_height, output_width, input_channels, filter_height, filter_width] + */ +template +class Im2ColFunctor { + public: + void operator()(const framework::Tensor& im, framework::Tensor& col, + int stride_height, int stride_width, int padding_height, + int padding_width, platform::DeviceContext* context) { + PADDLE_ENFORCE(im.dims().size() == 3); + PADDLE_ENFORCE(col.dims().size() == 5); + int input_channels = im.dims()[0]; + int input_height = im.dims()[1]; + int input_width = im.dims()[2]; + int filter_height = col.dims()[3]; + int filter_width = col.dims()[4]; + int output_height = col.dims()[0]; + int output_width = col.dims()[1]; + + int block_dim_x = 0; + int block_dim_y = 0; + if (filter_height <= 4 && filter_width <= 4) { + block_dim_x = 4; + block_dim_y = 4; + } else if (filter_height <= 8 && filter_width <= 8) { + block_dim_x = 8; + block_dim_y = 8; + } else if (filter_height <= 16 && filter_width <= 16) { + block_dim_x = 16; + block_dim_y = 16; + } else { + block_dim_x = 32; + block_dim_y = 32; + } + + int block_dim_z = 1024 / block_dim_x / block_dim_y; + dim3 threads(block_dim_x, block_dim_y, + std::min(block_dim_z, input_channels)); + dim3 grid(output_width, output_height); + im2colOCF<<< + grid, threads, 0, + reinterpret_cast(context)->stream()>>>( + im.data(), col.data(), input_channels, input_height, input_width, + filter_height, filter_width, stride_height, stride_width, + padding_height, padding_width, output_height, output_width); + } +}; + +template +__global__ void col2imOCF(T* im_data, const T* col_data, int input_channels, + int input_height, int input_width, int filter_height, + int filter_width, int stride_height, int stride_width, + int padding_height, int padding_width, + int output_height, int output_width) { + int swid = blockIdx.x; + int shid = blockIdx.y; + 
for (int channelid = threadIdx.z; channelid < input_channels; + channelid += blockDim.z) { + for (int idy = threadIdx.y; idy < filter_height; idy += blockDim.y) { + for (int idx = threadIdx.x; idx < filter_width; idx += blockDim.x) { + int width_offset = idx + swid * stride_width - padding_width; + int height_offset = idy + shid * stride_height - padding_height; + int im_offset = width_offset + height_offset * input_width + + channelid * input_height * input_width; + + int col_offset = idx + idy * filter_width + + channelid * filter_height * filter_width + + (shid * output_width + swid) * + (input_channels * filter_height * filter_width); + + if (height_offset >= 0 && height_offset < input_height && + width_offset >= 0 && width_offset < input_width) { + paddle::platform::CudaAtomicAdd(im_data + im_offset, + col_data[col_offset]); + } + } + } + } +} + +/* + * im = [input_channels, input_height, input_width] + * col = + * [output_height, output_width, input_channels, filter_height, filter_width] + */ +template +class Col2ImFunctor { + public: + void operator()(framework::Tensor& im, const framework::Tensor& col, + int stride_height, int stride_width, int padding_height, + int padding_width, platform::DeviceContext* context) { + PADDLE_ENFORCE(im.dims().size() == 3); + PADDLE_ENFORCE(col.dims().size() == 5); + int input_channels = im.dims()[0]; + int input_height = im.dims()[1]; + int input_width = im.dims()[2]; + int filter_height = col.dims()[3]; + int filter_width = col.dims()[4]; + int output_height = col.dims()[0]; + int output_width = col.dims()[1]; + + int block_dim_x = 0; + int block_dim_y = 0; + if (filter_height <= 4 && filter_width <= 4) { + block_dim_x = 4; + block_dim_y = 4; + } else if (filter_height <= 8 && filter_width <= 8) { + block_dim_x = 8; + block_dim_y = 8; + } else if (filter_height <= 16 && filter_width <= 16) { + block_dim_x = 16; + block_dim_y = 16; + } else { + block_dim_x = 32; + block_dim_y = 32; + } + + int block_dim_z = 1024 / block_dim_x / block_dim_y; + dim3 threads(block_dim_x, block_dim_y, + std::min(block_dim_z, input_channels)); + dim3 grid(output_width, output_height); + col2imOCF<<< + grid, threads, 0, + reinterpret_cast(context)->stream()>>>( + im.data(), col.data(), input_channels, input_height, input_width, + filter_height, filter_width, stride_height, stride_width, + padding_height, padding_width, output_height, output_width); + } +}; + +template class Im2ColFunctor; +template class Im2ColFunctor; +template class Col2ImFunctor; +template class Col2ImFunctor; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/im2col.h b/paddle/operators/math/im2col.h new file mode 100644 index 0000000000000000000000000000000000000000..8958c5457cc2c3034c34ca82fb2e98cc06be63c5 --- /dev/null +++ b/paddle/operators/math/im2col.h @@ -0,0 +1,90 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include "paddle/framework/tensor.h" +#include "paddle/platform/device_context.h" + +namespace paddle { +namespace operators { +namespace math { + +/* The storage format of the coldata in the Im2ColFunctor and Col2ImFunctor. */ +enum class ColFormat { kCFO = 0, kOCF = 1 }; + +/* + * \brief Converts the image data of three dimensions (CHW) into a colData of + * five dimensions in the Im2ColFunctor calculation, + * and the Col2ImFunctor calculation reverses it. + * + * \param imData Image data. + * \param imShape The shape of imData, + * [input_channels, input_height, input_width]. + * \param colData Column data. + * \param colShape The shape of colData. + * + * If the template argument Format is kCFO, the shape of colData is: + * [input_channels, filter_height, filter_width, output_height, output_width] + * So, it is easy to reshape into a convolution matrix for convolution + * calculation based on matrix multiplication. + * The shape of the convolution matrix is [height, width], where the height is + * equal to input_channels * filter_height * filter_width, and the width is + * equal to output_height * output_width. + * + * Reshape: + * shape of colData shape of convolution matrix + * [input_channels, + * filter_height, + * filter_width, ======> [height, width] + * output_height, + * output_width] + * + * If the template argument Format is kOCF, the shape of colData is: + * [output_height, output_width, input_channels, filter_height, filter_width] + * So, it is easy to reshape into a sequence matrix for RNN calculation. + * The shape of the sequence matrix is [seq_length, step_size], where the + * seq_length is equal to output_height * output_width, and the step_size is + * equal to input_channels * filter_height * filter_width. + * + * Reshape: + * shape of colData shape of sequence matrix + * [output_height, + * output_width, + * input_channels, ======> [seq_length, step_size] + * filter_height, + * filter_width] + * + * \note The caller needs to ensure that imShape.inputChannels is equal to + * colShape.inputChannels. + */ +template +class Im2ColFunctor { + public: + void operator()(const framework::Tensor& im, framework::Tensor& col, + int stride_height, int stride_width, int padding_height, + int padding_width, platform::DeviceContext* context); +}; + +template +class Col2ImFunctor { + public: + void operator()(framework::Tensor& im, const framework::Tensor& col, + int stride_height, int stride_width, int padding_height, + int padding_width, platform::DeviceContext* context); +}; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/im2col_test.cc b/paddle/operators/math/im2col_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..4f380388b108dc173d847f027ba5c9db387a87f8 --- /dev/null +++ b/paddle/operators/math/im2col_test.cc @@ -0,0 +1,122 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
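To make the two layouts concrete, here is a small standalone program (added for illustration; it is not part of the patch) that computes the colData shape and its 2-D reshape for each format, using the same 1-channel 2x3 image, 2x2 filter, stride 1, no padding case exercised by im2col_test.cc below:

#include <cstdio>

int main() {
  const int C = 1, H = 2, W = 3, FH = 2, FW = 2, stride = 1, pad = 0;
  const int OH = (H - FH + 2 * pad) / stride + 1;  // 1
  const int OW = (W - FW + 2 * pad) / stride + 1;  // 2
  // kCFO: [C, FH, FW, OH, OW] reshapes to [C*FH*FW, OH*OW] = [4, 2],
  // matching the 4-row output_cfo in the test.
  std::printf("kCFO col: [%d, %d, %d, %d, %d] -> [%d, %d]\n",
              C, FH, FW, OH, OW, C * FH * FW, OH * OW);
  // kOCF: [OH, OW, C, FH, FW] reshapes to [OH*OW, C*FH*FW] = [2, 4],
  // matching the 2-row output_ocf in the test.
  std::printf("kOCF col: [%d, %d, %d, %d, %d] -> [%d, %d]\n",
              OH, OW, C, FH, FW, OH * OW, C * FH * FW);
  return 0;
}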
diff --git a/paddle/operators/math/im2col_test.cc b/paddle/operators/math/im2col_test.cc
new file mode 100644
index 0000000000000000000000000000000000000000..4f380388b108dc173d847f027ba5c9db387a87f8
--- /dev/null
+++ b/paddle/operators/math/im2col_test.cc
@@ -0,0 +1,122 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/operators/math/im2col.h"
+#include <gtest/gtest.h>
+#include <iostream>
+
+template <typename Place>
+void testIm2col() {
+  paddle::framework::Tensor input_tmp;
+  paddle::framework::Tensor input;
+  paddle::framework::Tensor output_cfo;
+  paddle::framework::Tensor output_ocf;
+  paddle::framework::Tensor output_tmp;
+
+  /**
+   * input = [0, 1, 2,
+   *          3, 4, 5]
+   *
+   * output_cfo = [0, 1
+   *               1, 2
+   *               3, 4
+   *               4, 5]
+   *
+   * output_ocf = [0, 1, 3, 4
+   *               1, 2, 4, 5]
+   */
+  int input_height = 2;
+  int input_width = 3;
+  int filter_size = 2;
+  int stride = 1;
+  int padding = 0;
+  int output_height = (input_height - filter_size + 2 * padding) / stride + 1;
+  int output_width = (input_width - filter_size + 2 * padding) / stride + 1;
+  float* input_ptr = input_tmp.mutable_data<float>(
+      {1, input_height, input_width}, paddle::platform::CPUPlace());
+  float arr[6] = {0, 1, 2, 3, 4, 5};
+  memcpy(input_ptr, arr, 6 * sizeof(float));
+
+  auto* place = new Place();
+  if (paddle::platform::is_cpu_place(*place)) {
+    input = input_tmp;
+  } else {
+    input.CopyFrom<float>(input_tmp, *place);
+  }
+  output_cfo.mutable_data<float>(
+      {1, filter_size, filter_size, output_height, output_width}, *place);
+  output_ocf.mutable_data<float>(
+      {output_height, output_width, 1, filter_size, filter_size}, *place);
+
+  paddle::operators::math::Im2ColFunctor<
+      paddle::operators::math::ColFormat::kCFO, Place, float>
+      im2col;
+  paddle::operators::math::Im2ColFunctor<
+      paddle::operators::math::ColFormat::kOCF, Place, float>
+      im2col_ocf;
+
+  paddle::platform::DeviceContext* context;
+  if (paddle::platform::is_cpu_place(*place)) {
+    context =
+        new paddle::platform::CPUDeviceContext(paddle::platform::CPUPlace());
+  } else {
+#ifndef PADDLE_ONLY_CPU
+    context =
+        new paddle::platform::CUDADeviceContext(paddle::platform::GPUPlace());
+#else
+    PADDLE_THROW("no GPU support");
+#endif  // PADDLE_ONLY_CPU
+  }
+  im2col(input, output_cfo, stride, stride, padding, padding, context);
+  im2col_ocf(input, output_ocf, stride, stride, padding, padding, context);
+
+  float* out_cfo_ptr;
+  if (paddle::platform::is_cpu_place(*place)) {
+    out_cfo_ptr = output_cfo.data<float>();
+  } else {
+    output_tmp.CopyFrom<float>(output_cfo, paddle::platform::CPUPlace());
+    out_cfo_ptr = output_tmp.data<float>();
+  }
+  EXPECT_EQ(out_cfo_ptr[0], 0);
+  EXPECT_EQ(out_cfo_ptr[1], 1);
+  EXPECT_EQ(out_cfo_ptr[2], 1);
+  EXPECT_EQ(out_cfo_ptr[3], 2);
+  EXPECT_EQ(out_cfo_ptr[4], 3);
+  EXPECT_EQ(out_cfo_ptr[5], 4);
+  EXPECT_EQ(out_cfo_ptr[6], 4);
+  EXPECT_EQ(out_cfo_ptr[7], 5);
+
+  float* out_ocf_ptr;
+  if (paddle::platform::is_cpu_place(*place)) {
+    out_ocf_ptr = output_ocf.data<float>();
+  } else {
+    output_tmp.CopyFrom<float>(output_ocf, paddle::platform::CPUPlace());
+    out_ocf_ptr = output_tmp.data<float>();
+  }
+  EXPECT_EQ(out_ocf_ptr[0], 0);
+  EXPECT_EQ(out_ocf_ptr[1], 1);
+  EXPECT_EQ(out_ocf_ptr[2], 3);
+  EXPECT_EQ(out_ocf_ptr[3], 4);
+  EXPECT_EQ(out_ocf_ptr[4], 1);
+  EXPECT_EQ(out_ocf_ptr[5], 2);
+  EXPECT_EQ(out_ocf_ptr[6], 4);
+  EXPECT_EQ(out_ocf_ptr[7], 5);
+}
+
+TEST(math, im2col) {
+  testIm2col<paddle::platform::CPUPlace>();
+#ifndef PADDLE_ONLY_CPU
+  testIm2col<paddle::platform::GPUPlace>();
+#endif
+}
diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc
index e66e0abb25f9b933025a6d098ed9dd9eb18a47a5..d3d0e55a674587fb04f43f24d0790de4358f035a 100644
--- a/paddle/operators/mean_op.cc
+++ b/paddle/operators/mean_op.cc
@@ -54,7 +54,7 @@ class MeanGradOp : public framework::OperatorWithKernel {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(mean, ops::MeanOp, ops::MeanOpMaker, ops::MeanGradOp);
+REGISTER_OP(mean, ops::MeanOp, ops::MeanOpMaker, mean_grad,
ops::MeanGradOp); REGISTER_OP_CPU_KERNEL(mean, ops::MeanKernel); REGISTER_OP_CPU_KERNEL(mean_grad, diff --git a/paddle/operators/mean_op.h b/paddle/operators/mean_op.h index 9848af280b62729bef9243052ceae0b7d8f4c6f5..ce31e178d8e375dc59be80a6c05133201308da70 100644 --- a/paddle/operators/mean_op.h +++ b/paddle/operators/mean_op.h @@ -49,12 +49,11 @@ class MeanGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto OG = context.Input(framework::GradVarName("Out")); - PADDLE_ENFORCE(framework::product(OG->dims()) == 1, - "Mean Gradient should be scalar"); + PADDLE_ENFORCE(OG->numel() == 1, "Mean Gradient should be scalar"); auto IG = context.Output(framework::GradVarName("X")); IG->mutable_data(context.GetPlace()); - T ig_size = (T)framework::product(IG->dims()); + T ig_size = static_cast(IG->numel()); Eigen::DSizes bcast(ig_size); EigenVector::Flatten(*IG).device(context.GetEigenDevice()) = diff --git a/paddle/operators/minus_op.cc b/paddle/operators/minus_op.cc index b4afebcd97a8efff70aaaa85bc2ec5455ddd05c5..a4876feb2edf77bd422fa2a7687b0fa7d55dae47 100644 --- a/paddle/operators/minus_op.cc +++ b/paddle/operators/minus_op.cc @@ -31,8 +31,7 @@ class MinusOp : public framework::OperatorWithKernel { auto *right_tensor = ctx.Input("Y"); PADDLE_ENFORCE_EQ( - framework::product(left_tensor->dims()), - framework::product(right_tensor->dims()), + left_tensor->numel(), right_tensor->numel(), "Minus operator must take two tensor with same num of elements"); ctx.Output("Out")->Resize(left_tensor->dims()); } @@ -79,8 +78,9 @@ class MinusGradOp : public NetOp { } // namespace paddle USE_OP(scale); -USE_OP_ITSELF(identity); +USE_NO_KERNEL_OP(identity); namespace ops = paddle::operators; -REGISTER_OP(minus, ops::MinusOp, ops::MinusOpMaker, ops::MinusGradOp); +REGISTER_OP(minus, ops::MinusOp, ops::MinusOpMaker, minus_grad, + ops::MinusGradOp); REGISTER_OP_CPU_KERNEL(minus, ops::MinusKernel); diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc index 559d19e6bdc083fffebe1c82a0bebbb18dd134fd..710a56a0e8e2d17162d7d000df226f1537104eb9 100644 --- a/paddle/operators/mul_op.cc +++ b/paddle/operators/mul_op.cc @@ -25,18 +25,27 @@ class MulOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - auto dim0 = ctx.Input("X")->dims(); - auto dim1 = ctx.Input("Y")->dims(); - PADDLE_ENFORCE_EQ(dim0.size(), 2, - "input X(%s) should be a tensor with 2 dims, a matrix", - ctx.op_.Input("X")); - PADDLE_ENFORCE_EQ(dim1.size(), 2, - "input Y(%s) should be a tensor with 2 dims, a matrix", - ctx.op_.Input("Y")); + auto x_dims = ctx.Input("X")->dims(); + auto y_dims = ctx.Input("Y")->dims(); + int x_num_col_dims = Attr("x_num_col_dims"); + int y_num_col_dims = Attr("y_num_col_dims"); + + PADDLE_ENFORCE(x_dims.size() > x_num_col_dims, + "The rank of input tensor X(%s) should be larger than " + "`mul_op`'s `x_num_col_dims`.", + ctx.op().Input("X")); + PADDLE_ENFORCE(y_dims.size() > y_num_col_dims, + "The rank of input tensor Y(%s) should be larger than " + "`mul_op`'s `y_num_col_dims`.", + ctx.op().Input("Y")); + + auto x_mat_dims = framework::flatten_to_2d(x_dims, x_num_col_dims); + auto y_mat_dims = framework::flatten_to_2d(y_dims, y_num_col_dims); + PADDLE_ENFORCE_EQ( - dim0[1], dim1[0], + x_mat_dims[1], y_mat_dims[0], "First matrix's width must be equal with second matrix's height."); - ctx.Output("Out")->Resize({dim0[0], dim1[1]}); + 
ctx.Output("Out")->Resize({x_mat_dims[0], y_mat_dims[1]}); } }; @@ -47,6 +56,23 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("X", "The first input of mul op"); AddInput("Y", "The second input of mul op"); AddOutput("Out", "The output of mul op"); + AddAttr( + "x_num_col_dims", + R"DOC(mul_op can take tensors with more than two dimensions as input `X`, + in that case, tensors will be reshaped to a matrix. The matrix's first + dimension(column length) will be the product of tensor's last + `num_col_dims` dimensions, and the matrix's second dimension(row length) + will be the product of tensor's first `rank - num_col_dims` dimensions. + )DOC") + .SetDefault(1) + .EqualGreaterThan(1); + AddAttr( + "y_num_col_dims", + R"DOC(mul_op can take tensors with more than two dimensions as input `Y`, + in that case, tensors will be reshaped to a matrix. Just like input `X`. + )DOC") + .SetDefault(1) + .EqualGreaterThan(1); AddComment(R"DOC( Two Element Mul Operator. @@ -70,13 +96,23 @@ class MulOpGrad : public framework::OperatorWithKernel { auto out_dims = ctx.Input(framework::GradVarName("Out"))->dims(); auto *x_grad = ctx.Output(framework::GradVarName("X")); auto *y_grad = ctx.Output(framework::GradVarName("Y")); - PADDLE_ENFORCE(x_dims[0] == out_dims[0], - "Out@GRAD M X N must equal to X dims 0, M "); - PADDLE_ENFORCE(y_dims[1] == out_dims[1], - "Out@GRAD M X N must equal to Y dims 1, N "); - x_grad->Resize(x_dims); - y_grad->Resize(y_dims); + auto x_mat_dims = + framework::flatten_to_2d(x_dims, Attr("x_num_col_dims")); + auto y_mat_dims = + framework::flatten_to_2d(y_dims, Attr("y_num_col_dims")); + + PADDLE_ENFORCE_EQ( + x_mat_dims[0], out_dims[0], + "The first dimension of Out@GRAD must equal to the first dimension of " + "the first operand."); + PADDLE_ENFORCE_EQ( + y_mat_dims[1], out_dims[1], + "The second dimension of Out@GRAD must equal to the second " + "dimension of the second operand."); + + if (x_grad) x_grad->Resize(x_dims); + if (y_grad) y_grad->Resize(y_dims); } }; @@ -84,7 +120,7 @@ class MulOpGrad : public framework::OperatorWithKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker, ops::MulOpGrad); +REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker, mul_grad, ops::MulOpGrad); REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel); REGISTER_OP_CPU_KERNEL(mul_grad, ops::MulGradKernel); diff --git a/paddle/operators/mul_op.h b/paddle/operators/mul_op.h index 8facc0281449785bf40726f23ca2fd5d166ff272..3c01f868bda8cba488b3403df456d63d6b082fa6 100644 --- a/paddle/operators/mul_op.h +++ b/paddle/operators/mul_op.h @@ -1,7 +1,7 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. + You may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 @@ -31,13 +31,25 @@ template class MulKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* X = context.Input("X"); - auto* Y = context.Input("Y"); - auto* Z = context.Output("Out"); - Z->mutable_data(context.GetPlace()); + const Tensor* x = context.Input("X"); + const Tensor* y = context.Input("Y"); + Tensor* z = context.Output("Out"); + const Tensor x_matrix = + x->dims().size() > 2 + ? 
framework::ReshapeToMatrix<T>(
+                  *x, context.template Attr<int>("x_num_col_dims"))
+            : *x;
+    const Tensor y_matrix =
+        y->dims().size() > 2
+            ? framework::ReshapeToMatrix<T>(
+                  *y, context.template Attr<int>("y_num_col_dims"))
+            : *y;
+
+    z->mutable_data<T>(context.GetPlace());
     auto* device_context =
         const_cast<platform::DeviceContext*>(context.device_context_);
-    math::matmul<Place, T>(*X, false, *Y, false, 1, Z, 0, device_context);
+    math::matmul<Place, T>(x_matrix, false, y_matrix, false, 1, z, 0,
+                           device_context);
   }
 };
@@ -45,20 +57,40 @@ template <typename Place, typename T>
 class MulGradKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    auto* X = ctx.Input<Tensor>("X");
-    auto* Y = ctx.Input<Tensor>("Y");
-    auto* dOut = ctx.Input<Tensor>(framework::GradVarName("Out"));
+    int x_num_col_dims = ctx.template Attr<int>("x_num_col_dims");
+    int y_num_col_dims = ctx.template Attr<int>("y_num_col_dims");
+    const Tensor* x = ctx.Input<Tensor>("X");
+    const Tensor* y = ctx.Input<Tensor>("Y");
+    const Tensor x_matrix =
+        x->dims().size() > 2
+            ? framework::ReshapeToMatrix<T>(*x, x_num_col_dims)
+            : *x;
+    const Tensor y_matrix =
+        y->dims().size() > 2
+            ? framework::ReshapeToMatrix<T>(*y, y_num_col_dims)
+            : *y;
+    const Tensor* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
 
-    auto* dX = ctx.Output<Tensor>(framework::GradVarName("X"));
-    auto* dY = ctx.Output<Tensor>(framework::GradVarName("Y"));
-    dX->mutable_data<T>(ctx.GetPlace());
-    dY->mutable_data<T>(ctx.GetPlace());
+    Tensor* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
+    Tensor* dy = ctx.Output<Tensor>(framework::GradVarName("Y"));
     auto* device_context =
         const_cast<platform::DeviceContext*>(ctx.device_context_);
-    // dX = dOut * Y'. dX: M x K, dOut : M x N, Y : K x N
-    math::matmul<Place, T>(*dOut, false, *Y, true, 1, dX, 0, device_context);
-    // dY = X' * dOut. dY: K x N, dOut : M x N, X : M x K
-    math::matmul<Place, T>(*X, true, *dOut, false, 1, dY, 0, device_context);
+    if (dx) {
+      dx->mutable_data<T>(ctx.GetPlace());
+      Tensor dx_matrix = dx->dims().size() > 2
+                             ? framework::ReshapeToMatrix<T>(*dx, x_num_col_dims)
+                             : *dx;
+      // dx = dout * y'. dx: M x K, dout : M x N, y : K x N
+      math::matmul<Place, T>(*dout, false, y_matrix, true, 1, &dx_matrix, 0,
+                             device_context);
+    }
+    if (dy) {
+      dy->mutable_data<T>(ctx.GetPlace());
+      Tensor dy_matrix = dy->dims().size() > 2
+                             ? framework::ReshapeToMatrix<T>(*dy, y_num_col_dims)
+                             : *dy;
+      // dy = x' * dout. dy: K x N, dout : M x N, x : M x K
+      math::matmul<Place, T>(x_matrix, true, *dout, false, 1, &dy_matrix, 0,
+                             device_context);
+    }
   }
 };
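The flattening behaviour that the mul kernels above rely on can be illustrated standalone. A minimal sketch, assuming only standard C++: the helper `flatten_to_2d_dims` is a hypothetical stand-in for the framework's reshape-to-matrix logic, where the first `num_col_dims` dimensions form the rows of the matrix view and the remaining dimensions form the columns.

```c++
#include <cstdio>
#include <utility>
#include <vector>

// Illustrative only: collapses a rank-N shape into a [rows, cols] matrix
// shape, mirroring how a tensor is viewed as a matrix for matmul.
std::pair<int, int> flatten_to_2d_dims(const std::vector<int>& dims,
                                       int num_col_dims) {
  int rows = 1, cols = 1;
  for (int i = 0; i < num_col_dims; ++i) rows *= dims[i];
  for (size_t i = static_cast<size_t>(num_col_dims); i < dims.size(); ++i)
    cols *= dims[i];
  return {rows, cols};
}

int main() {
  auto m1 = flatten_to_2d_dims({2, 3, 4}, 1);  // viewed as [2, 12]
  auto m2 = flatten_to_2d_dims({2, 3, 4}, 2);  // viewed as [6, 4]
  printf("[%d, %d] [%d, %d]\n", m1.first, m1.second, m2.first, m2.second);
  return 0;
}
```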
diff --git a/paddle/operators/name_convention.md b/paddle/operators/name_convention.md
new file mode 100644
index 0000000000000000000000000000000000000000..a090e0b5450509affdd739f63df618595f204f97
--- /dev/null
+++ b/paddle/operators/name_convention.md
@@ -0,0 +1,59 @@
+## Operator's Parameter Name Convention
+
+To make the operator documentation clearer, we recommend that operator names obey the conventions listed below.
+
+### OpProtoMaker names
+
+When defining an operator in Paddle, a corresponding [OpProtoMaker](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L170) (TODO: OpProtoMaker Doc) needs to be defined. All the Inputs/Outputs and Attributes will be written into the [OpProto](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto#L61), and will be used in the client language to create the operator.
+
+- Input/Output.
+  - Input/Output names follow **CamelCase**, e.g. `X`, `Y`, `Matrix`, `LastAxisInMatrix`. Inputs and outputs behave much like variables, so we prefer meaningful English words.
+  - If an operator's inputs/outputs are mathematical tensors that do not correspond to any meaningful word, input names should start from `X` (e.g. `X`, `Y`) and output names should start from `Out` (e.g. `Out`). This rule is intended to keep operators with few inputs/outputs uniform.
+
+- Attribute.
+  - Attribute names follow **camelCase**, e.g. `x`, `y`, `axis`, `rowwiseMatrix`. Attribute names also prefer meaningful English words.
+
+- Comments.
+  - Input/Output/Attr comments follow the format **(type, default value) usage**, describing which types the item can take and how it is used in the operator, e.g. the attribute in Accumulator `"gamma"`: `(float, default 1.0) Accumulation multiplier`.
+  - Operator comments use the format `R"DOC(your comment here)DOC"`. You should explain the inputs/outputs of the operator first. If the operator performs a mathematical calculation, write the equation in the comment, e.g. `Out = X + Y`.
+
+- Order.
+  - Follow the order Input/Output, then Attribute, then Comments. See the example in the best practice below.
+
+### Best Practice
+
+Here are some examples showing how these rules are applied.
+
+- An operator with one input and one output, e.g. `relu`: inputs: `X`; outputs: `Out`.
+
+- An operator with two inputs and one output, e.g. `rowwise_add`: inputs: `X`, `Y`; outputs: `Out`.
+
+- An operator with an attribute, e.g. `cosine`: inputs: `X`; attribute: `axis`; outputs: `Out`.
+
+  A full example, the Accumulate operator, is given below.
+
+```c++
+class AccumulateOpMaker : public framework::OpProtoAndCheckerMaker {
+public:
+  AccumulateOpMaker(framework::OpProto *proto,
+                    framework::OpAttrChecker *op_checker)
+    : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput("X", "(Tensor) The input tensor that has to be accumulated to the output tensor. If the output size is not the same as input size, the output tensor is first reshaped and initialized to zero, and only then, accumulation is done.");
+    AddOutput("Out", "(Tensor) Accumulated output tensor");
+    AddAttr<float>("gamma", "(float, default 1.0) Accumulation multiplier");
+    AddComment(R"DOC(
+Accumulate operator accumulates the input tensor to the output tensor. If the
+output tensor already has the right size, we add to it; otherwise, we first
+initialize the output tensor to all zeros, and then do accumulation. Any
+further calls to the operator, given that no one else fiddles with the output
+in the interim, will do simple accumulations.
+Accumulation is done as shown:
+
+Out = 1*X + gamma*Out
+
+where X is the input tensor, Out is the output tensor and gamma is the
+multiplier argument.
+)DOC");
+  }
+};
+```
diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc
index 16bd249cb3d989c695ec9378f09d48833d70be58..e826703c60ca82e1fe690eb78c3d4f92981ef3a2 100644
--- a/paddle/operators/recurrent_op.cc
+++ b/paddle/operators/recurrent_op.cc
@@ -235,5 +235,5 @@ RecurrentGradientOp::RecurrentGradientOp(
 }  // namespace paddle
 
 REGISTER_OP_WITHOUT_GRADIENT(
-    recurrent_op, paddle::operators::RecurrentOp,
+    recurrent, paddle::operators::RecurrentOp,
     paddle::operators::RecurrentAlgorithmProtoAndCheckerMaker);
diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc
new file mode 100644
index 0000000000000000000000000000000000000000..b7061153d2bf13982f14f233e87a87daeeebf5fd
--- /dev/null
+++ b/paddle/operators/reshape_op.cc
@@ -0,0 +1,107 @@
+
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/operators/reshape_op.h" + +namespace paddle { +namespace operators { + +class ReshapeOp : public framework::OperatorWithKernel { + public: + ReshapeOp(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorWithKernel(type, inputs, outputs, attrs) {} + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + // input check + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) shouldn't be null"); + auto shape = ctx.Attr>("shape"); + PADDLE_ENFORCE(shape.size() > 0, "Attr(shape) shouldn't be empty."); + for (auto dim : shape) { + PADDLE_ENFORCE(dim > 0, "Each dimension of shape must be positive."); + } + // capacity check + int64_t capacity = + std::accumulate(shape.begin(), shape.end(), 1, std::multiplies()); + auto *in = ctx.Input("X"); + int64_t in_size = framework::product(in->dims()); + PADDLE_ENFORCE_EQ(capacity, in_size, + "The size of Input(X) mismatches with Attr(shape)."); + // resize output + std::vector shape_int64(shape.size(), 0); + std::transform(shape.begin(), shape.end(), shape_int64.begin(), + [](int a) { return static_cast(a); }); + auto out_dims = framework::make_ddim(shape_int64); + ctx.Output("Out")->Resize(out_dims); + } +}; + +class ReshapeOpMaker : public framework::OpProtoAndCheckerMaker { + public: + ReshapeOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "The input tensor of reshape operator."); + AddOutput("Out", "The output tensor of reshape operator."); + AddAttr>("shape", "Target shape of reshape operator."); + AddComment(R"DOC(Reshape operator + +Reshape Input(X) into the shape specified by Attr(shape). 
+ +An example: +Given a 2-D tensor X with 2 rows and 2 columns + + [[1, 2], [3, 4]] + +with target shape = [1, 4], the reshape operator will transform +the tensor X into a 1-D tensor: + + [1, 2, 3, 4] + +)DOC"); + } +}; + +class ReshapeGradOp : public framework::OperatorWithKernel { + public: + ReshapeGradOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorWithKernel(type, inputs, outputs, attrs) {} + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) shouldn't be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), + "Input(Out@GRAD) shouldn't be null."); + auto dims = ctx.Input("X")->dims(); + auto *d_in = ctx.Output(framework::GradVarName("X")); + d_in->Resize(dims); + } +}; + +} // namespace operators +} // namespace paddle +namespace ops = paddle::operators; + +REGISTER_OP(reshape, ops::ReshapeOp, ops::ReshapeOpMaker, reshape_grad, + ops::ReshapeGradOp); +REGISTER_OP_CPU_KERNEL(reshape, + ops::ReshapeKernel); +REGISTER_OP_CPU_KERNEL( + reshape_grad, ops::ReshapeGradKernel); diff --git a/paddle/operators/reshape_op.cu b/paddle/operators/reshape_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..23dbe089d3b37aabedf9ef166f7bbfbf67da7e0a --- /dev/null +++ b/paddle/operators/reshape_op.cu @@ -0,0 +1,22 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/operators/reshape_op.h" + +REGISTER_OP_GPU_KERNEL( + reshape, + paddle::operators::ReshapeKernel); +REGISTER_OP_GPU_KERNEL( + reshape_grad, + paddle::operators::ReshapeGradKernel); diff --git a/paddle/operators/reshape_op.h b/paddle/operators/reshape_op.h new file mode 100644 index 0000000000000000000000000000000000000000..873acf30782d390cdca5e7e864c76e1f743f9a7c --- /dev/null +++ b/paddle/operators/reshape_op.h @@ -0,0 +1,55 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#pragma once + +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +template +class ReshapeKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const { + auto* out = ctx.Output("Out"); + auto* in = ctx.Input("X"); + out->mutable_data(ctx.GetPlace()); + + auto shape = ctx.Attr>("shape"); + std::vector shape_int64(shape.size(), 0); + std::transform(shape.begin(), shape.end(), shape_int64.begin(), + [](int a) { return static_cast(a); }); + auto out_dims = framework::make_ddim(shape_int64); + out->CopyFrom(*in, ctx.GetPlace()); + out->Resize(out_dims); + } +}; + +template +class ReshapeGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const { + auto* d_out = ctx.Input(framework::GradVarName("Out")); + auto* d_x = ctx.Output(framework::GradVarName("X")); + d_x->mutable_data(ctx.GetPlace()); + + auto in_dims = d_x->dims(); + d_x->CopyFrom(*d_out, ctx.GetPlace()); + d_x->Resize(in_dims); + } +}; +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/rnn/recurrent_op_utils.cc b/paddle/operators/rnn/recurrent_op_utils.cc index a9b65c30f25554e54e9fd7103f240946a93566e2..97872c67ac99fbf6c9c177d52f1d4069163e8548 100644 --- a/paddle/operators/rnn/recurrent_op_utils.cc +++ b/paddle/operators/rnn/recurrent_op_utils.cc @@ -61,7 +61,7 @@ void ConcatOutputs(const std::vector& step_scopes, PADDLE_ENFORCE(step_scope_var != nullptr, "%s not in scope", outlinks[i].internal); f::DDim step_dims = step_scope_var->template GetMutable()->dims(); - std::vector dims_vec = vectorize(step_dims); + std::vector dims_vec = vectorize(step_dims); dims_vec.insert(dims_vec.begin(), seq_len); output->Resize(f::make_ddim(dims_vec)); } else { @@ -109,7 +109,7 @@ void InitArgument(const ArgumentName& name, Argument* arg, arg->step_scopes = op.Output(name.step_scopes); auto inlinks = op.Inputs(name.inlinks); - auto inlink_alias = op.GetAttr>(name.inlink_alias); + auto inlink_alias = op.Attr>(name.inlink_alias); PADDLE_ENFORCE(inlinks.size() == inlink_alias.size(), "the size of inlinks and inlink_alias don't match:%d,%d", inlinks.size(), inlink_alias.size()); @@ -121,7 +121,7 @@ void InitArgument(const ArgumentName& name, Argument* arg, } auto outlinks = op.Outputs(name.outlinks); - auto outlink_alias = op.GetAttr>(name.outlink_alias); + auto outlink_alias = op.Attr>(name.outlink_alias); PADDLE_ENFORCE(outlinks.size() == outlink_alias.size(), "the size of outlinks and outlink_alias don't match:%d,%d", outlinks.size(), outlink_alias.size()); @@ -135,8 +135,8 @@ void InitArgument(const ArgumentName& name, Argument* arg, auto boot_memories = op.Inputs(name.boot_memories); // attributes - auto memories = op.GetAttr>(name.memories); - auto pre_memories = op.GetAttr>(name.pre_memories); + auto memories = op.Attr>(name.memories); + auto pre_memories = op.Attr>(name.pre_memories); PADDLE_ENFORCE(memories.size() == boot_memories.size(), "the size of memories, boot_memories don't match:%d,%d", diff --git a/paddle/operators/rowwise_add_op.cc b/paddle/operators/rowwise_add_op.cc index 63de91254f4b75587cb2fb29aeb8ff7358ba8e76..fa8f0ff1a858143af427b51025279c726f1628e0 100644 --- a/paddle/operators/rowwise_add_op.cc +++ b/paddle/operators/rowwise_add_op.cc @@ -25,14 +25,19 @@ class RowwiseAddOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - auto dim0 = 
ctx.Input("X")->dims(); - auto dim1 = ctx.Input("b")->dims(); - - PADDLE_ENFORCE(dim0.size() == 2, "Input 0 must be matrix"); - PADDLE_ENFORCE(dim1.size() == 1, "The second input must be vector"); - PADDLE_ENFORCE(dim0[1] == dim1[0], "The width of two input must be same"); - PADDLE_ENFORCE(ctx.OutputSize("Out") == 1, "The output size must be 1"); - ctx.Output("Out")->Resize(ctx.Input("X")->dims()); + auto x_dims = ctx.Input("X")->dims(); + auto b_dims = ctx.Input("b")->dims(); + PADDLE_ENFORCE_GT( + x_dims.size(), b_dims.size(), + "The rank of input `X` must be larger than the one of input `b`."); + + int num_col_dims = x_dims.size() - b_dims.size(); + + PADDLE_ENFORCE_EQ( + framework::slice_ddim(x_dims, num_col_dims, x_dims.size()), b_dims, + "The width of two operands must be same"); + PADDLE_ENFORCE_EQ(ctx.OutputSize("Out"), 1, "The output size must be 1"); + ctx.Output("Out")->Resize(x_dims); } }; @@ -61,11 +66,20 @@ class RowwiseAddGradOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("b"), "b should not be null"); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), "Input(Out@GRAD) should not be null"); - auto dims0 = ctx.Input("X")->dims(); - auto dims1 = ctx.Input("b")->dims(); - PADDLE_ENFORCE_EQ(1, dims1.size(), "b dims should be 1") - ctx.Output(framework::GradVarName("X"))->Resize(dims0); - ctx.Output(framework::GradVarName("b"))->Resize(dims1); + auto x_dims = ctx.Input("X")->dims(); + auto b_dims = ctx.Input("b")->dims(); + PADDLE_ENFORCE_GT( + x_dims.size(), b_dims.size(), + "The rank of input `X` must be larger than the one of input `b`."); + + int num_col_dims = x_dims.size() - b_dims.size(); + PADDLE_ENFORCE_EQ( + framework::slice_ddim(x_dims, num_col_dims, x_dims.size()), b_dims, + "The width of two operands must be same"); + auto *dx = ctx.Output(framework::GradVarName("X")); + auto *db = ctx.Output(framework::GradVarName("b")); + if (dx) dx->Resize(x_dims); + if (db) db->Resize(b_dims); } }; @@ -74,7 +88,7 @@ class RowwiseAddGradOp : public framework::OperatorWithKernel { namespace ops = paddle::operators; REGISTER_OP(rowwise_add, ops::RowwiseAddOp, ops::RowwiseAddOpMaker, - ops::RowwiseAddGradOp); + rowwise_add_grad, ops::RowwiseAddGradOp); REGISTER_OP_CPU_KERNEL( rowwise_add, ops::RowwiseAddKernel); REGISTER_OP_CPU_KERNEL( diff --git a/paddle/operators/rowwise_add_op.h b/paddle/operators/rowwise_add_op.h index 1cbd8bb31ad90a32d8a4e3bb59617d0b5384e470..35774b940926f77167b8f19597027e74d3477e5b 100644 --- a/paddle/operators/rowwise_add_op.h +++ b/paddle/operators/rowwise_add_op.h @@ -33,10 +33,12 @@ class RowwiseAddKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& context) const override { auto out = context.Output("Out"); out->mutable_data(context.GetPlace()); - - auto input = EigenMatrix::From(*context.Input("X")); - auto bias = EigenVector::From(*context.Input("b")); - auto output = EigenMatrix::From(*out); + int num_col_dims = context.Input("X")->dims().size() - + context.Input("b")->dims().size(); + auto input = + EigenMatrix::Reshape(*context.Input("X"), num_col_dims); + auto bias = EigenVector::Flatten(*context.Input("b")); + auto output = EigenMatrix::Reshape(*out, num_col_dims); const int bias_size = bias.dimension(0); const int rest_size = input.size() / bias_size; @@ -51,20 +53,27 @@ template class RowwiseAddGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* dOut = 
context.Input(framework::GradVarName("Out")); - auto* dX = context.Output(framework::GradVarName("X")); + auto* dout = context.Input(framework::GradVarName("Out")); + auto* dx = context.Output(framework::GradVarName("X")); auto* db = context.Output(framework::GradVarName("b")); - dX->mutable_data(context.GetPlace()); - db->mutable_data(context.GetPlace()); + int num_col_dims = context.Input("X")->dims().size() - + context.Input("b")->dims().size(); - auto OutGrad = EigenMatrix::From(*dOut); + auto out_grad = EigenMatrix::Reshape(*dout, num_col_dims); auto place = context.GetEigenDevice(); - EigenMatrix::From(*dX).device(place) = OutGrad; - // https://eigen.tuxfamily.org/dox/unsupported/TensorBase_8h_source.html - // colwise add - Eigen::array dims{{0}}; /* dimension to reduce */ - EigenVector::Flatten(*db).device(place) = OutGrad.sum(dims); + if (dx) { + dx->mutable_data(context.GetPlace()); + EigenMatrix::Reshape(*dx, num_col_dims).device(place) = out_grad; + } + + if (db) { + db->mutable_data(context.GetPlace()); + // https://eigen.tuxfamily.org/dox/unsupported/TensorBase_8h_source.html + // colwise add + Eigen::array dims{{0}}; /* dimension to reduce */ + EigenVector::Flatten(*db).device(place) = out_grad.sum(dims); + } } }; } // namespace operators diff --git a/paddle/operators/scale_op.cc b/paddle/operators/scale_op.cc index 4e039688d4d74f2a101fc91c747bd1e6ebec7ad2..ea991f683d841b3dc4624a0d8aa3c88367fd3c6d 100644 --- a/paddle/operators/scale_op.cc +++ b/paddle/operators/scale_op.cc @@ -44,11 +44,13 @@ class ScaleOpMaker : public framework::OpProtoAndCheckerMaker { The equation is: Out = scale*X )DOC"); - AddAttr("scale", "scale of scale operator.").SetDefault(1.0); + AddAttr("scale", "The scaling factor of the scale operator.") + .SetDefault(1.0); } }; -// Identity Op's gradient is identity op, too. +// The operator to calculate gradients of a scale operator is just the scale +// operator itself. // Grad(Out=scale(X)) => Grad(X) = scale(Grad(Out)) template class ScaleGradOp : public NetOp { @@ -60,46 +62,17 @@ class ScaleGradOp : public NetOp { AppendOp(framework::OpRegistry::CreateOp( "scale", {{"X", {Input(framework::GradVarName("Out"))}}}, {{"Out", {Output(framework::GradVarName("X"))}}}, - {{"scale", GetAttr("scale")}})); + {{"scale", Attr("scale")}})); CompleteAddOp(false); } }; -// identity is a alias of scale op. This is also a example for creating a alias -// operator. -template -class IdentityOpMaker : public framework::OpProtoAndCheckerMaker { - public: - IdentityOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "input tensor of identity op"); - AddOutput("Out", "output tensor of identity op"); - AddComment("identity operator. 
Just a alias of scale op which scale = 1.0"); - } -}; - -template -class IdentityOp : public NetOp { - public: - IdentityOp(const std::string &type, const framework::VariableNameMap &inputs, - const framework::VariableNameMap &outputs, - const framework::AttributeMap &attrs) - : NetOp(type, inputs, outputs, attrs) { - AppendOp(framework::OpRegistry::CreateOp( - "scale", {{"X", {Input("X")}}}, {{"Out", {Output("Out")}}}, - {{"scale", static_cast(1)}})); - } -}; - } // namespace operators } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(scale, ops::ScaleOp, ops::ScaleOpMaker, +REGISTER_OP(scale, ops::ScaleOp, ops::ScaleOpMaker, scale_grad, ops::ScaleGradOp); REGISTER_OP_CPU_KERNEL(scale, ops::ScaleKernel); -REGISTER_OP_WITHOUT_GRADIENT(identity, ops::IdentityOp, - ops::IdentityOpMaker); diff --git a/paddle/operators/scale_op.h b/paddle/operators/scale_op.h index aea64f1b0428ffe79ba8d90cf79dbfd2b5ef36f4..02fbdc52bbf89c9f2acc5eeaa1197e4ccbca9d31 100644 --- a/paddle/operators/scale_op.h +++ b/paddle/operators/scale_op.h @@ -27,7 +27,7 @@ class ScaleKernel : public framework::OpKernel { auto* in = context.Input("X"); tensor->mutable_data(in->place()); - auto scale = static_cast(context.op_.GetAttr("scale")); + auto scale = static_cast(context.Attr("scale")); auto eigen_out = framework::EigenVector::Flatten(*tensor); auto eigen_in = framework::EigenVector::Flatten(*in); diff --git a/paddle/operators/scatter_op.cc b/paddle/operators/scatter_op.cc index 35c185ad80f93d1005c1616dcffd2e61bcd54222..f901edefa22dc9a252e87116df756d04767a7162 100644 --- a/paddle/operators/scatter_op.cc +++ b/paddle/operators/scatter_op.cc @@ -77,7 +77,8 @@ Out[Index] = Ref[Index] + Updates } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(scatter, ops::ScatterOp, ops::ScatterOpMaker, ops::ScatterGradOp); +REGISTER_OP(scatter, ops::ScatterOp, ops::ScatterOpMaker, scatter_grad, + ops::ScatterGradOp); REGISTER_OP_CPU_KERNEL(scatter, ops::ScatterOpKernel); REGISTER_OP_CPU_KERNEL( diff --git a/paddle/operators/sgd_op.h b/paddle/operators/sgd_op.h index a0b5000ffbf54364e15f87870913926a071fa972..f8888f9c362e1c39af42236bb3a23be37aa3ae15 100644 --- a/paddle/operators/sgd_op.h +++ b/paddle/operators/sgd_op.h @@ -31,7 +31,7 @@ class SGDOpKernel : public framework::OpKernel { auto param = ctx.Input("param"); auto grad = ctx.Input("grad"); auto param_out = ctx.Output("param_out"); - float lr = ctx.op_.GetAttr("learning_rate"); + float lr = ctx.Attr("learning_rate"); param_out->mutable_data(ctx.GetPlace()); diff --git a/paddle/operators/sigmoid_op.cc b/paddle/operators/sigmoid_op.cc index f35b7023845bac52887d81a8f5c496cb5e7193aa..761c6de8d4d2150b30b97b58da95da3d5f33db63 100644 --- a/paddle/operators/sigmoid_op.cc +++ b/paddle/operators/sigmoid_op.cc @@ -53,7 +53,8 @@ class SigmoidOpGrad : public framework::OperatorWithKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker, ops::SigmoidOpGrad); +REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker, sigmoid_grad, + ops::SigmoidOpGrad); REGISTER_OP_CPU_KERNEL(sigmoid, ops::SigmoidKernel); REGISTER_OP_CPU_KERNEL( diff --git a/paddle/operators/softmax_op.cc b/paddle/operators/softmax_op.cc index 471bb288fb20f113aefb2a9e13eb805b161b0631..7166b2f60be8a6088ab3a81686f7bed1b7181d97 100644 --- a/paddle/operators/softmax_op.cc +++ b/paddle/operators/softmax_op.cc @@ -24,7 +24,7 @@ class SoftmaxOp : public framework::OperatorWithKernel { protected: void InferShape(const 
framework::InferShapeContext &ctx) const override {
     PADDLE_ENFORCE(ctx.Input<Tensor>("X")->dims().size() == 2UL,
-                   "The input of softmax op must be matrix");
+                   "The input of softmax op must be a matrix.");
     ctx.Output<Tensor>("Y")->Resize(ctx.Input<Tensor>("X")->dims());
   }
 };
@@ -34,9 +34,27 @@ class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
   SoftmaxOpMaker(framework::OpProto *proto,
                  framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("X", "input of softmax");
-    AddOutput("Y", "output of softmax");
-    AddComment("Softmax Op");
+    AddInput("X",
+             "The input tensor of softmax. "
+             "2-D with shape [batch_size, input_feature_dimensions].");
+    AddOutput("Y", "The normalized values with the same shape as X.");
+    AddComment(R"DOC(
+The input of the softmax operator is a 2-D tensor with shape N x K (N is the
+batch_size, K is the dimension of the input feature). The output tensor has
+the same shape as the input tensor.
+
+For each row of the input tensor, the softmax operator squashes the
+K-dimensional vector of arbitrary real values to a K-dimensional vector of
+real values in the range [0, 1] that add up to 1. Specifically, it computes
+the exponential of each element and the sum of the exponentials of all
+elements in the row; the ratio of an element's exponential to that sum is
+the corresponding output of the softmax operator.
+
+For each row `i` and each column `j` in input X, we have:
+    Y[i, j] = exp(X[i, j]) / sum_j(exp(X[i, j]))
+
+)DOC");
   }
 };
@@ -46,14 +64,15 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
-    PADDLE_ENFORCE(ctx.InputVar("Y") != nullptr, "Input(Y) should not be null");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should not be null.");
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Y")),
-                            "Input(Y@GRAD) should not be null");
-    PADDLE_ENFORCE(ctx.Input<Tensor>("Y")->dims() ==
-                       ctx.Input<Tensor>(framework::GradVarName("Y"))->dims(),
-                   "the shape of Input(0) and Input(1) should be the same");
+                            "Input(Y@GRAD) should not be null.");
+    PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("Y")->dims(),
+                      ctx.Input<Tensor>(framework::GradVarName("Y"))->dims(),
+                      "Input(Y) and its gradients should have the same shape.");
+
     ctx.Output<Tensor>(framework::GradVarName("X"))
-        ->Resize(ctx.Input<Tensor>("Y")->dims());
+        ->Resize(ctx.Input<Tensor>("X")->dims());
   }
 };
@@ -62,7 +81,8 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
 
 namespace ops = paddle::operators;
 
-REGISTER_OP(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker, ops::SoftmaxOpGrad);
+REGISTER_OP(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker, softmax_grad,
+            ops::SoftmaxOpGrad);
 REGISTER_OP_CPU_KERNEL(softmax,
                        ops::SoftmaxKernel<paddle::platform::CPUPlace, float>);
 REGISTER_OP_CPU_KERNEL(
diff --git a/paddle/operators/softmax_op.h b/paddle/operators/softmax_op.h
index 4fa6b59540498638c3b7df639ae10a66c0fa1c16..8a3a5ab927c0e2937936fcc973f000d4d95c3dbc 100644
--- a/paddle/operators/softmax_op.h
+++ b/paddle/operators/softmax_op.h
@@ -28,12 +28,12 @@ template <typename Place, typename T>
 class SoftmaxKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto input = context.Input<Tensor>("X");
-    auto output = context.Output<Tensor>("Y");
-    output->mutable_data<T>(context.GetPlace());
+    auto X = context.Input<Tensor>("X");
+    auto Y = context.Output<Tensor>("Y");
+    Y->mutable_data<T>(context.GetPlace());
 
-    auto logits = EigenMatrix<T>::From(*input);
-    auto softmax = EigenMatrix<T>::From(*output);
+    auto logits = EigenMatrix<T>::From(*X);
+    auto softmax = EigenMatrix<T>::From(*Y);
 
     const int kBatchDim = 0;
     const int kClassDim = 1;
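As a quick sanity check of the softmax formula documented above, here is a minimal standalone sketch (illustrative only, plain C++ rather than the Eigen path the kernel uses) that applies the formula to one row of logits.

```c++
#include <cmath>
#include <cstdio>

// Illustrative only: Y[j] = exp(X[j]) / sum_j(exp(X[j])) for one row.
int main() {
  double x[3] = {1.0, 2.0, 3.0};
  double sum = 0.0;
  for (double v : x) sum += std::exp(v);
  for (double v : x) printf("%f ", std::exp(v) / sum);  // ~0.090 0.245 0.665
  printf("\n");  // the three outputs add up to 1
  return 0;
}
```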
diff --git a/paddle/operators/squared_l2_distance_op.cc b/paddle/operators/squared_l2_distance_op.cc
new file mode 100644
index 0000000000000000000000000000000000000000..9f51d3efa8ecba894a1023b9de2df451ca85916c
--- /dev/null
+++ b/paddle/operators/squared_l2_distance_op.cc
@@ -0,0 +1,116 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/operators/squared_l2_distance_op.h"
+
+namespace paddle {
+namespace operators {
+
+class SquaredL2DistanceOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+ protected:
+  void InferShape(const framework::InferShapeContext& ctx) const override {
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "Input of SquaredL2DistanceOp "
+                            "must be initialized.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
+                            "Target of SquaredL2DistanceOp "
+                            "must be initialized.");
+
+    auto* x = ctx.Input<Tensor>("X");
+    auto x_dims = x->dims();
+    auto* y = ctx.Input<Tensor>("Y");
+    auto y_dims = y->dims();
+
+    PADDLE_ENFORCE_EQ(framework::arity(x_dims), framework::arity(y_dims),
+                      "Tensor rank of both SquaredL2DistanceOp's "
+                      "inputs must be same.");
+
+    int rank = framework::arity(x_dims);
+    PADDLE_ENFORCE_GE(rank, 2, "Tensor rank should be at least equal to 2.");
+    PADDLE_ENFORCE_EQ(x->numel() / x_dims[0], y->numel() / y_dims[0],
+                      "Product of dimensions except the first dimension of "
+                      "input and target must be equal.");
+    PADDLE_ENFORCE(y_dims[0] == 1 || y_dims[0] == x_dims[0],
+                   "First dimension of target must be equal to input "
+                   "or to 1.");
+
+    ctx.Output<Tensor>("sub_result")
+        ->Resize({x_dims[0], x->numel() / x_dims[0]});
+    ctx.Output<Tensor>("Out")->Resize({x_dims[0], 1});
+  }
+};
+
+class SquaredL2DistanceOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  SquaredL2DistanceOpMaker(framework::OpProto* proto,
+                           framework::OpAttrChecker* op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput("X", "Input of SquaredL2DistanceOp.");
+    AddInput("Y", "Target of SquaredL2DistanceOp.");
+    AddOutput("sub_result",
+              "Buffered subtraction result which "
+              "will be reused in backward.")
+        .AsIntermediate();
+    AddOutput("Out", "Squared l2 distance between input and target.");
+    AddComment(R"DOC(
+    SquaredL2DistanceOp will calculate the squared L2 distance between the
+    input and the target. The number of distance values equals the first
+    dimension of the input. The first dimension of the target can be equal
+    to that of the input, or to 1. If it is 1, SquaredL2DistanceOp will
+    broadcast the target's first dimension to the input's first dimension.
+    You can decide whether to calculate the gradients of the input and the
+    target.
+ )DOC"); + } +}; + +class SquaredL2DistanceGradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext& ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), + "Gradient of Out should not be null"); + auto out_dims = ctx.Input(framework::GradVarName("Out"))->dims(); + auto x_dims = ctx.Input("X")->dims(); + auto y_dims = ctx.Input("Y")->dims(); + PADDLE_ENFORCE_EQ(out_dims[0], x_dims[0], + "First dimension of output gradient and " + "input value must be equal."); + PADDLE_ENFORCE_EQ(out_dims[1], 1, + "Second dimension of output gradient " + "must be 1."); + auto* x_grad = ctx.Output(framework::GradVarName("X")); + auto* y_grad = ctx.Output(framework::GradVarName("Y")); + if (x_grad) x_grad->Resize(x_dims); + if (y_grad) y_grad->Resize(y_dims); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(squared_l2_distance, ops::SquaredL2DistanceOp, + ops::SquaredL2DistanceOpMaker, squared_l2_distance_grad, + ops::SquaredL2DistanceGradOp); +REGISTER_OP_CPU_KERNEL( + squared_l2_distance, + ops::SquaredL2DistanceKernel); +REGISTER_OP_CPU_KERNEL( + squared_l2_distance_grad, + ops::SquaredL2DistanceGradKernel); diff --git a/paddle/operators/squared_l2_distance_op.cu b/paddle/operators/squared_l2_distance_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..3fe62f1a9cb56722ea544b0fed052ac384e799aa --- /dev/null +++ b/paddle/operators/squared_l2_distance_op.cu @@ -0,0 +1,25 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#define EIGEN_USE_GPU + +#include "paddle/operators/squared_l2_distance_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL( + squared_l2_distance, + ops::SquaredL2DistanceKernel); +REGISTER_OP_GPU_KERNEL( + squared_l2_distance_grad, + ops::SquaredL2DistanceGradKernel); diff --git a/paddle/operators/squared_l2_distance_op.h b/paddle/operators/squared_l2_distance_op.h new file mode 100644 index 0000000000000000000000000000000000000000..097ac04fc09a10b3b624f491a847e281e41a802c --- /dev/null +++ b/paddle/operators/squared_l2_distance_op.h @@ -0,0 +1,123 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +template +using EigenVector = framework::EigenVector; +template +using EigenMatrix = framework::EigenMatrix; + +template +class SquaredL2DistanceKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* in0 = context.Input("X"); + auto* in1 = context.Input("Y"); + auto* out0 = context.Output("sub_result"); + auto* out1 = context.Output("Out"); + + auto in0_dims = in0->dims(); + auto in1_dims = in1->dims(); + + int cols = in0->numel() / in0_dims[0]; + // reduce dimensions except the first + auto x = + EigenMatrix::From(*in0, framework::make_ddim({in0_dims[0], cols})); + auto y = + EigenMatrix::From(*in1, framework::make_ddim({in1_dims[0], cols})); + + out0->mutable_data(context.GetPlace()); + out1->mutable_data(context.GetPlace()); + auto sub_result = EigenMatrix::From(*out0); + auto z = EigenVector::Flatten(*out1); + + auto place = context.GetEigenDevice(); + auto x_dims = x.dimensions(); + auto y_dims = y.dimensions(); + // buffer the substraction result + if (y_dims[0] == 1 && x_dims[0] > y_dims[0]) { + sub_result.device(place) = + x - + y.broadcast(Eigen::array({{static_cast(x_dims[0]), 1}})); + } else { + sub_result.device(place) = x - y; + } + auto sub_res_pow2 = sub_result * sub_result; + z.device(place) = sub_res_pow2.sum(Eigen::array({{1}})); + } +}; + +template +class SquaredL2DistanceGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* in0 = context.Input("sub_result"); + auto* in1 = context.Input(framework::GradVarName("Out")); + auto* x_g = context.Output(framework::GradVarName("X")); + auto* y_g = context.Output(framework::GradVarName("Y")); + + auto sub_result = EigenMatrix::From(*in0); + auto out_grad = EigenMatrix::From(*in1); + + auto x_dims = x_g->dims(); + auto y_dims = y_g->dims(); + + int cols = x_g->numel() / x_dims[0]; + // calculate gradient + auto grad_mat = 2 * + (out_grad.broadcast(Eigen::array({{1, cols}}))) * + sub_result; + + // propagate back to input + auto eigen_place = context.GetEigenDevice(); + if (x_g) { + x_g->mutable_data(context.GetPlace()); + // eigen matrix + auto x_grad = + EigenMatrix::From(*x_g, framework::make_ddim({x_dims[0], cols})); + // dimensions are same with subResult + x_grad.device(eigen_place) = grad_mat; + } + + if (y_g) { + y_g->mutable_data(context.GetPlace()); + + PADDLE_ENFORCE_GE(sub_result.dimensions()[0], y_dims[0], + "First dimension of gradient must be greater or " + "equal than first dimension of target."); + + if (sub_result.dimensions()[0] == y_dims[0]) { + auto y_grad = + EigenMatrix::From(*y_g, framework::make_ddim({y_dims[0], cols})); + y_grad.device(eigen_place) = -1 * grad_mat; + } else { + auto col_sum_res = -1 * (grad_mat.sum(Eigen::array({{0}}))); + auto y_grad = EigenVector::Flatten(*y_g); + y_grad.device(eigen_place) = col_sum_res; + } + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/sum_op.cc b/paddle/operators/sum_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..5805826ee8a555ca6dfc1ca81feaadffea9e1012 --- /dev/null +++ b/paddle/operators/sum_op.cc @@ -0,0 +1,73 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/sum_op.h" +#include + +namespace paddle { +namespace operators { +using framework::Tensor; + +class SumOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + auto ins = ctx.MultiInput("X"); + auto *out = ctx.Output("Out"); + int N = ins.size(); + + auto in_dim = ins[0]->dims(); + + PADDLE_ENFORCE_GT(N, 1, "Input tensors count should > 1."); + for (int i = 1; i < N; i++) { + auto dim = ins[i]->dims(); + PADDLE_ENFORCE(in_dim == dim, "Input tensors must have same shape"); + } + out->Resize(in_dim); + } +}; + +class SumOpMaker : public framework::OpProtoAndCheckerMaker { + public: + SumOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "the input tensors of sum operator.").AsDuplicable(); + AddOutput("Out", "the output tensor of sum operator."); + AddComment(R"DOC( + Sum the input tensors. + )DOC"); + } +}; + +class SumGradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + auto outputs = ctx.MultiOutput(framework::GradVarName("X")); + auto dims = ctx.Input(framework::GradVarName("Out"))->dims(); + for (auto output : outputs) { + output->Resize(dims); + } + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(sum, ops::SumOp, ops::SumOpMaker, sum_grad, ops::SumGradOp); +REGISTER_OP_CPU_KERNEL(sum, ops::SumKernel); +REGISTER_OP_CPU_KERNEL(sum_grad, + ops::SumGradKernel); diff --git a/paddle/operators/sum_op.cu b/paddle/operators/sum_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..a465cf3659ba7c51338abadfc62962fb6755a39d --- /dev/null +++ b/paddle/operators/sum_op.cu @@ -0,0 +1,18 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#define EIGEN_USE_GPU +#include "paddle/operators/sum_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL(sum, ops::SumKernel); +REGISTER_OP_GPU_KERNEL(sum_grad, + ops::SumGradKernel); diff --git a/paddle/operators/sum_op.h b/paddle/operators/sum_op.h new file mode 100644 index 0000000000000000000000000000000000000000..0b1e9ebaa38d455fb5e3ce8c1a39cbbcdad9a940 --- /dev/null +++ b/paddle/operators/sum_op.h @@ -0,0 +1,65 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +template +using EigenVector = framework::EigenVector; + +template +class SumKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto ins = context.MultiInput("X"); + auto* out = context.Output("Out"); + out->mutable_data(context.GetPlace()); + + auto place = context.GetEigenDevice(); + auto result = EigenVector::Flatten(*out); + + int N = ins.size(); + auto in = EigenVector::Flatten(*(ins[0])); + result.device(place) = in; + for (int i = 1; i < N; i++) { + auto in = EigenVector::Flatten(*(ins[i])); + result.device(place) = result + in; + } + } +}; + +template +class SumGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* input = context.Input(framework::GradVarName("Out")); + auto outs = context.MultiOutput(framework::GradVarName("X")); + for (auto out : outs) { + out->mutable_data(context.GetPlace()); + } + + auto place = context.GetEigenDevice(); + auto in = EigenVector::Flatten(*input); + for (auto out : outs) { + auto result = EigenVector::Flatten(*out); + result.device(place) = in; + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/top_k_op.cc b/paddle/operators/top_k_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..38d2f0a09aec751734864947a2f3cfa20107e22f --- /dev/null +++ b/paddle/operators/top_k_op.cc @@ -0,0 +1,67 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/operators/top_k_op.h" + +namespace paddle { +namespace operators { + +class TopkOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), + "Input of TopkOP must be initialized."); + auto *input = ctx.Input("X"); + const int k = static_cast(ctx.Attr("k")); + + PADDLE_ENFORCE_GE(k, 1, "k must >= 1"); + PADDLE_ENFORCE_GE(input->dims().size(), 1, "input must have >= 1d shape"); + PADDLE_ENFORCE_GE(input->dims()[input->dims().size() - 1], k, + "input must have >= k columns"); + + framework::DDim dims = input->dims(); + dims[dims.size() - 1] = k; + ctx.Output("Out")->Resize(dims); + ctx.Output("Indices")->Resize(dims); + } +}; + +class TopkOpMaker : public framework::OpProtoAndCheckerMaker { + public: + TopkOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "The input of Topk op"); + AddOutput("Out", "The output tensor of Topk op"); + AddOutput("Indices", "The indices of Topk elements of input"); + AddComment( + R"DOC(If the input is a vector (1d tensor), finds the k largest entries in the vector and outputs their values and indices as vectors. Thus values[j] is the j-th largest entry in input, and its index is indices[j]. + + For matrices, computes the top k entries in each row. )DOC"); + AddAttr("k", + "Number of top elements to look for along the last " + "dimension (along each row for matrices).") + .SetDefault(1); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_WITHOUT_GRADIENT(top_k, ops::TopkOp, ops::TopkOpMaker); +REGISTER_OP_CPU_KERNEL(top_k, + ops::TopkKernel); diff --git a/paddle/operators/top_k_op.cu b/paddle/operators/top_k_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..afe4d149c53819c45e20353bc9d16393f3f61e0f --- /dev/null +++ b/paddle/operators/top_k_op.cu @@ -0,0 +1,318 @@ +/* Copyright (c) 2016 PaddlePaddle Authors All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/paddle/operators/top_k_op.cu b/paddle/operators/top_k_op.cu
new file mode 100644
index 0000000000000000000000000000000000000000..afe4d149c53819c45e20353bc9d16393f3f61e0f
--- /dev/null
+++ b/paddle/operators/top_k_op.cu
@@ -0,0 +1,318 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#include "paddle/framework/op_registry.h"
+#include "paddle/platform/assert.h"
+
+namespace paddle {
+namespace operators {
+
+using Tensor = framework::Tensor;
+
+template <typename T>
+struct Pair {
+  __device__ __forceinline__ Pair() {}
+  __device__ __forceinline__ Pair(T value, int id) : v(value), id(id) {}
+
+  __device__ __forceinline__ void set(T value, int id) {
+    v = value;
+    // Write through this-> so the parameter does not shadow the member.
+    this->id = id;
+  }
+
+  __device__ __forceinline__ void operator=(const Pair& in) {
+    v = in.v;
+    id = in.id;
+  }
+
+  __device__ __forceinline__ bool operator<(const T value) const {
+    return (v < value);
+  }
+
+  __device__ __forceinline__ bool operator<(const Pair& in) const {
+    return (v < in.v) || ((v == in.v) && (id > in.id));
+  }
+
+  __device__ __forceinline__ bool operator>(const Pair& in) const {
+    return (v > in.v) || ((v == in.v) && (id < in.id));
+  }
+
+  T v;
+  int id;
+};
+
+template <typename T>
+__device__ __forceinline__ void AddTo(Pair<T> topk[], const Pair<T>& p,
+                                      int beam_size) {
+  for (int k = beam_size - 2; k >= 0; k--) {
+    if (topk[k] < p) {
+      topk[k + 1] = topk[k];
+    } else {
+      topk[k + 1] = p;
+      return;
+    }
+  }
+  topk[0] = p;
+}
+
+template <typename T, int beam_size>
+__device__ __forceinline__ void AddTo(Pair<T> topk[], const Pair<T>& p) {
+  for (int k = beam_size - 2; k >= 0; k--) {
+    if (topk[k] < p) {
+      topk[k + 1] = topk[k];
+    } else {
+      topk[k + 1] = p;
+      return;
+    }
+  }
+  topk[0] = p;
+}
+
+template <typename T, int BlockSize>
+__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* src, int idx,
+                                        int dim, int beam_size) {
+  while (idx < dim) {
+    if (topk[beam_size - 1] < src[idx]) {
+      Pair<T> tmp(src[idx], idx);
+      AddTo(topk, tmp, beam_size);
+    }
+    idx += BlockSize;
+  }
+}
+
+template <typename T, int BlockSize>
+__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* src, int idx,
+                                        int dim, const Pair<T>& max,
+                                        int beam_size) {
+  while (idx < dim) {
+    if (topk[beam_size - 1] < src[idx]) {
+      Pair<T> tmp(src[idx], idx);
+      if (tmp < max) {
+        AddTo(topk, tmp, beam_size);
+      }
+    }
+    idx += BlockSize;
+  }
+}
+
+template <typename T, int BlockSize>
+__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* val, int* col,
+                                        int idx, int dim, int beam_size) {
+  while (idx < dim) {
+    if (topk[beam_size - 1] < val[idx]) {
+      Pair<T> tmp(val[idx], col[idx]);
+      AddTo(topk, tmp, beam_size);
+    }
+    idx += BlockSize;
+  }
+}
+
+template <typename T, int BlockSize>
+__device__ __forceinline__ void GetTopK(Pair<T> topk[], const T* val, int* col,
+                                        int idx, int dim, const Pair<T>& max,
+                                        int beam_size) {
+  while (idx < dim) {
+    if (topk[beam_size - 1] < val[idx]) {
+      Pair<T> tmp(val[idx], col[idx]);
+      if (tmp < max) {
+        AddTo(topk, tmp, beam_size);
+      }
+    }
+    idx += BlockSize;
+  }
+}
+
+template <typename T, int MaxLength, int BlockSize>
+__device__ __forceinline__ void ThreadGetTopK(Pair<T> topk[], int& beam,
+                                              int beam_size, const T* src,
+                                              bool& firstStep, bool& is_empty,
+                                              Pair<T>& max, int dim,
+                                              const int tid) {
+  if (beam > 0) {
+    int length = beam < beam_size ? beam : beam_size;
+    if (firstStep) {
+      firstStep = false;
+      GetTopK<T, BlockSize>(topk, src, tid, dim, length);
+    } else {
+      for (int k = 0; k < MaxLength; k++) {
+        if (k < MaxLength - beam) {
+          topk[k] = topk[k + beam];
+        } else {
+          topk[k].set(-INFINITY, -1);
+        }
+      }
+      if (!is_empty) {
+        GetTopK<T, BlockSize>(topk + MaxLength - beam, src, tid, dim, max,
+                              length);
+      }
+    }
+
+    max = topk[MaxLength - 1];
+    if (max.v == -1) is_empty = true;
+    beam = 0;
+  }
+}
+
+template <typename T, int MaxLength, int BlockSize>
+__device__ __forceinline__ void ThreadGetTopK(Pair<T> topk[], int& beam,
+                                              int beam_size, const T* val,
+                                              int* col, bool& firstStep,
+                                              bool& is_empty, Pair<T>& max,
+                                              int dim, const int tid) {
+  if (beam > 0) {
+    int length = beam < beam_size ? beam : beam_size;
+    if (firstStep) {
+      firstStep = false;
+      GetTopK<T, BlockSize>(topk, val, col, tid, dim, length);
+    } else {
+      for (int k = 0; k < MaxLength; k++) {
+        if (k < MaxLength - beam) {
+          topk[k] = topk[k + beam];
+        } else {
+          topk[k].set(-INFINITY, -1);
+        }
+      }
+      if (!is_empty) {
+        GetTopK<T, BlockSize>(topk + MaxLength - beam, val, col, tid, dim,
+                              max, length);
+      }
+    }
+
+    max = topk[MaxLength - 1];
+    if (max.v == -1) is_empty = true;
+    beam = 0;
+  }
+}
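+
+// BlockReduce below runs a cooperative tournament: a pairwise max reduction
+// over sh_topk selects the block-wide best candidate, thread 0 appends it
+// to the output, and only the winning thread advances its private beam.
+// The __shfl broadcast lets a warp detect early that the winner's beam is
+// exhausted. `max` above carries the worst value of the previous beam, so
+// later GetTopK passes only accept strictly worse candidates and never
+// re-add entries that were already consumed.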
+
+template <typename T, int MaxLength, int BlockSize>
+__device__ __forceinline__ void BlockReduce(Pair<T>* sh_topk, int* maxid,
+                                            Pair<T> topk[], T** topVal,
+                                            int** topIds, int& beam, int& k,
+                                            const int tid, const int warp) {
+  while (true) {
+    __syncthreads();
+    if (tid < BlockSize / 2) {
+      if (sh_topk[tid] < sh_topk[tid + BlockSize / 2]) {
+        maxid[tid] = tid + BlockSize / 2;
+      } else {
+        maxid[tid] = tid;
+      }
+    }
+    __syncthreads();
+    for (int stride = BlockSize / 4; stride > 0; stride = stride / 2) {
+      if (tid < stride) {
+        if (sh_topk[maxid[tid]] < sh_topk[maxid[tid + stride]]) {
+          maxid[tid] = maxid[tid + stride];
+        }
+      }
+      __syncthreads();
+    }
+    __syncthreads();
+
+    if (tid == 0) {
+      **topVal = sh_topk[maxid[0]].v;
+      **topIds = sh_topk[maxid[0]].id;
+      (*topVal)++;
+      (*topIds)++;
+    }
+    if (tid == maxid[0]) beam++;
+    if (--k == 0) break;
+    __syncthreads();
+
+    if (tid == maxid[0]) {
+      if (beam < MaxLength) {
+        sh_topk[tid] = topk[beam];
+      }
+    }
+    if (maxid[0] / 32 == warp) {
+      if (__shfl(beam, (maxid[0]) % 32, 32) == MaxLength) break;
+    }
+  }
+}
+
+/**
+ * Each block computes one sample.
+ * In a block:
+ * 1. every thread gets its top MaxLength values;
+ * 2. merge them into sh_topk and block-reduce to find the max value;
+ * 3. repeat the second step until one thread's top-k values are exhausted;
+ * 4. go back to the first step, until all k values have been produced.
+ */
+template <typename T, int MaxLength, int BlockSize>
+__global__ void KeMatrixTopK(T* output, int output_stride, int* indices,
+                             const T* src, int lds, int dim, int k) {
+  __shared__ Pair<T> sh_topk[BlockSize];
+  __shared__ int maxid[BlockSize / 2];
+  const int tid = threadIdx.x;
+  const int warp = threadIdx.x / 32;
+  output += blockIdx.x * output_stride;
+  indices += blockIdx.x * k;
+
+  Pair<T> topk[MaxLength];
+  int beam = MaxLength;
+  Pair<T> max;
+  bool is_empty = false;
+  bool firststep = true;
+
+  for (int k = 0; k < MaxLength; k++) {
+    topk[k].set(-INFINITY, -1);
+  }
+  while (k) {
+    ThreadGetTopK<T, MaxLength, BlockSize>(topk, beam, k,
+                                           src + blockIdx.x * lds, firststep,
+                                           is_empty, max, dim, tid);
+
+    sh_topk[tid] = topk[0];
+    BlockReduce<T, MaxLength, BlockSize>(sh_topk, maxid, topk, &output,
+                                         &indices, beam, k, tid, warp);
+  }
+}
+
+template <typename T>
+class TopkOpCUDAKernel : public framework::OpKernel {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
+                   "It must use GPUPlace.");
+    auto* input = ctx.Input<Tensor>("X");
+    auto* output = ctx.Output<Tensor>("Out");
+    auto* indices = ctx.Output<Tensor>("Indices");
+    size_t k = static_cast<int>(ctx.Attr<int>("k"));
+
+    const T* input_data = input->data<T>();
+
+    T* output_data = output->mutable_data<T>(ctx.GetPlace());
+    // FIXME(typhoonzero): data is always converted to type T?
+    int* indices_data = indices->mutable_data<int>(ctx.GetPlace());
+
+    size_t input_height = input->dims()[0];
+    size_t input_width = input->dims()[1];
+    if (k > input_width) k = input_width;
+
+    // NOTE: pass lds and dim the same value as the input width.
+    // NOTE: the old matrix implementation of stride differs from Eigen's.
+    // TODO(typhoonzero): launch kernel on specified stream.
+    // TODO(typhoonzero): refine this kernel.
+    dim3 threads(256, 1);
+    dim3 grid(input_height, 1);
+
+    KeMatrixTopK<T, 5, 256><<<grid, threads>>>(
+        output_data, output->dims()[1], indices_data, input_data, input_width,
+        input_width, int(k));
+  }
+};
+
+} // namespace operators
+} // namespace paddle
+
+REGISTER_OP_GPU_KERNEL(top_k, paddle::operators::TopkOpCUDAKernel<float>);
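The kernel above assigns one 256-thread block to each input row: every thread keeps a private beam of MaxLength = 5 candidates over a strided slice of the row, and each BlockReduce round publishes exactly one of the k results, so values come out in descending order. A plain CPU routine such as the sketch below (names are illustrative) makes a convenient test oracle for the kernel, since both should agree on values, order, and the smaller-index-wins tie rule:

```cpp
#include <algorithm>
#include <numeric>
#include <vector>

// Row-wise top-k reference: order only the first k candidate indices of
// each row, largest value first, breaking value ties toward the smaller
// index (the same preference Pair::operator> encodes on the device).
void ReferenceTopK(const float* src, int rows, int cols, int k,
                   float* out_val, int* out_idx) {
  std::vector<int> idx(cols);
  for (int r = 0; r < rows; ++r) {
    const float* row = src + r * cols;
    std::iota(idx.begin(), idx.end(), 0);
    std::partial_sort(idx.begin(), idx.begin() + k, idx.end(),
                      [row](int a, int b) {
                        return row[a] > row[b] ||
                               (row[a] == row[b] && a < b);
                      });
    for (int j = 0; j < k; ++j) {
      out_val[r * k + j] = row[idx[j]];
      out_idx[r * k + j] = idx[j];
    }
  }
}
```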
diff --git a/paddle/operators/top_k_op.h b/paddle/operators/top_k_op.h
new file mode 100644
index 0000000000000000000000000000000000000000..ef66acc1d569282a42be64b7a5e90f3fbdb20690
--- /dev/null
+++ b/paddle/operators/top_k_op.h
@@ -0,0 +1,76 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+#include <algorithm>
+#include <iostream>
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
+
+namespace paddle {
+namespace operators {
+
+using Tensor = framework::Tensor;
+
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
+
+template <typename Place, typename T>
+class TopkKernel : public framework::OpKernel {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    // Get the top k elements of each row of the input tensor.
+    // FIXME: only deals with matrices (2-D tensors) for now.
+    auto* input = ctx.Input<Tensor>("X");
+    auto* output = ctx.Output<Tensor>("Out");
+    auto* indices = ctx.Output<Tensor>("Indices");
+    // k is determined by Attr
+    const size_t k = static_cast<int>(ctx.Attr<int>("k"));
+
+    T* output_data = output->mutable_data<T>(ctx.GetPlace());
+    T* indices_data = indices->mutable_data<T>(ctx.GetPlace());
+
+    auto eg_input = EigenMatrix<T>::From(*input);
+
+    // reshape the input to a flattened matrix (like flat_inner_dims)
+    framework::DDim inputdims = input->dims();
+    const size_t row = framework::product(
+        framework::slice_ddim(inputdims, 0, inputdims.size() - 1));
+    const size_t col = inputdims[inputdims.size() - 1];
+    Eigen::DSizes<int, 2> flat2dims(row, col);
+    // NOTE: the Eigen reshape does not affect the Paddle tensor.
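+    // The reshape below views the input as a (row, col) matrix; the loop
+    // that follows materializes (value, index) pairs per row and lets
+    // std::partial_sort order just the first k of them, roughly
+    // O(col * log k) per row.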
+ eg_input.reshape(flat2dims); + + for (size_t i = 0; i < row; i++) { + std::vector> vec; + for (size_t j = 0; j < col; j++) { + vec.push_back(std::pair(eg_input(i, j), j)); + } + + std::partial_sort( + vec.begin(), vec.begin() + k, vec.end(), + [](const std::pair& l, const std::pair& r) { + return l.first > r.first; + }); + for (size_t j = 0; j < k; j++) { + output_data[i * k + j] = vec[j].first; + indices_data[i * k + j] = vec[j].second; + } + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/uniform_random_op.cc b/paddle/operators/uniform_random_op.cc index 29491137e6d8b4bfa2d0d07d48ffed1212a6131f..b8fbc9b52aecdb5c8d985b5de9bcd7cb85835b60 100644 --- a/paddle/operators/uniform_random_op.cc +++ b/paddle/operators/uniform_random_op.cc @@ -26,18 +26,17 @@ class CPUUniformRandomKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& context) const override { auto* tensor = context.Output("Out"); T* data = tensor->mutable_data(context.GetPlace()); - unsigned int seed = - static_cast(context.op_.GetAttr("seed")); + unsigned int seed = static_cast(context.Attr("seed")); std::minstd_rand engine; if (seed == 0) { seed = std::random_device()(); } engine.seed(seed); std::uniform_real_distribution dist( - static_cast(context.op_.GetAttr("min")), - static_cast(context.op_.GetAttr("max"))); - ssize_t size = framework::product(tensor->dims()); - for (ssize_t i = 0; i < size; ++i) { + static_cast(context.Attr("min")), + static_cast(context.Attr("max"))); + int64_t size = tensor->numel(); + for (int64_t i = 0; i < size; ++i) { data[i] = dist(engine); } } @@ -49,11 +48,16 @@ class UniformRandomOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext& ctx) const override { - PADDLE_ENFORCE(GetAttr("min") < GetAttr("max"), + PADDLE_ENFORCE(Attr("min") < Attr("max"), "uniform_random's min must less then max"); auto* tensor = ctx.Output("Out"); - auto dims = GetAttr>("dims"); - tensor->Resize(framework::make_ddim(dims)); + auto dims = Attr>("dims"); + std::vector temp; + temp.reserve(dims.size()); + for (auto dim : dims) { + temp.push_back(static_cast(dim)); + } + tensor->Resize(framework::make_ddim(temp)); } }; diff --git a/paddle/operators/uniform_random_op.cu b/paddle/operators/uniform_random_op.cu index 1d6709934cbbcf50265eabef87c857654f783ed8..6614b53b3f990d10c82633f3c1f079acea0cd827 100644 --- a/paddle/operators/uniform_random_op.cu +++ b/paddle/operators/uniform_random_op.cu @@ -45,17 +45,16 @@ class GPUUniformRandomKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& context) const override { auto* tensor = context.Output("Out"); T* data = tensor->mutable_data(context.GetPlace()); - unsigned int seed = - static_cast(context.op_.GetAttr("seed")); + unsigned int seed = static_cast(context.Attr("seed")); if (seed == 0) { std::random_device rd; seed = rd(); } - T min = static_cast(context.op_.GetAttr("min")); - T max = static_cast(context.op_.GetAttr("max")); + T min = static_cast(context.Attr("min")); + T max = static_cast(context.Attr("max")); thrust::counting_iterator index_sequence_begin(0); - ssize_t N = framework::product(tensor->dims()); - thrust::transform(index_sequence_begin, index_sequence_begin + N, + int64_t size = tensor->numel(); + thrust::transform(index_sequence_begin, index_sequence_begin + size, thrust::device_ptr(data), UniformGenerator(min, max, seed)); } diff --git a/paddle/platform/CMakeLists.txt b/paddle/platform/CMakeLists.txt 
index 120eb1e4af9cef43e76e27d4ad66acfbbd597a36..17bdac8749e31565b119b2cb84aed199fac0f441 100644 --- a/paddle/platform/CMakeLists.txt +++ b/paddle/platform/CMakeLists.txt @@ -22,3 +22,5 @@ ENDIF() cc_library(device_context SRCS device_context.cc DEPS memory buddy_allocator system_allocator memory_block meta_data meta_cache place eigen3 ${GPU_CTX_DEPS}) nv_test(device_context_test SRCS device_context_test.cc DEPS device_context gpu_info) + +nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda) diff --git a/paddle/platform/cudnn_helper.h b/paddle/platform/cudnn_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..2841d2a2dbec5c17ef098a06c976ca01247820f5 --- /dev/null +++ b/paddle/platform/cudnn_helper.h @@ -0,0 +1,201 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include "paddle/platform/dynload/cudnn.h" +#include "paddle/platform/enforce.h" +#include "paddle/platform/macros.h" + +namespace paddle { +namespace platform { + +enum class DataLayout { + kNHWC, + kNCHW, + kNCHW_VECT_C, +}; + +enum class PoolingMode { + kMaximum, + kAverage, +}; + +template +class CudnnDataType; + +template <> +class CudnnDataType { + public: + static const cudnnDataType_t type = CUDNN_DATA_FLOAT; +}; + +template <> +class CudnnDataType { + public: + static const cudnnDataType_t type = CUDNN_DATA_DOUBLE; +}; + +inline cudnnTensorFormat_t GetCudnnTensorFormat(const DataLayout& order) { + switch (order) { + case DataLayout::kNHWC: + return CUDNN_TENSOR_NHWC; + case DataLayout::kNCHW: + return CUDNN_TENSOR_NCHW; + default: + PADDLE_THROW("Unknown cudnn equivalent for order"); + } + return CUDNN_TENSOR_NCHW; +} + +class ScopedTensorDescriptor { + public: + ScopedTensorDescriptor() { + PADDLE_ENFORCE(dynload::cudnnCreateTensorDescriptor(&desc_)); + } + ~ScopedTensorDescriptor() { + PADDLE_ENFORCE(dynload::cudnnDestroyTensorDescriptor(desc_)); + } + + inline cudnnTensorDescriptor_t descriptor(const cudnnTensorFormat_t format, + const cudnnDataType_t type, + const std::vector& dims) { + // the format is not used now, but it maybe useful feature + std::vector strides(dims.size()); + strides[dims.size() - 1] = 1; + for (int i = dims.size() - 2; i >= 0; i--) { + strides[i] = dims[i + 1] * strides[i + 1]; + } + PADDLE_ENFORCE(dynload::cudnnSetTensorNdDescriptor( + desc_, type, dims.size(), dims.data(), strides.data())); + return desc_; + } + + template + inline cudnnTensorDescriptor_t descriptor(const DataLayout& order, + const std::vector& dims) { + return descriptor(GetCudnnTensorFormat(order), CudnnDataType::type, + dims); + } + + private: + cudnnTensorDescriptor_t desc_; + DISABLE_COPY_AND_ASSIGN(ScopedTensorDescriptor); +}; + +class ScopedFilterDescriptor { + public: + ScopedFilterDescriptor() { + PADDLE_ENFORCE(dynload::cudnnCreateFilterDescriptor(&desc_)); + } + ~ScopedFilterDescriptor() { + PADDLE_ENFORCE(dynload::cudnnDestroyFilterDescriptor(desc_)); + } + + inline cudnnFilterDescriptor_t 
descriptor(const cudnnTensorFormat_t format, + const cudnnDataType_t type, + const std::vector& kernel) { + // filter layout: output input spatial_dim_y spatial_dim_x + PADDLE_ENFORCE(dynload::cudnnSetFilterNdDescriptor( + desc_, type, format, kernel.size(), kernel.data())); + return desc_; + } + + template + inline cudnnFilterDescriptor_t descriptor(const DataLayout& order, + const std::vector& kernel) { + return descriptor(GetCudnnTensorFormat(order), CudnnDataType::type, + kernel); + } + + private: + cudnnFilterDescriptor_t desc_; + DISABLE_COPY_AND_ASSIGN(ScopedFilterDescriptor); +}; + +class ScopedConvolutionDescriptor { + public: + ScopedConvolutionDescriptor() { + PADDLE_ENFORCE(dynload::cudnnCreateConvolutionDescriptor(&desc_)); + } + ~ScopedConvolutionDescriptor() { + PADDLE_ENFORCE(dynload::cudnnDestroyConvolutionDescriptor(desc_)); + } + + inline cudnnConvolutionDescriptor_t descriptor( + cudnnDataType_t type, const std::vector& pads, + const std::vector& strides, const std::vector& dilations) { + PADDLE_ENFORCE_EQ(pads.size(), strides.size()); + PADDLE_ENFORCE_EQ(pads.size(), dilations.size()); + +#if CUDNN_VERSION < 6000 + // cudnn v5 does not support dilation conv, the argument is called upscale + // instead of dilations and it is must be one. + for (size_t i = 0; i < dilations.size(); ++i) { + PADDLE_ENFORCE_EQ( + dilations[i], 1, + "Dilations conv is not supported in this cuDNN version"); + } +#endif + + PADDLE_ENFORCE(dynload::cudnnSetConvolutionNdDescriptor( + desc_, pads.size(), pads.data(), strides.data(), dilations.data(), + CUDNN_CROSS_CORRELATION, type)); + return desc_; + } + + template + inline cudnnConvolutionDescriptor_t descriptor( + const std::vector& pads, const std::vector& strides, + const std::vector& dilations) { + return descriptor(CudnnDataType::type, pads, strides, dilations); + } + + private: + cudnnConvolutionDescriptor_t desc_; + DISABLE_COPY_AND_ASSIGN(ScopedConvolutionDescriptor); +}; + +class ScopedPoolingDescriptor { + public: + ScopedPoolingDescriptor() { + PADDLE_ENFORCE(dynload::cudnnCreatePoolingDescriptor(&desc_)); + } + ~ScopedPoolingDescriptor() { + PADDLE_ENFORCE(dynload::cudnnDestroyPoolingDescriptor(desc_)); + } + + inline cudnnPoolingDescriptor_t descriptor(const PoolingMode& mode, + const std::vector& kernel, + const std::vector& pads, + const std::vector& strides) { + PADDLE_ENFORCE_EQ(kernel.size(), pads.size()); + PADDLE_ENFORCE_EQ(kernel.size(), strides.size()); + PADDLE_ENFORCE(dynload::cudnnSetPoolingNdDescriptor( + desc_, (mode == PoolingMode::kMaximum + ? CUDNN_POOLING_MAX + : CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING), + CUDNN_PROPAGATE_NAN, // Always propagate nans. + kernel.size(), kernel.data(), pads.data(), strides.data())); + return desc_; + } + + private: + cudnnPoolingDescriptor_t desc_; + DISABLE_COPY_AND_ASSIGN(ScopedPoolingDescriptor); +}; + +} // namespace platform +} // namespace paddle diff --git a/paddle/platform/cudnn_helper_test.cc b/paddle/platform/cudnn_helper_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..6bd85ae1ca8b47b203e0321e9d9224d5cfd3a586 --- /dev/null +++ b/paddle/platform/cudnn_helper_test.cc @@ -0,0 +1,120 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
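Each Scoped*Descriptor above is a small RAII wrapper: the constructor creates the cuDNN handle, the destructor destroys it, and DISABLE_COPY_AND_ASSIGN prevents two owners of one handle. ScopedTensorDescriptor additionally derives packed strides from the dimension vector. A standalone sketch of that stride rule; for dims {2, 4, 6, 6} it produces {144, 36, 6, 1}, exactly the values the unit test below asserts:

```cpp
#include <vector>

// Packed (fully contiguous) strides as computed in
// ScopedTensorDescriptor::descriptor: the innermost dimension has stride 1
// and every other stride is the product of the dimensions to its right.
std::vector<int> PackedStrides(const std::vector<int>& dims) {
  std::vector<int> strides(dims.size());
  strides[dims.size() - 1] = 1;
  for (int i = static_cast<int>(dims.size()) - 2; i >= 0; --i) {
    strides[i] = dims[i + 1] * strides[i + 1];
  }
  return strides;
}
```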
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/platform/cudnn_helper.h" +#include + +TEST(CudnnHelper, ScopedTensorDescriptor) { + using paddle::platform::ScopedTensorDescriptor; + using paddle::platform::DataLayout; + + ScopedTensorDescriptor tensor_desc; + std::vector shape = {2, 4, 6, 6}; + auto desc = tensor_desc.descriptor(DataLayout::kNCHW, shape); + + cudnnDataType_t type; + int nd; + std::vector dims(4); + std::vector strides(4); + paddle::platform::dynload::cudnnGetTensorNdDescriptor( + desc, 4, &type, &nd, dims.data(), strides.data()); + + EXPECT_EQ(nd, 4); + for (size_t i = 0; i < dims.size(); ++i) { + EXPECT_EQ(dims[i], shape[i]); + } + EXPECT_EQ(strides[3], 1); + EXPECT_EQ(strides[2], 6); + EXPECT_EQ(strides[1], 36); + EXPECT_EQ(strides[0], 144); +} + +TEST(CudnnHelper, ScopedFilterDescriptor) { + using paddle::platform::ScopedFilterDescriptor; + using paddle::platform::DataLayout; + + ScopedFilterDescriptor filter_desc; + std::vector shape = {2, 3, 3}; + auto desc = filter_desc.descriptor(DataLayout::kNCHW, shape); + + cudnnDataType_t type; + int nd; + cudnnTensorFormat_t format; + std::vector kernel(3); + paddle::platform::dynload::cudnnGetFilterNdDescriptor(desc, 3, &type, &format, + &nd, kernel.data()); + + EXPECT_EQ(GetCudnnTensorFormat(DataLayout::kNCHW), format); + EXPECT_EQ(nd, 3); + for (size_t i = 0; i < shape.size(); ++i) { + EXPECT_EQ(kernel[i], shape[i]); + } +} + +TEST(CudnnHelper, ScopedConvolutionDescriptor) { + using paddle::platform::ScopedConvolutionDescriptor; + + ScopedConvolutionDescriptor conv_desc; + std::vector src_pads = {2, 2, 2}; + std::vector src_strides = {1, 1, 1}; + std::vector src_dilations = {1, 1, 1}; + auto desc = conv_desc.descriptor(src_pads, src_strides, src_dilations); + + cudnnDataType_t type; + cudnnConvolutionMode_t mode; + int nd; + std::vector pads(3); + std::vector strides(3); + std::vector dilations(3); + paddle::platform::dynload::cudnnGetConvolutionNdDescriptor( + desc, 3, &nd, pads.data(), strides.data(), dilations.data(), &mode, + &type); + + EXPECT_EQ(nd, 3); + for (size_t i = 0; i < src_pads.size(); ++i) { + EXPECT_EQ(pads[i], src_pads[i]); + EXPECT_EQ(strides[i], src_strides[i]); + EXPECT_EQ(dilations[i], src_dilations[i]); + } + EXPECT_EQ(mode, CUDNN_CROSS_CORRELATION); +} + +TEST(CudnnHelper, ScopedPoolingDescriptor) { + using paddle::platform::ScopedPoolingDescriptor; + using paddle::platform::PoolingMode; + + ScopedPoolingDescriptor pool_desc; + std::vector src_kernel = {2, 2, 5}; + std::vector src_pads = {1, 1, 2}; + std::vector src_strides = {2, 2, 3}; + auto desc = pool_desc.descriptor(PoolingMode::kMaximum, src_kernel, src_pads, + src_strides); + + cudnnPoolingMode_t mode; + cudnnNanPropagation_t nan_t = CUDNN_PROPAGATE_NAN; + int nd; + std::vector kernel(3); + std::vector pads(3); + std::vector strides(3); + paddle::platform::dynload::cudnnGetPoolingNdDescriptor( + desc, 3, &mode, &nan_t, &nd, kernel.data(), pads.data(), strides.data()); + + EXPECT_EQ(nd, 3); + for (size_t i = 0; i < src_pads.size(); ++i) { + EXPECT_EQ(kernel[i], src_kernel[i]); + EXPECT_EQ(pads[i], src_pads[i]); + EXPECT_EQ(strides[i], src_strides[i]); + } + EXPECT_EQ(mode, 
CUDNN_POOLING_MAX); +} diff --git a/paddle/platform/dynload/CMakeLists.txt b/paddle/platform/dynload/CMakeLists.txt index d205ead84598e04eea523be32139959a02e0dd83..ceb66f84b6b01892cbaf61c79a47ae60d2589164 100644 --- a/paddle/platform/dynload/CMakeLists.txt +++ b/paddle/platform/dynload/CMakeLists.txt @@ -1,2 +1,2 @@ cc_library(dynamic_loader SRCS dynamic_loader.cc DEPS glog gflags) -nv_library(dynload_cuda SRCS cublas.cc cudnn.cc curand.cc) +nv_library(dynload_cuda SRCS cublas.cc cudnn.cc curand.cc DEPS dynamic_loader) diff --git a/paddle/platform/dynload/cudnn.h b/paddle/platform/dynload/cudnn.h index ef0dd85b083dc2335dd5c70d3dc5f59eda25daeb..0120625b7c14448f1b8deb88c24a3ee06eaf4f01 100644 --- a/paddle/platform/dynload/cudnn.h +++ b/paddle/platform/dynload/cudnn.h @@ -62,19 +62,27 @@ extern void* cudnn_dso_handle; #define CUDNN_DNN_ROUTINE_EACH(__macro) \ __macro(cudnnSetTensor4dDescriptor); \ __macro(cudnnSetTensor4dDescriptorEx); \ + __macro(cudnnSetTensorNdDescriptor); \ + __macro(cudnnGetTensorNdDescriptor); \ __macro(cudnnGetConvolutionNdForwardOutputDim); \ __macro(cudnnGetConvolutionForwardAlgorithm); \ __macro(cudnnCreateTensorDescriptor); \ __macro(cudnnDestroyTensorDescriptor); \ __macro(cudnnCreateFilterDescriptor); \ __macro(cudnnSetFilter4dDescriptor); \ + __macro(cudnnSetFilterNdDescriptor); \ + __macro(cudnnGetFilterNdDescriptor); \ __macro(cudnnSetPooling2dDescriptor); \ + __macro(cudnnSetPoolingNdDescriptor); \ + __macro(cudnnGetPoolingNdDescriptor); \ __macro(cudnnDestroyFilterDescriptor); \ __macro(cudnnCreateConvolutionDescriptor); \ __macro(cudnnCreatePoolingDescriptor); \ __macro(cudnnDestroyPoolingDescriptor); \ __macro(cudnnSetConvolution2dDescriptor); \ __macro(cudnnDestroyConvolutionDescriptor); \ + __macro(cudnnSetConvolutionNdDescriptor); \ + __macro(cudnnGetConvolutionNdDescriptor); \ __macro(cudnnCreate); \ __macro(cudnnDestroy); \ __macro(cudnnSetStream); \ diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h index 81448897e95eb05f4ce7de8683d98e05bade77cb..64fcbd93b6c4d5d9b36f2636c3ef4f7327f08d25 100644 --- a/paddle/platform/enforce.h +++ b/paddle/platform/enforce.h @@ -25,10 +25,6 @@ limitations under the License. */ #include "paddle/string/printf.h" #include "paddle/string/to_string.h" -#ifdef __GNUC__ -#include // for __cxa_demangle -#endif - #ifndef PADDLE_ONLY_CPU #include "paddle/platform/dynload/cublas.h" @@ -46,19 +42,6 @@ limitations under the License. */ namespace paddle { namespace platform { -namespace { -#ifdef __GNUC__ -inline std::string demangle(std::string name) { - int status = -4; // some arbitrary value to eliminate the compiler warning - std::unique_ptr res{ - abi::__cxa_demangle(name.c_str(), NULL, NULL, &status), std::free}; - return (status == 0) ? 
res.get() : name; -} -#else -inline std::string demangle(std::string name) { return name; } -#endif -} - struct EnforceNotMet : public std::exception { std::exception_ptr exp_; std::string err_str_; @@ -79,7 +62,7 @@ struct EnforceNotMet : public std::exception { Dl_info info; for (int i = 0; i < size; ++i) { if (dladdr(call_stack[i], &info)) { - auto demangled = demangle(info.dli_sname); + auto demangled = info.dli_sname; auto addr_offset = static_cast(call_stack[i]) - static_cast(info.dli_saddr); sout << string::Sprintf("%-3d %*0p %s + %zd\n", i, diff --git a/paddle/platform/macros.h b/paddle/platform/macros.h new file mode 100644 index 0000000000000000000000000000000000000000..4a04a38c0c6f905639004dea2f4416ecc57c8620 --- /dev/null +++ b/paddle/platform/macros.h @@ -0,0 +1,23 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +// Disable the copy and assignment operator for a class. +#ifndef DISABLE_COPY_AND_ASSIGN +#define DISABLE_COPY_AND_ASSIGN(classname) \ + private: \ + classname(const classname&) = delete; \ + classname& operator=(const classname&) = delete +#endif diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 3bc150ccb7af2885439cc2344aa0db9ba3b1ca03..16a2368aae5fff7445654161db4fd6a97d5bebfc 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -17,6 +17,7 @@ limitations under the License. */ #include #include "paddle/framework/backward.h" +#include "paddle/framework/lod_tensor.h" #include "paddle/framework/op_registry.h" #include "paddle/operators/net_op.h" #include "paddle/operators/recurrent_op.h" @@ -30,7 +31,7 @@ limitations under the License. */ namespace py = pybind11; -USE_OP(add_two); +USE_OP(add); USE_OP(onehot_cross_entropy); USE_OP(sgd); USE_OP(mul); @@ -39,20 +40,28 @@ USE_OP(sigmoid); USE_OP(softmax); USE_OP(rowwise_add); USE_OP(fill_zeros_like); -USE_OP_ITSELF(recurrent_op); +USE_NO_KERNEL_OP(recurrent); USE_OP(gaussian_random); USE_OP(uniform_random); USE_OP(lookup_table); USE_OP(scale); -USE_OP_ITSELF(identity); +USE_NO_KERNEL_OP(identity); USE_OP(minus); +USE_OP(cos_sim); USE_CPU_ONLY_OP(gather); USE_CPU_ONLY_OP(scatter); +USE_CPU_ONLY_OP(concat); +USE_OP(top_k); +USE_OP(squared_l2_distance); +USE_OP(sum); +USE_OP(reshape); namespace paddle { namespace framework { using Tensor = framework::Tensor; +using LoDTensor = framework::LoDTensor; +using LoD = framework::LoD; static size_t UniqueIntegerGenerator() { static std::atomic generator; @@ -76,7 +85,7 @@ PYBIND11_PLUGIN(core) { .def("get_dims", [](const Tensor &self) { return vectorize(self.dims()); }) .def("set_dims", - [](Tensor &self, const std::vector &dim) { + [](Tensor &self, const std::vector &dim) { self.Resize(make_ddim(dim)); }) .def("alloc_float", @@ -112,6 +121,60 @@ PYBIND11_PLUGIN(core) { return self.data()[offset]; }); + py::class_(m, "LoDTensor", R"DOC(LoD(Leval of Ddetails) Tensor. 
+ +The tensor and LoD info should be created before creating the LoDTensor, then +call the set_tensor and set_lod functions to set them. + +)DOC") + .def("__init__", + [](LoDTensor &instance, + const std::vector> &lod, + Tensor *t) { +#ifdef PADDLE_ONLY_CPU + new (&instance) LoDTensor(lod, t); +#else + paddle::framework::LoD new_lod; + new_lod.reserve(lod.size()); + std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod)); + new (&instance) LoDTensor(new_lod, t); +#endif + }) + .def("set_tensor", + [](LoDTensor &self, Tensor *tensor) { self.set_tensor(tensor); }) + .def("set_lod", + [](LoDTensor &self, const std::vector> &lod) { +#ifdef PADDLE_ONLY_CPU + self.set_lod(lod); +#else + paddle::framework::LoD new_lod; + new_lod.reserve(lod.size()); + std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod)); + self.set_lod(new_lod); +#endif + }) + .def("tensor", + [](LoDTensor &self) -> Tensor & { return self.tensor(); }, + py::return_value_policy::reference) + .def("lod", [](LoDTensor &self) -> std::vector> { +#ifdef PADDLE_ONLY_CPU + return self.lod(); +#else + auto lod = self.lod(); + std::vector> new_lod; + new_lod.reserve(lod.size()); + std::transform(lod.begin(), lod.end(), std::back_inserter(new_lod), + [](paddle::framework::Vector item) -> + std::vector { + std::vector v; + v.reserve(item.size()); + std::copy(item.begin(), item.end(), std::back_inserter(v)); + return v; + }); + return new_lod; +#endif + }); + py::class_(m, "Variable", R"DOC(Variable Class. All parameter, weight, gradient are variables in Paddle. @@ -123,6 +186,11 @@ All parameter, weight, gradient are variables in Paddle. .def("get_tensor", [](Variable &self) -> Tensor * { return self.GetMutable(); }, py::return_value_policy::reference) + .def("get_lod_tensor", + [](Variable &self) -> LoDTensor * { + return self.GetMutable(); + }, + py::return_value_policy::reference) .def("get_net", [](Variable &self) -> operators::NetOp * { return self.GetMutable(); @@ -213,7 +281,10 @@ All parameter, weight, gradient are variables in Paddle. -> std::map> { return op.Outputs(); }) + .def("output_vars", + [](const OperatorBase &op) { return op.OutputVars(true); }) .def("inputs", [](const OperatorBase &op) { return op.Inputs(); }) + .def("input_vars", [](const OperatorBase &op) { return op.InputVars(); }) .def("__str__", &OperatorBase::DebugString) .def("no_intermediate_outputs", [](const OperatorBase &op) { return op.OutputVars(false); }) diff --git a/paddle/pybind/tensor_py.h b/paddle/pybind/tensor_py.h index 39ba60b4dc7ebe3f39a0aa4023b34540b340a841..95171acf729a513e5c92d1e0cba15cb12b38561a 100644 --- a/paddle/pybind/tensor_py.h +++ b/paddle/pybind/tensor_py.h @@ -85,7 +85,7 @@ void PyCPUTensorSetFromArray( framework::Tensor &self, py::array_t array, paddle::platform::CPUPlace &place) { - std::vector dims; + std::vector dims; dims.reserve(array.ndim()); for (size_t i = 0; i < array.ndim(); ++i) { dims.push_back((int)array.shape()[i]); @@ -102,7 +102,7 @@ void PyCUDATensorSetFromArray( framework::Tensor &self, py::array_t array, paddle::platform::GPUPlace &place) { - std::vector dims; + std::vector dims; dims.reserve(array.ndim()); for (size_t i = 0; i < array.ndim(); ++i) { dims.push_back((int)array.shape()[i]); diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index 17986420220fec173bbf3ecff240d4c504f8adbd..2ac455d771bf78377ce4ee7d921393d3b3958e3c 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -30,6 +30,8 @@ Configuring cmake in /paddle/build ... 
-DCMAKE_BUILD_TYPE=Release -DWITH_DOC=OFF -DWITH_GPU=${WITH_GPU:-OFF} + -DWITH_MKLDNN=${WITH_MKLDNN:-ON} + -DWITH_MKLML=${WITH_MKLML:-ON} -DWITH_AVX=${WITH_AVX:-OFF} -DWITH_GOLANG=${WITH_GOLANG:-ON} -DWITH_SWIG_PY=ON @@ -37,7 +39,7 @@ Configuring cmake in /paddle/build ... -DWITH_PYTHON=${WITH_PYTHON:-ON} -DWITH_SWIG_PY=${WITH_SWIG_PY:-ON} -DCUDNN_ROOT=/usr/ - -DWITH_STYLE_CHECK=${WITH_STYLE_CHECK:-OFF} + -DWITH_STYLE_CHECK=${WITH_STYLE_CHECK:-ON} -DWITH_TESTING=${WITH_TESTING:-ON} -DCMAKE_EXPORT_COMPILE_COMMANDS=ON ======================================== @@ -50,6 +52,8 @@ cmake .. \ -DCMAKE_BUILD_TYPE=Release \ -DWITH_DOC=OFF \ -DWITH_GPU=${WITH_GPU:-OFF} \ + -DWITH_MKLDNN=${WITH_MKLDNN:-ON} \ + -DWITH_MKLML=${WITH_MKLML:-ON} \ -DWITH_AVX=${WITH_AVX:-OFF} \ -DWITH_GOLANG=${WITH_GOLANG:-ON} \ -DWITH_SWIG_PY=${WITH_SWIG_PY:-ON} \ diff --git a/paddle/scripts/docker/build_android.sh b/paddle/scripts/docker/build_android.sh index 5584e29e2a155a8062f7d4f2016bd389bd9803f3..aabd2da5e499c8e648f2967e56c661ec37f025a1 100644 --- a/paddle/scripts/docker/build_android.sh +++ b/paddle/scripts/docker/build_android.sh @@ -2,22 +2,58 @@ set -xe -mkdir -p /paddle/build_android -cd /paddle/build_android -rm -rf /paddle/install 2>/dev/null || true -cmake -DCMAKE_SYSTEM_NAME=Android \ - -DANDROID_STANDALONE_TOOLCHAIN=$ANDROID_STANDALONE_TOOLCHAIN \ - -DANDROID_ABI=armeabi-v7a \ - -DANDROID_ARM_NEON=ON \ - -DANDROID_ARM_MODE=ON \ - -DHOST_C_COMPILER=/usr/bin/gcc \ - -DHOST_CXX_COMPILER=/usr/bin/g++ \ - -DCMAKE_INSTALL_PREFIX=/paddle/install \ - -DCMAKE_BUILD_TYPE=RelWithDebInfo \ - -DCMAKE_C_FLAGS_RELWITHDEBINFO="-O3" \ - -DCMAKE_CXX_FLAGS_RELWITHDEBINFO="-O3" \ - -DWITH_C_API=ON \ - -DWITH_SWIG_PY=OFF \ - .. +BUILD_ROOT=/paddle/build_android +DEST_ROOT=/paddle/install + +rm -rf $BUILD_ROOT 2>/dev/null || true +mkdir -p $BUILD_ROOT +cd $BUILD_ROOT + +if [ $ANDROID_ABI == "armeabi-v7a" ]; then + cmake -DCMAKE_SYSTEM_NAME=Android \ + -DANDROID_STANDALONE_TOOLCHAIN=$ANDROID_ARM_STANDALONE_TOOLCHAIN \ + -DANDROID_ABI=$ANDROID_ABI \ + -DANDROID_ARM_NEON=ON \ + -DANDROID_ARM_MODE=ON \ + -DHOST_C_COMPILER=/usr/bin/gcc \ + -DHOST_CXX_COMPILER=/usr/bin/g++ \ + -DCMAKE_INSTALL_PREFIX=$DEST_ROOT \ + -DCMAKE_BUILD_TYPE=Release \ + -DUSE_EIGEN_FOR_BLAS=ON \ + -DWITH_C_API=ON \ + -DWITH_SWIG_PY=OFF \ + -DWITH_STYLE_CHECK=OFF \ + .. +elif [ $ANDROID_ABI == "arm64-v8a" ]; then + cmake -DCMAKE_SYSTEM_NAME=Android \ + -DANDROID_STANDALONE_TOOLCHAIN=$ANDROID_ARM64_STANDALONE_TOOLCHAIN \ + -DANDROID_ABI=$ANDROID_ABI \ + -DANDROID_ARM_MODE=ON \ + -DHOST_C_COMPILER=/usr/bin/gcc \ + -DHOST_CXX_COMPILER=/usr/bin/g++ \ + -DCMAKE_INSTALL_PREFIX=$DEST_ROOT \ + -DCMAKE_BUILD_TYPE=Release \ + -DUSE_EIGEN_FOR_BLAS=OFF \ + -DWITH_C_API=ON \ + -DWITH_SWIG_PY=OFF \ + -DWITH_STYLE_CHECK=OFF \ + .. +elif [ $ANDROID_ABI == "armeabi" ]; then + cmake -DCMAKE_SYSTEM_NAME=Android \ + -DANDROID_STANDALONE_TOOLCHAIN=$ANDROID_ARM_STANDALONE_TOOLCHAIN \ + -DANDROID_ABI=$ANDROID_ABI \ + -DANDROID_ARM_MODE=ON \ + -DHOST_C_COMPILER=/usr/bin/gcc \ + -DHOST_CXX_COMPILER=/usr/bin/g++ \ + -DCMAKE_INSTALL_PREFIX=/paddle/install \ + -DCMAKE_BUILD_TYPE=Release \ + -DWITH_C_API=ON \ + -DWITH_SWIG_PY=OFF \ + -DWITH_STYLE_CHECK=OFF \ + .. 
+else + echo "Invalid ANDROID_ABI: $ANDROID_ABI" +fi + make -j `nproc` make install -j `nproc` diff --git a/paddle/scripts/travis/build_android.sh b/paddle/scripts/travis/build_android.sh index 004067a8f55351509caaf2bbf6d5c349a4698a79..9da71d1e8cdec4047167fe354973e6bac85fb9f0 100755 --- a/paddle/scripts/travis/build_android.sh +++ b/paddle/scripts/travis/build_android.sh @@ -22,6 +22,7 @@ cmake -DCMAKE_SYSTEM_NAME=Android \ -DANDROID_ABI=armeabi-v7a \ -DANDROID_ARM_NEON=ON \ -DANDROID_ARM_MODE=ON \ + -DUSE_EIGEN_FOR_BLAS=ON \ -DWITH_C_API=ON \ -DWITH_SWIG_PY=OFF \ -DWITH_STYLE_CHECK=OFF \ diff --git a/paddle/utils/Util.cpp b/paddle/utils/Util.cpp index b18b73e06a6c39c3bf9717280bc6323917c80efb..2755fdd9cd1c2509cad996557c6fb24363d42d8a 100644 --- a/paddle/utils/Util.cpp +++ b/paddle/utils/Util.cpp @@ -320,6 +320,9 @@ void loadFileList(const std::string& fileListFileName, } double getMemoryUsage() { +#if defined(__ANDROID__) + return 0.0; +#else FILE* fp = fopen("/proc/meminfo", "r"); CHECK(fp) << "failed to fopen /proc/meminfo"; size_t bufsize = 256 * sizeof(char); @@ -357,6 +360,7 @@ double getMemoryUsage() { delete[] buf; double usedMem = 1.0 - 1.0 * (freeMem + bufMem + cacheMem) / totalMem; return usedMem; +#endif } SyncThreadPool* getGlobalSyncThreadPool() { diff --git a/paddle/utils/Util.h b/paddle/utils/Util.h index 613844669d2495ada7b8f7a841f47b821b7fdeba..22ce2534d3468ded36221810aa61c15b37f13f3d 100644 --- a/paddle/utils/Util.h +++ b/paddle/utils/Util.h @@ -33,6 +33,13 @@ limitations under the License. */ #include "Flags.h" #include "hl_gpu.h" +#if defined(__ANDROID__) && (__ANDROID_API__ < 21) +inline int rand_r(unsigned int* seedp) { + (void)seedp; + return rand(); +} +#endif + /** * Loop over the elements in a container * TODO(yuyang18): It's this foreach useful? Why not use C++ 11 foreach, diff --git a/proto/ModelConfig.proto b/proto/ModelConfig.proto index 4ddf023780c704cb10c51ee9e5d7cb63420f9d73..ebf0911d6ea0b39d51447859ae2aef485b50b0e6 100644 --- a/proto/ModelConfig.proto +++ b/proto/ModelConfig.proto @@ -271,6 +271,7 @@ message ImageConfig { // The size of input feature map. required uint32 img_size = 8; optional uint32 img_size_y = 9; + optional uint32 img_size_z = 10 [ default = 1 ]; } message PriorBoxConfig { @@ -287,6 +288,11 @@ message PadConfig { repeated uint32 pad_w = 4; } +message ReshapeConfig { + repeated uint32 height_axis = 1; + repeated uint32 width_axis = 2; +} + message MultiBoxLossConfig { required uint32 num_classes = 1; required float overlap_threshold = 2; @@ -339,7 +345,6 @@ message LayerInputConfig { } message LayerConfig { - required string name = 1; required string type = 2; optional uint64 size = 3; @@ -515,7 +520,11 @@ message LayerConfig { // for HuberRegressionLoss optional double delta = 57 [ default = 1.0 ]; + // for 3D data optional uint64 depth = 58 [ default = 1 ]; + + // for switch order layer + optional ReshapeConfig reshape_conf = 59; } message EvaluatorConfig { diff --git a/python/paddle/trainer/PyDataProvider2.py b/python/paddle/trainer/PyDataProvider2.py index 7e305e2cd9fbe306368a44d08f7f66b4185ae2d2..248da4ae8d1fb24652625ae8fc9ef314a028b912 100644 --- a/python/paddle/trainer/PyDataProvider2.py +++ b/python/paddle/trainer/PyDataProvider2.py @@ -27,6 +27,14 @@ class SequenceType(object): SEQUENCE = 1 SUB_SEQUENCE = 2 + @classmethod + def tostring(cls, value): + for k in cls.__dict__: + if not k.startswith('__'): + if getattr(cls, k) == value: + return cls.__name__ + '.' 
+ k + return 'INVALID(' + str(value) + ')' + # TODO(yuyang18): Add string data type here. class DataType(object): @@ -35,6 +43,14 @@ class DataType(object): SparseValue = 2 Index = 3 + @classmethod + def tostring(cls, value): + for k in cls.__dict__: + if not k.startswith('__'): + if getattr(cls, k) == value: + return cls.__name__ + '.' + k + return 'INVALID(' + str(value) + ')' + class CacheType(object): NO_CACHE = 0 # No cache at all @@ -69,6 +85,26 @@ class InputType(object): self.seq_type = seq_type self.type = tp + def __repr__(self): + """ + Return a human readable representation like 'InputType(dim=25921, + seq_type=SequenceType.NO_SEQUENCE, type=DataType.Dense)' + """ + repr_str = type(self).__name__ + repr_str += '(' + serialize_func_map = { + 'dim': repr, + 'seq_type': SequenceType.tostring, + 'type': DataType.tostring + } + for idx, k in enumerate(self.__slots__): + if idx != 0: + repr_str += ', ' + repr_str += ( + k + '=' + serialize_func_map.get(k, repr)(getattr(self, k))) + repr_str += ')' + return repr_str + def dense_slot(dim, seq_type=SequenceType.NO_SEQUENCE): """ diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 152a56190c1ffddbf9590ed8f71308ceb88403f4..4f68a8953446ffa0510df65c5b214d09b913cff8 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -1332,6 +1332,12 @@ def parse_image(image, input_layer_name, image_conf): get_img_size(input_layer_name, image_conf.channels) +def parse_image3d(image, input_layer_name, image_conf): + image_conf.channels = image.channels + image_conf.img_size, image_conf.img_size_y, image_conf.img_size_z = \ + get_img3d_size(input_layer_name, image_conf.channels) + + def parse_norm(norm, input_layer_name, norm_conf): norm_conf.norm_type = norm.norm_type config_assert( @@ -2028,6 +2034,7 @@ class ParameterReluLayer(LayerBase): config_assert(input_layer.size % partial_sum == 0, "a wrong setting for partial_sum") self.set_layer_size(input_layer.size) + self.config.partial_sum = partial_sum self.create_input_parameter(0, input_layer.size / partial_sum) @@ -2365,9 +2372,11 @@ class BatchNormLayer(LayerBase): name, inputs, bias=True, + img3D=False, use_global_stats=True, moving_average_fraction=0.9, batch_norm_type=None, + mean_var_names=None, **xargs): if inputs is None: inputs = [] @@ -2409,24 +2418,69 @@ class BatchNormLayer(LayerBase): input_layer = self.get_input_layer(0) image_conf = self.config.inputs[0].image_conf - parse_image(self.inputs[0].image, input_layer.name, image_conf) - - # Only pass the width and height of input to batch_norm layer - # when either of it is non-zero. - if input_layer.width != 0 or input_layer.height != 0: - self.set_cnn_layer(name, image_conf.img_size_y, image_conf.img_size, - image_conf.channels, False) + if img3D: + parse_image3d(self.inputs[0].image, input_layer.name, image_conf) + # Only pass the width and height of input to batch_norm layer + # when either of it is non-zero. + if input_layer.width != 0 or input_layer.height != 0: + self.set_cnn_layer( + input_layer_name=name, + depth=image_conf.img_size_z, + height=image_conf.img_size_y, + width=image_conf.img_size, + channels=image_conf.channels, + is_print=True) + else: + self.set_layer_size(input_layer.size) else: - self.set_layer_size(input_layer.size) + parse_image(self.inputs[0].image, input_layer.name, image_conf) + # Only pass the width and height of input to batch_norm layer + # when either of it is non-zero. 
+ if input_layer.width != 0 or input_layer.height != 0: + self.set_cnn_layer( + input_layer_name=name, + height=image_conf.img_size_y, + width=image_conf.img_size, + channels=image_conf.channels, + is_print=True) + else: + self.set_layer_size(input_layer.size) psize = self.calc_parameter_size(image_conf) dims = [1, psize] + if mean_var_names is not None: + assert len(mean_var_names) == 2 + self.inputs[1].parameter_name = mean_var_names[0] + self.inputs[2].parameter_name = mean_var_names[1] + self.create_input_parameter(0, psize) self.create_input_parameter(1, psize, dims) self.create_input_parameter(2, psize, dims) self.create_bias_parameter(bias, psize) + def set_cnn_layer(self, + input_layer_name, + depth=None, + height=None, + width=None, + channels=None, + is_print=True): + depthIsNone = False + if depth is None: + depth = 1 + depthIsNone = True + size = depth * height * width * channels + self.set_layer_size(size) + self.set_layer_height_width(height, width) + self.set_layer_depth(depth) + if is_print and depthIsNone: + print("output for %s: c = %d, h = %d, w = %d, size = %d" % + (input_layer_name, channels, height, width, size)) + elif is_print: + print("output for %s: c = %d, d = %d, h = %d, w = %d, size = %d" % + (input_layer_name, channels, depth, height, width, size)) + def calc_parameter_size(self, image_conf): return image_conf.channels @@ -2688,9 +2742,20 @@ class AddToLayer(LayerBase): super(AddToLayer, self).__init__( name, 'addto', 0, inputs=inputs, **xargs) config_assert(len(inputs) > 0, 'inputs cannot be empty for AddToLayer') - for input_index in xrange(len(self.inputs)): - input_layer = self.get_input_layer(input_index) - self.set_layer_size(input_layer.size) + + if len(self.inputs) > 1: + for input_index in xrange(len(self.inputs)): + assert self.get_input_layer(0).height == self.get_input_layer( + input_index).height + assert self.get_input_layer(0).width == self.get_input_layer( + input_index).width + assert self.get_input_layer(0).depth == self.get_input_layer( + input_index).depth + + self.set_layer_size(self.get_input_layer(0).size) + self.set_layer_height_width(self.get_input_layer(0).height, \ + self.get_input_layer(0).width) + self.set_layer_depth(self.get_input_layer(0).depth) self.create_bias_parameter(bias, self.config.size) @@ -3370,11 +3435,20 @@ class ConcatenateLayer(LayerBase): name, 'concat', 0, inputs=inputs, **xargs) size = 0 for input_index in xrange(len(self.inputs)): + assert self.get_input_layer(0).height == self.get_input_layer( + input_index).height + assert self.get_input_layer(0).width == self.get_input_layer( + input_index).width + assert self.get_input_layer(0).depth == self.get_input_layer( + input_index).depth input_layer = self.get_input_layer(input_index) input = self.inputs[input_index] if self.config.size == 0: size += input_layer.size + self.set_layer_height_width(self.get_input_layer(0).height, \ + self.get_input_layer(0).width) + self.set_layer_depth(self.get_input_layer(0).depth) self.set_layer_size(size) @@ -3670,6 +3744,15 @@ class RecurrentLayerGroup(LayerBase): name, 'recurrent_layer_group', 0, inputs=[], device=device) +@config_layer('switch_order') +class SwitchOrderLayer(LayerBase): + def __init__(self, name, inputs, reshape, **xargs): + super(SwitchOrderLayer, self).__init__( + name, 'switch_order', 0, inputs=inputs, **xargs) + self.config.reshape_conf.height_axis.extend(reshape['height']) + self.config.reshape_conf.width_axis.extend(reshape['width']) + + # Deprecated, use a new layer specific class instead @config_func 
def Layer(name, type, **xargs): diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 2bd274fad2ab7eed0902ffe944c6e0670f963233..4b1d80d3db924bfa2ad0e081f785d8f5dd719fce 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -53,7 +53,7 @@ __all__ = [ 'cos_sim', 'hsigmoid', 'conv_projection', - 'mse_cost', + 'square_error_cost', 'regression_cost', 'classification_cost', 'LayerOutput', @@ -131,6 +131,7 @@ __all__ = [ 'row_conv_layer', 'dropout_layer', 'prelu_layer', + 'switch_order_layer', 'gated_unit_layer', 'crop_layer', 'sub_nested_seq_layer', @@ -239,6 +240,7 @@ class LayerType(object): SMOOTH_L1 = 'smooth_l1' PRELU = 'prelu' + SWITCH_ORDER_LAYER = 'switch_order' CROP_LAYER = 'crop' SUB_NESTED_SEQ = 'sub_nested_seq' CLIP_LAYER = 'clip' @@ -352,6 +354,10 @@ class LayerOutput(object): def height(self): return cp.g_layer_map[self.full_name].height + @property + def depth(self): + return cp.g_layer_map[self.full_name].depth + def set_input(self, input): """ Set the input for a memory layer. Can only be used for memory layer @@ -941,7 +947,7 @@ def data_layer(name, size, depth=None, height=None, width=None, if height is not None and width is not None: num_filters = size / (width * height * depth) assert num_filters * width * height * depth == size, \ - "size=%s width=%s height=%s depth=%s" % (size, width, height, depth) + "size=%s width=%s height=%s depth=%s" % (size, width, height, depth) return LayerOutput(name, LayerType.DATA, size=size, num_filters=num_filters) @@ -1217,7 +1223,8 @@ def detection_output_layer(input_loc, name=None): """ Apply the NMS to the output of network and compute the predict bounding - box location. + box location. The output of this layer could be None if there is no valid + bounding box. :param name: The Layer Name. :type name: basestring @@ -2951,13 +2958,15 @@ def img_cmrnorm_layer(input, def batch_norm_layer(input, act=None, name=None, + img3D=False, num_channels=None, bias_attr=None, param_attr=None, layer_attr=None, batch_norm_type=None, moving_average_fraction=0.9, - use_global_stats=None): + use_global_stats=None, + mean_var_names=None): """ Batch Normalization Layer. The notation of this layer as follow. @@ -3024,6 +3033,8 @@ def batch_norm_layer(input, :math:`runningMean = newMean*(1-factor) + runningMean*factor` :type moving_average_fraction: float. + :param mean_var_names: [mean name, variance name] + :type mean_var_names: string list :return: LayerOutput object. :rtype: LayerOutput """ @@ -3037,6 +3048,7 @@ def batch_norm_layer(input, (batch_norm_type == "cudnn_batch_norm") l = Layer( name=name, + img3D=img3D, inputs=Input( input.name, image=Image(channels=num_channels), **param_attr.attr), active_type=act.name, @@ -3045,6 +3057,7 @@ def batch_norm_layer(input, bias=ParamAttr.to_bias(bias_attr), moving_average_fraction=moving_average_fraction, use_global_stats=use_global_stats, + mean_var_names=mean_var_names, **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput( @@ -4238,13 +4251,18 @@ def __cost_input__(input, label, weight=None): @wrap_name_default() @layer_support() -def mse_cost(input, label, weight=None, name=None, coeff=1.0, layer_attr=None): +def square_error_cost(input, + label, + weight=None, + name=None, + coeff=1.0, + layer_attr=None): """ - mean squared error cost: + sum of square error cost: .. math:: - \\frac{1}{N}\sum_{i=1}^N(t_i-y_i)^2 + cost = \\sum_{i=1}^N(t_i-y_i)^2 :param name: layer name. 
:type name: basestring @@ -4273,7 +4291,7 @@ def mse_cost(input, label, weight=None, name=None, coeff=1.0, layer_attr=None): return LayerOutput(name, LayerType.COST, parents=parents, size=1) -regression_cost = mse_cost +regression_cost = square_error_cost @wrap_name_default("cost") @@ -5798,9 +5816,9 @@ def huber_regression_cost(input, coeff=1.0, layer_attr=None): """ - In statistics, the Huber loss is a loss function used in robust regression, - that is less sensitive to outliers in data than the squared error loss. - Given a prediction f(x), a label y and :math:`\delta`, the loss function + In statistics, the Huber loss is a loss function used in robust regression, + that is less sensitive to outliers in data than the squared error loss. + Given a prediction f(x), a label y and :math:`\delta`, the loss function is defined as: .. math: @@ -5848,13 +5866,13 @@ def huber_classification_cost(input, coeff=1.0, layer_attr=None): """ - For classification purposes, a variant of the Huber loss called modified Huber - is sometimes used. Given a prediction f(x) (a real-valued classifier score) and - a true binary class label :math:`y\in \left \{-1, 1 \right \}`, the modified Huber + For classification purposes, a variant of the Huber loss called modified Huber + is sometimes used. Given a prediction f(x) (a real-valued classifier score) and + a true binary class label :math:`y\in \left \{-1, 1 \right \}`, the modified Huber loss is defined as: .. math: - loss = \max \left ( 0, 1-yf(x) \right )^2, yf(x)\geq 1 + loss = \max \left ( 0, 1-yf(x) \right )^2, yf(x)\geq 1 loss = -4yf(x), \text{otherwise} The example usage is: @@ -6399,6 +6417,55 @@ def gated_unit_layer(input, layer_attr=layer_attr) +@layer_support() +@wrap_name_default('switch_order') +def switch_order_layer(input, + name=None, + reshape_axis=None, + act=None, + layer_attr=None): + """ + This layer switch dimension order of image input. + From order "batchSize, channels, height, width" + to order "batchSize, height, width, channels". + + The example usage is: + + .. code-block:: python + reshape_axis = 3 + switch = switch_order(input=layer, name='switch', reshape_axis=reshape_axis) + reshape = {'height':[ 0, 1, 2], 'width':[3]} + + :param input: The input layer. + :type input: LayerOutput + :param name: Name of this layer. + :type name: basestring + :param reshape: reshape matrix by axises. + :type reshape: Dict + :return: LayerOutput object. 
+ :rtype: LayerOutput + """ + assert isinstance(input, LayerOutput) + assert reshape_axis != None and (reshape_axis > 0 and reshape_axis < 4) + height = [ele for ele in xrange(reshape_axis)] + width = [ele for ele in range(reshape_axis, 4)] + reshape = {'height': height, 'width': width} + + l = Layer( + name=name, + inputs=input.name, + reshape=reshape, + type=LayerType.SWITCH_ORDER_LAYER, + active_type=act.name, + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput( + name=name, + layer_type=LayerType.SWITCH_ORDER_LAYER, + activation=act, + parents=input, + size=l.config.size) + + @wrap_name_default() @layer_support() def crop_layer(input, offset, axis=2, shape=None, name=None, layer_attr=None): diff --git a/python/paddle/trainer_config_helpers/tests/configs/file_list.sh b/python/paddle/trainer_config_helpers/tests/configs/file_list.sh index df872a90ff388f0d96cef44763dbd076bc768ab9..8a204a96f3ef57673cef65306d0bf8e8c3409751 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/file_list.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/file_list.sh @@ -10,6 +10,6 @@ test_prelu_layer test_row_conv test_detection_output_layer test_multibox_loss_la test_recursive_topology test_gated_unit_layer test_clip_layer test_row_l2_norm_layer test_kmax_seq_socre_layer test_sub_nested_seq_select_layer test_scale_shift_layer test_seq_slice_layer test_cross_entropy_over_beam test_pooling3D_layer -test_conv3d_layer test_deconv3d_layer) +test_conv3d_layer test_deconv3d_layer test_BatchNorm3D) export whole_configs=(test_split_datasource) diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/img_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/img_layers.protostr index 1a577b8d9b1e1915236ba6afcfa97040d70c707a..5ddf6052df021b055390a42c25ce6c0d650e4aee 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/protostr/img_layers.protostr +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/img_layers.protostr @@ -62,6 +62,7 @@ layers { moving_average_fraction: 0.9 height: 227 width: 227 + depth: 1 } layers { name: "__crmnorm_0__" diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/img_trans_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/img_trans_layers.protostr index 2818389b16cca75f5030b75fc4de8c89c06c5e02..c0252b945b4c7fd6b4dad8770e3e1dccb88df28a 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/protostr/img_trans_layers.protostr +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/img_trans_layers.protostr @@ -62,6 +62,7 @@ layers { moving_average_fraction: 0.9 height: 256 width: 256 + depth: 1 } layers { name: "__crmnorm_0__" diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_BatchNorm3D.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_BatchNorm3D.protostr new file mode 100644 index 0000000000000000000000000000000000000000..832ed24a31dd2bedba9a4fce77d7a088d1796fdb --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_BatchNorm3D.protostr @@ -0,0 +1,92 @@ +type: "nn" +layers { + name: "data3D" + type: "data" + size: 360 + active_type: "" + height: 6 + width: 20 + depth: 3 +} +layers { + name: "__batch_norm_0__" + type: "batch_norm" + size: 360 + active_type: "relu" + inputs { + input_layer_name: "data3D" + input_parameter_name: "___batch_norm_0__.w0" + image_conf { + channels: 1 + img_size: 20 + img_size_y: 6 + img_size_z: 3 + } + } + inputs { + 
input_layer_name: "data3D" + input_parameter_name: "___batch_norm_0__.w1" + } + inputs { + input_layer_name: "data3D" + input_parameter_name: "___batch_norm_0__.w2" + } + bias_parameter_name: "___batch_norm_0__.wbias" + moving_average_fraction: 0.9 + height: 6 + width: 20 + depth: 3 +} +parameters { + name: "___batch_norm_0__.w0" + size: 1 + initial_mean: 1.0 + initial_std: 0.0 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___batch_norm_0__.w1" + size: 1 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 1 + initial_strategy: 0 + initial_smart: false + is_static: true + is_shared: true +} +parameters { + name: "___batch_norm_0__.w2" + size: 1 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 1 + initial_strategy: 0 + initial_smart: false + is_static: true + is_shared: true +} +parameters { + name: "___batch_norm_0__.wbias" + size: 1 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 1 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "data3D" +output_layer_names: "__batch_norm_0__" +sub_models { + name: "root" + layer_names: "data3D" + layer_names: "__batch_norm_0__" + input_layer_names: "data3D" + output_layer_names: "__batch_norm_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_bi_grumemory.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_bi_grumemory.protostr index b110e91498ce7d112987714bd769868179141c54..8a1399efad0ff339e35f69400ac654a4787a6018 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_bi_grumemory.protostr +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_bi_grumemory.protostr @@ -74,6 +74,9 @@ layers { inputs { input_layer_name: "__bidirectional_gru_0___bw" } + height: 0 + width: 0 + depth: 1 } parameters { name: "___bidirectional_gru_0___fw_transform.w0" diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers_with_weight.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers_with_weight.protostr index 96fb1d4ebde08b1bca2ffd09e8db0895842cbfd3..cec8a73db66f6091ec971527b3a42aa9e08154eb 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers_with_weight.protostr +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers_with_weight.protostr @@ -45,7 +45,7 @@ layers { coeff: 1.0 } layers { - name: "__mse_cost_0__" + name: "__square_error_cost_0__" type: "square_error" size: 1 active_type: "" @@ -130,7 +130,7 @@ input_layer_names: "label" input_layer_names: "weight" input_layer_names: "multi_class_label" output_layer_names: "__cost_0__" -output_layer_names: "__mse_cost_0__" +output_layer_names: "__square_error_cost_0__" output_layer_names: "__nce_layer_0__" evaluators { name: "classification_error_evaluator" @@ -146,7 +146,7 @@ sub_models { layer_names: "weight" layer_names: "__fc_layer_0__" layer_names: "__cost_0__" - layer_names: "__mse_cost_0__" + layer_names: "__square_error_cost_0__" layer_names: "multi_class_label" layer_names: "__nce_layer_0__" input_layer_names: "input" @@ -154,7 +154,7 @@ sub_models { input_layer_names: "weight" input_layer_names: "multi_class_label" output_layer_names: "__cost_0__" - output_layer_names: "__mse_cost_0__" + output_layer_names: "__square_error_cost_0__" output_layer_names: "__nce_layer_0__" evaluator_names: "classification_error_evaluator" is_recurrent_layer_group: false diff --git 
a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_prelu_layer.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_prelu_layer.protostr index 64d227565f2b21ff43d4391c682ca90c0f47908e..94ad56cab063df9e6a11bb1c293727fb9dec810f 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_prelu_layer.protostr +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_prelu_layer.protostr @@ -14,6 +14,29 @@ layers { input_layer_name: "input" input_parameter_name: "___prelu_layer_0__.w0" } + partial_sum: 1 +} +layers { + name: "__prelu_layer_1__" + type: "prelu" + size: 300 + active_type: "" + inputs { + input_layer_name: "input" + input_parameter_name: "___prelu_layer_1__.w0" + } + partial_sum: 1 +} +layers { + name: "__prelu_layer_2__" + type: "prelu" + size: 300 + active_type: "" + inputs { + input_layer_name: "input" + input_parameter_name: "___prelu_layer_2__.w0" + } + partial_sum: 5 } parameters { name: "___prelu_layer_0__.w0" @@ -23,14 +46,32 @@ parameters { initial_strategy: 0 initial_smart: true } +parameters { + name: "___prelu_layer_1__.w0" + size: 300 + initial_mean: 0.0 + initial_std: 0.057735026919 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___prelu_layer_2__.w0" + size: 60 + initial_mean: 0.0 + initial_std: 0.129099444874 + initial_strategy: 0 + initial_smart: true +} input_layer_names: "input" -output_layer_names: "__prelu_layer_0__" +output_layer_names: "__prelu_layer_2__" sub_models { name: "root" layer_names: "input" layer_names: "__prelu_layer_0__" + layer_names: "__prelu_layer_1__" + layer_names: "__prelu_layer_2__" input_layer_names: "input" - output_layer_names: "__prelu_layer_0__" + output_layer_names: "__prelu_layer_2__" is_recurrent_layer_group: false } diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_recursive_topology.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_recursive_topology.protostr index 8133aa9c8d3e7c6843d1b27b70e87d394a1e0e47..046037936a6d85f54095c65f206e468aa69065d7 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_recursive_topology.protostr +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_recursive_topology.protostr @@ -16,6 +16,9 @@ layers { inputs { input_layer_name: "data" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_1__" @@ -28,6 +31,9 @@ layers { inputs { input_layer_name: "__addto_0__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_2__" @@ -40,6 +46,9 @@ layers { inputs { input_layer_name: "__addto_1__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_3__" @@ -52,6 +61,9 @@ layers { inputs { input_layer_name: "__addto_2__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_4__" @@ -64,6 +76,9 @@ layers { inputs { input_layer_name: "__addto_3__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_5__" @@ -76,6 +91,9 @@ layers { inputs { input_layer_name: "__addto_4__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_6__" @@ -88,6 +106,9 @@ layers { inputs { input_layer_name: "__addto_5__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_7__" @@ -100,6 +121,9 @@ layers { inputs { input_layer_name: "__addto_6__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_8__" @@ -112,6 +136,9 @@ layers { inputs { input_layer_name: "__addto_7__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_9__" @@ -124,6 +151,9 @@ layers { inputs { 
input_layer_name: "__addto_8__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_10__" @@ -136,6 +166,9 @@ layers { inputs { input_layer_name: "__addto_9__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_11__" @@ -148,6 +181,9 @@ layers { inputs { input_layer_name: "__addto_10__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_12__" @@ -160,6 +196,9 @@ layers { inputs { input_layer_name: "__addto_11__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_13__" @@ -172,6 +211,9 @@ layers { inputs { input_layer_name: "__addto_12__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_14__" @@ -184,6 +226,9 @@ layers { inputs { input_layer_name: "__addto_13__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_15__" @@ -196,6 +241,9 @@ layers { inputs { input_layer_name: "__addto_14__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_16__" @@ -208,6 +256,9 @@ layers { inputs { input_layer_name: "__addto_15__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_17__" @@ -220,6 +271,9 @@ layers { inputs { input_layer_name: "__addto_16__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_18__" @@ -232,6 +286,9 @@ layers { inputs { input_layer_name: "__addto_17__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_19__" @@ -244,6 +301,9 @@ layers { inputs { input_layer_name: "__addto_18__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_20__" @@ -256,6 +316,9 @@ layers { inputs { input_layer_name: "__addto_19__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_21__" @@ -268,6 +331,9 @@ layers { inputs { input_layer_name: "__addto_20__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_22__" @@ -280,6 +346,9 @@ layers { inputs { input_layer_name: "__addto_21__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_23__" @@ -292,6 +361,9 @@ layers { inputs { input_layer_name: "__addto_22__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_24__" @@ -304,6 +376,9 @@ layers { inputs { input_layer_name: "__addto_23__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_25__" @@ -316,6 +391,9 @@ layers { inputs { input_layer_name: "__addto_24__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_26__" @@ -328,6 +406,9 @@ layers { inputs { input_layer_name: "__addto_25__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_27__" @@ -340,6 +421,9 @@ layers { inputs { input_layer_name: "__addto_26__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_28__" @@ -352,6 +436,9 @@ layers { inputs { input_layer_name: "__addto_27__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_29__" @@ -364,6 +451,9 @@ layers { inputs { input_layer_name: "__addto_28__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_30__" @@ -376,6 +466,9 @@ layers { inputs { input_layer_name: "__addto_29__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__addto_31__" @@ -388,6 +481,9 @@ layers { inputs { input_layer_name: "__addto_30__" } + height: 0 + width: 0 + depth: 1 } layers { name: "__fc_layer_0__" diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_seq_select_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_sub_nested_seq_select_layer.protostr similarity index 100% rename from python/paddle/trainer_config_helpers/tests/configs/protostr/test_seq_select_layers.protostr rename to 
python/paddle/trainer_config_helpers/tests/configs/protostr/test_sub_nested_seq_select_layer.protostr diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/util_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/util_layers.protostr index d0ad388165007b8f96f059e5b003c52f756383e5..7a2f3eab38808a031c27cf7ab9d6273952e389eb 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/protostr/util_layers.protostr +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/util_layers.protostr @@ -22,6 +22,9 @@ layers { inputs { input_layer_name: "b" } + height: 0 + width: 0 + depth: 1 } layers { name: "__concat_0__" @@ -34,6 +37,9 @@ layers { inputs { input_layer_name: "b" } + height: 0 + width: 0 + depth: 1 } layers { name: "__concat_1__" diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_BatchNorm3D.py b/python/paddle/trainer_config_helpers/tests/configs/test_BatchNorm3D.py new file mode 100644 index 0000000000000000000000000000000000000000..a991b22252ba10eed895efd931108c2d8b0e52f1 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_BatchNorm3D.py @@ -0,0 +1,11 @@ +from paddle.trainer_config_helpers import * + +settings(batch_size=1000, learning_rate=1e-4) + +#data = data_layer(name='data', size=180, width=30, height=6) +#batchNorm = batch_norm_layer(data, num_channels=1) +#outputs(batchNorm) + +data3D = data_layer(name='data3D', size=120 * 3, width=20, height=6, depth=3) +batchNorm3D = batch_norm_layer(data3D, num_channels=1, img3D=True) +outputs(batchNorm3D) diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py index c369062930e2b067ceab0dc3b25ba6c1eabe2450..caa6aaa9430ffaee7ade93ee04ec90103bf8cf43 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py @@ -10,7 +10,7 @@ fc = fc_layer(input=data, size=10, act=SoftmaxActivation()) outputs( classification_cost( input=fc, label=lbl, weight=wt), - mse_cost( + square_error_cost( input=fc, label=lbl, weight=wt), nce_layer( input=fc, diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_prelu_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_prelu_layer.py index 2e3057f323db22ffc3911cce30ec2e8bb95e3dbe..aae90fab32db78a70c2169ed8fafb930433f4136 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_prelu_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_prelu_layer.py @@ -2,5 +2,7 @@ from paddle.trainer_config_helpers import * data = data_layer(name='input', size=300) prelu = prelu_layer(input=data) +prelu = prelu_layer(input=data, partial_sum=1) +prelu = prelu_layer(input=data, partial_sum=5) outputs(prelu) diff --git a/python/paddle/v2/event.py b/python/paddle/v2/event.py index 7589cc9917f26375d595e200245d5ba099bc38d7..e66bf67d7949057486eb54c46f39128fad5dae55 100644 --- a/python/paddle/v2/event.py +++ b/python/paddle/v2/event.py @@ -53,10 +53,13 @@ class BeginPass(object): class EndPass(WithMetric): """ Event On One Pass Training Complete. 
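A quick consistency check (illustration only) of the 3D batch norm test config above: the data layer's size must equal width * height * depth, and the generated test_BatchNorm3D.protostr records the same geometry in its image_conf.

    # data3D: size=120*3, width=20, height=6, depth=3; protostr: size: 360,
    # channels: 1, img_size: 20, img_size_y: 6, img_size_z: 3.
    channels, width, height, depth = 1, 20, 6, 3
    assert channels * width * height * depth == 120 * 3 == 360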
+ To get the output of a specific layer, add "event.gm.getLayerOutputs('predict_layer')" + in your event_handler callback """ - def __init__(self, pass_id, evaluator): + def __init__(self, pass_id, evaluator, gm): self.pass_id = pass_id + self.gm = gm WithMetric.__init__(self, evaluator) @@ -73,10 +76,13 @@ class BeginIteration(object): class EndIteration(WithMetric): """ Event On One Batch Training Complete. + To get the output of a specific layer, add "event.gm.getLayerOutputs('predict_layer')" + in your event_handler callback """ - def __init__(self, pass_id, batch_id, cost, evaluator): + def __init__(self, pass_id, batch_id, cost, evaluator, gm): self.pass_id = pass_id self.batch_id = batch_id self.cost = cost + self.gm = gm WithMetric.__init__(self, evaluator) diff --git a/python/paddle/v2/framework/op.py b/python/paddle/v2/framework/op.py index 6ac656321e72f5b0c91008091753ee50ac8200a6..9e665adad2d3ad91d183c6815fbd7135ac4e8965 100644 --- a/python/paddle/v2/framework/op.py +++ b/python/paddle/v2/framework/op.py @@ -4,8 +4,8 @@ import paddle.v2.framework.proto.framework_pb2 as framework_pb2 def get_all_op_protos(): """ - Get all registered op proto from Paddle C++ - :return: list of OpProto + Get all registered op protos from the PaddlePaddle C++ side. + :return: A list of registered OpProto. """ protostrs = core.get_all_op_protos() ret_values = [] @@ -21,8 +21,8 @@ def is_str(s): class OpDescCreationMethod(object): """ - A Functor object to convert user input(use key word args) to OpDesc based on - OpProto. + Convert the user's input (only keyword arguments are supported) to OpDesc + based on the OpProto. :param op_proto: The OpProto object. :type op_proto: op_proto_pb2.OpProto @@ -30,27 +30,28 @@ class OpDescCreationMethod(object): def __init__(self, op_proto): if not isinstance(op_proto, framework_pb2.OpProto): - raise TypeError("Argument should be OpProto") + raise TypeError( + "Type of op_proto should be OpProto in PaddlePaddle.") self.__op_proto__ = op_proto def __call__(self, *args, **kwargs): """ - Convert user input to OpDesc. Only key-word args are supported. - :return: OpDesc based on user input + Convert user's input to OpDesc. Only keyword arguments are supported. + :return: The OpDesc based on user input. :rtype: op_desc_pb2.OpDesc """ if len(args) != 0: - raise ValueError("Only keyword arguments is supported by Paddle") + raise ValueError("Only keyword arguments are supported.") op_desc = framework_pb2.OpDesc() - for input_parameter in self.__op_proto__.inputs: input_arguments = kwargs.get(input_parameter.name, []) if is_str(input_arguments): input_arguments = [input_arguments] if not input_parameter.duplicable and len(input_arguments) > 1: - raise ValueError("Input %s only accepts one input, but give %d" - % (input_parameter.name, len(input_arguments))) + raise ValueError( + "Input %s expects only one input, but %d are given." % + (input_parameter.name, len(input_arguments))) ipt = op_desc.inputs.add() ipt.parameter = input_parameter.name @@ -63,7 +64,7 @@ class OpDescCreationMethod(object): if not output_parameter.duplicable and len(output_arguments) > 1: raise ValueError( - "Output %s only accepts one output, but give %d" % + "Output %s expects only one output, but %d are given."
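A hedged sketch of the usage the new EndPass/EndIteration docstrings above describe; 'predict_layer' is the docstrings' own example name, and the surrounding paddle.v2 trainer setup is assumed rather than shown.

    import paddle.v2 as paddle

    def event_handler(event):
        if isinstance(event, paddle.event.EndIteration):
            # event.gm is the gradient machine; fetch a layer's output by name.
            outs = event.gm.getLayerOutputs('predict_layer')
            print "pass %d, batch %d, cost %f" % (event.pass_id,
                                                  event.batch_id, event.cost)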
% (output_parameter.name, len(output_arguments))) out = op_desc.outputs.add() @@ -94,16 +95,23 @@ class OpDescCreationMethod(object): new_attr.floats.extend(user_defined_attr) elif attr.type == framework_pb2.STRINGS: new_attr.strings.extend(user_defined_attr) + elif attr.type == framework_pb2.INT_PAIRS: + for p in user_defined_attr: + pair = new_attr.pairs.add() + pair.first = p[0] + pair.second = p[1] else: - raise NotImplementedError("Not support attribute type " + - attr.type) + raise NotImplementedError( + "Unsupported attribute type: %s." % ( + str(attr.type))) return op_desc @staticmethod def any_is_true(generator): """ - Reduce a bool array to one. If any of them is True, then return True. + Reduce a boolean array to a single boolean value. If any element in + the array is True, this function will return True, otherwise False. """ for flag in generator: if flag: @@ -122,7 +130,7 @@ class OpInfo(object): def create_op_creation_method(op_proto): """ - Generate op creation method for an OpProto + Generate op creation method for an OpProto. """ method = OpDescCreationMethod(op_proto) @@ -133,28 +141,31 @@ def create_op_creation_method(op_proto): return OpInfo( method=__impl__, name=op_proto.type, - inputs=[var.name for var in op_proto.inputs], - outputs=[var.name for var in op_proto.outputs], + inputs=[(var.name, var.duplicable) for var in op_proto.inputs], + outputs=[(var.name, var.duplicable) for var in op_proto.outputs], attrs=[attr.name for attr in op_proto.attrs]) class OperatorFactory(object): def __init__(self): self.op_methods = dict() + for op_proto in get_all_op_protos(): method = create_op_creation_method(op_proto) self.op_methods[method.name] = method def __call__(self, *args, **kwargs): - if 'type' in kwargs: + if "type" in kwargs: if len(args) != 0: - raise ValueError("All Paddle argument should be key-word " - "argument except type") - t = kwargs.pop('type') + raise ValueError( + "Except for the argument \"type\", " + "all of the other arguments should be keyword arguments.") + t = kwargs.pop("type") else: if len(args) != 1: - raise ValueError("All Paddle argument should be key-word " - "argument except type") + raise ValueError( + "Except for the argument \"type\", " + "all of the other arguments should be keyword arguments.") t = args[0] return self.get_op_info(t).method(**kwargs) @@ -164,13 +175,19 @@ class OperatorFactory(object): def get_op_info(self, t): if t not in self.op_methods: - raise ValueError("operator %s is not registered", t) + raise ValueError("The operator %s is not registered."
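The two calling conventions OperatorFactory.__call__ accepts, sketched under the assumption that the compiled core module with a registered add op is importable:

    from paddle.v2.framework.op import Operator

    # "type" may be the single positional argument, or a keyword argument;
    # every other argument must be passed by keyword.
    op1 = Operator("add", X="X", Y="Y", Out="Out")
    op2 = Operator(type="add", X="X", Y="Y", Out="Out")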
% t) return self.op_methods.get(t) def get_op_input_names(self, type): + return map(lambda x: x[0], self.get_op_info(type).inputs) + + def get_op_inputs(self, type): return self.get_op_info(type).inputs def get_op_output_names(self, type): + return map(lambda x: x[0], self.get_op_info(type).outputs) + + def get_op_outputs(self, type): return self.get_op_info(type).outputs def get_op_attr_names(self, type): @@ -179,7 +196,7 @@ class OperatorFactory(object): class __RecurrentOp__(object): __proto__ = None - type = 'recurrent_op' + type = "recurrent" def __init__(self): # cache recurrent_op's proto @@ -189,8 +206,8 @@ class __RecurrentOp__(object): self.__proto__ = op_proto def __call__(self, *args, **kwargs): - if self.type not in args and 'type' not in kwargs: - kwargs['type'] = self.type + if self.type not in args and "type" not in kwargs: + kwargs["type"] = self.type # create proto create_method = OpDescCreationMethod(self.__proto__) proto = create_method(*args, **kwargs) @@ -198,5 +215,5 @@ class __RecurrentOp__(object): return core.RecurrentOp.create(proto.SerializeToString()) -Operator = OperatorFactory() # Default global factory +Operator = OperatorFactory() # The default global factory RecurrentOp = __RecurrentOp__() diff --git a/python/paddle/v2/framework/tests/CMakeLists.txt b/python/paddle/v2/framework/tests/CMakeLists.txt index 661ebd89648feec77367c278e5f045b8238e1dc1..4d7664469e481344cf9eea84688f068b4fb99dee 100644 --- a/python/paddle/v2/framework/tests/CMakeLists.txt +++ b/python/paddle/v2/framework/tests/CMakeLists.txt @@ -1,34 +1,5 @@ -py_test(test_net SRCS test_net.py) - -py_test(test_scope SRCS test_scope.py) - -py_test(test_tensor SRCS test_tensor.py) -py_test(test_mul_op SRCS test_mul_op.py) - -py_test(test_mean_op SRCS test_mean_op.py) - -py_test(test_protobuf SRCS test_protobuf.py) - -py_test(test_add_two_op SRCS test_add_two_op.py) -py_test(test_sigmoid_op SRCS test_sigmoid_op.py) -py_test(test_softmax_op SRCS test_softmax_op.py) -py_test(test_cross_entropy_op SRCS test_cross_entropy_op.py) -py_test(test_gather_op SRCS test_gather_op.py) -py_test(test_scatter_op SRCS test_scatter_op.py) -py_test(test_fill_zeros_like_op SRCS test_fill_zeros_like_op.py) - -py_test(gradient_checker SRCS gradient_checker.py) - -py_test(test_rowwise_add_op SRCS test_rowwise_add_op.py) - -py_test(test_default_scope_funcs SRCS test_default_scope_funcs.py) - -py_test(test_operator SRCS test_operator.py) -py_test(test_gaussian_random_op SRCS test_gaussian_random_op.py) -py_test(test_uniform_random_op SRCS test_uniform_random_op.py) -py_test(test_recurrent_op SRCS test_recurrent_op.py) -py_test(test_sgd_op SRCS test_sgd_op.py) -py_test(test_gradient_checker SRCS test_gradient_checker.py) -py_test(test_lookup_table SRCS test_lookup_table.py) -py_test(test_scale_and_identity_op SRCS test_scale_and_identity_op.py) -py_test(mnist SRCS mnist.py) +file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") +string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") +foreach(src ${TEST_OPS}) + py_test(${src} SRCS ${src}.py) +endforeach() diff --git a/python/paddle/v2/framework/tests/gradient_checker.py b/python/paddle/v2/framework/tests/gradient_checker.py deleted file mode 100644 index 518f828bacd60e7cb8375b22c6c3296f9bfeb5ea..0000000000000000000000000000000000000000 --- a/python/paddle/v2/framework/tests/gradient_checker.py +++ /dev/null @@ -1,309 +0,0 @@ -import unittest - -import numpy -import itertools -import paddle.v2.framework.core as core -from paddle.v2.framework.op import Operator - -__all__ 
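Sketch of the names-versus-pairs split introduced above; the concrete values shown for the add op are assumptions for illustration.

    from paddle.v2.framework.op import Operator

    # get_op_inputs/get_op_outputs expose (name, duplicable) pairs, while the
    # *_names variants strip the flag for callers that only need names.
    pairs = Operator.get_op_inputs("add")        # e.g. [("X", False), ("Y", False)]
    names = Operator.get_op_input_names("add")   # e.g. ["X", "Y"]
    assert names == [name for name, _ in pairs]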
= ['get_numeric_gradient'] - - -def create_op(op_type): - # TODO need to set attrs - kwargs = dict() - for in_name in Operator.get_op_input_names(op_type): - kwargs[in_name] = in_name - for out_name in Operator.get_op_output_names(op_type): - kwargs[out_name] = out_name - - return Operator(op_type, **kwargs) - - -def grad_var_name(var_name): - return var_name + "@GRAD" - - -def empty_var_name(): - return "@EMPTY@" - - -def get_numeric_gradient(op, - input_values, - output_name, - input_to_check, - delta=0.005, - local_scope=None, - in_place=False): - """ - Get Numeric Gradient for an operator's input. - - :param op: C++ operator instance, could be an network - :param input_values: The input variables. Should be an dictionary, key is - variable name. Value is numpy array. - :param output_name: The final output variable name. - :param input_to_check: The input variable need to get gradient. - :param delta: The perturbation value for numeric gradient method. The - smaller delta is, the more accurate result will get. But if that delta is - too small, it could occur numerical stability problem. - :param local_scope: The local scope used for get_numeric_gradient. - :return: The gradient array in numpy format. - """ - if local_scope is None: - local_scope = core.Scope() - - # Create all input variable in local_scope - for var_name in input_values: - var = local_scope.new_var(var_name) - tensor = var.get_tensor() - tensor.set_dims(input_values[var_name].shape) - tensor.alloc_float(core.CPUPlace()) - tensor.set(input_values[var_name], core.CPUPlace()) - - # Create all output variable in local_scope - opts = op.outputs() - for key in opts: - for output in opts[key]: - if local_scope.find_var(output) is None: - local_scope.new_var(output).get_tensor() - op.infer_shape(local_scope) - - # allocate output memory - for key in opts: - for output in opts[key]: - local_scope.find_var(output).get_tensor().alloc_float(core.CPUPlace( - )) - - cpu_ctx = core.DeviceContext.create(core.CPUPlace()) - - def get_output(): - op.run(local_scope, cpu_ctx) - return numpy.array(local_scope.find_var(output_name).get_tensor()).sum() - - def product(dim): - return reduce(lambda a, b: a * b, dim, 1) - - def restore_inputs(): - for var_name in input_values: - tensor_ = local_scope.find_var(var_name).get_tensor() - tensor_.set(numpy.copy(input_values[var_name]), core.CPUPlace()) - - # get the input tensor that we want to get it's numeric gradient. - tensor_to_check = local_scope.find_var(input_to_check).get_tensor() - tensor_size = product(tensor_to_check.get_dims()) - # prepare a numpy array to store the gradient. - gradient_flat = numpy.zeros(shape=(tensor_size, ), dtype='float32') - - # we only compute gradient of one element each time. - # we use a for loop to compute the gradient of every element. - for i in xrange(tensor_size): - if in_place: - restore_inputs() - # get one input element throw it's index i. - origin = tensor_to_check.get_float_element(i) - - # add delta to it, run op and then get the sum of the result tensor. - x_pos = origin + delta - tensor_to_check.set_float_element(i, x_pos) - y_pos = get_output() - - # plus delta to this element, run op and get the sum of the result tensor. - if in_place: - restore_inputs() - x_neg = origin - delta - tensor_to_check.set_float_element(i, x_neg) - y_neg = get_output() - - # restore old value - tensor_to_check.set_float_element(i, origin) - - # compute the gradient of this element and store it into a numpy array. 
- gradient_flat[i] = (y_pos - y_neg) / delta / 2 - - # reshape the gradient result to the shape of the source tensor. - return gradient_flat.reshape(tensor_to_check.get_dims()) - - -class GradientChecker(unittest.TestCase): - def __get_gradient(self, forward_op, backward_op, input_value, grad_names, - place): - """Get the input gradients after running forward and backward operators - on the given places. - - :param forward_op: forward operator - :type forward_op: Operator - :param backward_op: backward operator - :type backward_op: Operator - :param input_value: input values. - :type input_value: dict{string:numpy.array} - :param grad_names: the names of returned input gradients. - :type input_value: a list of string - :param place: the device type. - :type place: CPUPlace or GPUPlace - :return: the input grdients of given grad_names. - :rtype: a list of numpy.array - """ - scope = core.Scope() - ctx = core.DeviceContext.create(place) - - inputs = forward_op.inputs() - in_names = [item for k in inputs for item in inputs[k]] - outputs = forward_op.outputs() - out_names = [item for k in outputs for item in outputs[k]] - - # create input var and set value - for name, value in input_value.iteritems(): - if name not in in_names: - raise ValueError(name + "does not exist in Op's inputs.") - var = scope.new_var(name).get_tensor() - var.set_dims(value.shape) - var.set(value, place) - - # run forward op - for out_name in out_names: - scope.new_var(out_name) - forward_op.infer_shape(scope) - forward_op.run(scope, ctx) - - # set output var's shape - # set output grad to ones - for name in out_names: - out_tensor = scope.find_var(name).get_tensor() - grad_tensor = scope.new_var(grad_var_name(name)).get_tensor() - grad_tensor.set_dims(out_tensor.shape()) - data = numpy.ones(out_tensor.shape(), dtype=numpy.float32) - grad_tensor.set(data, place) - - # run backward op - backward_outs = backward_op.outputs() - backward_names = [ - item for key in backward_outs for item in backward_outs[key] - ] - for name in backward_names: - scope.new_var(name) - - backward_op.infer_shape(scope) - backward_op.run(scope, ctx) - - outs = [ - numpy.array(scope.find_var(name).get_tensor()) - for name in grad_names - ] - return outs - - def compare_grad(self, forward_op, input_value, no_grad_set=None): - """ Compare the input gradients between CPU and GPU for the given forward - operator. - - :param forward_op: forward operator - :type forward_op: Operator - :param input_value: input values. - :type input_value: dict{string:numpy.array} - :param no_grad_set: the set of variables names without gradients. - :type no_grad_set: a set of string - :raises: AssertionError, there is different gradient value. 
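A standalone numpy sketch (illustration only) of the central-difference scheme the per-element loop above implements: df/dx_i ~= (f(x + delta*e_i) - f(x - delta*e_i)) / (2*delta), one element at a time.

    import numpy as np

    def numeric_grad(f, x, delta=0.005):
        grad = np.zeros_like(x)
        flat_x, flat_g = x.reshape(-1), grad.reshape(-1)
        for i in range(flat_x.size):
            origin = flat_x[i]
            flat_x[i] = origin + delta        # perturb one element upwards
            y_pos = f(x)
            flat_x[i] = origin - delta        # and downwards
            y_neg = f(x)
            flat_x[i] = origin                # restore the original value
            flat_g[i] = (y_pos - y_neg) / delta / 2
        return grad

    # numeric_grad(lambda t: (t ** 2).sum(), np.ones((2, 2))) is ~2.0 everywhere.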
- """ - if no_grad_set is None: - no_grad_set = set() - backward_op = core.Operator.backward(forward_op, no_grad_set) - # return if not compile with GPU or not implementing GPU kernel - if not (core.is_compile_gpu() and backward_op.support_gpu()): - return - - outputs = backward_op.outputs() - out_names = [item for k in outputs for item in outputs[k]] - out_names = filter(lambda x: x != empty_var_name(), out_names) - cpu_grads = self.__get_gradient(forward_op, backward_op, input_value, - out_names, core.CPUPlace()) - gpu_grads = self.__get_gradient(forward_op, backward_op, input_value, - out_names, core.GPUPlace(0)) - - for c_grad, g_grad, name in itertools.izip(cpu_grads, gpu_grads, - out_names): - self.assertTrue( - numpy.allclose( - c_grad, g_grad, atol=1e-4), - "output name: " + name + " has diff") - - def __assert_is_close(self, numeric_grads, analytic_grads, names, - max_relative_error, msg_prefix): - """Use relative error for the comparison. - - :param numeric_grads: the numerical graidents. - :type numeric_grads: a list of numpy.array - :param analytic_grads: the analytical graidents. - :type analytic_grads: a list of numpy.array - :param name: the names of gradients, used to print for debug. - :type names: a list of string - :param msg_prefix: string info, used to print for debug. - :type msf_prefix: string - """ - for a, b, name in itertools.izip(numeric_grads, analytic_grads, names): - abs_a = numpy.abs(a) - # if abs_a is nearly zero, then use abs error for a, not relative - # error. - abs_a[abs_a < 1e-3] = 1 - - diff_mat = numpy.abs(a - b) / abs_a - max_diff = numpy.max(diff_mat) - - def err_msg(): - offset = numpy.argmax(diff_mat > max_relative_error) - return "%s Variable %s max gradient diff %f over limit %f, the first " \ - "error element is %d" % ( - msg_prefix, name, max_diff, max_relative_error, offset) - - self.assertLessEqual(max_diff, max_relative_error, err_msg()) - - def check_grad(self, - forward_op, - input_vars, - inputs_to_check, - output_name, - no_grad_set=None, - only_cpu=False, - in_place=False, - max_relative_error=0.005): - """ - :param forward_op: used to create backward_op - :param input_vars: numpy value of input variable. The following - computation will use these variables. - :param inputs_to_check: inputs var names that should check gradient. - :param output_name: the output variable name of forward network. - :param max_relative_error: The relative tolerance parameter. - :param no_grad_set: used when create backward ops - :param only_cpu: only compute and check gradient on cpu kernel. 
- :return: - """ - if no_grad_set is None: - no_grad_set = set() - - no_tmp_out = forward_op.no_intermediate_outputs() - if len(no_tmp_out) != 1: - raise ValueError("non temp out_names should be 1") - - inputs = forward_op.inputs() - in_names = [item for k in inputs for item in inputs[k]] - for no_grad in no_grad_set: - if no_grad not in in_names: - raise ValueError("no_grad should be in in_names") - backward_op = core.Operator.backward(forward_op, no_grad_set) - - places = [core.CPUPlace()] - if not only_cpu and core.is_compile_gpu() and backward_op.support_gpu(): - places.append(core.GPUPlace(0)) - - # get numerical gradients - numeric_grads = [ - get_numeric_gradient( - forward_op, input_vars, output_name, name, in_place=in_place) - for name in inputs_to_check - ] - - check_names = [grad_var_name(name) for name in inputs_to_check] - for place in places: - # get analytical gradients according to different device - analytic_grads = self.__get_gradient(forward_op, backward_op, - input_vars, check_names, place) - self.__assert_is_close(numeric_grads, analytic_grads, check_names, - max_relative_error, - "Gradient Check On %s" % str(place)) diff --git a/python/paddle/v2/framework/tests/op_test.py b/python/paddle/v2/framework/tests/op_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4fec4c9109bf247abb2068177583acb47a8ebd97 --- /dev/null +++ b/python/paddle/v2/framework/tests/op_test.py @@ -0,0 +1,281 @@ +import unittest +import numpy as np +import itertools +import paddle.v2.framework.core as core +from paddle.v2.framework.op import Operator + + +def grad_var_name(var_name): + return var_name + "@GRAD" + + +def create_op(scope, op_type, inputs, outputs, attrs): + kwargs = dict() + + for in_name, in_dup in Operator.get_op_inputs(op_type): + if in_name in inputs: + kwargs[in_name] = [] + if in_dup: + sub_in = inputs[in_name] + for sub_in_name, _ in sub_in: + var = scope.new_var(sub_in_name) + kwargs[in_name].append(sub_in_name) + else: + var = scope.new_var(in_name) + kwargs[in_name].append(in_name) + + for out_name, out_dup in Operator.get_op_outputs(op_type): + if out_name in outputs: + kwargs[out_name] = [] + if out_dup: + sub_in = outputs[out_name] + for sub_in_name, _ in sub_in: + var = scope.new_var(sub_in_name) + kwargs[out_name].append(sub_in_name) + else: + var = scope.new_var(out_name) + kwargs[out_name].append(out_name) + + for attr_name in Operator.get_op_attr_names(op_type): + if attr_name in attrs: + kwargs[attr_name] = attrs[attr_name] + return Operator(op_type, **kwargs) + + +def set_input(scope, op, inputs, place): + for in_name, in_dup in Operator.get_op_inputs(op.type()): + if in_name in inputs: + if in_dup: + sub_in = inputs[in_name] + for sub_in_name, sub_in_array in sub_in: + var = scope.find_var(sub_in_name) + tensor = var.get_tensor() + tensor.set_dims(sub_in_array.shape) + tensor.set(sub_in_array, place) + else: + var = scope.find_var(in_name) + tensor = var.get_tensor() + arr = inputs[in_name] + tensor.set_dims(arr.shape) + tensor.set(arr, place) + + +def set_output_grad(scope, op, outputs, place): + for out_name, out_dup in Operator.get_op_outputs(op.type()): + if out_name in outputs: + if out_dup: + sub_out = outputs[out_name] + for sub_out_name, _ in sub_out: + out_tensor = scope.find_var(sub_out_name).get_tensor() + grad_tensor = scope.new_var(grad_var_name( + sub_out_name)).get_tensor() + grad_tensor.set_dims(out_tensor.shape()) + data = np.ones(out_tensor.shape(), dtype=np.float32) + grad_tensor.set(data, place) + else: + out_tensor = 
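For reference, my reading of the input convention consumed by create_op/set_input above (not text from the patch): a non-duplicable input is a plain ndarray, while a duplicable one is a list of (sub_name, ndarray) pairs, as the concat test further below uses.

    import numpy as np

    inputs_single = {'X': np.zeros((2, 3)).astype("float32")}
    inputs_duplicable = {'X': [('x0', np.zeros((2, 3)).astype("float32")),
                               ('x1', np.ones((2, 3)).astype("float32"))]}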
scope.find_var(out_name).get_tensor() + grad_tensor = scope.new_var(grad_var_name(out_name)).get_tensor( + ) + grad_tensor.set_dims(out_tensor.shape()) + data = np.ones(out_tensor.shape(), dtype=np.float32) + grad_tensor.set(data, place) + + +def get_numeric_gradient(scope, + op, + inputs, + input_to_check, + output_name, + delta=0.005, + in_place=False): + + set_input(scope, op, inputs, core.CPUPlace()) + op.infer_shape(scope) + + tensor_to_check = scope.find_var(input_to_check).get_tensor() + + def product(dim): + return reduce(lambda a, b: a * b, dim, 1) + + ctx = core.DeviceContext.create(core.CPUPlace()) + + def get_output(): + op.run(scope, ctx) + return np.array(scope.find_var(output_name).get_tensor()).sum() + + tensor_to_check = scope.find_var(input_to_check).get_tensor() + tensor_size = product(tensor_to_check.get_dims()) + gradient_flat = np.zeros(shape=(tensor_size, ), dtype='float32') + # we only compute gradient of one element each time. + # we use a for loop to compute the gradient of every element. + for i in xrange(tensor_size): + if in_place: + set_input(scope, op, inputs, core.CPUPlace()) + + # get one input element throw it's index i. + origin = tensor_to_check.get_float_element(i) + # add delta to it, run op and then get the sum of the result tensor. + x_pos = origin + delta + tensor_to_check.set_float_element(i, x_pos) + y_pos = get_output() + + if in_place: + set_input(scope, op, inputs, core.CPUPlace()) + + x_neg = origin - delta + tensor_to_check.set_float_element(i, x_neg) + y_neg = get_output() + + tensor_to_check.set_float_element(i, origin) + gradient_flat[i] = (y_pos - y_neg) / delta / 2 + + return gradient_flat.reshape(tensor_to_check.get_dims()) + + +def get_backward_op(scope, op, no_grad_set): + backward_op = core.Operator.backward(op, no_grad_set) + for input in backward_op.input_vars(): + var = scope.new_var(input) + var.get_tensor() + for output in backward_op.output_vars(): + var = scope.new_var(output) + var.get_tensor() + return backward_op + + +def get_gradient(scope, op, inputs, outputs, grad_name, place, + no_grad_set=None): + ctx = core.DeviceContext.create(place) + + set_input(scope, op, inputs, place) + + op.infer_shape(scope) + op.run(scope, ctx) + + if no_grad_set is None: + no_grad_set = set() + + backward_op = get_backward_op(scope, op, no_grad_set) + set_output_grad(scope, op, outputs, place) + + backward_op.infer_shape(scope) + backward_op.run(scope, ctx) + + out = np.array(scope.find_var(grad_name).get_tensor()) + return out + + +class OpTest(unittest.TestCase): + def check_output_with_place(self, place): + self.scope = core.Scope() + op_inputs = self.inputs if hasattr(self, "inputs") else dict() + op_attrs = self.attrs if hasattr(self, "attrs") else dict() + self.op = create_op(self.scope, self.op_type, op_inputs, self.outputs, + op_attrs) + if isinstance(place, core.GPUPlace) and not self.op.support_gpu(): + return + set_input(self.scope, self.op, self.inputs, place) + self.op.infer_shape(self.scope) + ctx = core.DeviceContext.create(place) + self.op.run(self.scope, ctx) + + for out_name, out_dup in Operator.get_op_outputs(self.op.type()): + if out_dup: + sub_out = self.outputs[out_name] + for sub_out_name in sub_out: + actual = np.array( + self.scope.find_var(sub_out_name).get_tensor()) + expect = sub_out[sub_out_name] + self.assertTrue( + np.allclose( + actual, expect, atol=1e-05), + "output name: " + out_name + "has diff") + else: + actual = np.array(self.scope.find_var(out_name).get_tensor()) + expect = self.outputs[out_name] + 
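Why set_output_grad seeds every output gradient with ones (my interpretation of the code above): get_numeric_gradient differentiates get_output(), i.e. sum(Y), and d(sum(Y))/dY is one elementwise, so the backward op likewise returns d(sum(Y))/dX. A tiny finite-difference check of that fact:

    import numpy as np

    y = np.random.random((2, 3)).astype("float32")
    delta = 0.005
    y[0, 0] += delta
    s_pos = y.sum()
    y[0, 0] -= 2 * delta
    s_neg = y.sum()
    # d(sum(y))/dy[0, 0] is 1, matching the seeded ones.
    assert abs((s_pos - s_neg) / (2 * delta) - 1.0) < 1e-2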
self.assertTrue( + np.allclose( + actual, expect, atol=1e-05), + "output name: " + out_name + "has diff") + + def check_output(self): + places = [core.CPUPlace()] + if core.is_compile_gpu(): + places.append(core.GPUPlace(0)) + for place in places: + self.check_output_with_place(place) + + def __assert_is_close(self, numeric_grads, analytic_grads, names, + max_relative_error, msg_prefix): + + for a, b, name in itertools.izip(numeric_grads, analytic_grads, names): + abs_a = np.abs(a) + abs_a[abs_a < 1e-3] = 1 + + diff_mat = np.abs(a - b) / abs_a + max_diff = np.max(diff_mat) + + def err_msg(): + offset = np.argmax(diff_mat > max_relative_error) + return "%s Variable %s max gradient diff %f over limit %f, the first " \ + "error element is %d" % ( + msg_prefix, name, max_diff, max_relative_error, offset) + + self.assertLessEqual(max_diff, max_relative_error, err_msg()) + + def check_grad(self, + inputs_to_check, + output_name, + no_grad_set=None, + in_place=False, + max_relative_error=0.005): + self.scope = core.Scope() + op_inputs = self.inputs if hasattr(self, "inputs") else dict() + op_attrs = self.attrs if hasattr(self, "attrs") else dict() + self.op = create_op(self.scope, self.op_type, op_inputs, self.outputs, + op_attrs) + if no_grad_set is None: + no_grad_set = set() + + numeric_grads = [ + get_numeric_gradient( + self.scope, + self.op, + self.inputs, + input_to_check, + output_name, + in_place=in_place) for input_to_check in inputs_to_check + ] + grad_names = [ + grad_var_name(input_to_check) for input_to_check in inputs_to_check + ] + + cpu_place = core.CPUPlace() + cpu_analytic_grads = [ + get_gradient(self.scope, self.op, self.inputs, self.outputs, + grad_name, cpu_place, no_grad_set) + for grad_name in grad_names + ] + + self.__assert_is_close(numeric_grads, cpu_analytic_grads, grad_names, + max_relative_error, + "Gradient Check On %s" % str(cpu_place)) + + if core.is_compile_gpu() and self.op.support_gpu(): + gpu_place = core.GPUPlace(0) + gpu_analytic_grads = [ + get_gradient(self.scope, self.op, self.inputs, self.outputs, + grad_name, gpu_place, no_grad_set) + for grad_name in grad_names + ] + + self.__assert_is_close(numeric_grads, gpu_analytic_grads, + grad_names, max_relative_error, + "Gradient Check On %s" % str(gpu_place)) + + for c_grad, g_grad, name in itertools.izip( + cpu_analytic_grads, gpu_analytic_grads, grad_names): + self.assertTrue( + np.allclose( + c_grad, g_grad, atol=1e-4), + "output name: " + name + " has diff") diff --git a/python/paddle/v2/framework/tests/op_test_util.py b/python/paddle/v2/framework/tests/op_test_util.py deleted file mode 100644 index 3bc05a0feccbbd3d5e7852d85bd3dc8edaccfd07..0000000000000000000000000000000000000000 --- a/python/paddle/v2/framework/tests/op_test_util.py +++ /dev/null @@ -1,72 +0,0 @@ -import numpy -import paddle.v2.framework.core as core -from paddle.v2.framework.op import Operator - - -class OpTestMeta(type): - """ - Operator Test ClassMeta. - - It injects `test_all` method into user's OperatorTest class, to make Python - unittest module run that method. - - The `test_all` read what value is stored in `self`. It use self's values to - create and run a operator, and check whether that op is OK or not. - - See `test_add_two_op` for example usage. 
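The near-zero fallback in __assert_is_close above, restated as a small standalone helper (illustration only):

    import numpy as np

    def max_relative_diff(a, b):
        # Where the reference gradient is nearly zero (|a| < 1e-3), divide by 1
        # instead, i.e. fall back from relative to absolute error so the ratio
        # cannot blow up.
        abs_a = np.abs(a)
        abs_a[abs_a < 1e-3] = 1
        return np.max(np.abs(a - b) / abs_a)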
- """ - - def __new__(cls, name, bases, attrs): - obj = super(OpTestMeta, cls).__new__(cls, name, bases, attrs) - - def test_all(self): - scope = core.Scope() - kwargs = dict() - places = [core.CPUPlace()] - if core.is_compile_gpu(): - places.append(core.GPUPlace(0)) - - for place in places: - for in_name in Operator.get_op_input_names(self.type): - if hasattr(self, "inputs") and in_name in self.inputs: - kwargs[in_name] = in_name - var = scope.new_var(in_name).get_tensor() - arr = self.inputs[in_name] - var.set_dims(arr.shape) - var.set(arr, place) - else: - kwargs[in_name] = "@EMPTY@" - - for out_name in Operator.get_op_output_names(self.type): - if not hasattr(self, "outputs"): - raise ValueError( - "The test op must set self.outputs dict.") - if out_name not in self.outputs: - raise ValueError("The %s is not in self.outputs dict." % - (out_name)) - kwargs[out_name] = out_name - scope.new_var(out_name).get_tensor() - - for attr_name in Operator.get_op_attr_names(self.type): - if hasattr(self, "attrs") and attr_name in self.attrs: - kwargs[attr_name] = self.attrs[attr_name] - - op = Operator(self.type, **kwargs) - if isinstance(place, core.GPUPlace) and not op.support_gpu(): - return - - op.infer_shape(scope) - - ctx = core.DeviceContext.create(place) - op.run(scope, ctx) - - for out_name in Operator.get_op_output_names(self.type): - actual = numpy.array(scope.find_var(out_name).get_tensor()) - expect = self.outputs[out_name] - self.assertTrue( - numpy.allclose( - actual, expect, atol=1e-05), - "output name: " + out_name + "has diff") - - obj.test_all = test_all - return obj diff --git a/python/paddle/v2/framework/tests/test_add_two_op.py b/python/paddle/v2/framework/tests/test_add_two_op.py index 0def484eddb88604398ee10390d3f28058714a57..3ca34d9b9fc2b7b54cc25ca0e0d1a08a71e37c52 100644 --- a/python/paddle/v2/framework/tests/test_add_two_op.py +++ b/python/paddle/v2/framework/tests/test_add_two_op.py @@ -1,23 +1,20 @@ import unittest +import numpy as np +from op_test import OpTest -import numpy -import paddle.v2.framework.core as core -from paddle.v2.framework.op import Operator - -from op_test_util import OpTestMeta - - -class TestAddOp(unittest.TestCase): - __metaclass__ = OpTestMeta +class TestAddOp(OpTest): def setUp(self): - self.type = "add_two" + self.op_type = "add" self.inputs = { - 'X': numpy.random.random((102, 105)).astype("float32"), - 'Y': numpy.random.random((102, 105)).astype("float32") + 'X': np.random.random((102, 105)).astype("float32"), + 'Y': np.random.random((102, 105)).astype("float32") } self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']} + def test_check_output(self): + self.check_output() + -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_concat_op.py b/python/paddle/v2/framework/tests/test_concat_op.py new file mode 100644 index 0000000000000000000000000000000000000000..656563f96e52df30951ec0ec7042ad9c530e90b2 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_concat_op.py @@ -0,0 +1,22 @@ +import unittest +import numpy as np +from op_test import OpTest + + +class TestConcatOp(OpTest): + def setUp(self): + self.op_type = "concat" + x0 = np.random.random((2, 3, 2, 5)).astype('float32') + x1 = np.random.random((2, 3, 3, 5)).astype('float32') + x2 = np.random.random((2, 3, 4, 5)).astype('float32') + axis = 2 + self.inputs = {'X': [('x0', x0), ('x1', x1), ('x2', x2)]} + self.attrs = {'axis': axis} + self.outputs = {'Out': np.concatenate((x0, x1, x2), axis=axis)} + + def 
test_check_output(self): + self.check_output() + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/framework/tests/test_cos_sim_op.py b/python/paddle/v2/framework/tests/test_cos_sim_op.py new file mode 100644 index 0000000000000000000000000000000000000000..797cbd8cc5cf7f73d58ca713d02667731d5c8a0e --- /dev/null +++ b/python/paddle/v2/framework/tests/test_cos_sim_op.py @@ -0,0 +1,39 @@ +import unittest +import numpy as np +from op_test import OpTest + + +class TestCosSimOp(OpTest): + def setUp(self): + self.op_type = "cos_sim" + self.inputs = { + 'X': np.random.random((10, 5)).astype("float32"), + 'Y': np.random.random((10, 5)).astype("float32") + } + expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1) + expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1) + expect_out = (self.inputs['X'] * self.inputs['Y']).sum(axis=1) / \ + expect_x_norm / expect_y_norm + self.outputs = { + 'XNorm': np.expand_dims(expect_x_norm, 1), + 'YNorm': np.expand_dims(expect_y_norm, 1), + 'Out': np.expand_dims(expect_out, 1) + } + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.05) + + def test_check_grad_ingore_x(self): + self.check_grad( + ['Y'], 'Out', max_relative_error=0.05, no_grad_set=set('X')) + + def test_check_grad_ignore_y(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Y')) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/v2/framework/tests/test_cross_entropy_op.py b/python/paddle/v2/framework/tests/test_cross_entropy_op.py index d4277f2a42ce2e66e37405ccd3b2ee444d403d1a..c2fc102a8b8de82da5c3fc5fee273790325908f8 100644 --- a/python/paddle/v2/framework/tests/test_cross_entropy_op.py +++ b/python/paddle/v2/framework/tests/test_cross_entropy_op.py @@ -1,36 +1,27 @@ import unittest import numpy -from op_test_util import OpTestMeta -from gradient_checker import GradientChecker, create_op +from op_test import OpTest -class TestCrossEntropy(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestCrossEntropy(OpTest): def setUp(self): - self.type = "onehot_cross_entropy" + self.op_type = "onehot_cross_entropy" batch_size = 30 class_num = 10 - X = numpy.random.random((batch_size, class_num)).astype("float32") - label = 5 * numpy.ones(batch_size).astype("int32") + X = numpy.random.uniform(0.1, 1.0, + [batch_size, class_num]).astype("float32") + label = (class_num / 2) * numpy.ones(batch_size).astype("int32") self.inputs = {'X': X, 'label': label} Y = [] for i in range(0, batch_size): Y.append(-numpy.log(X[i][label[i]])) self.outputs = {'Y': numpy.array(Y).astype("float32")} + def test_check_output(self): + self.check_output() -class CrossEntropyGradOpTest(GradientChecker): def test_check_grad(self): - op = create_op("onehot_cross_entropy") - batch_size = 30 - class_num = 10 - inputs = { - "X": numpy.random.uniform( - 0.1, 1.0, [batch_size, class_num]).astype("float32"), - "label": (class_num / 2) * numpy.ones(batch_size).astype("int32") - } - self.check_grad(op, inputs, set("X"), "Y") + self.check_grad(['X'], 'Y') if __name__ == "__main__": diff --git a/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py b/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py index e5c862605fb11a5ea1426cf8f9054589dc377ff1..2473daaba24438819f3f55ccc40fe1c64ee59960 100644 --- a/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py +++ b/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py @@ -1,16 +1,17 
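The shape arithmetic behind TestConcatOp above, checked in plain numpy (illustration only): only the concatenation axis grows, 2 + 3 + 4 = 9.

    import numpy as np

    x0 = np.zeros((2, 3, 2, 5))
    x1 = np.zeros((2, 3, 3, 5))
    x2 = np.zeros((2, 3, 4, 5))
    assert np.concatenate((x0, x1, x2), axis=2).shape == (2, 3, 9, 5)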
@@ import unittest -from op_test_util import OpTestMeta -import numpy +import numpy as np +from op_test import OpTest -class TestFillZerosLikeOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestFillZerosLikeOp(OpTest): def setUp(self): - self.type = "fill_zeros_like" - self.inputs = {'Src': numpy.random.random((219, 232)).astype("float32")} - self.outputs = {'Dst': numpy.zeros_like(self.inputs['Src'])} + self.op_type = "fill_zeros_like" + self.inputs = {'Src': np.random.random((219, 232)).astype("float32")} + self.outputs = {'Dst': np.zeros_like(self.inputs["Src"])} + + def test_check_output(self): + self.check_output() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_gather_op.py b/python/paddle/v2/framework/tests/test_gather_op.py index e3de3fd0a1dddb3edb0de5987bd71d8a176d97ef..b0ab429ef1b53640dfb696f6ea2f7b745564b874 100644 --- a/python/paddle/v2/framework/tests/test_gather_op.py +++ b/python/paddle/v2/framework/tests/test_gather_op.py @@ -1,30 +1,20 @@ import unittest -from op_test_util import OpTestMeta -from gradient_checker import GradientChecker, create_op -import numpy -import paddle.v2.framework.core as core -from paddle.v2.framework.op import Operator +import numpy as np +from op_test import OpTest -class TestGatherOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestGatherOp(OpTest): def setUp(self): - self.type = "gather" - xnp = numpy.random.random((10, 20)).astype("float32") - self.inputs = { - 'X': xnp, - 'Index': numpy.array([1, 3, 5]).astype("int32") - } - self.outputs = {'Out': self.inputs['X'][self.inputs['Index']]} + self.op_type = "gather" + xnp = np.random.random((10, 20)).astype("float32") + self.inputs = {'X': xnp, 'Index': np.array([1, 3, 5]).astype("int32")} + self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]} + def test_check_output(self): + self.check_output() -class TestGatherGradOp(GradientChecker): - def test_gather_grad(self): - op = create_op("gather") - xnp = numpy.random.random((10, 20)).astype("float32") - inputs = {'X': xnp, 'Index': numpy.array([1, 3, 5]).astype("int32")} - self.check_grad(op, inputs, set("X"), "Out") + def test_check_grad(self): + self.check_grad(['X'], 'Out') if __name__ == "__main__": diff --git a/python/paddle/v2/framework/tests/test_gaussian_random_op.py b/python/paddle/v2/framework/tests/test_gaussian_random_op.py index f95ed70b58d611b3233a21d3f2a34c864ae4d1b3..1f9e4db783c9907a22db72c8a6ff06c7ca0735da 100644 --- a/python/paddle/v2/framework/tests/test_gaussian_random_op.py +++ b/python/paddle/v2/framework/tests/test_gaussian_random_op.py @@ -14,11 +14,11 @@ class GaussianRandomTest(unittest.TestCase): def gaussian_random_test(self, place): scope = core.Scope() - scope.new_var("Out").get_tensor() + scope.new_var('Out').get_tensor() op = Operator( "gaussian_random", - Out="Out", + Out='Out', dims=[1000, 784], mean=.0, std=1., @@ -27,10 +27,10 @@ class GaussianRandomTest(unittest.TestCase): op.infer_shape(scope) context = core.DeviceContext.create(place) op.run(scope, context) - tensor = numpy.array(scope.find_var("Out").get_tensor()) + tensor = numpy.array(scope.find_var('Out').get_tensor()) self.assertAlmostEqual(numpy.mean(tensor), .0, delta=0.1) self.assertAlmostEqual(numpy.std(tensor), 1., delta=0.1) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_gradient_checker.py b/python/paddle/v2/framework/tests/test_gradient_checker.py index 
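The gather test above is plain numpy fancy indexing; a minimal check (illustration only):

    import numpy as np

    x = np.random.random((10, 20)).astype("float32")
    index = np.array([1, 3, 5]).astype("int32")
    out = x[index]               # rows 1, 3 and 5 of x
    assert out.shape == (3, 20)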
e0b315120862bea284e067070492dcdfbb661081..abeb01cb34158a43b5dcce5e39efc0e21e9fe638 100644 --- a/python/paddle/v2/framework/tests/test_gradient_checker.py +++ b/python/paddle/v2/framework/tests/test_gradient_checker.py @@ -1,43 +1,45 @@ import unittest -import numpy -from paddle.v2.framework.op import Operator -from gradient_checker import GradientChecker -from gradient_checker import get_numeric_gradient +import numpy as np +import paddle.v2.framework.core as core +from op_test import get_numeric_gradient +from op_test import create_op class GetNumericGradientTest(unittest.TestCase): def test_add_op(self): - add_op = Operator('add_two', X="X", Y="Y", Out="Z") - x = numpy.random.random((10, 1)).astype("float32") - y = numpy.random.random((10, 1)).astype("float32") - - arr = get_numeric_gradient(add_op, {'X': x, "Y": y}, 'Z', 'X') + x = np.random.random((10, 1)).astype("float32") + y = np.random.random((10, 1)).astype("float32") + z = x + y + scope = core.Scope() + add_op = create_op(scope, "add", {'X': x, 'Y': y}, {'Out': z}, dict()) + arr = get_numeric_gradient(scope, add_op, {'X': x, 'Y': y}, 'X', 'Out') self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-4) def test_softmax_op(self): def stable_softmax(x): """Compute the softmax of vector x in a numerically stable way.""" - shiftx = x - numpy.max(x) - exps = numpy.exp(shiftx) - return exps / numpy.sum(exps) + shiftx = x - np.max(x) + exps = np.exp(shiftx) + return exps / np.sum(exps) def label_softmax_grad(Y, dY): dX = Y * 0.0 for i in range(Y.shape[0]): - d = numpy.dot(Y[i, :], dY[i, :]) + d = np.dot(Y[i, :], dY[i, :]) dX[i, :] = Y[i, :] * (dY[i, :] - d) return dX - softmax_op = Operator("softmax", X="X", Y="Y") - - X = numpy.random.random((2, 2)).astype("float32") - Y = numpy.apply_along_axis(stable_softmax, 1, X) - dY = numpy.ones(Y.shape) + X = np.random.random((2, 2)).astype("float32") + Y = np.apply_along_axis(stable_softmax, 1, X) + dY = np.ones(Y.shape) dX = label_softmax_grad(Y, dY) - arr = get_numeric_gradient(softmax_op, {"X": X}, 'Y', 'X') - numpy.testing.assert_almost_equal(arr, dX, decimal=1e-2) + scope = core.Scope() + softmax_op = create_op(scope, "softmax", {"X": X}, {"Y": Y}, dict()) + + arr = get_numeric_gradient(scope, softmax_op, {"X": X}, "X", "Y") + np.testing.assert_almost_equal(arr, dX, decimal=1e-2) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_lookup_table.py b/python/paddle/v2/framework/tests/test_lookup_table.py index 19eb464baa555fb67a994f3cfb4d3ed628367c73..b259bb67e832adcb31b0ab4e992738be2b85f884 100644 --- a/python/paddle/v2/framework/tests/test_lookup_table.py +++ b/python/paddle/v2/framework/tests/test_lookup_table.py @@ -1,31 +1,22 @@ import unittest import numpy as np -from op_test_util import OpTestMeta -from gradient_checker import GradientChecker, create_op +from op_test import OpTest -class TestSigmoidOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestLookupTableOp(OpTest): def setUp(self): - self.type = 'lookup_table' - table = np.random.random((17, 31)).astype('float32') - ids = np.random.randint(0, 17, 4).astype('int32') + self.op_type = "lookup_table" + table = np.random.random((17, 31)).astype("float32") + ids = np.random.randint(0, 17, 4).astype("int32") self.inputs = {'W': table, 'Ids': ids} self.outputs = {'Out': table[ids]} + def test_check_output(self): + self.check_output() -class TestSigmoidGradOp(GradientChecker): - def test_grad(self): - op = create_op('lookup_table') - table = 
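Why stable_softmax above subtracts the max first (illustration only): softmax is shift-invariant, so the result is unchanged while exp() can no longer overflow.

    import numpy as np

    def stable_softmax(x):
        shiftx = x - np.max(x)     # shift so the largest exponent is 0
        exps = np.exp(shiftx)
        return exps / np.sum(exps)

    v = np.array([1000.0, 1001.0])   # naive np.exp(1000.0) overflows to inf
    assert abs(stable_softmax(v).sum() - 1.0) < 1e-6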
np.random.random((17, 31)).astype('float32') - ids = np.random.randint(0, 17, 4).astype('int32') - inputs = {'W': table, 'Ids': ids} - # comapre gradients - self.compare_grad(op, inputs, set(['Ids'])) - # check gradients - self.check_grad(op, inputs, set('W'), 'Out') + def test_check_grad(self): + self.check_grad(['W'], 'Out', no_grad_set=set('Ids')) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_mean_op.py b/python/paddle/v2/framework/tests/test_mean_op.py index f32b3160d651a290823223c46c45bb3b6950a505..7823abd8f813aad6462c98a9ace9a13dc286a157 100644 --- a/python/paddle/v2/framework/tests/test_mean_op.py +++ b/python/paddle/v2/framework/tests/test_mean_op.py @@ -1,24 +1,20 @@ import unittest -from op_test_util import OpTestMeta -from gradient_checker import GradientChecker, create_op import numpy as np +from op_test import OpTest -class TestMeanOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestMeanOp(OpTest): def setUp(self): - self.type = "mean" - self.inputs = {'X': np.random.random((32, 784)).astype("float32")} - self.outputs = {'Out': np.mean(self.inputs['X'])} + self.op_type = "mean" + self.inputs = {'X': np.random.random((10, 10)).astype("float32")} + self.outputs = {'Out': np.mean(self.inputs["X"])} + def test_check_output(self): + self.check_output() -class MeanGradOpTest(GradientChecker): - def test_normal(self): - op = create_op("mean") - inputs = {"X": np.random.random((10, 10)).astype("float32")} - self.check_grad(op, inputs, set("X"), "Out") + def test_checkout_grad(self): + self.check_grad(['X'], 'Out') -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_minus_op.py b/python/paddle/v2/framework/tests/test_minus_op.py index 5abdd4a69bf3faa2f3341f338e195815389a7cef..dea797a1fea34265d0a32e097f413f421abf2521 100644 --- a/python/paddle/v2/framework/tests/test_minus_op.py +++ b/python/paddle/v2/framework/tests/test_minus_op.py @@ -1,30 +1,23 @@ import unittest import numpy as np -from gradient_checker import GradientChecker, create_op -from op_test_util import OpTestMeta +from op_test import OpTest -class MinusOpTest(unittest.TestCase): - __metaclass__ = OpTestMeta - +class MinusOpTest(OpTest): def setUp(self): - self.type = "minus" + self.op_type = "minus" self.inputs = { 'X': np.random.random((32, 84)).astype("float32"), 'Y': np.random.random((32, 84)).astype("float32") } self.outputs = {'Out': (self.inputs['X'] - self.inputs['Y'])} + def test_check_output(self): + self.check_output() -class MinusGradTest(GradientChecker): - def test_left(self): - op = create_op("minus") - inputs = { - "X": np.random.random((10, 10)).astype("float32"), - "Y": np.random.random((10, 10)).astype("float32") - } - self.check_grad(op, inputs, ["X", 'Y'], "Out") + def test_check_grad(self): + self.check_grad(['X', 'Y'], 'Out') -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/mnist.py b/python/paddle/v2/framework/tests/test_mnist.py similarity index 87% rename from python/paddle/v2/framework/tests/mnist.py rename to python/paddle/v2/framework/tests/test_mnist.py index a68f302f9c344bf6d63e8d9b48836d69338c3d0b..f6f8f49b797fb6e5016a5e309f12f192d5096431 100644 --- a/python/paddle/v2/framework/tests/mnist.py +++ b/python/paddle/v2/framework/tests/test_mnist.py @@ -38,9 +38,9 @@ def feed_data(name, data): assert isinstance(data, numpy.ndarray) tensor = 
scope.find_var(name).get_tensor() tensor.set_dims(data.shape) - if data.dtype == numpy.dtype('int32'): + if data.dtype == numpy.dtype("int32"): tensor.alloc_int(place) - elif data.dtype == numpy.dtype('float32'): + elif data.dtype == numpy.dtype("float32"): tensor.alloc_float(place) else: raise ValueError("data type not supported") @@ -74,22 +74,25 @@ def init_param(net, param_name, dims): # fc_layer def fc_layer(net, input, size, act="softmax", bias=True, param=None, name=None): """ - Add a fc layer to net + The fully connected layer. - :param input: input variable name. + :param input: The name of input variable. :type input: str - :param size: fully connected layer size. - :param act: activation name - :param param: parameter attribute, used for initialize parameters. - :param bias: bias attribute. False will not have a bias. - :param name: the name of fc layer. If not set, model will generate a - readable name - :return: output variable name. + :param size: The size of fully connected layer. + :param act: The name of activation. + :param param: The attribute of learnable parameter which can be used to + modify initialization mean and std of the parameter. + :param bias: The attribute of bias. If set to False, this layer does not have + a bias. + :param name: The name of this layer. If it is not set explicitly, a name + will be generated automatically. + :return: The name of the output variable. """ + if name is None: - name = 'fc_%d' % uniq_id() + name = "fc_%d" % uniq_id() if not isinstance(name, str): - raise ValueError("name should be string") + raise ValueError("The name of a layer should be a string.") input_dims = scope.find_var(input).get_tensor().get_dims() @@ -123,7 +126,7 @@ def fc_layer(net, input, size, act="softmax", bias=True, param=None, name=None): def cross_entropy_layer(net, input, label): - cost_name = 'cross_entropy_%d' % uniq_id() + cost_name = "cross_entropy_%d" % uniq_id() cross_entropy_op = Operator( "onehot_cross_entropy", X=input, label=label, Y=cost_name) net.append_op(cross_entropy_op) @@ -177,8 +180,8 @@ def error_rate(predict, label): return error_num / float(len(label)) -images = data_layer(name='pixel', dims=[BATCH_SIZE, 784]) -labels = data_layer(name='label', dims=[BATCH_SIZE]) +images = data_layer(name="pixel", dims=[BATCH_SIZE, 784]) +labels = data_layer(name="label", dims=[BATCH_SIZE]) fc1 = fc_layer(net=forward_net, input=images, size=100, act="sigmoid") fc2 = fc_layer(net=forward_net, input=fc1, size=100, act="sigmoid") predict = fc_layer(net=forward_net, input=fc2, size=10, act="softmax") diff --git a/python/paddle/v2/framework/tests/test_mul_op.py b/python/paddle/v2/framework/tests/test_mul_op.py index ee0d81a64efcb81bae8b11b856c201a86da274e9..b3d95a56b88e510734da54f36ff21ccd7e1baabb 100644 --- a/python/paddle/v2/framework/tests/test_mul_op.py +++ b/python/paddle/v2/framework/tests/test_mul_op.py @@ -1,34 +1,59 @@ import unittest import numpy as np -from gradient_checker import GradientChecker, create_op -from op_test_util import OpTestMeta +from op_test import OpTest -class TestMulOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestMulOp(OpTest): def setUp(self): - self.type = "mul" + self.op_type = "mul" self.inputs = { 'X': np.random.random((32, 84)).astype("float32"), 'Y': np.random.random((84, 100)).astype("float32") } self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])} + def test_check_output(self): + self.check_output() -class MulGradOpTest(GradientChecker): - def test_mul(self): - op = create_op("mul") - inputs =
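What the rewritten fc_layer helper above wires together, restated in numpy (illustration only): a mul op, a rowwise_add of the bias, then the activation; the sigmoid here matches the act="sigmoid" layers used in the same file.

    import numpy as np

    def fc_forward(x, w, b):
        return 1.0 / (1.0 + np.exp(-(np.dot(x, w) + b)))   # x.w + b, then sigmoid

    x = np.random.random((4, 784)).astype("float32")
    w = np.random.random((784, 100)).astype("float32")
    b = np.random.random(100).astype("float32")
    assert fc_forward(x, w, b).shape == (4, 100)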
{ - 'X': np.random.random((32, 84)).astype("float32"), - 'Y': np.random.random((84, 100)).astype("float32") + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5) + + def test_check_grad_ingore_x(self): + self.check_grad( + ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')) + + +class TestMulOp2(OpTest): + def setUp(self): + self.op_type = "mul" + self.inputs = { + 'X': np.random.random((15, 4, 12, 10)).astype("float32"), + 'Y': np.random.random((4, 30, 8, 2, 9)).astype("float32") + } + self.attrs = {'x_num_col_dims': 2, 'y_num_col_dims': 2} + self.outputs = { + 'Out': np.dot(self.inputs['X'].reshape(15 * 4, 12 * 10), + self.inputs['Y'].reshape(4 * 30, 8 * 2 * 9)) } - # mul op will enlarge the relative error + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5) + + def test_check_grad_ingore_x(self): self.check_grad( - op, inputs, set(["X", "Y"]), "Out", max_relative_error=0.5) + ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set('X')) + def test_check_grad_ignore_y(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')) -# TODO(dzh,qijun) : mulgrad test case need transpose feature of blas library -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_net.py b/python/paddle/v2/framework/tests/test_net.py index 9339cf28dabc95b46b958777200fb1db9dcf284f..50cfb855f2b01d8fd32342855d46716da7e07856 100644 --- a/python/paddle/v2/framework/tests/test_net.py +++ b/python/paddle/v2/framework/tests/test_net.py @@ -15,7 +15,7 @@ def fc(X, W, Y): class TestNet(unittest.TestCase): def test_net_all(self): net = core.Net.create() - op1 = Operator("add_two", X="X", Y="Y", Out="Out") + op1 = Operator("add", X="X", Y="Y", Out="Out") net.append_op(op1) net2 = core.Net.create() @@ -26,7 +26,7 @@ class TestNet(unittest.TestCase): expected = ''' Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]}. - Op(add_two), inputs:{X[X], Y[Y]}, outputs:{Out[Out]}. + Op(add), inputs:{X[X], Y[Y]}, outputs:{Out[Out]}. Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}. Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}. Op(mul), inputs:{X[X], Y[W]}, outputs:{Out[pre_activation]}. 
@@ -35,5 +35,5 @@ Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]} self.assertEqual(expected, "\n" + str(net)) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_operator.py b/python/paddle/v2/framework/tests/test_operator.py index 1abc4eeb57bcedc81e34b0e156048ee4f5cfdc2d..040556322d79cbb594eb9af585a5b9920d7ab625 100644 --- a/python/paddle/v2/framework/tests/test_operator.py +++ b/python/paddle/v2/framework/tests/test_operator.py @@ -193,10 +193,10 @@ class TestOpDescCreationMethod(unittest.TestCase): class TestOpCreations(unittest.TestCase): def test_all(self): - add_op = op.Operator("add_two", X="a", Y="b", Out="z") + add_op = op.Operator("add", X="a", Y="b", Out="z") self.assertIsNotNone(add_op) # Invoke C++ DebugString() - self.assertEqual('Op(add_two), inputs:{X[a], Y[b]}, outputs:{Out[z]}.', + self.assertEqual('Op(add), inputs:{X[a], Y[b]}, outputs:{Out[z]}.', str(add_op)) diff --git a/python/paddle/v2/framework/tests/test_recurrent_op.py b/python/paddle/v2/framework/tests/test_recurrent_op.py index d6000ab9f9d5b969f96128b183f48d49000c8a5e..22e680fd783ec681e95326fb84db34570265cffc 100644 --- a/python/paddle/v2/framework/tests/test_recurrent_op.py +++ b/python/paddle/v2/framework/tests/test_recurrent_op.py @@ -146,7 +146,7 @@ class TestRecurrentOp(unittest.TestCase): stepnet = core.Net.create() x_fc_op = Operator("mul", X="x@alias", Y="W", Out="Wx") h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh") - sum_op = Operator("add_two", X="Wx", Y="Uh", Out="sum") + sum_op = Operator("add", X="Wx", Y="Uh", Out="sum") sig_op = Operator("sigmoid", X="sum", Y="h@alias") for op in [x_fc_op, h_fc_op, sum_op, sig_op]: diff --git a/python/paddle/v2/framework/tests/test_reshape_op.py b/python/paddle/v2/framework/tests/test_reshape_op.py new file mode 100644 index 0000000000000000000000000000000000000000..16bb6bb2af67f7d32a2fafc1cb37412084ec0829 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_reshape_op.py @@ -0,0 +1,21 @@ +import unittest +import numpy as np +from op_test import OpTest + + +class TestReshapeOp(OpTest): + def setUp(self): + self.op_type = "reshape" + self.inputs = {'X': np.random.random((10, 20)).astype("float32")} + self.attrs = {'shape': [10 * 20]} + self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(["X"], "Out") + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/framework/tests/test_rowwise_add_op.py b/python/paddle/v2/framework/tests/test_rowwise_add_op.py index 45d569da29d13cf8e2a3cb9d67c2d01e8b365453..336645bd993ff743cbe20bb5cae5cd278db57ce7 100644 --- a/python/paddle/v2/framework/tests/test_rowwise_add_op.py +++ b/python/paddle/v2/framework/tests/test_rowwise_add_op.py @@ -1,30 +1,51 @@ import unittest import numpy as np -from op_test_util import OpTestMeta -from gradient_checker import GradientChecker, create_op +from op_test import OpTest -class TestRowwiseAddOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestRowwiseAddOp(OpTest): def setUp(self): - self.type = "rowwise_add" + self.op_type = "rowwise_add" self.inputs = { - 'X': np.random.random((32, 84)).astype("float32"), - 'b': np.random.random(84).astype("float32") + 'X': np.random.uniform(0.1, 1, [5, 10]).astype("float32"), + 'b': np.random.uniform(0.1, 1, [10]).astype("float32") } self.outputs = {'Out': np.add(self.inputs['X'], 
self.inputs['b'])} + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'b'], 'Out') + + def test_check_grad_ingore_b(self): + self.check_grad(['X'], 'Out', no_grad_set=set('b')) -class RowwiseAddGradOpTest(GradientChecker): - def test_rowwise_add(self): - op = create_op("rowwise_add") - inputs = { - "X": np.random.uniform(0.1, 1, [5, 10]).astype("float32"), - "b": np.random.uniform(0.1, 1, [10]).astype("float32") + def test_check_grad_ingore_x(self): + self.check_grad(['b'], 'Out', no_grad_set=set('X')) + + +class TestRowwiseAddOp2(OpTest): + def setUp(self): + self.op_type = "rowwise_add" + self.inputs = { + 'X': np.random.uniform(0.1, 1, [2, 3, 2, 5]).astype("float32"), + 'b': np.random.uniform(0.1, 1, [2, 5]).astype("float32") } - self.check_grad(op, inputs, set(["X", "b"]), "Out") + self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])} + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'b'], 'Out') + + def test_check_grad_ignore_b(self): + self.check_grad(['X'], 'Out', no_grad_set=set('b')) + + def test_check_grad_ignore_x(self): + self.check_grad(['b'], 'Out', no_grad_set=set('X')) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_scale_and_identity_op.py b/python/paddle/v2/framework/tests/test_scale_and_identity_op.py index 69b301c376ee7a4ebb2e2dadc645c7d10f823a08..05d76d428299c8176d1a6adf6da15a203fa7502a 100644 --- a/python/paddle/v2/framework/tests/test_scale_and_identity_op.py +++ b/python/paddle/v2/framework/tests/test_scale_and_identity_op.py @@ -1,43 +1,34 @@ import unittest -from op_test_util import OpTestMeta -from gradient_checker import GradientChecker, create_op import numpy as np -from paddle.v2.framework.op import Operator +from op_test import OpTest -class IdentityTest(unittest.TestCase): - __metaclass__ = OpTestMeta - +class IdentityTest(OpTest): def setUp(self): - self.type = "identity" - self.inputs = {'X': np.random.random((32, 784)).astype("float32")} + self.op_type = "identity" + self.inputs = {'X': np.random.random((10, 10)).astype("float32")} self.outputs = {'Out': self.inputs['X']} + def test_check_output(self): + self.check_output() -class IdentityGradOpTest(GradientChecker): - def test_normal(self): - op = create_op("identity") - inputs = {"X": np.random.random((10, 10)).astype("float32")} - self.check_grad(op, inputs, set("X"), "Out") - + def test_check_grad(self): + self.check_grad(['X'], 'Out') -class ScaleTest(unittest.TestCase): - __metaclass__ = OpTestMeta +class ScaleTest(OpTest): def setUp(self): - self.type = "scale" - self.inputs = {'X': np.random.random((32, 784)).astype("float32")} + self.op_type = "scale" + self.inputs = {'X': np.random.random((10, 10)).astype("float32")} self.attrs = {'scale': -2.3} self.outputs = {'Out': self.inputs['X'] * self.attrs['scale']} + def test_check_output(self): + self.check_output() -class ScaleGradTest(GradientChecker): - def test_normal(self): - op = Operator("scale", X="X", Out="Out", scale=3.2) - self.check_grad(op, - {"X": np.random.random((10, 10)).astype("float32")}, - set("X"), "Out") + def test_check_grad(self): + self.check_grad(['X'], 'Out') -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_scatter_op.py b/python/paddle/v2/framework/tests/test_scatter_op.py index 
c1f9444889372104e39ded78fc7207a59b80a293..33c73c52631a09ea0fefdeb9467991ae9c04321c 100644 --- a/python/paddle/v2/framework/tests/test_scatter_op.py +++ b/python/paddle/v2/framework/tests/test_scatter_op.py @@ -1,37 +1,24 @@ import unittest -from op_test_util import OpTestMeta -from gradient_checker import GradientChecker, create_op -import numpy -import paddle.v2.framework.core as core -from paddle.v2.framework.op import Operator +import numpy as np +from op_test import OpTest -class TestScatterOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestScatterOp(OpTest): def setUp(self): - self.type = "scatter" - ref_np = numpy.ones((3, 3)).astype("float32") - index_np = numpy.array([1, 2]).astype("int32") - updates_np = numpy.random.random((2, 3)).astype("float32") - output_np = numpy.copy(ref_np) + self.op_type = "scatter" + ref_np = np.ones((3, 3)).astype("float32") + index_np = np.array([1, 2]).astype("int32") + updates_np = np.random.random((2, 3)).astype("float32") + output_np = np.copy(ref_np) output_np[index_np] += updates_np self.inputs = {'Ref': ref_np, 'Index': index_np, 'Updates': updates_np} self.outputs = {'Out': output_np} + def test_check_output(self): + self.check_output() -class TestScatterGradOp(GradientChecker): - def test_scatter_grad(self): - op = create_op("scatter") - # test data setup - ref_np = numpy.ones((3, 10)).astype("float32") - index_np = numpy.array([1, 2]).astype("int32") - updates_np = numpy.random.random((2, 10)).astype("float32") - output_np = numpy.copy(ref_np) - output_np[index_np] += updates_np - inputs = {'Ref': ref_np, 'Index': index_np, 'Updates': updates_np} - self.check_grad( - op, inputs, set(["Updates", "Ref"]), "Out", in_place=True) + def test_check_grad(self): + self.check_grad(['Updates', 'Ref'], 'Out', in_place=True) if __name__ == "__main__": diff --git a/python/paddle/v2/framework/tests/test_sgd_op.py b/python/paddle/v2/framework/tests/test_sgd_op.py index e5f9ef865e84f1a78e28884ad7e2e758f9ca8054..557cf15ace63e336462c7dcdbbc10f30aeedc6f4 100644 --- a/python/paddle/v2/framework/tests/test_sgd_op.py +++ b/python/paddle/v2/framework/tests/test_sgd_op.py @@ -1,21 +1,22 @@ import unittest -import numpy -from op_test_util import OpTestMeta +import numpy as np +from op_test import OpTest -class TestSGD(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestSGD(OpTest): def setUp(self): - self.type = "sgd" - w = numpy.random.random((102, 105)).astype("float32") - g = numpy.random.random((102, 105)).astype("float32") + self.op_type = "sgd" + w = np.random.random((102, 105)).astype("float32") + g = np.random.random((102, 105)).astype("float32") lr = 0.1 self.inputs = {'param': w, 'grad': g} self.attrs = {'learning_rate': lr} self.outputs = {'param_out': w - lr * g} + def test_check_output(self): + self.check_output() + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_sigmoid_op.py b/python/paddle/v2/framework/tests/test_sigmoid_op.py index 273c2e5ab1a84d12621fe9568c4cf22073b6aed4..2316e49eff7bb1cdb53acb3889a6ef05060b59f3 100644 --- a/python/paddle/v2/framework/tests/test_sigmoid_op.py +++ b/python/paddle/v2/framework/tests/test_sigmoid_op.py @@ -1,27 +1,21 @@ import unittest import numpy as np -from op_test_util import OpTestMeta -from gradient_checker import GradientChecker, create_op +from op_test import OpTest -class TestSigmoidOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestSigmoid(OpTest): def setUp(self): - self.type = "sigmoid" - self.inputs = {'X': 
np.random.random((15, 31)).astype("float32")} + self.op_type = "sigmoid" + self.inputs = { + 'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32") + } self.outputs = {'Y': 1 / (1 + np.exp(-self.inputs['X']))} + def test_check_output(self): + self.check_output() -class TestSigmoidGradOp(GradientChecker): - def test_grad(self): - op = create_op("sigmoid") - inputs = {"X": np.random.uniform(0.1, 1, [11, 17]).astype("float32")} - # compare gpu and cpu results for backward op. - # this test will be skiped if only compiling CPU version. - self.compare_grad(op, inputs) - # check gradients - self.check_grad(op, inputs, set("X"), "Y", max_relative_error=0.007) + def test_check_grad(self): + self.check_grad(["X"], "Y", max_relative_error=0.007) if __name__ == '__main__': diff --git a/python/paddle/v2/framework/tests/test_softmax_op.py b/python/paddle/v2/framework/tests/test_softmax_op.py index e670d93653e07d35e5019c9daac45c214eddf367..1b948f252fa631e9886840b377de2996e110dc91 100644 --- a/python/paddle/v2/framework/tests/test_softmax_op.py +++ b/python/paddle/v2/framework/tests/test_softmax_op.py @@ -1,9 +1,6 @@ import unittest - import numpy as np - -from gradient_checker import GradientChecker, create_op -from op_test_util import OpTestMeta +from op_test import OpTest def stable_softmax(x): @@ -13,23 +10,22 @@ def stable_softmax(x): return exps / np.sum(exps) -class TestSoftmaxOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestSoftmaxOp(OpTest): def setUp(self): - self.type = "softmax" - self.inputs = {'X': np.random.random((32, 100)).astype("float32")} + self.op_type = "softmax" + self.inputs = { + 'X': np.random.uniform(0.1, 1, [10, 10]).astype("float32") + } self.outputs = { 'Y': np.apply_along_axis(stable_softmax, 1, self.inputs['X']) } + def test_check_output(self): + self.check_output() -class SoftmaxGradOpTest(GradientChecker): - def test_softmax(self): - op = create_op("softmax") - inputs = {"X": np.random.uniform(0.1, 1, [10, 10]).astype("float32")} - self.check_grad(op, inputs, set("X"), "Y") + def test_check_grad(self): + self.check_grad(['X'], 'Y') -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py b/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py new file mode 100644 index 0000000000000000000000000000000000000000..dc6ebf5d30369231b4918a168bbdf25c7096c808 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py @@ -0,0 +1,71 @@ +import unittest +import numpy as np +from op_test import OpTest + + +class TestSquaredL2DistanceOp_f0(OpTest): + def setUp(self): + self.op_type = "squared_l2_distance" + self.inputs = { + 'X': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32"), + 'Y': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32") + } + sub_res = self.inputs['X'] - self.inputs['Y'] + output = sub_res * sub_res + self.outputs = { + 'sub_result': sub_res, + 'Out': np.expand_dims(output.sum(1), 1) + } + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X', 'Y'], 'Out') + + +class TestSquaredL2DistanceOp_f1(OpTest): + def setUp(self): + self.op_type = "squared_l2_distance" + self.inputs = { + 'X': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32"), + 'Y': np.random.uniform(0.1, 0.6, (1, 3)).astype("float32") + } + sub_res = self.inputs['X'] - self.inputs['Y'] + output = sub_res * sub_res + self.outputs = { + 'sub_result': sub_res, + 'Out': np.expand_dims(output.sum(1), 1) + } 
+ + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X', 'Y'], 'Out') + + +class TestSquaredL2DistanceOp_f2(OpTest): + def setUp(self): + self.op_type = "squared_l2_distance" + self.inputs = { + 'X': np.random.uniform(0.1, 0.6, (2, 3, 4)).astype("float32"), + 'Y': np.random.uniform(0.1, 0.6, (1, 3, 4)).astype("float32") + } + sub_res = self.inputs['X'] - self.inputs['Y'] + sub_res = sub_res.reshape((2, 3 * 4)) + output = sub_res * sub_res + self.outputs = { + 'sub_result': sub_res, + 'Out': np.expand_dims(output.sum(1), 1) + } + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X', 'Y'], 'Out') + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/v2/framework/tests/test_sum_op.py b/python/paddle/v2/framework/tests/test_sum_op.py new file mode 100644 index 0000000000000000000000000000000000000000..60254291e2ab9215e2bc37c12d5e2e1ca6d33d5d --- /dev/null +++ b/python/paddle/v2/framework/tests/test_sum_op.py @@ -0,0 +1,24 @@ +import unittest +import numpy as np +from op_test import OpTest + + +class TestSumOp(OpTest): + def setUp(self): + self.op_type = "sum" + x0 = np.random.random((3, 4)).astype('float32') + x1 = np.random.random((3, 4)).astype('float32') + x2 = np.random.random((3, 4)).astype('float32') + self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]} + y = x0 + x1 + x2 + self.outputs = {'Out': y} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['x0'], 'Out') + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/v2/framework/tests/test_tensor.py b/python/paddle/v2/framework/tests/test_tensor.py index 1af39818a305215b45219b8c5f0a10630fd64279..f26ed4964c521be1cd839b39d7244f96c653cb1a 100644 --- a/python/paddle/v2/framework/tests/test_tensor.py +++ b/python/paddle/v2/framework/tests/test_tensor.py @@ -3,7 +3,7 @@ import unittest import numpy -class TestScope(unittest.TestCase): +class TestTensor(unittest.TestCase): def test_int_tensor(self): scope = core.Scope() var = scope.new_var("test_tensor") @@ -20,8 +20,8 @@ class TestScope(unittest.TestCase): tensor.set(tensor_array, place) tensor_array_2 = numpy.array(tensor) - self.assertEqual(1.0, tensor_array_2[3, 9]) - self.assertEqual(2.0, tensor_array_2[19, 11]) + self.assertEqual(1, tensor_array_2[3, 9]) + self.assertEqual(2, tensor_array_2[19, 11]) def test_float_tensor(self): scope = core.Scope() @@ -43,6 +43,84 @@ class TestScope(unittest.TestCase): self.assertAlmostEqual(1.0, tensor_array_2[3, 9]) self.assertAlmostEqual(2.0, tensor_array_2[19, 11]) + def test_int_lod_tensor(self): + places = [core.CPUPlace(), core.GPUPlace(0)] + for place in places: + scope = core.Scope() + var = scope.new_var("test_tensor") + var_lod = scope.new_var("test_lod_tensor") + + tensor = var.get_tensor() + lod_tensor = var_lod.get_lod_tensor() + + tensor.set_dims([4, 4, 6]) + tensor.alloc_int(place) + array = numpy.array(tensor) + array[0, 0, 0] = 3 + array[3, 3, 5] = 10 + tensor.set(array, place) + + lod_tensor.set_tensor(tensor) + lod_tensor.set_lod([[0, 2, 4]]) + + lod_v = numpy.array(lod_tensor.tensor()) + self.assertTrue(numpy.alltrue(array == lod_v)) + + lod = lod_tensor.lod() + self.assertEqual(0, lod[0][0]) + self.assertEqual(2, lod[0][1]) + self.assertEqual(4, lod[0][2]) + + def test_float_lod_tensor(self): + places = [core.CPUPlace(), core.GPUPlace(0)] + for place in places: + scope = core.Scope() + var = scope.new_var("test_tensor") + 
var_lod = scope.new_var("test_lod_tensor") + + tensor = var.get_tensor() + lod_tensor = var_lod.get_lod_tensor() + + tensor.set_dims([5, 2, 3, 4]) + tensor.alloc_float(place) + + tensor_array = numpy.array(tensor) + self.assertEqual((5, 2, 3, 4), tensor_array.shape) + tensor_array[0, 0, 0, 0] = 1.0 + tensor_array[0, 0, 0, 1] = 2.0 + tensor.set(tensor_array, place) + + lod_tensor.set_tensor(tensor) + + lod_v = numpy.array(lod_tensor.tensor()) + self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0]) + self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1]) + self.assertEqual(len(lod_tensor.lod()), 0) + + lod_py = [[0, 2, 5], [0, 2, 4, 5]] + lod_tensor.set_lod(lod_py) + lod = lod_tensor.lod() + self.assertListEqual(lod_py, lod) + + def test_lod_tensor_init(self): + scope = core.Scope() + var = scope.new_var("test_tensor") + place = core.CPUPlace() + tensor = var.get_tensor() + tensor.set_dims([5, 2, 3, 4]) + tensor.alloc_float(place) + tensor_array = numpy.array(tensor) + tensor_array[0, 0, 0, 0] = 1.0 + tensor_array[0, 0, 0, 1] = 2.0 + tensor.set(tensor_array, place) + lod_py = [[0, 2, 5], [0, 2, 4, 5]] + + lod_tensor = core.LoDTensor(lod_py, tensor) + lod_v = numpy.array(lod_tensor.tensor()) + self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0]) + self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1]) + self.assertListEqual(lod_py, lod_tensor.lod()) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/v2/framework/tests/test_top_k_op.py b/python/paddle/v2/framework/tests/test_top_k_op.py new file mode 100644 index 0000000000000000000000000000000000000000..cab799256d791889c295aa7f9048080f5caaf2dc --- /dev/null +++ b/python/paddle/v2/framework/tests/test_top_k_op.py @@ -0,0 +1,47 @@ +import unittest +import numpy as np +from op_test import OpTest + + +class TestTopkOp(OpTest): + def setUp(self): + self.op_type = "top_k" + k = 1 + input = np.random.random((32, 84)).astype("float32") + output = np.ndarray((32, k)) + indices = np.ndarray((32, k)) + + self.inputs = {'X': input} + self.attrs = {'k': k} + + for rowid in xrange(32): + row = input[rowid] + output[rowid] = np.sort(row)[-k:] + indices[rowid] = row.argsort()[-k:] + + self.outputs = {'Out': output, 'Indices': indices} + + +class TestTopkOp3d(OpTest): + def setUp(self): + self.op_type = "top_k" + k = 1 + input = np.random.random((32, 2, 84)).astype("float32") + input_flat_2d = input.reshape(64, 84) + output = np.ndarray((64, k)) + indices = np.ndarray((64, k)).astype("int") + + # FIXME: should use 'X': input for a 3d input + self.inputs = {'X': input_flat_2d} + self.attrs = {'k': k} + + for rowid in xrange(64): + row = input_flat_2d[rowid] + output[rowid] = np.sort(row)[-k:] + indices[rowid] = row.argsort()[-k:] + + self.outputs = {'Out': output, 'Indices': indices} + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/v2/framework/tests/test_uniform_random_op.py b/python/paddle/v2/framework/tests/test_uniform_random_op.py index c3d2bb44da3977c0899b2609a8efe15b7e1789f2..76a5e36e56ab08230bdc2597d209fcf5d1d2acb0 100644 --- a/python/paddle/v2/framework/tests/test_uniform_random_op.py +++ b/python/paddle/v2/framework/tests/test_uniform_random_op.py @@ -14,11 +14,11 @@ class UniformRandomTest(unittest.TestCase): def uniform_random_test(self, place): scope = core.Scope() - scope.new_var("X").get_tensor() + scope.new_var('X').get_tensor() op = Operator( "uniform_random", - Out="X", + Out='X', dims=[1000, 784], min=-5.0, max=10.0, @@ -27,9 +27,9 @@ class UniformRandomTest(unittest.TestCase): op.infer_shape(scope) ctx = 
core.DeviceContext.create(place) op.run(scope, ctx) - tensor = numpy.array(scope.find_var("X").get_tensor()) + tensor = numpy.array(scope.find_var('X').get_tensor()) self.assertAlmostEqual(tensor.mean(), 2.5, delta=0.1) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/inference.py b/python/paddle/v2/inference.py index 8acea6155c588f2e8e5ad009cd8f0a0c09afb92b..e80456d9bbeb3c34ac9eab873a84dbf8f06e34df 100644 --- a/python/paddle/v2/inference.py +++ b/python/paddle/v2/inference.py @@ -2,6 +2,7 @@ import numpy import collections import topology import minibatch +import cPickle __all__ = ['infer', 'Inference'] @@ -25,11 +26,23 @@ class Inference(object): :type parameters: paddle.v2.parameters.Parameters """ - def __init__(self, output_layer, parameters): + def __init__(self, parameters, output_layer=None, fileobj=None): import py_paddle.swig_paddle as api - topo = topology.Topology(output_layer) - gm = api.GradientMachine.createFromConfigProto( - topo.proto(), api.CREATE_MODE_TESTING, [api.PARAMETER_VALUE]) + + if output_layer is not None: + topo = topology.Topology(output_layer) + gm = api.GradientMachine.createFromConfigProto( + topo.proto(), api.CREATE_MODE_TESTING, [api.PARAMETER_VALUE]) + self.__data_types__ = topo.data_type() + elif fileobj is not None: + tmp = cPickle.load(fileobj) + gm = api.GradientMachine.createByConfigProtoStr( + tmp['protobin'], api.CREATE_MODE_TESTING, + [api.PARAMETER_VALUE]) + self.__data_types__ = tmp['data_type'] + else: + raise ValueError("Either output_layer or fileobj must be set") + for param in gm.getParameters(): val = param.getBuf(api.PARAMETER_VALUE) name = param.getName() @@ -43,7 +56,6 @@ class Inference(object): # called here, but it's better to call this function in one place. 
param.setValueUpdated() self.__gradient_machine__ = gm - self.__data_types__ = topo.data_type() def iter_infer(self, input, feeding=None): from data_feeder import DataFeeder diff --git a/python/paddle/v2/tests/test_layer.py b/python/paddle/v2/tests/test_layer.py index 783a0ca85dc61b9f00ac8126e03788884dfb44cb..de932ad715bea8db158393c3c192ef67502e2fa3 100644 --- a/python/paddle/v2/tests/test_layer.py +++ b/python/paddle/v2/tests/test_layer.py @@ -134,8 +134,9 @@ class CostLayerTest(unittest.TestCase): cost3 = layer.cross_entropy_cost(input=inference, label=label) cost4 = layer.cross_entropy_with_selfnorm_cost( input=inference, label=label) - cost5 = layer.mse_cost(input=inference, label=label) - cost6 = layer.mse_cost(input=inference, label=label, weight=weight) + cost5 = layer.square_error_cost(input=inference, label=label) + cost6 = layer.square_error_cost( + input=inference, label=label, weight=weight) cost7 = layer.multi_binary_label_cross_entropy_cost( input=inference, label=label) cost8 = layer.rank_cost(left=score, right=score, label=score) diff --git a/python/paddle/v2/topology.py b/python/paddle/v2/topology.py index a20e878d0817d0a75e9c47a44f8765deca99225c..2db66be2505dde38a501edf45984e1f36beb351d 100644 --- a/python/paddle/v2/topology.py +++ b/python/paddle/v2/topology.py @@ -18,6 +18,7 @@ from paddle.proto.ModelConfig_pb2 import ModelConfig import paddle.trainer_config_helpers as conf_helps import layer as v2_layer import config_base +import cPickle __all__ = ['Topology'] @@ -100,6 +101,14 @@ class Topology(object): return layer return None + def serialize_for_inference(self, stream): + protobin = self.proto().SerializeToString() + data_type = self.data_type() + cPickle.dump({ + 'protobin': protobin, + 'data_type': data_type + }, stream, cPickle.HIGHEST_PROTOCOL) + def __check_layer_type__(layer): if not isinstance(layer, config_base.Layer): diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py index 0654a301049dcb347b79879076a869a0c14a07ae..ca95ef13bd440ac0ba3d46f6e4680d4d7aa94c42 100644 --- a/python/paddle/v2/trainer.py +++ b/python/paddle/v2/trainer.py @@ -174,13 +174,18 @@ class SGD(object): pass_id=pass_id, batch_id=batch_id, cost=cost, - evaluator=batch_evaluator)) + evaluator=batch_evaluator, + gm=self.__gradient_machine__)) self.__parameter_updater__.finishBatch(cost) batch_evaluator.finish() self.__parameter_updater__.finishPass() pass_evaluator.finish() - event_handler(v2_event.EndPass(pass_id, evaluator=pass_evaluator)) + event_handler( + v2_event.EndPass( + pass_id, + evaluator=pass_evaluator, + gm=self.__gradient_machine__)) self.__gradient_machine__.finish() def test(self, reader, feeding=None):
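
One subtlety worth flagging in the updated tests: `no_grad_set` is built with `set('X')` or `set("X")`, i.e. a set constructed from a string, which yields a set of characters. That coincides with `{'X'}` only because these input names are single letters; a longer name such as 'Ref' would be split apart (`set('Ref') == {'R', 'e', 'f'}`). A minimal sketch of the length-safe equivalent, reusing the arguments of the mul tests above purely for illustration:

    # Inside an OpTest subclass: check Y's gradient while skipping X's.
    # A set literal avoids the character-splitting pitfall of set('X').
    self.check_grad(
        ['Y'], 'Out', max_relative_error=0.5, no_grad_set={'X'})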
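
On the new LoDTensor tests in test_tensor.py: the LoD argument is an offset table over the tensor's first dimension, so `[[0, 2, 4]]` describes one level holding two sequences, rows [0, 2) and rows [2, 4). For the two-level case the test data reads most naturally as absolute row offsets at every level, with deeper levels refining shallower ones; that reading is an interpretation of the test data, not something the diff states:

    # lod_py as used in test_float_lod_tensor, first dimension = 5:
    lod_py = [[0, 2, 5],     # level 0: sequences over rows [0, 2) and [2, 5)
              [0, 2, 4, 5]]  # level 1: sub-sequences [0, 2), [2, 4), [4, 5)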
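
The topology.py and inference.py hunks are two halves of one feature: a topology can now be pickled once after training and reloaded for inference without the original layer definitions. A minimal sketch of the round trip, assuming `output_layer` and `parameters` come from an ordinary paddle.v2 training session; the file name is illustrative:

    from paddle.v2.topology import Topology
    from paddle.v2.inference import Inference

    # Training side: serialize_for_inference pickles
    # {'protobin': <serialized ModelConfig>, 'data_type': ...} with
    # cPickle.HIGHEST_PROTOCOL, so the stream must be binary.
    with open('inference_topology.pkl', 'wb') as f:
        Topology(output_layer).serialize_for_inference(f)

    # Serving side: fileobj= is the new alternative to output_layer=,
    # rebuilding the GradientMachine from the pickled proto string.
    with open('inference_topology.pkl', 'rb') as f:
        inferer = Inference(parameters=parameters, fileobj=f)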