diff --git a/.clang-format b/.clang-format index 9ba433b17362424973626470d930356c2173dd84..aff93435f58c522f5ed1090aef2005f76e91cf31 100644 --- a/.clang-format +++ b/.clang-format @@ -25,4 +25,3 @@ AllowAllParametersOfDeclarationOnNextLine: true BinPackParameters: false BinPackArguments: false ... - diff --git a/.gitignore b/.gitignore index 020d3f0c303f7d850f4ec9c0efe58ab2d57dce2e..ac56a3320ec85769d2c87c072512f5217eca0c24 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,4 @@ cmake_install.cmake paddle/.timestamp python/paddlepaddle.egg-info/ paddle/pybind/pybind.h +python/paddle/version.py diff --git a/.travis.yml b/.travis.yml index c51e02eb79a9e53a2b8d1d663e8f0c3e0d8c3a61..e2d49daa1981396628efa5d16459eb70e9e76884 100644 --- a/.travis.yml +++ b/.travis.yml @@ -42,7 +42,7 @@ before_install: script: - | timeout 2580 paddle/scripts/travis/${JOB}.sh # 43min timeout - RESULT=$?; if [ $RESULT -eq 0 ] || [ $RESULT -eq 142 ]; then true; else false; fi; + RESULT=$?; if [ $RESULT -eq 0 ] || [ $RESULT -eq 142 ]; then true ;else exit 1; fi; - | if [[ "$JOB" != "build_doc" ]]; then exit 0; fi; if [[ "$TRAVIS_PULL_REQUEST" != "false" ]]; then exit 0; fi; diff --git a/CMakeLists.txt b/CMakeLists.txt index 65164b8472b902be8b0b9d5fb99807d012b8a666..b309ff37e52b4fd28b14925bdd7e3740e1e2fa47 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,10 +16,14 @@ cmake_minimum_required(VERSION 3.0) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake") set(PADDLE_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) set(PADDLE_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}) +SET(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG") +SET(CMAKE_C_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG") include(system) project(paddle CXX C Go) +message(STATUS "CXX compiler: " ${CMAKE_CXX_COMPILER} ", version: " ${CMAKE_CXX_COMPILER_VERSION}) +message(STATUS "C compiler: " ${CMAKE_C_COMPILER} ", version: " ${CMAKE_C_COMPILER_VERSION}) find_package(Sphinx) if(NOT CMAKE_CROSSCOMPILING) @@ -54,7 +58,9 @@ option(WITH_C_API "Compile PaddlePaddle with C-API(Prediction)" OFF) option(WITH_GOLANG "Compile PaddlePaddle with GOLANG" OFF) option(GLIDE_INSTALL "Download and install go dependencies " ON) option(USE_NNPACK "Compile PaddlePaddle with NNPACK library" OFF) +option(WITH_DISTRIBUTE "Compile with grpc distributed support" OFF) option(USE_EIGEN_FOR_BLAS "Use matrix multiplication in Eigen" OFF) +option(WITH_ARM_FP16 "Use half precision support on armv8.2-a cpu" OFF) # CMAKE_BUILD_TYPE if(NOT CMAKE_BUILD_TYPE) @@ -67,9 +73,6 @@ if(ANDROID OR IOS) if(ANDROID) if(${CMAKE_SYSTEM_VERSION} VERSION_LESS "16") message(FATAL_ERROR "Unsupport standalone toolchains with Android API level lower than 16") - elseif(${CMAKE_SYSTEM_VERSION} VERSION_LESS "21") - # TODO: support glog for Android api 16 ~ 19 in the future - message(WARNING "Using the unofficial git repository instead") endif() endif() @@ -83,6 +86,8 @@ if(ANDROID OR IOS) "Disable RDMA when cross-compiling for Android and iOS" FORCE) set(WITH_MKL OFF CACHE STRING "Disable MKL when cross-compiling for Android and iOS" FORCE) + set(WITH_GOLANG OFF CACHE STRING + "Disable golang when cross-compiling for Android and iOS" FORCE) # Compile PaddlePaddle mobile inference library if (NOT WITH_C_API) @@ -133,6 +138,8 @@ include(external/any) # download libn::any include(external/eigen) # download eigen3 include(external/pybind11) # download pybind11 include(external/nccl) +include(external/cares) +include(external/grpc) include(cudnn) # set cudnn libraries, must before configure 
include(configure) # add paddle env configuration diff --git a/Dockerfile b/Dockerfile index 150344a8116e2be9b5bab8e5fdcc9c37f4025020..857d3f3e5f64791146741ffb29feabfcb2ecbb84 100644 --- a/Dockerfile +++ b/Dockerfile @@ -29,7 +29,7 @@ RUN apt-get update && \ automake locales clang-format swig doxygen cmake \ liblapack-dev liblapacke-dev libboost-dev \ clang-3.8 llvm-3.8 libclang-3.8-dev \ - net-tools && \ + net-tools libtool && \ apt-get clean -y # Install Go and glide diff --git a/README.md b/README.md index db0fbd88b250cdc2a3cc77521cc1c2cea77c6e87..bbb2d498589092de78b21a662f03171a0721f840 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ Please refer to our [release announcement](https://github.com/PaddlePaddle/Paddl examples: - Optimized math operations through SSE/AVX intrinsics, BLAS libraries - (e.g. MKL, ATLAS, cuBLAS) or customized CPU/GPU kernels. + (e.g. MKL, OpenBLAS, cuBLAS) or customized CPU/GPU kernels. - Highly optimized recurrent networks which can handle **variable-length** sequence without padding. - Optimized local and distributed training for models with high dimensional diff --git a/RELEASE.cn.md b/RELEASE.cn.md index 5deaf230a8f5dd3089993f0fc79b9460fd049750..494c59730dd3c2830514e8924aa3d59a34ac412e 100644 --- a/RELEASE.cn.md +++ b/RELEASE.cn.md @@ -1,3 +1,62 @@ +# v0.11.0版本 + +## PaddlePaddle Fluid + +- PaddlePaddle发布版本v0.11.0包含一个新的特性*PaddlePaddle Fluid*. Fluid 是设计用来让用户像Pytorch和Tensorflow Eager Execution一样执行程序。在这些系统中,不再有*模型*这个概念,应用也不再包含一个用于描述Operator图或者一系列层的符号描述,而是像通用程序那样描述训练或者预测的过程。而Fluid与PyTorch或Eager Execution的区别在于Fluid不依赖Python提供的控制流,例如 if-else-then或者for,而是提供了基于C++实现的控制流并暴露了对应的用with语法实现的Python接口。例如: + + https://github.com/PaddlePaddle/Paddle/blob/3df78ed2a98d37f7ae6725894cc7514effd5664b/python/paddle/v2/fluid/tests/test_while_op.py#L36-L44 + +- 在v0.11.0版本中,我们提供了一个C++类`Executor`用于运行一个Fluid程序。Executor类似一个解释器。在未来的版本中,我们将提升和优化Executor成为一个调试器,就像GDB。并可能提供一些编译器,这个编译器会读取一个上文所描述的应用然后编译成一个等价的 +源代码,这个源代码可以被nvcc编译成可以使用CUDA的二进制,或者被icc编译成可以充分利用Intel CPU的二进制。 + + +## 新特点 + +* 发布 `PaddlePaddle Fluid`。 +* 增加了用于模型预测的C-API。 +* 用Fluid API实现了一个简单的GAN的例子。 +* 增加了关于性能调优的文档。 +* 为`paddle.v2.dataset`下载数据集提供了重试机制. +* C++中使用protobuf-lite替换protobuf减少了二进制的大小。 +* 发布了新特性 [Elastic Deep Learning (EDL)](https://github.com/PaddlePaddle/cloud/tree/develop/doc/autoscale/experiment). +* 基于Bazel API利用cmake实现了一个的新的构建系统函数库。 +* 当使用编译选项`WITH_MKL=ON`时自动下载和编译Intel® [MKLML](https://github.com/01org/mkl-dnn/releases/download/v0.11/mklml_lnx_2018.0.1.20171007.tgz) 函数库. 
+* [Intel® MKL-DNN on PaddlePaddle](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/design/mkldnn): + - 完成了 11个 MKL-DNN 层: Convolution, Fully connectivity, Pooling, ReLU, Tanh, ELU, Softmax, BatchNorm, AddTo, Concat, LRN。 + - 完成了 3个 MKL-DNN 网络: VGG-19, ResNet-50, GoogleNet + - 基于Intel Skylake 6148 CPU的[性能测试](https://github.com/PaddlePaddle/Paddle/blob/develop/benchmark/IntelOptimizedPaddle.md) : 相对于MKLML有2~3倍的训练加速。 +* 增加 [softsign activation](http://www.paddlepaddle.org/docs/develop/documentation/zh/api/v2/config/activation.html#softsign) +* 增加 [dot product layer](http://www.paddlepaddle.org/docs/develop/documentation/zh/api/v2/config/layer.html#dot-prod) +* 增加 [L2 distance layer](http://www.paddlepaddle.org/docs/develop/documentation/zh/api/v2/config/layer.html#l2-distance) +* 增加 [sub-nested sequence layer](http://www.paddlepaddle.org/docs/develop/documentation/zh/api/v2/config/layer.html#sub-nested-seq) +* 增加 [kmax sequence score layer](http://www.paddlepaddle.org/docs/develop/documentation/zh/api/v2/config/layer.html#kmax-sequence-score) +* 增加 [sequence slice layer](http://www.paddlepaddle.org/docs/develop/documentation/zh/api/v2/config/layer.html#seq-slice) +* 增加 [row convolution layer](http://www.paddlepaddle.org/docs/develop/documentation/zh/api/v2/config/layer.html#row-conv) +* 增加移动端友好的网页 + +## 改进 + +* 使用一个Python`whl`包即可安装. +* [V2 API可以实现用户定制化评估](https://github.com/PaddlePaddle/models/tree/develop/ltr#训练过程中输出自定义评估指标)。 +* 将 `PADDLE_ONLY_CPU` 改为 `PADDLE_WITH_GPU`, 因为我们会支持多种设备。 +* 删除了有一些bug的BarrierStat。 +* 清理和删除了paddle::Parameter中未使用的函数。 +* 删除了ProtoDataProvider。 +* Huber loss同时支持回归和分类。 +* 为sequence pooling 层增加`stride`参数。 +* v2 API自动使用cudnn batch normalization。 +* 可以使用一个固定的参数名共享BN层的参数。 +* 2D convolution operation支持variable-dimension input特性。 +* 重构cmake中关于CUDA的部分并实现自动检测GPU架构的功能。 +* 优化网页导航。 + +## 错误修复 + +* 修复ROI pooling的Bug. cc9a761 +* 修复当label是dense vector是AUC变成0的问题. #5274 +* 修复WarpCTC 层的Bug. + + # v0.10.0版本 我们非常高兴发布了PaddlePaddle V0.10.0版,并开发了新的[Python API](http://research.baidu.com/paddlepaddles-new-api-simplifies-deep-learning-programs/)。 diff --git a/RELEASE.md b/RELEASE.md index 146f7afa7dfbc152500b82fde28445ae3155c16c..5a62c955131007c9f3329d162c20d1b462550019 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,3 +1,75 @@ +# Release v0.11.0 + +## PaddlePaddle Fluid + +- Release 0.11.0 includes a new feature *PaddlePaddle Fluid*. Fluid is + designed to allow users to program like PyTorch and TensorFlow Eager Execution. + In these systems, there is no longer the concept *model* and applications + do not include a symbolic description of a graph of operators nor a sequence + of layers. Instead, applications look exactly like a usual program that + describes a process of training or inference. The difference between + Fluid and PyTorch or Eager Execution is that Fluid doesn't rely on Python's + control-flow, `if-then-else` nor `for`. Instead, Fluid provides its + C++ implementations and their Python binding using the `with` statement. For an example + + https://github.com/PaddlePaddle/Paddle/blob/3df78ed2a98d37f7ae6725894cc7514effd5664b/python/paddle/v2/fluid/tests/test_while_op.py#L36-L44 + +- In 0.11.0, we provides a C++ class `Executor` to run a Fluid program. +Executor works like an interpreter. 
In future version, we will improve +`Executor` into a debugger like GDB, and we might provide some compilers, +which, for example, takes an application like the above one, and outputs +an equivalent C++ source program, which can be compiled using +[`nvcc`](http://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html) +to generate binaries that use CUDA, or using +[`icc`](https://software.intel.com/en-us/c-compilers) to generate binaries +that make full use of Intel CPUs. + +## New Features + +* Release `PaddlePaddle Fluid`. +* Add C-API for model inference +* Use fluid API to create a simple GAN demo. +* Add develop guide about performance tunning. +* Add retry when download `paddle.v2.dataset`. +* Linking protobuf-lite not protobuf in C++. Reduce the binary size. +* Feature [Elastic Deep Learning (EDL)](https://github.com/PaddlePaddle/cloud/tree/develop/doc/autoscale/experiment) released. +* A new style cmake functions for Paddle. It is based on Bazel API. +* Automatically download and compile with Intel® [MKLML](https://github.com/01org/mkl-dnn/releases/download/v0.11/mklml_lnx_2018.0.1.20171007.tgz) library as CBLAS when build `WITH_MKL=ON`. +* [Intel® MKL-DNN on PaddlePaddle](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/design/mkldnn): + - Complete 11 MKL-DNN layers: Convolution, Fully connectivity, Pooling, ReLU, Tanh, ELU, Softmax, BatchNorm, AddTo, Concat, LRN. + - Complete 3 MKL-DNN networks: VGG-19, ResNet-50, GoogleNet + - [Benchmark](https://github.com/PaddlePaddle/Paddle/blob/develop/benchmark/IntelOptimizedPaddle.md) on Intel Skylake 6148 CPU: 2~3x training speedup compared with MKLML. +* Add the [`softsign` activation](http://www.paddlepaddle.org/docs/develop/documentation/zh/api/v2/config/activation.html#softsign). +* Add the [dot product layer](http://www.paddlepaddle.org/docs/develop/documentation/zh/api/v2/config/layer.html#dot-prod). +* Add the [L2 distance layer](http://www.paddlepaddle.org/docs/develop/documentation/zh/api/v2/config/layer.html#l2-distance). +* Add the [sub-nested sequence layer](http://www.paddlepaddle.org/docs/develop/documentation/zh/api/v2/config/layer.html#sub-nested-seq). +* Add the [kmax sequence score layer](http://www.paddlepaddle.org/docs/develop/documentation/zh/api/v2/config/layer.html#kmax-sequence-score). +* Add the [sequence slice layer](http://www.paddlepaddle.org/docs/develop/documentation/zh/api/v2/config/layer.html#seq-slice). +* Add the [row convolution layer](http://www.paddlepaddle.org/docs/develop/documentation/zh/api/v2/config/layer.html#row-conv) +* Add mobile friendly webpages. + +## Improvements + +* Build and install using a single `whl` package. +* [Custom evaluating in V2 API](https://github.com/PaddlePaddle/models/tree/develop/ltr#训练过程中输出自定义评估指标). +* Change `PADDLE_ONLY_CPU` to `PADDLE_WITH_GPU`, since we will support many kinds of devices. +* Remove buggy BarrierStat. +* Clean and remove unused functions in paddle::Parameter. +* Remove ProtoDataProvider. +* Huber loss supports both regression and classification. +* Add the `stride` parameter for sequence pooling layers. +* Enable v2 API use cudnn batch normalization automatically. +* The BN layer's parameter can be shared by a fixed the parameter name. +* Support variable-dimension input feature for 2D convolution operation. +* Refine cmake about CUDA to automatically detect GPU architecture. +* Improved website navigation. + +## Bug Fixes + +* Fix bug in ROI pooling. cc9a761 +* Fix AUC is zero when label is dense vector. #5274 +* Fix bug in WarpCTC layer. 
+ # Release v0.10.0 We are glad to release version 0.10.0. In this version, we are happy to release the new diff --git a/benchmark/IntelOptimizedPaddle.md b/benchmark/IntelOptimizedPaddle.md index 16c2390fd31bf1c79f29735fb98180d3f7302eb2..8ee7fd28c58f2a2bcb82040eb824a37062bd4e9c 100644 --- a/benchmark/IntelOptimizedPaddle.md +++ b/benchmark/IntelOptimizedPaddle.md @@ -2,27 +2,25 @@ Machine: -- Server - - Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz, 2 Sockets, 20 Cores per socket -- Laptop - - DELL XPS15-9560-R1745: i7-7700HQ 8G 256GSSD - - i5 MacBook Pro (Retina, 13-inch, Early 2015) -- Desktop - - i7-6700k +- Server: Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz, 2 Sockets, 20 Cores per socket +- Laptop: TBD System: CentOS release 6.3 (Final), Docker 1.12.1. -PaddlePaddle: paddlepaddle/paddle:latest (for MKLML and MKL-DNN), paddlepaddle/paddle:latest-openblas (for OpenBLAS) -- MKL-DNN tag v0.11 -- MKLML 2018.0.1.20171007 -- OpenBLAS v0.2.20 -(TODO: will rerun after 0.11.0) +PaddlePaddle: (TODO: will rerun after 0.11.0) +- paddlepaddle/paddle:latest (for MKLML and MKL-DNN) + - MKL-DNN tag v0.11 + - MKLML 2018.0.1.20171007 +- paddlepaddle/paddle:latest-openblas (for OpenBLAS) + - OpenBLAS v0.2.20 On each machine, we will test and compare the performance of training on single node using MKL-DNN / MKLML / OpenBLAS respectively. ## Benchmark Model ### Server + +#### Training Test on batch size 64, 128, 256 on Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz Input image size - 3 * 224 * 224, Time: images/second @@ -35,9 +33,7 @@ Input image size - 3 * 224 * 224, Time: images/second | MKLML | 12.12 | 13.70 | 16.18 | | MKL-DNN | 28.46 | 29.83 | 30.44 | - -chart on batch size 128 -TBD + - ResNet-50 @@ -47,9 +43,7 @@ TBD | MKLML | 32.52 | 31.89 | 33.12 | | MKL-DNN | 81.69 | 82.35 | 84.08 | - -chart on batch size 128 -TBD + - GoogLeNet @@ -59,10 +53,35 @@ TBD | MKLML | 128.46| 137.89| 158.63 | | MKL-DNN     | 250.46| 264.83| 269.50 | -chart on batch size 128 -TBD + + +#### Inference +Test on batch size 1, 2, 4, 8, 16 on Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz +- VGG-19 + +| BatchSize | 1 | 2 | 4 | 8 | 16 | +|-----------|-------|-------|-------|-------|-------| +| OpenBLAS | 1.07 | 1.08 | 1.06 | 0.88 | 0.65 | +| MKLML | 5.58 | 9.80 | 15.15 | 21.21 | 28.67 | +| MKL-DNN | 75.07 | 88.64 | 82.58 | 92.29 | 96.75 | + +- ResNet-50 + +| BatchSize | 1 | 2 | 4 | 8 | 16 | +|-----------|-------|--------|--------|--------|--------| +| OpenBLAS | 3.35 | 3.19 | 3.09 | 2.55 | 1.96 | +| MKLML | 6.33 | 12.02 | 22.88 | 40.53 | 63.09 | +| MKL-DNN | 107.83| 148.84 | 177.78 | 189.35 | 217.69 | + + +- GoogLeNet + +| BatchSize | 1 | 2 | 4 | 8 | 16 | +|-----------|--------|--------|--------|--------|--------| +| OpenBLAS | 12.04 | 11.31 | 10.00 | 9.07 | 4.34 | +| MKLML | 22.74 | 41.56 | 81.22 | 133.47 | 210.53 | +| MKL-DNN | 175.10 | 272.92 | 450.70 | 512.00 | 600.94 | + ### Laptop TBD -### Desktop -TBD diff --git a/benchmark/figs/googlenet-cpu-train.png b/benchmark/figs/googlenet-cpu-train.png new file mode 100644 index 0000000000000000000000000000000000000000..c3f67faf096fe9b45dd815f294b41679dc7c9e54 Binary files /dev/null and b/benchmark/figs/googlenet-cpu-train.png differ diff --git a/benchmark/figs/resnet-cpu-train.png b/benchmark/figs/resnet-cpu-train.png new file mode 100644 index 0000000000000000000000000000000000000000..b96ecd5ff940c0d000613b1ed1f11fb16796cf47 Binary files /dev/null and b/benchmark/figs/resnet-cpu-train.png differ diff --git a/benchmark/figs/vgg-cpu-train.png b/benchmark/figs/vgg-cpu-train.png new file mode 100644 
index 0000000000000000000000000000000000000000..f830ca6a87d10b72a5113636dd5686ab25a2e864 Binary files /dev/null and b/benchmark/figs/vgg-cpu-train.png differ diff --git a/benchmark/paddle/image/googlenet.py b/benchmark/paddle/image/googlenet.py index a88ecac67d9e677f14f6dc24ba9a337b1245243f..7059c13bd2c2b98eb3fbcf633a6f7064e54d5402 100644 --- a/benchmark/paddle/image/googlenet.py +++ b/benchmark/paddle/image/googlenet.py @@ -6,10 +6,21 @@ width = 224 num_class = 1000 batch_size = get_config_arg('batch_size', int, 128) use_gpu = get_config_arg('use_gpu', bool, True) - -args = {'height': height, 'width': width, 'color': True, 'num_class': num_class} +is_infer = get_config_arg("is_infer", bool, False) + +args = { + 'height': height, + 'width': width, + 'color': True, + 'num_class': num_class, + 'is_infer': is_infer +} define_py_data_sources2( - "train.list", None, module="provider", obj="process", args=args) + "train.list" if not is_infer else None, + "test.list" if is_infer else None, + module="provider", + obj="process", + args=args) settings( batch_size=batch_size, @@ -146,7 +157,6 @@ def inception(name, input, channels, \ return cat -lab = data_layer(name="label", size=1000) data = data_layer(name="input", size=3 * height * width) # stage 1 @@ -224,6 +234,10 @@ pool5 = img_pool_layer( dropout = dropout_layer(name="dropout", input=pool5, dropout_rate=0.4) out3 = fc_layer( name="output3", input=dropout, size=1000, act=SoftmaxActivation()) -loss3 = cross_entropy(name='loss3', input=out3, label=lab) -outputs(loss3) +if is_infer: + outputs(out3) +else: + lab = data_layer(name="label", size=num_class) + loss3 = cross_entropy(name='loss3', input=out3, label=lab) + outputs(loss3) diff --git a/benchmark/paddle/image/provider.py b/benchmark/paddle/image/provider.py index 4703944c8722552d56ba80a8e0663de5fb4df53d..927b1759941f362ef4b5ffe84dd01332986d9306 100644 --- a/benchmark/paddle/image/provider.py +++ b/benchmark/paddle/image/provider.py @@ -13,14 +13,20 @@ def initHook(settings, height, width, color, num_class, **kwargs): settings.data_size = settings.height * settings.width * 3 else: settings.data_size = settings.height * settings.width - - settings.slots = [dense_vector(settings.data_size), integer_value(1)] + settings.is_infer = kwargs.get('is_infer', False) + if settings.is_infer: + settings.slots = [dense_vector(settings.data_size)] + else: + settings.slots = [dense_vector(settings.data_size), integer_value(1)] @provider( init_hook=initHook, min_pool_size=-1, cache=CacheType.CACHE_PASS_IN_MEM) def process(settings, file_list): - for i in xrange(1024): + for i in xrange(2560 if settings.is_infer else 1024): img = np.random.rand(1, settings.data_size).reshape(-1, 1).flatten() - lab = random.randint(0, settings.num_class - 1) - yield img.astype('float32'), int(lab) + if settings.is_infer: + yield img.astype('float32') + else: + lab = random.randint(0, settings.num_class - 1) + yield img.astype('float32'), int(lab) diff --git a/benchmark/paddle/image/resnet.py b/benchmark/paddle/image/resnet.py index 6ae1857642e8df4b3859eec68a3a5227d1c4fcb3..4a14363ff1db48a5072cbb5f5eb3bc9241ffca8f 100644 --- a/benchmark/paddle/image/resnet.py +++ b/benchmark/paddle/image/resnet.py @@ -6,11 +6,21 @@ width = 224 num_class = 1000 batch_size = get_config_arg('batch_size', int, 64) layer_num = get_config_arg("layer_num", int, 50) -is_test = get_config_arg("is_test", bool, False) - -args = {'height': height, 'width': width, 'color': True, 'num_class': num_class} +is_infer = get_config_arg("is_infer", bool, False) + 
+args = { + 'height': height, + 'width': width, + 'color': True, + 'num_class': num_class, + 'is_infer': is_infer +} define_py_data_sources2( - "train.list", None, module="provider", obj="process", args=args) + "train.list" if not is_infer else None, + "test.list" if is_infer else None, + module="provider", + obj="process", + args=args) settings( batch_size=batch_size, @@ -45,7 +55,10 @@ def conv_bn_layer(name, act=LinearActivation(), bias_attr=False) return batch_norm_layer( - name=name + "_bn", input=tmp, act=active_type, use_global_stats=is_test) + name=name + "_bn", + input=tmp, + act=active_type, + use_global_stats=is_infer) def bottleneck_block(name, input, num_filters1, num_filters2): @@ -207,7 +220,9 @@ elif layer_num == 152: else: print("Wrong layer number.") -lbl = data_layer(name="label", size=num_class) -loss = cross_entropy(name='loss', input=resnet, label=lbl) -inputs(img, lbl) -outputs(loss) +if is_infer: + outputs(resnet) +else: + lbl = data_layer(name="label", size=num_class) + loss = cross_entropy(name='loss', input=resnet, label=lbl) + outputs(loss) diff --git a/benchmark/paddle/image/run_mkldnn_infer.sh b/benchmark/paddle/image/run_mkldnn_infer.sh new file mode 100755 index 0000000000000000000000000000000000000000..d795bcab1b7d098295066f79189d17e8299d28fb --- /dev/null +++ b/benchmark/paddle/image/run_mkldnn_infer.sh @@ -0,0 +1,86 @@ +set -e + +function clock_to_seconds() { + hours=`echo $1 | awk -F ':' '{print $1}'` + mins=`echo $1 | awk -F ':' '{print $2}'` + secs=`echo $1 | awk -F ':' '{print $3}'` + echo `awk 'BEGIN{printf "%.2f",('$secs' + '$mins' * 60 + '$hours' * 3600)}'` +} + +function infer() { + unset OMP_NUM_THREADS MKL_NUM_THREADS OMP_DYNAMIC KMP_AFFINITY + topology=$1 + layer_num=$2 + bs=$3 + use_mkldnn=$4 + if [ $4 == "True" ]; then + thread=1 + log="logs/infer-${topology}-${layer_num}-mkldnn-${bs}.log" + elif [ $4 == "False" ]; then + thread=`nproc` + if [ $thread -gt $bs ]; then + thread=$bs + fi + log="logs/infer-${topology}-${layer_num}-${thread}mklml-${bs}.log" + else + echo "Wrong input $4, use True or False." + exit 0 + fi + + models_in="models/${topology}-${layer_num}/pass-00000/" + if [ ! -d $models_in ]; then + echo "Training model ${topology}_${layer_num}" + paddle train --job=train \ + --config="${topology}.py" \ + --use_mkldnn=True \ + --use_gpu=False \ + --trainer_count=1 \ + --num_passes=1 \ + --save_dir="models/${topology}-${layer_num}" \ + --config_args="batch_size=128,layer_num=${layer_num}" \ + > /dev/null 2>&1 + echo "Done" + fi + log_period=$((256 / bs)) + paddle train --job=test \ + --config="${topology}.py" \ + --use_mkldnn=$use_mkldnn \ + --use_gpu=False \ + --trainer_count=$thread \ + --log_period=$log_period \ + --config_args="batch_size=${bs},layer_num=${layer_num},is_infer=True" \ + --init_model_path=$models_in \ + 2>&1 | tee ${log} + + # calculate the last 5 logs period time of 1280 samples, + # the time before are burning time. + start=`tail ${log} -n 7 | head -n 1 | awk -F ' ' '{print $2}' | xargs` + end=`tail ${log} -n 2 | head -n 1 | awk -F ' ' '{print $2}' | xargs` + start_sec=`clock_to_seconds $start` + end_sec=`clock_to_seconds $end` + fps=`awk 'BEGIN{printf "%.2f",(1280 / ('$end_sec' - '$start_sec'))}'` + echo "Last 1280 samples start: ${start}(${start_sec} sec), end: ${end}(${end_sec} sec;" >> ${log} + echo "FPS: $fps images/sec" 2>&1 | tee -a ${log} +} + +if [ ! -f "train.list" ]; then + echo " " > train.list +fi +if [ ! -f "test.list" ]; then + echo " " > test.list +fi +if [ ! 
-d "logs" ]; then + mkdir logs +fi +if [ ! -d "models" ]; then + mkdir -p models +fi + +# inference benchmark +for use_mkldnn in True False; do + for batchsize in 1 2 4 8 16; do + infer googlenet v1 $batchsize $use_mkldnn + infer resnet 50 $batchsize $use_mkldnn + infer vgg 19 $batchsize $use_mkldnn + done +done diff --git a/benchmark/paddle/image/run_mkldnn.sh b/benchmark/paddle/image/run_mkldnn_train.sh similarity index 79% rename from benchmark/paddle/image/run_mkldnn.sh rename to benchmark/paddle/image/run_mkldnn_train.sh index f768f6c29a84b40f917e0ccfde4d8c15f65c818b..320206239ae960bd088b05d3b10934a98da741b1 100755 --- a/benchmark/paddle/image/run_mkldnn.sh +++ b/benchmark/paddle/image/run_mkldnn_train.sh @@ -8,13 +8,13 @@ function train() { use_mkldnn=$4 if [ $4 == "True" ]; then thread=1 - log="logs/${topology}-${layer_num}-mkldnn-${bs}.log" + log="logs/train-${topology}-${layer_num}-mkldnn-${bs}.log" elif [ $4 == "False" ]; then thread=`nproc` # each trainer_count use only 1 core to avoid conflict - log="logs/${topology}-${layer_num}-${thread}mklml-${bs}.log" + log="logs/train-${topology}-${layer_num}-${thread}mklml-${bs}.log" else - echo "Wrong input $3, use True or False." + echo "Wrong input $4, use True or False." exit 0 fi args="batch_size=${bs},layer_num=${layer_num}" @@ -30,13 +30,14 @@ function train() { 2>&1 | tee ${log} } -if [ ! -d "train.list" ]; then +if [ ! -f "train.list" ]; then echo " " > train.list fi if [ ! -d "logs" ]; then mkdir logs fi +# training benchmark for use_mkldnn in True False; do for batchsize in 64 128 256; do train vgg 19 $batchsize $use_mkldnn diff --git a/benchmark/paddle/image/vgg.py b/benchmark/paddle/image/vgg.py index 420884ed8e1ae36a3f1772bfbe8323f3d0ea71e6..8d0a1e97a451cd52ef17e4e326673cc90059ef3c 100644 --- a/benchmark/paddle/image/vgg.py +++ b/benchmark/paddle/image/vgg.py @@ -6,10 +6,21 @@ width = 224 num_class = 1000 batch_size = get_config_arg('batch_size', int, 64) layer_num = get_config_arg('layer_num', int, 19) +is_infer = get_config_arg("is_infer", bool, False) -args = {'height': height, 'width': width, 'color': True, 'num_class': num_class} +args = { + 'height': height, + 'width': width, + 'color': True, + 'num_class': num_class, + 'is_infer': is_infer +} define_py_data_sources2( - "train.list", None, module="provider", obj="process", args=args) + "train.list" if not is_infer else None, + "test.list" if is_infer else None, + module="provider", + obj="process", + args=args) settings( batch_size=batch_size, @@ -98,6 +109,9 @@ elif layer_num == 19: else: print("Wrong layer number.") -lab = data_layer('label', num_class) -loss = cross_entropy(input=vgg, label=lab) -outputs(loss) +if is_infer: + outputs(vgg) +else: + lab = data_layer('label', num_class) + loss = cross_entropy(input=vgg, label=lab) + outputs(loss) diff --git a/cmake/cblas.cmake b/cmake/cblas.cmake index b21fc43904d9aafe9f7d019dfbe5b1c0d3f9e2d6..6320b17520a687f88993b6f464d9115838b0f96b 100644 --- a/cmake/cblas.cmake +++ b/cmake/cblas.cmake @@ -3,7 +3,7 @@ # It will search MKLML, atlas, OpenBlas, reference-cblas in order. # # If any cblas implementation found, the following variable will be set. -# CBLAS_PROVIDER # one of MKLML, ATLAS, OPENBLAS, REFERENCE +# CBLAS_PROVIDER # one of MKLML, OPENBLAS, REFERENCE # CBLAS_INC_DIR # the include directory for cblas. # CBLAS_LIBS # a list of libraries should be linked by paddle. # # Each library should be full path to object file. 
@@ -17,7 +17,7 @@ if(WITH_MKLML AND MKLML_INC_DIR AND MKLML_LIB) set(CBLAS_INC_DIR ${MKLML_INC_DIR}) set(CBLAS_LIBRARIES ${MKLML_LIB}) - add_definitions(-DPADDLE_USE_MKLML) + add_definitions(-DPADDLE_WITH_MKLML) add_definitions(-DLAPACK_FOUND) message(STATUS "Found cblas and lapack in MKLML " @@ -25,42 +25,6 @@ if(WITH_MKLML AND MKLML_INC_DIR AND MKLML_LIB) return() endif() -## Then find atlas. -set(ATLAS_ROOT $ENV{ATLAS_ROOT} CACHE PATH "Folder contains Atlas") -set(ATLAS_INCLUDE_SEARCH_PATHS - ${ATLAS_ROOT}/include - /usr/include - /usr/include/atlas) -set(ATLAS_LIB_SEARCH_PATHS - ${ATLAS_ROOT}/lib - /usr/lib - /usr/lib/blas/atlas - /usr/lib/atlas - /usr/lib/atlas-base # special for ubuntu 14.04. - ) -find_path(ATLAS_INC_DIR NAMES cblas.h - PATHS ${ATLAS_INCLUDE_SEARCH_PATHS}) -find_path(ATLAS_CLAPACK_INC_DIR NAMES clapack.h - PATHS ${ATLAS_INCLUDE_SEARCH_PATHS}) -find_library(ATLAS_CBLAS_LIB NAMES cblas libcblas.so.3 - PATHS ${ATLAS_LIB_SEARCH_PATHS}) -find_library(ATLAS_CLAPACK_LIB NAMES lapack_atlas liblapack_atlas.so.3 - PATHS ${ATLAS_LIB_SEARCH_PATHS}) - -if(ATLAS_CLAPACK_INC_DIR AND ATLAS_INC_DIR AND ATLAS_CBLAS_LIB AND ATLAS_CLAPACK_LIB) - set(CBLAS_FOUND ON) - set(CBLAS_PROVIDER ATLAS) - set(CBLAS_INC_DIR ${ATLAS_INC_DIR} ${ATLAS_CLAPACK_INC_DIR}) - set(CBLAS_LIBRARIES ${ATLAS_CLAPACK_LIB} ${ATLAS_CBLAS_LIB}) - - add_definitions(-DPADDLE_USE_ATLAS) - add_definitions(-DLAPACK_FOUND) - - message(STATUS "Found ATLAS (include: ${ATLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})") - message(STATUS "Found lapack in ATLAS (include: ${ATLAS_CLAPACK_INC_DIR})") - return() -endif() - ## Then find openblas. set(OPENBLAS_ROOT $ENV{OPENBLAS_ROOT} CACHE PATH "Folder contains Openblas") set(OPENBLAS_INCLUDE_SEARCH_PATHS diff --git a/cmake/configure.cmake b/cmake/configure.cmake index e550ec285668ea25757eeee9e7c5dc48fc9d339d..5c6bcfde76a1201f792d04766d698db8cd395a49 100644 --- a/cmake/configure.cmake +++ b/cmake/configure.cmake @@ -24,6 +24,11 @@ if(WITH_DOUBLE) add_definitions(-DPADDLE_TYPE_DOUBLE) endif(WITH_DOUBLE) +if(WITH_ARM_FP16) + add_definitions(-DPADDLE_ARM_FP16) + add_definitions("-march=armv8.2-a+fp16+simd") +endif(WITH_ARM_FP16) + if(WITH_TESTING) add_definitions(-DPADDLE_WITH_TESTING) endif(WITH_TESTING) diff --git a/cmake/external/cares.cmake b/cmake/external/cares.cmake new file mode 100644 index 0000000000000000000000000000000000000000..aec51410b33669f8a549f2eca193cc6aa2d07a13 --- /dev/null +++ b/cmake/external/cares.cmake @@ -0,0 +1,45 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +IF(MOBILE_INFERENCE OR NOT WITH_DISTRIBUTE) + return() +ENDIF() + +include (ExternalProject) + +# NOTE: c-ares is needed when linking with grpc. + +SET(CARES_SOURCES_DIR ${THIRD_PARTY_PATH}/cares) +SET(CARES_INSTALL_DIR ${THIRD_PARTY_PATH}/install/cares) +SET(CARES_INCLUDE_DIR "${CARES_INSTALL_DIR}/include/" CACHE PATH "cares include directory." 
FORCE) + +ExternalProject_Add( + extern_cares + GIT_REPOSITORY "https://github.com/c-ares/c-ares.git" + GIT_TAG "cares-1_13_0" + PREFIX ${CARES_SOURCES_DIR} + UPDATE_COMMAND "" + CONFIGURE_COMMAND ./buildconf && ./configure --disable-shared --prefix=${CARES_INSTALL_DIR} + BUILD_IN_SOURCE 1 + BUILD_COMMAND make -j8 + INSTALL_COMMAND make install +) + +ADD_LIBRARY(cares STATIC IMPORTED GLOBAL) +SET_PROPERTY(TARGET cares PROPERTY IMPORTED_LOCATION + "${CARES_INSTALL_DIR}/lib/libcares.a") + +include_directories(${CARES_INCLUDE_DIR}) +ADD_DEPENDENCIES(cares extern_cares) diff --git a/cmake/external/glog.cmake b/cmake/external/glog.cmake index 08bdc1e1623b0d917061c7368e9b2a8f7e9517fd..0c6b3aafcb4e990b9d4549820137474e5968a7aa 100644 --- a/cmake/external/glog.cmake +++ b/cmake/external/glog.cmake @@ -26,12 +26,21 @@ ENDIF(WIN32) INCLUDE_DIRECTORIES(${GLOG_INCLUDE_DIR}) +IF(ANDROID AND ${CMAKE_SYSTEM_VERSION} VERSION_LESS "21") + # Using the unofficial glog for Android API < 21 + SET(GLOG_REPOSITORY "https://github.com/Xreki/glog.git") + SET(GLOG_TAG "8a547150548b284382ccb6582408e9140ff2bea8") +ELSE() + SET(GLOG_REPOSITORY "https://github.com/google/glog.git") + SET(GLOG_TAG "v0.3.5") +ENDIF() + ExternalProject_Add( extern_glog ${EXTERNAL_PROJECT_LOG_ARGS} DEPENDS gflags - GIT_REPOSITORY "https://github.com/google/glog.git" - GIT_TAG v0.3.5 + GIT_REPOSITORY ${GLOG_REPOSITORY} + GIT_TAG ${GLOG_TAG} PREFIX ${GLOG_SOURCES_DIR} UPDATE_COMMAND "" CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} diff --git a/cmake/external/grpc.cmake b/cmake/external/grpc.cmake new file mode 100644 index 0000000000000000000000000000000000000000..abee6698e30b7e76ca42825ed225876bf2ba5ec0 --- /dev/null +++ b/cmake/external/grpc.cmake @@ -0,0 +1,66 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +IF(MOBILE_INFERENCE OR NOT WITH_DISTRIBUTE) + return() +ENDIF() + +include (ExternalProject) + +SET(GRPC_SOURCES_DIR ${THIRD_PARTY_PATH}/grpc) +SET(GRPC_INSTALL_DIR ${THIRD_PARTY_PATH}/install/grpc) +SET(GRPC_INCLUDE_DIR "${GRPC_INSTALL_DIR}/include/" CACHE PATH "grpc include directory." FORCE) +SET(GRPC_CPP_PLUGIN "${GRPC_INSTALL_DIR}/bin/grpc_cpp_plugin" CACHE FILEPATH "GRPC_CPP_PLUGIN" FORCE) +IF(APPLE) + SET(BUILD_CMD make -n HAS_SYSTEM_PROTOBUF=false -s -j8 static grpc_cpp_plugin | sed "s/-Werror//g" | sh) +ELSE() + SET(BUILD_CMD make HAS_SYSTEM_PROTOBUF=false -s -j8 static grpc_cpp_plugin) +ENDIF() + +ExternalProject_Add( + extern_grpc + DEPENDS protobuf zlib + GIT_REPOSITORY "https://github.com/grpc/grpc.git" + GIT_TAG "v1.7.x" + PREFIX ${GRPC_SOURCES_DIR} + UPDATE_COMMAND "" + CONFIGURE_COMMAND "" + BUILD_IN_SOURCE 1 + # NOTE(yuyang18): + # Disable -Werror, otherwise the compile will fail in MacOS. + # It seems that we cannot configure that by make command. 
+ # Just dry run make command and remove `-Werror`, then use a shell to run make commands + BUILD_COMMAND ${BUILD_CMD} + INSTALL_COMMAND make prefix=${GRPC_INSTALL_DIR} install +) + +# FIXME(typhoonzero): hack to get static lib path, try a better way like merge them. +ADD_LIBRARY(grpc++_unsecure STATIC IMPORTED GLOBAL) +SET_PROPERTY(TARGET grpc++_unsecure PROPERTY IMPORTED_LOCATION + "${GRPC_INSTALL_DIR}/lib/libgrpc++_unsecure.a") + +ADD_LIBRARY(grpc++ STATIC IMPORTED GLOBAL) +SET_PROPERTY(TARGET grpc++ PROPERTY IMPORTED_LOCATION + "${GRPC_INSTALL_DIR}/lib/libgrpc++.a") +ADD_LIBRARY(gpr STATIC IMPORTED GLOBAL) +SET_PROPERTY(TARGET gpr PROPERTY IMPORTED_LOCATION + "${GRPC_INSTALL_DIR}/lib/libgpr.a") + +ADD_LIBRARY(grpc_unsecure STATIC IMPORTED GLOBAL) +SET_PROPERTY(TARGET grpc_unsecure PROPERTY IMPORTED_LOCATION + "${GRPC_INSTALL_DIR}/lib/libgrpc_unsecure.a") + +include_directories(${GRPC_INCLUDE_DIR}) +ADD_DEPENDENCIES(grpc++_unsecure extern_grpc) diff --git a/cmake/external/mkldnn.cmake b/cmake/external/mkldnn.cmake index fc52d339d7a336b44c97f2e0a9fc8d6604854365..5d24caebdcc5a28823164d718fb1628be5c4179d 100644 --- a/cmake/external/mkldnn.cmake +++ b/cmake/external/mkldnn.cmake @@ -67,5 +67,5 @@ ADD_LIBRARY(mkldnn SHARED IMPORTED GLOBAL) SET_PROPERTY(TARGET mkldnn PROPERTY IMPORTED_LOCATION ${MKLDNN_LIB}) ADD_DEPENDENCIES(mkldnn ${MKLDNN_PROJECT}) MESSAGE(STATUS "MKLDNN library: ${MKLDNN_LIB}") -add_definitions(-DPADDLE_USE_MKLDNN) +add_definitions(-DPADDLE_WITH_MKLDNN) LIST(APPEND external_project_dependencies mkldnn) diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake index 4c4f59656dae68739f2f07f3febd510e727fe2dd..97857a686b38d935b19f510ecdcb66bcca91fe03 100644 --- a/cmake/external/openblas.cmake +++ b/cmake/external/openblas.cmake @@ -114,11 +114,7 @@ INCLUDE_DIRECTORIES(${CBLAS_INC_DIR}) # linear algebra libraries for cc_library(xxx SRCS xxx.c DEPS cblas) SET(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/cblas_dummy.c) FILE(WRITE ${dummyfile} "const char * dummy = \"${dummyfile}\";") -IF("${CBLAS_PROVIDER}" STREQUAL "MKLML") - ADD_LIBRARY(cblas SHARED ${dummyfile}) -ELSE() - ADD_LIBRARY(cblas STATIC ${dummyfile}) -ENDIF() +ADD_LIBRARY(cblas STATIC ${dummyfile}) TARGET_LINK_LIBRARIES(cblas ${CBLAS_LIBRARIES}) IF(NOT ${CBLAS_FOUND}) diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake index be7f6a9465970711170bd15dcecaadeaa8a55f86..fab2af362bb070a54987b6499748056f3d12a56b 100644 --- a/cmake/external/protobuf.cmake +++ b/cmake/external/protobuf.cmake @@ -15,7 +15,18 @@ INCLUDE(ExternalProject) # Always invoke `FIND_PACKAGE(Protobuf)` for importing function protobuf_generate_cpp FIND_PACKAGE(Protobuf QUIET) -SET(PROTOBUF_FOUND "OFF") +macro(UNSET_VAR VAR_NAME) + UNSET(${VAR_NAME} CACHE) + UNSET(${VAR_NAME}) +endmacro() +UNSET_VAR(PROTOBUF_INCLUDE_DIR) +UNSET_VAR(PROTOBUF_FOUND) +UNSET_VAR(PROTOBUF_PROTOC_EXECUTABLE) +UNSET_VAR(PROTOBUF_PROTOC_LIBRARY) +UNSET_VAR(PROTOBUF_LITE_LIBRARY) +UNSET_VAR(PROTOBUF_LIBRARY) +UNSET_VAR(PROTOBUF_INCLUDE_DIR) +UNSET_VAR(Protobuf_PROTOC_EXECUTABLE) if(NOT COMMAND protobuf_generate_python) # before cmake 3.4, protobuf_genrerate_python is not defined. function(protobuf_generate_python SRCS) @@ -110,7 +121,6 @@ macro(PROMPT_PROTOBUF_LIB) # FIND_Protobuf.cmake uses `Protobuf_PROTOC_EXECUTABLE`. # make `protobuf_generate_cpp` happy. 
SET(Protobuf_PROTOC_EXECUTABLE ${PROTOBUF_PROTOC_EXECUTABLE}) - FOREACH(dep ${protobuf_DEPS}) ADD_DEPENDENCIES(protobuf ${dep}) ADD_DEPENDENCIES(protobuf_lite ${dep}) @@ -128,11 +138,11 @@ endmacro() set(PROTOBUF_ROOT "" CACHE PATH "Folder contains protobuf") if (NOT "${PROTOBUF_ROOT}" STREQUAL "") - find_path(PROTOBUF_INCLUDE_DIR google/protobuf/message.h PATHS ${PROTOBUF_ROOT}/include) - find_library(PROTOBUF_LIBRARY protobuf PATHS ${PROTOBUF_ROOT}/lib) - find_library(PROTOBUF_LITE_LIBRARY protobuf-lite PATHS ${PROTOBUF_ROOT}/lib) - find_library(PROTOBUF_PROTOC_LIBRARY protoc PATHS ${PROTOBUF_ROOT}/lib) - find_program(PROTOBUF_PROTOC_EXECUTABLE protoc PATHS ${PROTOBUF_ROOT}/bin) + find_path(PROTOBUF_INCLUDE_DIR google/protobuf/message.h PATHS ${PROTOBUF_ROOT}/include NO_DEFAULT_PATH) + find_library(PROTOBUF_LIBRARY protobuf PATHS ${PROTOBUF_ROOT}/lib NO_DEFAULT_PATH) + find_library(PROTOBUF_LITE_LIBRARY protobuf-lite PATHS ${PROTOBUF_ROOT}/lib NO_DEFAULT_PATH) + find_library(PROTOBUF_PROTOC_LIBRARY protoc PATHS ${PROTOBUF_ROOT}/lib NO_DEFAULT_PATH) + find_program(PROTOBUF_PROTOC_EXECUTABLE protoc PATHS ${PROTOBUF_ROOT}/bin NO_DEFAULT_PATH) if (PROTOBUF_INCLUDE_DIR AND PROTOBUF_LIBRARY AND PROTOBUF_LITE_LIBRARY AND PROTOBUF_PROTOC_LIBRARY AND PROTOBUF_PROTOC_EXECUTABLE) message(STATUS "Using custom protobuf library in ${PROTOBUF_ROOT}.") SET_PROTOBUF_VERSION() @@ -178,14 +188,26 @@ FUNCTION(build_protobuf TARGET_NAME BUILD_FOR_HOST) SET(OPTIONAL_CACHE_ARGS "-DZLIB_ROOT:STRING=${ZLIB_ROOT}") ENDIF() + SET(PROTOBUF_REPO "https://github.com/google/protobuf.git") + SET(PROTOBUF_TAG "9f75c5aa851cd877fb0d93ccc31b8567a6706546") + IF(MOBILE_INFERENCE) + # The reason why the official version is not used is described in + # https://github.com/PaddlePaddle/Paddle/issues/6114 + SET(PROTOBUF_REPO "https://github.com/qingqing01/protobuf.git") + SET(PROTOBUF_TAG "v3.2.0") + IF(NOT BUILD_FOR_HOST) + SET(OPTIONAL_ARGS ${OPTIONAL_ARGS} "-Dprotobuf_BUILD_PROTOC_BINARIES=OFF") + ENDIF() + ENDIF() + ExternalProject_Add( ${TARGET_NAME} ${EXTERNAL_PROJECT_LOG_ARGS} PREFIX ${PROTOBUF_SOURCES_DIR} UPDATE_COMMAND "" DEPENDS zlib - GIT_REPOSITORY "https://github.com/google/protobuf.git" - GIT_TAG "9f75c5aa851cd877fb0d93ccc31b8567a6706546" + GIT_REPOSITORY ${PROTOBUF_REPO} + GIT_TAG ${PROTOBUF_TAG} CONFIGURE_COMMAND ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/${TARGET_NAME}/cmake ${OPTIONAL_ARGS} @@ -203,7 +225,11 @@ FUNCTION(build_protobuf TARGET_NAME BUILD_FOR_HOST) ) ENDFUNCTION() -SET(PROTOBUF_VERSION 3.1) +IF(NOT MOBILE_INFERENCE) + SET(PROTOBUF_VERSION 3.1) +ELSE() + SET(PROTOBUF_VERSION 3.2) +ENDIF() IF(CMAKE_CROSSCOMPILING) build_protobuf(protobuf_host TRUE) LIST(APPEND external_project_dependencies protobuf_host) diff --git a/cmake/external/zlib.cmake b/cmake/external/zlib.cmake index a98e069b7cd1654ddd5868560d0905eab6d9c692..1638cd8fdfc34575132462859e056a1907f0b2f1 100644 --- a/cmake/external/zlib.cmake +++ b/cmake/external/zlib.cmake @@ -50,6 +50,8 @@ ExternalProject_Add( ) LIST(APPEND external_project_dependencies zlib) +ADD_LIBRARY(zlib_target STATIC IMPORTED GLOBAL) +SET_PROPERTY(TARGET zlib_target PROPERTY IMPORTED_LOCATION ${ZLIB_LIBRARIES}) IF(WITH_C_API) INSTALL(DIRECTORY ${ZLIB_INCLUDE_DIR} DESTINATION third_party/zlib) diff --git a/cmake/flags.cmake b/cmake/flags.cmake index 2b125cef6aa8d1021afe8a7a0d232d84d36be4bc..1120677a37e0d44163816b66600121c8f0d545af 100644 --- a/cmake/flags.cmake +++ b/cmake/flags.cmake @@ -111,6 +111,8 @@ set(COMMON_FLAGS -Wno-error=sign-compare 
-Wno-error=unused-local-typedefs -Wno-error=parentheses-equality # Warnings in pybind11 + -Wno-error=ignored-attributes # Warnings in Eigen, gcc 6.3 + -Wno-error=terminate # Warning in PADDLE_ENFORCE ) set(GPU_COMMON_FLAGS diff --git a/cmake/generic.cmake b/cmake/generic.cmake index 7b82d409a3b64a5fc8fdfe526a2e82a4e1c9fa8e..66c8e3ad7ef7c80c1f388c25983425a0db5c0220 100644 --- a/cmake/generic.cmake +++ b/cmake/generic.cmake @@ -227,8 +227,8 @@ function(cc_test TARGET_NAME) set(multiValueArgs SRCS DEPS) cmake_parse_arguments(cc_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) add_executable(${TARGET_NAME} ${cc_test_SRCS}) - target_link_libraries(${TARGET_NAME} ${cc_test_DEPS} gtest gtest_main) - add_dependencies(${TARGET_NAME} ${cc_test_DEPS} gtest gtest_main) + target_link_libraries(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main paddle_memory gtest gflags) + add_dependencies(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main paddle_memory gtest gflags) add_test(NAME ${TARGET_NAME} COMMAND ${TARGET_NAME} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) endif() endfunction(cc_test) @@ -288,8 +288,8 @@ function(nv_test TARGET_NAME) set(multiValueArgs SRCS DEPS) cmake_parse_arguments(nv_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) cuda_add_executable(${TARGET_NAME} ${nv_test_SRCS}) - target_link_libraries(${TARGET_NAME} ${nv_test_DEPS} gtest gtest_main) - add_dependencies(${TARGET_NAME} ${nv_test_DEPS} gtest gtest_main) + target_link_libraries(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main paddle_memory gtest gflags) + add_dependencies(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main paddle_memory gtest gflags) add_test(${TARGET_NAME} ${TARGET_NAME}) endif() endfunction(nv_test) @@ -467,3 +467,50 @@ function(py_test TARGET_NAME) WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) endif() endfunction() + +# grpc_library generate grpc code using grpc_cpp_plugin and protoc +# then build the generated protobuf code and grpc code with your +# implementation source codes together. Use SRCS argument for your +# implementation source files and PROTO argument for your .proto +# files. +# +# Usage: grpc_library(my_target SRCS my_client.cc PROTO my_target.proto DEPS my_dep) + +function(grpc_library TARGET_NAME) + set(oneValueArgs PROTO) + set(multiValueArgs SRCS DEPS) + set(options "") + cmake_parse_arguments(grpc_library "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + + message(STATUS "generating grpc ${grpc_library_PROTO}") + + get_filename_component(ABS_PROTO ${grpc_library_PROTO} ABSOLUTE) + get_filename_component(PROTO_WE ${grpc_library_PROTO} NAME_WE) + get_filename_component(PROTO_PATH ${ABS_PROTO} PATH) + + protobuf_generate_cpp(grpc_proto_srcs grpc_proto_hdrs "${ABS_PROTO}") + set(grpc_grpc_srcs "${CMAKE_CURRENT_BINARY_DIR}/${PROTO_WE}.grpc.pb.cc") + set(grpc_grpc_hdrs "${CMAKE_CURRENT_BINARY_DIR}/${PROTO_WE}.grpc.pb.h") + cc_library("${TARGET_NAME}_proto" SRCS "${grpc_proto_srcs}") + + add_custom_command( + OUTPUT "${grpc_grpc_srcs}" "${grpc_grpc_hdrs}" + COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} + ARGS --grpc_out "${CMAKE_CURRENT_BINARY_DIR}" -I "${PROTO_PATH}" + --plugin=protoc-gen-grpc="${GRPC_CPP_PLUGIN}" "${ABS_PROTO}" + DEPENDS "${ABS_PROTO}" ${PROTOBUF_PROTOC_EXECUTABLE} extern_grpc) + + # FIXME(typhoonzero): grpc generated code do not generate virtual-dtor, mark it + # as compiler warnings instead of error. Should try remove the warnings also. 
+ set_source_files_properties( + ${grpc_grpc_srcs} + PROPERTIES + COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor") + cc_library("${TARGET_NAME}_grpc" SRCS "${grpc_grpc_srcs}") + + set_source_files_properties( + ${grpc_library_SRCS} + PROPERTIES + COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor") + cc_library("${TARGET_NAME}" SRCS "${grpc_library_SRCS}" DEPS "${TARGET_NAME}_grpc" "${TARGET_NAME}_proto" "${grpc_library_DEPS}") +endfunction() diff --git a/doc/api/index_en.rst b/doc/api/index_en.rst index 25c1dd00b9cbb3ab647e04cdc2b4c27c552a2332..e6f632e1a5b9c4b50b7c6aa96a120030bd6ce338 100644 --- a/doc/api/index_en.rst +++ b/doc/api/index_en.rst @@ -7,3 +7,4 @@ API v2/model_configs.rst v2/data.rst v2/run_logic.rst + v2/fluid.rst diff --git a/doc/api/v2/config/activation.rst b/doc/api/v2/config/activation.rst index eca3ce03bcdc599edca802d8dfca48d4f28275a2..5317e66b64bbd85c61f19700a9d2c1d239dee573 100644 --- a/doc/api/v2/config/activation.rst +++ b/doc/api/v2/config/activation.rst @@ -99,3 +99,10 @@ STanh .. automodule:: paddle.v2.activation :members: STanh :noindex: + +SoftSign +======== + +.. automodule:: paddle.v2.activation + :members: SoftSign + :noindex: diff --git a/doc/api/v2/config/layer.rst b/doc/api/v2/config/layer.rst index d4d182f6692e09b3e40f3620b77d9a0f20ec5af3..c3f9c18d0663a7a24880b441981875c1e4f015aa 100644 --- a/doc/api/v2/config/layer.rst +++ b/doc/api/v2/config/layer.rst @@ -54,7 +54,7 @@ img_conv .. _api_v2.layer_context_projection: -context_projection +context_projection ------------------ .. autoclass:: paddle.v2.layer.context_projection :noindex: @@ -70,7 +70,7 @@ Image Pooling Layer img_pool -------- .. autoclass:: paddle.v2.layer.img_pool - :noindex: + :noindex: spp --- @@ -104,7 +104,7 @@ sum_to_one_norm --------------- .. autoclass:: paddle.v2.layer.sum_to_one_norm :noindex: - + cross_channel_norm ------------------ .. autoclass:: paddle.v2.layer.cross_channel_norm @@ -114,7 +114,7 @@ row_l2_norm ----------- .. autoclass:: paddle.v2.layer.row_l2_norm :noindex: - + Recurrent Layers ================ @@ -415,6 +415,13 @@ multiplex .. autoclass:: paddle.v2.layer.multiplex :noindex: +Factorization Machine Layer +============================ + +factorization_machine +--------------------- +.. autoclass:: paddle.v2.layer.factorization_machine + :noindex: Slicing and Joining Layers ========================== diff --git a/doc/api/v2/fluid.rst b/doc/api/v2/fluid.rst new file mode 100644 index 0000000000000000000000000000000000000000..43fc19dc492bbc119f2356034b81c65e443db2fa --- /dev/null +++ b/doc/api/v2/fluid.rst @@ -0,0 +1,18 @@ +====================== +Fluid +====================== + +.. toctree:: + :maxdepth: 1 + + fluid/layers.rst + fluid/data_feeder.rst + fluid/executor.rst + fluid/initializer.rst + fluid/evaluator.rst + fluid/nets.rst + fluid/optimizer.rst + fluid/param_attr.rst + fluid/profiler.rst + fluid/regularizer.rst + diff --git a/doc/api/v2/fluid/data_feeder.rst b/doc/api/v2/fluid/data_feeder.rst new file mode 100644 index 0000000000000000000000000000000000000000..0fa78f7dfb04c13be7eb83b7fd35cb03f2f4a7fa --- /dev/null +++ b/doc/api/v2/fluid/data_feeder.rst @@ -0,0 +1,9 @@ +=========== +DataFeeder +=========== + +DataFeeder +----------- +.. 
automodule:: paddle.v2.fluid.data_feeder + :members: DataFeeder + :noindex: diff --git a/doc/api/v2/fluid/evaluator.rst b/doc/api/v2/fluid/evaluator.rst new file mode 100644 index 0000000000000000000000000000000000000000..a23f3301d0331e0ea3733f06444515eb4680cd31 --- /dev/null +++ b/doc/api/v2/fluid/evaluator.rst @@ -0,0 +1,9 @@ +=========== +Evaluator +=========== + +Evaluator +----------- +.. automodule:: paddle.v2.fluid.evaluator + :members: Evaluator + :noindex: diff --git a/doc/api/v2/fluid/executor.rst b/doc/api/v2/fluid/executor.rst new file mode 100644 index 0000000000000000000000000000000000000000..3a283538c120cfa1ef646c390bb71c6251c23675 --- /dev/null +++ b/doc/api/v2/fluid/executor.rst @@ -0,0 +1,9 @@ +=========== +Executor +=========== + +Executor +----------- +.. automodule:: paddle.v2.fluid.executor + :members: Executor + :noindex: diff --git a/doc/api/v2/fluid/initializer.rst b/doc/api/v2/fluid/initializer.rst new file mode 100644 index 0000000000000000000000000000000000000000..8f587837e9873370722062404f511654a9460587 --- /dev/null +++ b/doc/api/v2/fluid/initializer.rst @@ -0,0 +1,50 @@ +=========== +Initializer +=========== + + + +Initializer +----------- +.. automodule:: paddle.v2.fluid.initializer + :members: Initializer + :noindex: + + + +ConstantInitializer +------------------- +.. automodule:: paddle.v2.fluid.initializer + :members: ConstantInitializer + :noindex: + + + +UniformInitializer +------------------ +.. automodule:: paddle.v2.fluid.initializer + :members: UniformInitializer + :noindex: + + + +NormalInitializer +----------------- +.. automodule:: paddle.v2.fluid.initializer + :members: NormalInitializer + :noindex: + + +XavierInitializer +----------------- +.. automodule:: paddle.v2.fluid.initializer + :members: XavierInitializer + :noindex: + + +MSRAInitializer +--------------- +.. automodule:: paddle.v2.fluid.initializer + :members: MSRAInitializer + :noindex: + diff --git a/doc/api/v2/fluid/layers.rst b/doc/api/v2/fluid/layers.rst new file mode 100644 index 0000000000000000000000000000000000000000..89e5fec13bf9062dc7a7187b1334c8f5486a980b --- /dev/null +++ b/doc/api/v2/fluid/layers.rst @@ -0,0 +1,302 @@ +========== +Layers +========== + + +fc +--- +.. autofunction:: paddle.v2.fluid.layers.fc + :noindex: + +embedding +--------- +.. autofunction:: paddle.v2.fluid.layers.embedding + :noindex: + +dynamic_lstm +------------ +.. autofunction:: paddle.v2.fluid.layers.dynamic_lstm + :noindex: + +data +--------- +.. autofunction:: paddle.v2.fluid.layers.data + :noindex: + +mean +--------- +.. autofunction:: paddle.v2.fluid.layers.mean + :noindex: + +mul +--------- +.. autofunction:: paddle.v2.fluid.layers.mul + :noindex: + +elementwise_add +--------------- +.. autofunction:: paddle.v2.fluid.layers.elementwise_add + :noindex: + +elementwise_div +--------------- +.. autofunction:: paddle.v2.fluid.layers.elementwise_div + :noindex: + + +dropout +--------- +.. autofunction:: paddle.v2.fluid.layers.dropout + :noindex: + + +reshape +--------- +.. autofunction:: paddle.v2.fluid.layers.reshape + :noindex: + + +sigmoid +--------- +.. autofunction:: paddle.v2.fluid.layers.sigmoid + :noindex: + + +scale +--------- +.. autofunction:: paddle.v2.fluid.layers.scale + :noindex: + + +reshape +--------- +.. autofunction:: paddle.v2.fluid.layers.reshape + :noindex: + + +transpose +--------- +.. autofunction:: paddle.v2.fluid.layers.transpose + :noindex: + + +sigmoid_cross_entropy_with_logits +--------- +.. 
autofunction:: paddle.v2.fluid.layers.esigmoid_cross_entropy_with_logits + :noindex: + + +cast +--------- +.. autofunction:: paddle.v2.fluid.layers.cast + :noindex: + + +concat +--------- +.. autofunction:: paddle.v2.fluid.layers.concat + :noindex: + + +sums +--------- +.. autofunction:: paddle.v2.fluid.layers.sums + :noindex: + + +linear_chain_crf +--------- +.. autofunction:: paddle.v2.fluid.layers.linear_chain_crf + :noindex: + + +assign +--------- +.. autofunction:: paddle.v2.fluid.layers.embedding + :noindex: + + +split_lod_tensor +--------- +.. autofunction:: paddle.v2.fluid.layers.split_lod_tensor + :noindex: + + +merge_lod_tensor +--------- +.. autofunction:: paddle.v2.fluid.layers.merge_lod_tensor + :noindex: + +cos_sim +--------- +.. autofunction:: paddle.v2.fluid.layers.cos_sim + :noindex: + + +cross_entropy +--------- +.. autofunction:: paddle.v2.fluid.layers.cross_entropy + :noindex: + + + +square_error_cost +--------- +.. autofunction:: paddle.v2.fluid.layers.square_error_cost + :noindex: + + +accuracy +--------- +.. autofunction:: paddle.v2.fluid.layers.accuracy + :noindex: + + +sequence_conv +--------- +.. autofunction:: paddle.v2.fluid.layers.sequence_conv + :noindex: + + +conv2d +--------- +.. autofunction:: paddle.v2.fluid.layers.conv2d + :noindex: + + +sequence_pool +--------- +.. autofunction:: paddle.v2.fluid.layers.sequence_pool + :noindex: + + +pool2d +--------- +.. autofunction:: paddle.v2.fluid.layers.pool2d + :noindex: + + +batch_norm +--------- +.. autofunction:: paddle.v2.fluid.layers.batch_norm + :noindex: + + +beam_search_decode +--------- +.. autofunction:: paddle.v2.fluid.layers.beam_search_decode + :noindex: + + +lstm +--------- +.. autofunction:: paddle.v2.fluid.layers.lstm + :noindex: + + +lod_rank_table +--------- +.. autofunction:: paddle.v2.fluid.layers.lod_rank_table + :noindex: + + +max_sequence_len +--------- +.. autofunction:: paddle.v2.fluid.layers.max_sequence_len + :noindex: + + +topk +--------- +.. autofunction:: paddle.v2.fluid.layers.topk + :noindex: + + +lod_tensor_to_array +--------- +.. autofunction:: paddle.v2.fluid.layers.lod_tensor_to_array + :noindex: + + + +array_to_lod_tensor +--------- +.. autofunction:: paddle.v2.fluid.layers.array_to_lod_tensor + :noindex: + + + + +fill_constant +--------- +.. autofunction:: paddle.v2.fluid.layers.fill_constant + :noindex: + + + +fill_constant_batch_size_like +--------- +.. autofunction:: paddle.v2.fluid.layers.fill_constant_batch_size_like + :noindex: + + +ones +--------- +.. autofunction:: paddle.v2.fluid.layers.ones + :noindex: + + +zeros +--------- +.. autofunction:: paddle.v2.fluid.layers.zeros + :noindex: + + +increment +--------- +.. autofunction:: paddle.v2.fluid.layers.increment + :noindex: + + +array_write +--------- +.. autofunction:: paddle.v2.fluid.layers.array_write + :noindex: + + + +create_array +--------- +.. autofunction:: paddle.v2.fluid.layers.create_array + :noindex: + + +less_than +--------- +.. autofunction:: paddle.v2.fluid.layers.less_than + :noindex: + + +array_read +--------- +.. autofunction:: paddle.v2.fluid.layers.array_read + :noindex: + + +shrink_memory +--------- +.. autofunction:: paddle.v2.fluid.layers.shrink_memory + :noindex: + + +array_length +--------- +.. autofunction:: paddle.v2.fluid.layers.array_length + :noindex: + + +conv2d_transpose +--------- +.. 
autofunction:: paddle.v2.fluid.layers.conv2d_transpose + :noindex: + diff --git a/doc/api/v2/fluid/nets.rst b/doc/api/v2/fluid/nets.rst new file mode 100644 index 0000000000000000000000000000000000000000..2c3d075422de29c96e25458e831133a30270dd39 --- /dev/null +++ b/doc/api/v2/fluid/nets.rst @@ -0,0 +1,22 @@ +=========== +Nets +=========== + +simple_img_conv_pool +----------- +.. autofunction:: paddle.v2.fluid.nets.simple_img_conv_pool + :noindex: + + +img_conv_group +----------- +.. autofunction:: paddle.v2.fluid.nets.img_conv_group + :noindex: + + +sequence_conv_pool +----------- +.. autofunction:: paddle.v2.fluid.nets.sequence_conv_pool + :noindex: + + diff --git a/doc/api/v2/fluid/optimizer.rst b/doc/api/v2/fluid/optimizer.rst new file mode 100644 index 0000000000000000000000000000000000000000..233762fcdfb39e592740adef6721a556fae3feef --- /dev/null +++ b/doc/api/v2/fluid/optimizer.rst @@ -0,0 +1,54 @@ +=========== +Optimizer +=========== + +Optimizer +----------- +.. automodule:: paddle.v2.fluid.optimizer + :members: Optimizer + :noindex: + + +SGDOptimizer +----------- +.. automodule:: paddle.v2.fluid.optimizer + :members: SGDOptimizer + :noindex: + + + +MomentumOptimizer +----------- +.. automodule:: paddle.v2.fluid.optimizer + :members: MomentumOptimizer + :noindex: + + + +AdagradOptimizer +----------- +.. automodule:: paddle.v2.fluid.optimizer + :members: AdagradOptimizer + :noindex: + + +AdamOptimizer +----------- +.. automodule:: paddle.v2.fluid.optimizer + :members: AdamOptimizer + :noindex: + + +AdamaxOptimizer +----------- +.. automodule:: paddle.v2.fluid.optimizer + :members: AdamaxOptimizer + :noindex: + + +DecayedAdagradOptimizer +----------- +.. automodule:: paddle.v2.fluid.optimizer + :members: DecayedAdagradOptimizer + :noindex: + diff --git a/doc/api/v2/fluid/param_attr.rst b/doc/api/v2/fluid/param_attr.rst new file mode 100644 index 0000000000000000000000000000000000000000..ca0c8af9e8c4f2271de7a131ad0d27c0e8635f50 --- /dev/null +++ b/doc/api/v2/fluid/param_attr.rst @@ -0,0 +1,11 @@ +=========== +ParamAttr +=========== + + + +ParamAttr +----------- +.. automodule:: paddle.v2.fluid.param_attr + :members: ParamAttr + :noindex: diff --git a/doc/api/v2/fluid/profiler.rst b/doc/api/v2/fluid/profiler.rst new file mode 100644 index 0000000000000000000000000000000000000000..7d4042d1f41c12c4a551ba6576559d612116872a --- /dev/null +++ b/doc/api/v2/fluid/profiler.rst @@ -0,0 +1,10 @@ +=========== +Profiler +=========== + + + +Profiler +----------- +.. autofunction:: paddle.v2.fluid.profiler.cuda_profiler + :noindex: diff --git a/doc/api/v2/fluid/regularizer.rst b/doc/api/v2/fluid/regularizer.rst new file mode 100644 index 0000000000000000000000000000000000000000..3af2b07d2ae55d99df705fbf1ad2402eee05c435 --- /dev/null +++ b/doc/api/v2/fluid/regularizer.rst @@ -0,0 +1,25 @@ +=========== +Regularizer +=========== + +WeightDecayRegularizer +----------- +.. automodule:: paddle.v2.fluid.regularizer + :members: WeightDecayRegularizer + :noindex: + + +L2DecayRegularizer +----------- +.. automodule:: paddle.v2.fluid.regularizer + :members: L2DecayRegularizer + :noindex: + + + +L1DecayRegularizer +----------- +.. 
automodule:: paddle.v2.fluid.regularizer + :members: L1DecayRegularizer + + diff --git a/doc/design/evaluator.md b/doc/design/evaluator.md index a62d75ffef14962aec8c7587e172d78dfe0cb4be..11cc129d56905a9ee666da92fbe6f8559c6d325a 100644 --- a/doc/design/evaluator.md +++ b/doc/design/evaluator.md @@ -1,22 +1,22 @@ ## Evaluator Design -### The Problem +### Problem Statement -During training or serving, we provide the evaluation function to measure the model performance, e.g., accuracy, precision. In the operator based framework design, the data go through the network pipeline batch by batch. As a result, inside the operator, we only can calculate one minibatch metrics. We need to provide a mechanism to calculate the metrics for each N pass/batch the user wanted. +During training or inference, we provide an evaluation function to measure the model performance, for example, accuracy, precision, etc. In the operator based framework design, the data passes through the network pipeline batch by batch. As a result, inside the operator, we only calculate the metrics for one minibatch. Thus, we need to provide a mechanism to calculate the metrics for each N pass/batch the user wants. ### Evaluator Design -Currently, every operation is expressed in the graph. we divide the evaluator process into three steps. +Currently, every operation is expressed in the graph. We divide the evaluator process into three steps. 1. Initialize the metric state and add it into the block. -2. Calculate the statistic of the metric state in every mini-batch. The single operator is only responsible for calculating necessary statistics for one mini-batch. For example, accuracy operator only calculate a minibatch data if run once. +2. Calculate the concerned metrics for every mini-batch. The single evaluator operator is only responsible for calculating the necessary statistics for one mini-batch. For example, the accuracy operator only calculates the accuracy for a minibatch data if run once. 3. Merge the mini-batch statistics to form the evaluation result for multiple mini-batches. When it comes to distributed training/Multi-GPU training, aggregate the value from different devices. ### Implementation -This design is shown in python API. -Each metric operator need to caculate the metric statistic and return the batch aware states, Python side responsible for accumulate the states for each pass. +This design is shown in the Python API. +Each metric operator needs to caculate the metric statistic and return the batch-aware states. Python side is responsible for accumulating the states for each pass. ```python diff --git a/doc/design/float16.md b/doc/design/float16.md index 078801ba2ed969d26dd31d5ec4ed268686cf7016..1ea95ed6b5d6792171569b6ff76d09be92fcb13e 100644 --- a/doc/design/float16.md +++ b/doc/design/float16.md @@ -28,6 +28,51 @@ The goal of float16 is to serve as a key for the executor to find and run the co - [Eigen](https://github.com/RLovelett/eigen) >= 3.3 supports float16 calculation on both GPU and CPU using the `Eigen::half` class. It is mostly useful for Nvidia GPUs because of the overloaded arithmetic operators using cuda intrinsics. It falls back to using software emulation on CPU for calculation and there is no special treatment to ARM processors. - [ARM compute library](https://github.com/ARM-software/ComputeLibrary) >= 17.02.01 supports NEON FP16 kernels (requires ARMv8.2-A CPU). +### CUDA version issue +There are currently three versions of CUDA that supports `__half` data type, namely, CUDA 7.5, 8.0, and 9.0. 
+CUDA 7.5 and 8.0 define `__half` as a simple struct that has a `uint16_t` member (see [`cuda_fp16.h`](https://github.com/ptillet/isaac/blob/9212ab5a3ddbe48f30ef373f9c1fb546804c7a8c/include/isaac/external/CUDA/cuda_fp16.h)) as follows: +``` +typedef struct __align__(2) { + unsigned short x; +} __half; + +typedef __half half; +``` +This struct does not define any overloaded arithmetic operators, so you have to use `__hadd` directly instead of `+` to correctly add two half types: +``` +__global__ void Add() { + half a, b, c; + c = __hadd(a, b); // correct + c = a + b; // compiler error: no operator "+" matches these operands +} +``` +CUDA 9.0 provides a major update to the half data type. The related code can be found in the updated [`cuda_fp16.h`](https://github.com/ptillet/isaac/blob/master/include/isaac/external/CUDA/cuda_fp16.h) and the newly added [`cuda_fp16.hpp`](https://github.com/ptillet/isaac/blob/master/include/isaac/external/CUDA/cuda_fp16.hpp). + +Essentially, CUDA 9.0 renames the original `__half` type in 7.5 and 8.0 as `__half_raw`, and defines a new `__half` class type that has constructors, conversion operators, and also provides overloaded arithmetic operators, as follows: +``` +typedef struct __CUDA_ALIGN__(2) { + unsigned short x; +} __half_raw; + + +struct __CUDA_ALIGN__(2) __half { +protected: + unsigned short __x; +public: + // constructors and conversion operators from/to + // __half_raw and other built-in data types +}; + +typedef __half half; + +__device__ __forceinline__ +__half operator+(const __half &lh, const __half &rh) { + return __hadd(lh, rh); +} + +// Other overloaded operators +``` +This new design makes `c = a + b` work correctly for the CUDA half data type. ## Implementation The float16 class holds a 16-bit `uint16_t` data internally. diff --git a/doc/design/fluid-compiler.graffle b/doc/design/fluid-compiler.graffle new file mode 100644 index 0000000000000000000000000000000000000000..c933df2cb855462c52b2d25f7f9a99b95652961d Binary files /dev/null and b/doc/design/fluid-compiler.graffle differ diff --git a/doc/design/fluid-compiler.png b/doc/design/fluid-compiler.png new file mode 100644 index 0000000000000000000000000000000000000000..1b0ffed2039c91a3a00bbb719da08c91c3acf7bb Binary files /dev/null and b/doc/design/fluid-compiler.png differ diff --git a/doc/design/fluid.md b/doc/design/fluid.md new file mode 100644 index 0000000000000000000000000000000000000000..585dc8ef39c0cfb30f470d79f7b27a59ceb5e940 --- /dev/null +++ b/doc/design/fluid.md @@ -0,0 +1,122 @@ +# Design Doc: PaddlePaddle Fluid + +## Why Fluid + +When Baidu developed PaddlePaddle in 2013, the only well-known open source deep learning system at the time was Caffe. However, when PaddlePaddle was open-sourced in 2016, many other choices were available. There was a challenge -- what is the need for open sourcing yet another deep learning framework? + +Fluid is the answer. Fluid is similar to PyTorch and TensorFlow Eager Execution, which describe the "process" of training or inference rather than a model. In fact, in PyTorch, TensorFlow Eager Execution and Fluid, there is no concept of a model at all. The details are covered in the sections below. Fluid currently takes this idea further than PyTorch and Eager Execution, and we are trying to push Fluid towards the direction of a compiler and a new programming language for deep learning. + +## The Evolution of Deep Learning Systems + +Deep learning infrastructure is one of the fastest evolving technologies. 
Within four years, there have already been three generations of technologies invented. + +| Existed since | model as sequence of layers | model as graph of operators | No model | +|--|--|--|--| +| 2013 | Caffe, Theano, Torch, PaddlePaddle | | | +| 2015 | | TensorFlow, MxNet, Caffe2, ONNX, n-graph | | +| 2016 | | | PyTorch, TensorFlow Eager Execution, PaddlePaddle Fluid | + +From the above table, we see that the deep learning technology is evolving towards getting rid of the concept of a model. To understand the reasons behind this direction, a comparison of the *programming paradigms* or the ways to program deep learning applications using these systems, would be helpful. The following section goes over these. + +## Deep Learning Programming Paradigms + +With the systems listed as the first or second generation, e.g., Caffe or TensorFlow, an AI application training program looks like the following: + +```python +x = layer.data("image") +l = layer.data("label") +f = layer.fc(x, W) +s = layer.softmax(f) +c = layer.mse(l, s) + +for i in xrange(1000): # train for 1000 iterations + m = read_minibatch() + forward({input=x, data=m}, minimize=c) + backward(...) + +print W # print the trained model parameters. +``` + +The above program includes two parts: + +1. The first part describes the model, and +2. The second part describes the training process (or inference process) for the model. + +This paradigm has a well-known problem that limits the productivity of programmers. If the programmer made a mistake in configuring the model, the error messages wouldn't show up until the second part is executed and `forward` and `backward` propagations are performed. This makes it difficult for the programmer to debug and locate a mistake that is blocks away from the actual error prompt. + +This problem of being hard to debug and re-iterate fast on a program is the primary reason that programmers, in general, prefer PyTorch over the older systems. Using PyTorch, we would write the above program as follows: + +```python +W = tensor(...) + +for i in xrange(1000): # train for 1000 iterations + m = read_minibatch() + x = m["image"] + l = m["label"] + f = layer.fc(x, W) + s = layer.softmax(f) + c = layer.mse(l, s) + backward() + +print W # print the trained model parameters. +``` + +We can see that the main difference is moving the model configuration part (the first step) into the training loop. This change would allow the mistakes in model configuration to be reported where they actually appear in the programming block. This change also better represents the model, or rather its forward pass, by keeping the configuration process in the training loop. + +## Describe Arbitrary Models for the Future + +Describing the process instead of the model also brings Fluid the flexibility to define different non-standard models that haven't been invented yet. + +As we write out the program for the process, we can write an RNN as a loop, instead of an RNN as a layer or as an operator. A PyTorch example would look like the following: + +```python +for i in xrange(1000): + m = read_minibatch() + x = m["sentence"] + for t in xrange(x.len()): + h[t] = the_step(x[t]) +``` + +With Fluid, the training loop and the RNN in the above program are not really Python loops, but just a "loop structure" provided by Fluid and implemented in C++, as follows: + +```python +train_loop = layers.While(cond) +with train_loop.block(): + m = read_minibatch() + x = m["sentence"] + rnn = layers.While(...) 
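  # Note: layers.While only declares a loop structure; the statements under
  # rnn.block() below are recorded into the program (its ProgramDesc) and are
  # executed later by the C++ runtime, not by the Python interpreter.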
+ with rnn.block(): + h[t] = the_step(input[t]) +``` + +An actual Fluid example is described [here](https://github.com/PaddlePaddle/Paddle/blob/a91efdde6910ce92a78e3aa7157412c4c88d9ee8/python/paddle/v2/fluid/tests/test_while_op.py#L36-L44). + +From the example, the Fluid programs look very similar to their PyTorch equivalent programs, except that Fluid's loop structure, wrapped with Python's `with` statement, could run much faster than just a Python loop. + +We have more examples of the [`if-then-else`](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/if_else_op.md) structure of Fluid. + +## Turing Completeness + +In computability theory, a system of data-manipulation rules, such as a programming language, is said to be Turing complete if it can be used to simulate any Turing machine. For a programming language, if it provides if-then-else and loop, it is Turing complete. From the above examples, Fluid seems to be Turing complete; however, it is worth noting that there is a slight difference between the `if-then-else` of Fluid and that of a programming language. The difference is that the former runs both of its branches and splits the input mini-batch into two -- one for the True condition and another for the False condition. It has not been researched in depth whether this is equivalent to the `if-then-else` in programming languages that makes them Turing-complete. Based on a conversation with [Yuan Yu](https://research.google.com/pubs/104812.html), it seems to be the case but this needs to be looked into in-depth. + +## The Execution of a Fluid Program + +There are two ways to execute a Fluid program. When a program is executed, it creates a protobuf message [`ProgramDesc`](https://github.com/PaddlePaddle/Paddle/blob/a91efdde6910ce92a78e3aa7157412c4c88d9ee8/paddle/framework/framework.proto#L145) that describes the process and is conceptually like an [abstract syntax tree](https://en.wikipedia.org/wiki/Abstract_syntax_tree). + +There is a C++ class [`Executor`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/executor.h), which runs a `ProgramDesc`, similar to how an interpreter runs a Python program. + +Fluid is moving towards the direction of a compiler, which is explained in more detail later in this article. + +## Backward Compatibility of Fluid + +Given all the advantages from the removal of the concept of a *model*, hardware manufacturers might still prefer the existence of the concept of a model, so it would be easier for them to support multiple frameworks all at once and to run a trained model during inference. For example, Nervana, a startup company acquired by Intel, has been working on an XPU that reads the models in the format known as [n-graph](https://github.com/NervanaSystems/ngraph). Similarly, [Movidius](https://www.movidius.com/) is producing a mobile deep learning chip that reads and runs graphs of operators. The well-known [ONNX](https://github.com/onnx/onnx) is also a file format of graphs of operators. + +For Fluid, we can write a converter that extracts the parts in the `ProgramDesc` protobuf message, converts them into a graph of operators, and exports the graph into the ONNX or n-graph format. + +## Towards a Deep Learning Language and the Compiler + +We can change the `if-then-else` and loop structure a little bit in the above Fluid example programs, to make it into a new programming language, different from Python. 
+ +Even if we do not invent a new language, as long as we get the `ProgramDesc` message filled in, we can write a transpiler, which translates each invocation to an operator, into a C++ call to a kernel function of that operator. For example, a transpiler that weaves the CUDA kernels outputs an NVIDIA-friendly C++ program, which can be built using `nvcc`. Another transpiler could generate MKL-friendly code that should be built using `icc` from Intel. More interestingly, we can translate a Fluid program into its distributed version of two `ProgramDesc` messages, one for running on the trainer process, and the other one for the parameter server. For more details of the last example, the [concurrent programming design](concurrent_programming.md) document would be a good pointer. The following figure explains the proposed two-stage process: + +![](fluid-compiler.png) diff --git a/doc/design/mkldnn/README.MD b/doc/design/mkldnn/README.MD index ec6d4681836e189f46dbb9b915a237dc15cda7cf..61d453de243c25defc56161641bc4a888a88a3b7 100644 --- a/doc/design/mkldnn/README.MD +++ b/doc/design/mkldnn/README.MD @@ -1,72 +1,164 @@ # Intel® MKL-DNN on PaddlePaddle: Design Doc -我们计划将Intel深度神经网络数学库(**MKL-DNN**\[[1](#references)\])集成到PaddlePaddle,充分展现英特尔平台的优势,有效提升PaddlePaddle在英特尔架构上的性能。 +我们计划将英特尔深度神经网络数学库[Intel MKL-DNN](https://github.com/01org/mkl-dnn) +(Intel Math Kernel Library for Deep Neural Networks)集成到PaddlePaddle, +充分展现英特尔平台的优势,有效提升PaddlePaddle在英特尔架构上的性能。 -我们短期内的基本目标是: +
+
+Figure 1. PaddlePaddle on IA +
+ +近期目标 -- 完成常用layer的MKL-DNN实现。 +- 完成常用Layer的MKL-DNN实现。 - 完成常见深度神经网络VGG,GoogLeNet 和 ResNet的MKL-DNN实现。 +目前的优化,主要针对PaddlePaddle在重构之前的代码框架以及V1的API。 +具体的完成状态可以参见[这里](https://github.com/PaddlePaddle/Paddle/projects/21)。 ## Contents - [Overview](#overview) - [Actions](#actions) - [CMake](#cmake) + - [Matrix](#matrix) - [Layers](#layers) - [Activations](#activations) - - [Weights](#weights) + - [Parameters](#parameters) + - [Gradients](#gradients) - [Unit Tests](#unit-tests) - - [Protobuf Messages](#protobuf-messages) - [Python API](#python-api) - - [Demos](#demos) - [Benchmarking](#benchmarking) - [Others](#others) - [Design Concerns](#design-concerns) ## Overview -我们会把MKL-DNN作为第三方库集成进PaddlePaddle,整体框架图 +我们会把MKL-DNN会作为第三方库集成进PaddlePaddle,与其他第三方库一样,会在编译PaddlePaddle的时候下载并编译MKL-DNN。 + +同时,为了进一步提升PaddlePaddle在基本数学运算的计算速度,我们也将MKLML即(MKL small library\[[1](#references)\]) +作为另一个第三方库集成进PaddlePaddle,它只会包括生成好的动态库和头文件。 + +MKL,MKLML以及MKL-DNN三者关系如下表: + +| Name | Open Source | License | Descriptions | +| :---------- | :--------------- | :---------- | :------------ | +| MKL | No | Proprietary | Accelerate math processing routines | +| MKLML | No | Proprietary | Small package of MKL, especially for Machine Learning | +| MKL-DNN | Yes | Apache 2.0 | Accelerate primitives processing routines especially for Deep Neural Networks | + +MKLML可以与MKL-DNN共同使用,以此达到最好的性能。 +
-
-Figure 1. PaddlePaddle on IA. +
+Figure 2. PaddlePaddle with MKL Engines
## Actions -我们把集成方案大致分为了如下几个方面。 + +添加的相关文件和目录结构如下: + +```txt +PaddlePaddle/Paddle +├── ... +├── cmake/ +│ ├── external/ +│ │ ├── ... +│ │ ├── mkldnn.cmake +│ │ └── mklml.cmake +└── paddle/ + ├── ... + ├── math/ + │ ├── ... + │ └── MKLDNNMatrix.* + └── gserver/ + ├── ... + ├── layers/ + │ ├── ... + │ └── MKLDNN*Layer.* + ├── activations/ + │ ├── ... + │ └── MKLDNNActivations.* + └── tests/ + ├── ... + ├── MKLDNNTester.* + └── test_MKLDNN.cpp +``` ### CMake -我们会在`CMakeLists.txt`中会给用户添加一个`WITH_MKL`的开关,他是负责`WITH_MKLML`和`WITH_MKLDNN`的总开关。 +在`CMakeLists.txt`中提供一个与MKL有关的总开关:`WITH_MKL`,它负责决定编译时是否使用MKLML和MKL-DNN -当打开`WITH_MKL`时,会开启MKLML的功能,作为PaddlePaddle的CBLAS和LAPACK库,同时会开启Intel OpenMP用于提高MKLML的性能。 如果系统支持AVX2指令集及以上,同时会开启MKL-DNN功能。 +- `WITH_MKLML` 控制是否使用MKLML库。 +当打开`WITH_MKL`时,会自动使用MKLML库作为PaddlePaddle的CBLAS和LAPACK库,同时会开启Intel OpenMP用于提高MKLML的性能。 +编译时会把对应的头文件和库放在`build/third_party/install/mklml/*`目录下对应的地方。 +MKLML的库目前都是动态库,主要包括`libiomp5.so`和`libmklml_intel.so`。 +- `WITH_MKLDNN` 控制是否使用MKL-DNN。 +当开启`WITH_MKL`时,会自动根据硬件配置[[2](#references)]选择是否编译MKL-DNN。 +编译时会把对应的头文件和库放在`build/third_party/install/mkldnn/*`目录下对应的地方。 +MKL-DNN的库目前只有动态库`libmkldnn.so`。 -当关闭`WITH_MKL`时,MKLML和MKL-DNN功能会同时关闭。 +### Matrix +目前在PaddlePaddle中数据都是以`NCHW`的格式存储,但是在MKL-DNN中的排列方式不止这一种。 +所以我们定义了一个`MKLDNNMatrix`用于管理MKL-DNN数据的不同格式以及相互之间的转换。 -所以,我们会在`cmake/external`目录新建`mkldnn.cmake`和`mklml.cmake`文件,它们会在编译PaddlePaddle的时候下载对应的软件包,并放到PaddlePaddle的third party目录中。 +
+
+Figure 3. MKLDNNMatrix +
### Layers -所有MKL-DNN相关的C++ layers,都会按照PaddlePaddle的目录结构存放在 -`paddle/gserver/layers`中,并且文件名都会一以*MKLDNN*开头。 +所有MKL-DNN的Layers都会继承于`MKLDNNLayer`,该类继承于PaddlePaddle的基类`Layer`。 +在`MKLDNNLayer`中会提供一些必要的接口和函数,并且会写好`forward`和`backward`的基本逻辑, +子类只需要使用定义好的接口,实现具体的函数功能即可。 + +
+
+Figure 4. MKLDNNLayer +
+ +每个MKLDNNLayer都包含用于内部存储和外部存储的一系列MKLDNNMatrix: -所有MKL-DNN的layers都会继承于一个叫做`MKLDNNLayer`的父类,该父类继承于PaddlePaddle的基类`Layer`。 +- 内部存储(internel memory):`inVal_`,`inGrad_`,`outVal_`和`outGrad_`,分别代表输入数据,输入梯度,输出数据和输出梯度。 +- 外部存储(external memory):都是以ext开头,比如`extInVal_`和`extInGrad_`,它们主要是用于, +当数据格式与PaddlePaddle默认的`NCHW`格式不匹配时,转换内存的工作。 +需要注意的是,PaddlePaddle的activation会直接使用`output_.value`和`output_.grad`, +所以`extOutVal_`和`extOutGrad_`必须分别与`output_.value`和`output_.grad`共享内存, +如果不需要外部存储用于转换,那么对应的内部存储也会与它们共享内存。 +- 转换函数(resetXXX): 包括`resetInValue`,`resetInGrad`,`resetOutValue`和`resetOutGrad`, +表示对输入数据,输入梯度,输出数据和输出梯度的转换。 +这些函数会根据输入参数重新设置内部和外部存储,当然这两者也可以相等,即表示不需要转换。 -在`MKLDNNLayer`中会提供一些必要的接口和函数,并且会写好`forward`和`backward`的基本逻辑。部分函数定义为纯虚函数,子类只需要实现这些函数即可。 +注意:每个`MKLDNNlayer`的子类只需要使用内部存储就可以了,所有外部的转换工作都会在reset系列函数中都准备好。 ### Activations -由于在PaddlePaddle中,激活函数是独立于layer概念的,所以会在`paddle/gserver/activations`目录下添加`MKLDNNActivation.h`和`MKLDNNActivation.cpp`文件用于定义和使用MKL-DNN的接口。 +在重构前的PaddlePaddle中,激活函数是独立于`Layer`的概念,并且输入输出都是共用一块内存, +所以添加了对应的`MKLDNNActivation`来实现,方式类似于`MKLDNNLayer`。 + +### Parameters +对于有参数的层,我们会保证`MKLDNNLayer`使用的参数与PaddlePaddle申请的buffer共用一块内存。 +如果存在数据排列格式不一样的情况时,我们会在网络训练之前把格式转换为MKL-DNN希望的格式, +在训练结束的时候再保存为PaddlePaddle的格式,但是整个训练过程中不需要任何转换。 +这样既使得最终保存的参数格式与PaddlePaddle一致,又可以避免不必要的转换。 + +### Gradients +由于MKL-DNN的操作都是直接覆盖的形式,也就是说输出的结果不会在原来的数据上累加, +这样带来的好处就是不需要一直清空memory,节省了不必要的操作。 +但是注意的是,当网络出现分支且在`backward`的时候,需要累加不同Layer传过来的梯度。 +所以在`MKLDNNlayer`中实现了一个merge的方法,此时每个小分支的`Input Gradient` +会先临时保存在`MKLDNNMatrix`中,由分支处的Layer负责求和,并把结果放到当前层的`output_.grad`中。 +所以整体上,在实现每个子类的时候就不需要关心分支的事情了。 -### Weights -由于有些layer是含有参数的,我们会尽量让MKL-DNN的参数与PaddlePaddle中`parameter`共享一块内存。 -同时,由于MKL-DNN在训练时使用的参数layout可能与PaddlePaddle默认的`nchw`不一致,我们会在网络训练的开始和结束时分别转换这个layout,使得最终保存的参数格式与PaddlePaddle一致。 +
+
+Figure 5. Merge Gradients +
### Unit Tests -会在`paddle/gserver/test`目录下添加`test_MKLDNN.cpp`和`MKLDNNTester.*`用于MKL-DNN的测试。 -测试分为每个layer(或activation)的单元测试和简单网络的整体测试。 +我们会添加`test_MKLDNN.cpp`和`MKLDNNTester.*`用于MKL-DNN的测试。 +测试分为每个Layer(或Activation)的单元测试和简单网络的整体测试。 每个测试会对比PaddlePaddle中CPU算出的结果与MKL-DNN的结果,小于某个比较小的阈值认为通过。 -### Protobuf Messages -根据具体layer的需求可能会在`proto/ModelConfig.proto`里面添加必要的选项。 - ### Python API 目前只考虑**v1 API**。 @@ -80,41 +172,40 @@ if use_mkldnn self.layer_type = mkldnn_* ``` -所有MKL-DNN的layer type会以*mkldnn_*开头,以示区分。 - -并且可能在`python/paddle/trainer_config_helper`目录下的`activations.py `和`layers.py`里面添加必要的MKL-DNN的接口。 +所有MKL-DNN的`layer_type`会以*mkldnn_*开头,这些会在`MKLDNN*Layer`注册layer的时候保证,以示区分。 -### Demos - -会在`v1_api_demo`目录下添加一个`mkldnn`的文件夹,里面放入一些用于MKL-DNN测试的demo脚本。 +同时,会在`paddle/utils.Flags`中添加一个`use_mkldnn`的flag,用于选择是否使用MKL-DNN的相关功能。 ### Benchmarking -会添加`benchmark/paddle/image/run_mkldnn.sh`,用于测试使用MKL-DNN之后的性能。 +会添加相应的脚本在[这里](https://github.com/PaddlePaddle/Paddle/tree/develop/benchmark/paddle/image),用于测试和对比在使用MKL-DNN前后的CNN网络性能。 +测试的性能对比结果会在[IntelOptimizedPaddle.md](https://github.com/PaddlePaddle/Paddle/blob/develop/benchmark/IntelOptimizedPaddle.md) ### Others -1. 如果在使用MKL-DNN的情况下,会把CPU的Buffer对齐为64。 +1. 如果在使用MKL-DNN的情况下,会把CPU的Buffer对齐为4096,具体可以参考MKL-DNN中的[memory](https://github.com/01org/mkl-dnn/blob/master/include/mkldnn.hpp#L673)。 2. 深入PaddlePaddle,寻找有没有其他可以优化的可能,进一步优化。比如可能会用OpenMP改进SGD的更新性能。 ## Design Concerns -为了更好的符合PaddlePaddle的代码风格\[[2](#references)\],同时又尽可能少的牺牲MKL-DNN的性能\[[3](#references)\]。 +为了更好的符合PaddlePaddle的代码风格\[[3](#references)\],同时又尽可能少的牺牲MKL-DNN的性能\[[4](#references)\]。 我们总结出一些特别需要注意的点: -1. 使用**deviceId_**。为了尽可能少的在父类Layer中添加变量或者函数,我们决定使用已有的`deviceId_`变量来区分layer的属性,定义`-2`为`MKLDNNLayer`特有的设备ID。 -2. 重写父类Layer的**init**函数,修改`deviceId_`为`-2`,代表这个layer是用于跑在MKL-DNN的环境下。 -3. 创建`MKLDNNMatrix`,同时继承`CpuMatrix`和`mkldnn::memory`。用于管理MKL-DNN会用到的相关memory函数、接口以及会用的到格式信息。 -4. 创建`MKLDNNBase`,定义一些除了layer和memory相关的类和函数。包括MKL-DNN会用到`MKLDNNStream`和`CPUEngine`,和未来可能还会用到`FPGAEngine`等。 -5. 每个`MKLDNNlayer`都会有`inVal_`,`inGrad_`,`outVal_`和`outGrad_`,分别代表input value, input gradient,output value和output gradient。他们会存放MKL-DNN用到的internal memory。同时还会定义以*ext*开头的`MKLDNNMatrix`(表示external的memory),主要是在格式与PaddlePaddle默认的`nchw`格式不匹配时,用于转换内存的工作。必要的转换函数也会在`MKLDNNLayer`中提前定义好,每个子类只需要调用定义好的reset buffer函数即可。 -6. 每个`MKLDNNlayer`的resetbuffer相关的函数(包括reset input、output的Value和grad),他们会根据输入参数reset internal和external的memory,当然这两者也可以相等,即表示不需要转换。只需要把握一个原则,每个`MKLDNNlayer`的子类,只需要使用internal的memory就可以了,所有external的转换工作在父类的reset函数中都提前准备好了。 -7. 一般来说,external的memory会尽量与PaddlePaddle中的`value`和`grad`共享内存。同时每个`MKLDNNLayer`中的external output value和gradient(也就是`extOutVal_`和`extOutGrad_`)必须分别与`output_.value`和`output_.grad`共享内存,因为PaddlePaddle的activation会直接使用`output_.value`和`output_.grad`。如果不需要external的buffer用于转换,那么internal的buffer也会与他们共享内存。 -8. 如果MKL-DNN layer的后面接有cpu device,那么就会使`output_.value`与`extOutVal_`共享内存,同时数据格式就是`nchw`,这样下一个cpu device就能拿到正确的数据。在有cpu device的时候,external的memory的格式始终是`nchw`或者`nc`。 -9. 由于MKL-DNN的输出操作都是覆盖data的,不是在原来的数据上累加,所以当网络出现分支时,在`backward`时会需要merge不同layer的梯度。`MKLDNNlayer`中会实现merge的方法,此时每个小分支的input gradient会先临时保存在一个`MKLDNNMatrix`中,由分支处的layer负责求和,并把结果放到这个layer的`output_.grad`中。所以整体上,每个子类并不会需要关心分支的事情,也是在父类都实现好了。 -10. 在原来的`FLAGS`中添加一个`use_mkldnn`的flag,用于选择是否使用MKL-DNN的相关功能。 +1. 使用**deviceId_**。为了尽可能少的在父类Layer中添加变量或者函数, +我们决定使用已有的`deviceId_`变量来区分layer的属性,定义`-2`为`MKLDNNLayer`特有的设备ID。 +2. 重写父类Layer的**init**函数,修改`deviceId_`为`-2`,代表这个layer是用于跑在MKL-DNN的环境下。 +3. 
创建`MKLDNNBase`,定义一些除了layer和memory相关的类和函数。 +包括MKL-DNN会用到`MKLDNNStream`和`CPUEngine`,和未来可能还会用到`FPGAEngine`等。 +4. 如果MKL-DNN layer的后面接有cpu device,那么就会使`output_.value`与`extOutVal_`共享内存, +同时数据格式就是`NCHW`,这样下一个cpu device就能拿到正确的数据。 +在有普通的CPU layer时, `extOutVal_`和`extOutGrad_`的格式始终是`NCHW`或者`NC`。 ## References - -1. [Intel Math Kernel Library for Deep Neural Networks (Intel MKL-DNN)](https://github.com/01org/mkl-dnn "Intel MKL-DNN") -2. [原来的方案](https://github.com/PaddlePaddle/Paddle/pull/3096)会引入**nextLayer**的信息。但是在PaddlePaddle中,无论是重构前的layer还是重构后的op,都不会想要知道next layer/op的信息。 -3. MKL-DNN的高性能格式与PaddlePaddle原有的`NCHW`不同(PaddlePaddle中的CUDNN部分使用的也是`NCHW`,所以不存在这个问题),所以需要引入一个转换方法,并且只需要在必要的时候转换这种格式,才能更好的发挥MKL-DNN的性能。 +1. [MKL small library](https://github.com/01org/mkl-dnn#linking-your-application)是[Intel MKL](https://software.intel.com/en-us/mkl)的一个子集。 +主要包括了深度学习相关的数学原语与操作,一般由MKL-DNN在发布[新版本](https://github.com/01org/mkl-dnn/releases)时一起更新。 +2. [MKL-DNN System Requirements](https://github.com/01org/mkl-dnn#system-requirements)。 +目前在PaddlePaddle中,仅会在支持AVX2指令集及以上的机器才使用MKL-DNN。 +3. [原来的方案](https://github.com/PaddlePaddle/Paddle/pull/3096)会引入**nextLayer**的信息。 +但是在PaddlePaddle中,无论是重构前的layer还是重构后的op,都不会想要知道next layer/op的信息。 +4. MKL-DNN的高性能格式与PaddlePaddle原有的`NCHW`不同(PaddlePaddle中的cuDNN部分使用的也是`NCHW`,所以不存在这个问题)。 +所以需要引入一个转换方法,并且只需要在必要的时候转换这种格式,才能更好的发挥MKL-DNN的性能。 diff --git a/doc/design/mkldnn/image/engine.png b/doc/design/mkldnn/image/engine.png new file mode 100644 index 0000000000000000000000000000000000000000..1f5f65c2cc765a514a3ba9e7b7f468e1dc4b0c3b Binary files /dev/null and b/doc/design/mkldnn/image/engine.png differ diff --git a/doc/design/mkldnn/image/gradients.png b/doc/design/mkldnn/image/gradients.png new file mode 100644 index 0000000000000000000000000000000000000000..f031bcf8e4cec14e63075b8b9d2c7bbd9f1b1a3c Binary files /dev/null and b/doc/design/mkldnn/image/gradients.png differ diff --git a/doc/design/mkldnn/image/layers.png b/doc/design/mkldnn/image/layers.png new file mode 100644 index 0000000000000000000000000000000000000000..306f79b7a844610915eb8944128f57d2b7a3065a Binary files /dev/null and b/doc/design/mkldnn/image/layers.png differ diff --git a/doc/design/mkldnn/image/matrix.png b/doc/design/mkldnn/image/matrix.png new file mode 100644 index 0000000000000000000000000000000000000000..c33ce9cf0335e47cc8c1253304d0fe179186e6f2 Binary files /dev/null and b/doc/design/mkldnn/image/matrix.png differ diff --git a/doc/design/mkldnn/image/overview.png b/doc/design/mkldnn/image/overview.png index 84b455c28230703599a2529f014cfbb222138fef..8fb7bbb9dd654bf363d701d0c8cd4a557043d188 100644 Binary files a/doc/design/mkldnn/image/overview.png and b/doc/design/mkldnn/image/overview.png differ diff --git a/doc/design/refactor/distributed_architecture.md b/doc/design/refactor/distributed_architecture.md index ac7e98ccf1aadbb973a4801fde842375cf63448c..d9fe7d6bbb0eeb73fcdca3ee749a4f10bcdda682 100644 --- a/doc/design/refactor/distributed_architecture.md +++ b/doc/design/refactor/distributed_architecture.md @@ -2,106 +2,70 @@ ## Abstract -PaddlePaddle v0.10.0 uses the "trainer-parameter server" -architecture. We run multiple replicated instances of trainers (runs -the same code written by the user) and parameter servers for -distributed training. This architecture served us well, but has some -limitations: +PaddlePaddle version 0.10.0 uses the "trainer-parameter server" architecture. We run multiple instances of trainers (where each trainer runs the same model) and parameter servers for distributed training. 
This architecture serves well, but has a few limitations: -1. Need to write special code to handle tasks which should only be run - by a single trainer. E.g., initializing model and saving model. +1. There is a need to write special code that handles tasks which should only be run on a single trainer. E.g., initializing the model, saving the model, etc. -2. Model parallelism is hard: need to write if-else branches conditioned - on the trainer ID to partition model onto each trainer, and manually - write the inter-model-shard communication code. +2. Model parallelism is hard: it requires writing if-else branches conditioned on the trainer ID to partition the model onto the trainers, and manually writing out the inter-model-shard communication code between different trainers. -3. The user can not directly specify the parameter update rule: need - to modify the parameter server C++ code and compile a new - binary. This adds complication for researchers: A lot of extra - effort is required. Besides, the training job submission program - may not allow running arbitrary binaries. +3. The user cannot directly specify the parameter update rule: this would require modifying the parameter server code and compiling a new binary. This makes things more complicated for researchers: a lot of extra effort is required to make this work. Besides, the training job submission program may not allow running arbitrary binaries. -This design doc discusses PaddlePaddle's new distributed training -architecture that addresses the above limitations. +This design doc discusses PaddlePaddle's new distributed training architecture that addresses the above-mentioned limitations. ## Analysis -We will assume the user writes the trainer program by Python, the same -analysis holds if the trainer program is written in C++. +The assumption is that the user writes the trainer program in either Python or C++. ### Limitation 1 -If we look at the Python code that the user writes, there are two -kinds of functionalities: +There are two basic functionalities in the trainer program: -- The training logic such as load / save model and print log. -- The neural network definition such as the definition of the data - layer, the fully connected layer, the cost function and the +1. The training logic such as loading / saving the model and printing out the logs. +2. The neural network definition such as the definition of the data layer, the fully connected layer, the cost function and the optimizer. -When we training with PaddlePaddle v0.10.0 distributedly, multiple -replicated Python instances are running on different nodes: both the -training logic and the neural network computation is replicated. +When we train using PaddlePaddle v0.10.0 in a distributed fashion, multiple instances of the same Python code are run on different nodes, hence both the +training logic and the neural network computation logic are replicated. -The tasks that should only run once all belong to the training logic, -if we only replicate the neural network computation, but do **not** -replicate the training logic, the limitation could be solved. +The tasks that only need to be run once belong to the training logic. Hence if we only replicate the neural network computation part, and do **not** +replicate the training logic, the limitation mentioned above can be avoided. 
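To make the separation concrete, here is a minimal, purely illustrative sketch. The helper names (`build_network`, `run_once_training_logic`) and the `TRAINER_ID` environment variable are hypothetical and are not part of any PaddlePaddle API; the point is only which half of a v0.10.0-style trainer script is safe to replicate and which half must be guarded to run exactly once:

```python
import os

# Illustrative pseudocode only: hypothetical helpers, not PaddlePaddle APIs.

def build_network():
    # Neural-network definition: data layer, fc layer, cost, optimizer.
    # Replicating this part on every trainer is harmless.
    ...

def run_once_training_logic():
    # Training logic such as initializing and saving the model.
    # In the v0.10.0 architecture every replicated trainer script reaches
    # this code, so it has to be guarded by hand.
    ...

trainer_id = int(os.getenv("TRAINER_ID", "0"))  # hypothetical variable
build_network()                  # replicated on every trainer
if trainer_id == 0:
    run_once_training_logic()    # must run on exactly one trainer
```

The proposed architecture removes the need for such hand-written guards by replicating only the network-definition part.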
### Limitation 2 -Model parallelism means running a single model on multiple nodes by -partitioning the model onto different nodes and managing the -inter-model-shard communications. +Model parallelism means that a single model is partitioned into different components and each node runs one of the component separately. This comes at the extra cost of managing the +inter-model-shard communication between nodes. -PaddlePaddle should be able to modify the nerual network computation -definition to support model parallelism automatically. However, the -computation is only specified in Python code, and PaddlePaddle can not -modify Python code. +PaddlePaddle should ideally be able to modify the neural network computation and figure out the support for model parallelism automatically. However, the +computation is only specified in Python code which sits outside of PaddlePaddle, hence PaddlePaddle can not support the feature in this setup. -Just like compiler uses a intermediate representation (IR) so that -programmer does not need to manually optimize their code in most of -the cases - the compiler will optimize the IR: +Similar to how a compiler uses an intermediate representation (IR) so that the programmer does not need to manually optimize their code for most of the cases, we can have an intermediate representation in PaddlePaddle as well. The compiler optimizes the IR as follows: -We can have our own IR too: PaddlePaddle can support model parallel by -converting the IR so the user no longer need to manually do it in -Python: +PaddlePaddle can support model parallelism by converting the IR so that the user no longer needs to manually perform the computation and operations in the Python component: -The IR for PaddlePaddle after refactor is called `Block`, it specifies -the computation dependency graph and the variables used in the -computation. +The IR for PaddlePaddle after refactoring is called a `Block`, it specifies the computation dependency graph and the variables used in the computation. ### Limitation 3 -The user can not directly specify the parameter update rule for the -parameter server because the parameter server does not use the same -computation definition as the trainer. Instead, the update rule is -baked in the parameter server. The user can not specify the update -rule in the same way of specifying the trainer computation. +The user can not directly specify the parameter update rule for the parameter server in the Python module, since the parameter server does not use the same computation definition as the trainer. Instead, the update rule is baked inside the parameter server. The user can not specify the update rule explicitly. -This could be fixed by making the parameter server run the same -computation definition as the trainer. For a detailed explanation, -please -see -[Design Doc: Operation Graph Based Parameter Server](./dist_train.md) +This could be fixed by making the parameter server run the same computation definition as the trainer (the user's Python module). For a detailed explanation, refer to this document - +[Design Doc: Operation Graph Based Parameter Server](./parameter_server.md) ## Distributed Training Architecture -The new distributed training architecture can address the above -limitations. Below is the illustration: +The revamped distributed training architecture can address the above discussed limitations. 
Below is the illustration of how it does so: -The architecture includes major components: *PaddlePaddle Python*, -*PaddlePaddle converter* and *PaddlePaddle runtime*: +The major components in the architecture are: *PaddlePaddle Python*, *PaddlePaddle converter* and *PaddlePaddle runtime*. ### PaddlePaddle Python -PaddlePaddle Python is the Python library that user's Python trainer -invoke to build the neural network topology, start training, etc. +PaddlePaddle Python is the Python library that user's Python code invokes, to read the data. build the neural network topology, start training, etc. ```Python paddle.init() @@ -117,102 +81,60 @@ for i in range(1000): print cost_val ``` -The code above is a typical Python trainer code, the neural network -topology is built using helper functions such as -`paddle.layer.fc`. The training is done by calling `session.eval` -iteratively. +The above code is what a typical Python trainer code is, the neural network topology is built using the helper functions such as `paddle.layer.fc`. Training is done by calling `session.eval` iteratively. #### session.eval -As shown in the graph, `session.eval` sends the IR and the evaluation -inputs/targets to the PaddlePaddle cluster for evaluation. The -targets can be any variable in the computation graph. When the target -is the `optimizer` variable, the neural network will be optimized -once. When the target is the `cost` variable, `session.eval` returns -the cost value. +As shown in the graph, `session.eval` sends the IR and the evaluation inputs or targets to the PaddlePaddle cluster for evaluation. +The targets can be any variable in the computation graph. When the target is say, the `optimizer` variable, the neural network will be optimized once. When the target is the `cost` variable, `session.eval` returns the cost value. Based on what the target is, an appropriate action is taken. -The Python `session` is a wrapper of the C++ `Session` class. For more -information about `Session`, please -see [Design Doc: Session](./session.md). +The Python `session` is a wrapper of the C++ `Session` class. For more information about `Session`, refer to this document - [Design Doc: Session](./session.md). ### PaddlePaddle Converter -PaddlePaddle converter automatically converts the IR in the request -(IR and evaluation inputs/targets) from PaddlePaddle Python to new -partitioned IRs and dispatch the new IRs and evaluation inputs/targets -to different PaddlePaddle runtimes. Below are the steps: +The PaddlePaddle converter automatically converts the IR in the request (IR and evaluation inputs/targets) from PaddlePaddle Python to partitioned IRs and dispatches the new IRs and evaluation inputs/targets to different PaddlePaddle runtimes. Below are the steps that are followed : -1. Add `feed` OP that feeds the eval inputs, and `fetch` OP that - fetches the eval targets to the IR. +1. Add a `feed` OP that feeds the eval inputs, and a `fetch` OP that fetches the eval targets to the IR. -1. Extract a new computation (sub)graph with `feed` and `fetch` OP as - the boundary. The runtime does not need to run the OP that is not - dependent by the `fetch` OP. +2. Extract a new computation (sub)graph with the `feed` and `fetch` OPs as the boundary. The runtime does not need to run the OP that is not dependent on the `fetch` OP. -1. Optimizes the computation graph. +3. Optimize the computation graph. -1. 
Place the OPs in the graph onto different devices on different - PaddlePaddle runtime according to a placement algorithm and device - constraint specified by the user. +4. Place the OPs in the graph onto different devices on different PaddlePaddle runtime according to a placement algorithm and the device constraints specified by the user. -1. Partition the graph according to runtime boundaries and add `send` / - `recv` OP pair on the runtime boundaries. +5. Partition the graph according to runtime boundaries and add `send` / `recv` OP pair on the runtime boundaries. -1. Dispatch the partitioned graph to different PaddlePaddle runtimes. +6. Dispatch the partitioned graph to different PaddlePaddle runtimes. + +7. PaddlePaddle runtimes with the `fetch` OP reports evaluation results back to the converter, the converter reports the evaluation results back to the PaddlePaddle Python. -1. PaddlePaddle runtimes with the `fetch` OP reports evaluation - results back to the converter, the convert reports the evaluation - results back to the PaddlePaddle Python. - The output IRs will be cached to optimize the conversion latency. #### Placement Algorithm -Our first implementation will only support "trainer-parameter server" -placement: the parameters, initializers, and optimizers are placed on -the PaddlePaddle runtimes with the parameter server role. And -everything else will be placed on the PaddlePaddle runtimes with the -trainer role. This has the same functionality of our -"trainer-parameter server" architecture of PaddlePaddle v0.10.0, but -is more general and flexible. +Our first implementation will only support "trainer-parameter server" placement: the parameters, initializers, and optimizers are all placed on the PaddlePaddle runtimes with the parameter server role. Everything else will be placed on the PaddlePaddle runtimes with the trainer role. This has the same functionality as the "trainer-parameter server" architecture of PaddlePaddle v0.10.0, but is more generic and flexible. -In the future, we will implement the general placement algorithm, -which makes placements according to the input IR, and a model of -device computation time and device communication time. Model -parallelism requires the general placement algorithm. +In the future, a more general placement algorithm should be implemented, which makes placements according to the input IR, and a model of device computation time and device communication time. Model parallelism requires the generic placement algorithm. ### PaddlePaddle Runtime -The PaddlePaddle runtime owns multiple devices (e.g., CPUs, GPUs) and -runs the IR. The runtime does not need to do OP placement since it's -already done by the converter. +The PaddlePaddle runtime owns multiple devices (e.g., CPUs, GPUs) and runs the IR. The runtime does not need to do OP placement since it is already done by the converter. ### Local Training Architecture -The local training architecture will be the same as the distributed -training architecture, the differences are everything runs locally, -and there is just one PaddlePaddle runtime: +The local training architecture will be the same as the distributed training architecture, the difference is that everything runs locally, and there is just one PaddlePaddle runtime: ### Training Data -In PaddlePaddle v0.10.0, training data is typically read -with [data reader](../reader/README.md) from Python. 
This approach is -no longer efficient when training distributedly since the Python -process no longer runs on the same node with the trainer processes, -the Python reader will need to read from the distributed filesystem -(assuming it has the access) and send to the trainers, doubling the -network traffic. - -When doing distributed training, the user can still use Python data -reader: the training data are sent with `session.eval`. However should -be used for debugging purpose only. The users are encouraged to use -the read data OPs. +In PaddlePaddle v0.10.0, training data is typically read with a [data reader](../reader/README.md) from Python. This approach is no longer efficient when training in a distributed fashion since the Python process no longer runs on the same node with the trainer processes. The Python reader will need to read from the distributed filesystem (assuming it has the required access) and send to the trainers, doubling the network traffic. + +When doing distributed training, the user can still use Python data reader: the training data are sent with `session.eval`. However this should be used for debugging purpose only. The users are encouraged to use the read data OPs. ## References: diff --git a/doc/design/releasing_process.md b/doc/design/releasing_process.md index 62ff8f3229bbbb5bc82e4da29259baffc30c2c87..14c081ea84282e52a2e36475c3c0ea755122d154 100644 --- a/doc/design/releasing_process.md +++ b/doc/design/releasing_process.md @@ -5,8 +5,9 @@ PaddlePaddle使用git-flow branching model做分支管理,使用[Semantic Vers PaddlePaddle每次发新的版本,遵循以下流程: 1. 从`develop`分支派生出新的分支,分支名为`release/版本号`。例如,`release/0.10.0` -2. 将新分支的版本打上tag,tag为`版本号rc.Patch号`。第一个tag为`0.10.0rc1`,第二个为`0.10.0rc2`,依次类推。 -3. 对这个版本的提交,做如下几个操作: +1. 将新分支的版本打上tag,tag为`版本号rc.Patch号`。第一个tag为`0.10.0rc1`,第二个为`0.10.0rc2`,依次类推。 +1. 对这个版本的提交,做如下几个操作: + * 修改`python/setup.py.in`中的版本信息,并将`istaged`字段设为`True`。 * 编译这个版本的Docker发行镜像,发布到dockerhub。如果失败,修复Docker编译镜像问题,Patch号加一,返回第二步 * 编译这个版本的Ubuntu Deb包。如果失败,修复Ubuntu Deb包编译问题,Patch号加一,返回第二步。 * 使用Regression Test List作为检查列表,测试Docker镜像/ubuntu安装包的功能正确性 @@ -20,9 +21,9 @@ PaddlePaddle每次发新的版本,遵循以下流程: pip install twine twine upload dist/[package to upload] ``` -4. 第三步完成后,将`release/版本号`分支合入master分支,并删除`release/版本号`分支。将master分支的合入commit打上tag,tag为`版本号`。同时再将`master`分支合入`develop`分支。最后删除`release/版本号`分支。 -5. 编译master分支的Docker发行镜像,发布到dockerhub。编译ubuntu的deb包,发布到github release页面 -6. 协同完成Release Note的书写 +1. 第三步完成后,将`release/版本号`分支合入master分支,并删除`release/版本号`分支。将master分支的合入commit打上tag,tag为`版本号`。同时再将`master`分支合入`develop`分支。最后删除`release/版本号`分支。 +1. 编译master分支的Docker发行镜像,发布到dockerhub。编译ubuntu的deb包,发布到github release页面 +1. 
协同完成Release Note的书写 需要注意的是: @@ -30,7 +31,7 @@ PaddlePaddle每次发新的版本,遵循以下流程: * `release/版本号`分支一旦建立,一般不允许再从`develop`分支合入`release/版本号`。这样保证`release/版本号`分支功能的封闭,方便测试人员测试PaddlePaddle的行为。 * 在`release/版本号`分支存在的时候,如果有bugfix的行为,需要将bugfix的分支同时merge到`master`, `develop`和`release/版本号`这三个分支。 -# PaddlePaddle 分支规范 +## PaddlePaddle 分支规范 PaddlePaddle开发过程使用[git-flow](http://nvie.com/posts/a-successful-git-branching-model/)分支规范,并适应github的特性做了一些区别。 @@ -47,11 +48,11 @@ PaddlePaddle开发过程使用[git-flow](http://nvie.com/posts/a-successful-git- * BugFix分支也是在开发者自己的fork版本库维护,与功能分支不同的是,BugFix分支需要分别给主版本库的`master`、`develop`与可能有的`release/版本号`分支,同时提起`Pull Request`。 -# PaddlePaddle回归测试列表 +## PaddlePaddle回归测试列表 本列表说明PaddlePaddle发版之前需要测试的功能点。 -## PaddlePaddle Book中所有章节 +### PaddlePaddle Book中所有章节 PaddlePaddle每次发版本首先要保证PaddlePaddle Book中所有章节功能的正确性。功能的正确性包括验证PaddlePaddle目前的`paddle_trainer`训练和纯使用`Python`训练模型正确性。 diff --git a/doc/design/support_new_device.md b/doc/design/support_new_device.md new file mode 100644 index 0000000000000000000000000000000000000000..fd23dc211a35fdc9d87bc9233fcf4e90254da748 --- /dev/null +++ b/doc/design/support_new_device.md @@ -0,0 +1,248 @@ +# Design Doc: Supporting new Device/Library + +## Background + +Deep learning has a high demand for computing resources. New high-performance devices and computing libraries are appearing very frequently. Deep learning frameworks have to integrate these high-performance devices and computing libraries flexibly and efficiently. + +On one hand, hardware and computing libraries usually do not have a one-to-one correspondence. For example,Intel CPUs support Eigen and MKL computing libraries while Nvidia GPUs support Eigen and cuDNN computing libraries. We have to implement operator specific kernels for each computing library. + +On the other hand, users usually do not want to care about the low-level hardware and computing libraries when writing a neural network configuration. In Fluid, `Layer` is exposed in `Python`, and `Operator` is exposed in `C++`. Both `Layer` and `Operator` are hardware independent. + +So, how to support a new Device/Library in Fluid becomes a challenge. + + +## Basic: Integrate A New Device/Library + +For a general overview of fluid, please refer to the [overview doc](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/read_source.md). + +There are mainly three parts that we have to consider while integrating a new device/library: + +- Place and DeviceContext: indicates the device id and manages hardware resources + +- Memory and Tensor: malloc/free data on certain device + +- Math Functor and OpKernel: implement computing unit on certain devices/libraries + +### Place and DeviceContext + + +#### Place +Fluid uses class [Place](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/platform/place.h#L55) to represent different devices and computing libraries. There are inheritance relationships between different kinds of `Place`. + +``` + | CPUPlace --> MKLDNNPlace +Place --| CUDAPlace --> CUDNNPlace + | FPGAPlace +``` + +And `Place` is defined as follows: + +``` +typedef boost::variant Place; +``` + +#### DeviceContext + +Fluid uses class [DeviceContext](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/platform/device_context.h#L30) to manage the resources in different hardwares, such as CUDA stream in `CDUADeviceContext`. There are also inheritance relationships between different kinds of `DeviceContext`. 
+ + +``` + /-> CPUDeviceContext --> MKLDeviceContext +DeviceContext ----> CUDADeviceContext --> CUDNNDeviceContext + \-> FPGADeviceContext +``` + +An example of Nvidia GPU is as follows: + +- DeviceContext + + +``` +class DeviceContext { + virtual Place GetPlace() const = 0; +}; +``` + + +- CUDADeviceContext + + +``` +class CUDADeviceContext : public DeviceContext { + Place GetPlace() const override { return place_; } +private: + CUDAPlace place_; + cudaStream_t stream_; + cublasHandle_t cublas_handle_; + std::unique_ptr eigen_device_; // binds with stream_ +}; +``` + +- CUDNNDeviceContext + +``` +class CUDNNDeviceContext : public CUDADeviceContext { + private: + cudnnHandle_t cudnn_handle_; +}; +``` + + +### Memory and Tensor + + +#### memory module + +Fluid provides the following [memory interfaces](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/memory/memory.h#L36): + +``` +template +void* Alloc(Place place, size_t size); + +template +void Free(Place place, void* ptr); + +template +size_t Used(Place place); +``` + +To implementing these interfaces, we have to implement MemoryAllocator for different Devices + + +#### Tensor + +[Tensor](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/tensor.h#L36) holds data with some shape in a specific Place. + +```cpp +class Tensor { + public: + /*! Return a pointer to mutable memory block. */ + template + inline T* data(); + + /** + * @brief Return a pointer to mutable memory block. + * @note If not exist, then allocation. + */ + template + inline T* mutable_data(platform::Place place); + + /** + * @brief Return a pointer to mutable memory block. + * + * @param[in] dims The dimensions of the memory block. + * @param[in] place The place of the memory block. + * + * @note If not exist, then allocation. + */ + template + inline T* mutable_data(DDim dims, platform::Place place); + + /*! Resize the dimensions of the memory block. */ + inline Tensor& Resize(const DDim& dims); + + /*! Return the dimensions of the memory block. */ + inline const DDim& dims() const; + + private: + /*! holds the memory block if allocated. */ + std::shared_ptr holder_; + + /*! points to dimensions of memory block. */ + DDim dim_; +}; +``` + +`Placeholder` is used to delay memory allocation; that is, we can first define a tensor, using `Resize` to configure its shape, and then call `mutuable_data` to allocate the actual memory. + +```cpp +paddle::framework::Tensor t; +paddle::platform::CPUPlace place; +// set size first +t.Resize({2, 3}); +// allocate memory on CPU later +t.mutable_data(place); +``` + + + +### Math Functor and OpKernel + +Fluid implements computing units based on different DeviceContexts. Some computing units are shared between operators. This common part will be put in operators/math directory as basic Functors. + +Let's take [MaxOutFunctor](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/math/maxouting.h#L27) as an example: + +The interface is defined in header file. + +``` +template +class MaxOutFunctor { + public: + void operator()(const DeviceContext& context, const framework::Tensor& input, + framework::Tensor* output, int groups); +}; +``` + +CPU implemention is in .cc file + +``` +template +class MaxOutFunctor { + public: + void operator()(const platform::CPUDeviceContext& context, + const framework::Tensor& input, framework::Tensor* output, + int groups) { + ... 
+ } +}; +``` + +CUDA implemention is in .cu file + +``` +template +class MaxOutFunctor { + public: + void operator()(const platform::CUDADeviceContext& context, + const framework::Tensor& input, framework::Tensor* output, + int groups) { + ... + } +}; +``` + + +We get computing handle from a concrete DeviceContext, and make compution on tensors. + +The implemention of `OpKernel` is similar to math functors, the extra thing we need to do is to register the OpKernel in a global map. + +Fluid provides different register interfaces in op_registry.h + + +Let's take [Crop](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/crop_op.cc#L134) operator as an example: + +In .cc file: + +``` +REGISTER_OP_CPU_KERNEL(crop, ops::CropKernel); +REGISTER_OP_CPU_KERNEL( + crop_grad, ops::CropGradKernel); +``` + +In .cu file: + +``` +REGISTER_OP_CUDA_KERNEL(crop, ops::CropKernel); +REGISTER_OP_CUDA_KERNEL( + crop_grad, ops::CropGradKernel); +``` + + +## Advanced topics: How to switch between different Device/Library + +Generally, we will impelement OpKernel for all Device/Library of an Operator. We can easily train a Convolutional Neural Network in GPU. However, some OpKernel is not sutibale on a specific Device. For example, crf operator can only run on CPU, whereas most other operators can run at GPU. To achieve high performance in such circumstance, we have to switch between different Device/Library. + + +We will discuss how to implement an efficient OpKernel switch policy. + +- TBD diff --git a/doc/faq/build_and_install/index_cn.rst b/doc/faq/build_and_install/index_cn.rst index f1677e216f31d79b53ac29a0afbf6fbb886a0dcd..a2bdeead7841393fdfe90c78e5b91d9e61678a24 100644 --- a/doc/faq/build_and_install/index_cn.rst +++ b/doc/faq/build_and_install/index_cn.rst @@ -14,7 +14,7 @@ $ export CUDA_SO="$(\ls usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')" $ export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') - $ docker run ${CUDA_SO} ${DEVICES} -it paddledev/paddlepaddle:latest-gpu + $ docker run ${CUDA_SO} ${DEVICES} -it paddlepaddle/paddle:latest-gpu 更多关于Docker的安装与使用, 请参考 `PaddlePaddle Docker 文档 `_ 。 diff --git a/doc/getstarted/build_and_install/build_from_source_cn.rst b/doc/getstarted/build_and_install/build_from_source_cn.rst index 55665ac8edfcf20290936fba4c3e410b33e1f3d4..c875c807b8ab2e420dec189ef32d41533f58fa6d 100644 --- a/doc/getstarted/build_and_install/build_from_source_cn.rst +++ b/doc/getstarted/build_and_install/build_from_source_cn.rst @@ -1,4 +1,4 @@ -从源码编译PaddlePaddle +从源码编译 ====================== .. _build_step: @@ -7,8 +7,11 @@ ---------------- PaddlePaddle主要使用 `CMake `_ 以及GCC, G++作为编译工具。 -我们推荐您使用PaddlePaddle编译环境镜像完成编译,这样可以免去单独安装编译依赖的步骤,可选的不同编译环境 +我们推荐您使用PaddlePaddle Docker编译环境镜像完成编译,这样可以免去单独安装编译依赖的步骤,可选的不同编译环境Docker镜像 可以在 `这里 `_ 找到。 + +如果您选择不使用Docker镜像,则需要在本机安装下面章节列出的 `编译依赖`_ 之后才能开始编译的步骤。 + 编译PaddlePaddle,需要执行: .. 
code-block:: bash @@ -16,22 +19,47 @@ PaddlePaddle主要使用 `CMake `_ 以及GCC, G++作为编译 git clone https://github.com/PaddlePaddle/Paddle.git cd Paddle # 如果使用Docker编译环境,执行下面的命令编译CPU-Only的二进制 - docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=OFF" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 bash -x paddle/scripts/docker/build.sh + docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=OFF" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 bash -x /paddle/paddle/scripts/docker/build.sh # 如果不使用Docker编译环境,执行下面的命令 mkdir build cd build cmake -DWITH_GPU=OFF -DWITH_TESTING=OFF .. make - 编译完成后会在build/python/dist目录下生成输出的whl包,可以选在在当前机器安装也可以拷贝到目标机器安装: .. code-block:: bash - pip install python/dist/*.whl + pip install build/python/dist/*.whl -.. _build_step: +.. _run_test: + +执行单元测试 +---------------- + +如果您期望在编译完成后立即执行所有的单元测试,可以按照下面的方法: + +使用Docker的情况下,设置 :code:`RUN_TEST=ON` 和 :code:`WITH_TESTING=ON` 就会在完成编译之后,立即执行单元测试。 +开启 :code:`WITH_GPU=ON` 可以指定同时执行GPU上的单元测试。 + +.. code-block:: bash + + docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=ON" -e "RUN_TEST=ON" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 bash -x /paddle/paddle/scripts/docker/build.sh + +如果不使用Docker,可以执行ctest命令即可: + +.. code-block:: bash + + mkdir build + cd build + cmake -DWITH_GPU=OFF -DWITH_TESTING=OFF .. + make + ctest + # 指定执行其中一个单元测试 test_mul_op + ctest -R test_mul_op + +.. _compile_deps: 编译依赖 ---------------- diff --git a/doc/getstarted/build_and_install/build_from_source_en.rst b/doc/getstarted/build_and_install/build_from_source_en.rst index 9a3ed7dd57137ddf3d6213222c17433822b01dbb..f194f84ce7c961bb8644d7c077a7c71730220ea2 100644 --- a/doc/getstarted/build_and_install/build_from_source_en.rst +++ b/doc/getstarted/build_and_install/build_from_source_en.rst @@ -1,4 +1,4 @@ -Build PaddlePaddle from Sources +Build from Sources ========================== .. _build_step: @@ -9,15 +9,19 @@ How To Build PaddlePaddle mainly uses `CMake `_ and GCC, G++ as compile tools. We recommend you to use our pre-built Docker image to run the build to avoid installing dependencies by yourself. We have several build environment -Docker images `here `_. +Docker images `here `_ . + +If you choose not to use Docker image for your build, you need to install the +below `Compile Dependencies`_ before run the build. + Then run: .. code-block:: bash git clone https://github.com/PaddlePaddle/Paddle.git cd Paddle - # run the following command to build CPU-Only binaries if you are using docker - docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=OFF" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 bash -x paddle/scripts/docker/build.sh + # run the following command to build a CPU-Only binaries if you are using docker + docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=OFF" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 bash -x /paddle/paddle/scripts/docker/build.sh # else run these commands mkdir build cd build @@ -30,9 +34,37 @@ machine or copy it to the target machine. .. code-block:: bash - pip install python/dist/*.whl + pip install build/python/dist/*.whl -.. _build_step: + +.. _run_test: + +Run Tests +---------------- + +If you wish to run the tests, you may follow the below steps: + +When using Docker, set :code:`RUN_TEST=ON` and :code:`WITH_TESTING=ON` will run test immediately after the build. +Set :code:`WITH_GPU=ON` Can also run tests on GPU. + +.. 
code-block:: bash + + docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=ON" -e "RUN_TEST=ON" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 bash -x paddle/paddle/scripts/docker/build.sh + +If you don't use Docker, just run ctest will start the tests: + +.. code-block:: bash + + mkdir build + cd build + cmake -DWITH_GPU=OFF -DWITH_TESTING=ON .. + make + ctest + # run a single test like test_mul_op + ctest -R test_mul_op + + +.. _compile_deps: Compile Dependencies ---------------- @@ -85,7 +117,7 @@ You can add :code:`-D` argument to pass such options, like: "WITH_PYTHON", "Build with integrated Python interpreter", "ON" "WITH_STYLE_CHECK", "Check code style when building", "ON" "WITH_TESTING", "Build unit tests", "ON" - "WITH_DOC", "Build documentaions", "OFF" + "WITH_DOC", "Build documentations", "OFF" "WITH_SWIG_PY", "Build Python SWIG interface for V2 API", "Auto" "WITH_GOLANG", "Build fault-tolerant parameter server written in go", "ON" "WITH_MKL", "Use MKL as BLAS library, else use OpenBLAS", "ON" diff --git a/doc/getstarted/build_and_install/docker_install_cn.rst b/doc/getstarted/build_and_install/docker_install_cn.rst index 07933b2e0bbca809f6c4e90e7ff8f71d1b3304b2..1eb06e4182d40c3be20d71e37b34009905eaf9d6 100644 --- a/doc/getstarted/build_and_install/docker_install_cn.rst +++ b/doc/getstarted/build_and_install/docker_install_cn.rst @@ -1,4 +1,4 @@ -使用Docker安装运行PaddlePaddle +使用Docker安装运行 ================================ 使用Docker安装和运行PaddlePaddle可以无需考虑依赖环境即可运行。并且也可以在Windows的docker中运行。 @@ -114,7 +114,7 @@ PaddlePaddle Book是为用户和开发者制作的一个交互式的Jupyter Note .. code-block:: bash - nvidia-docker run -it -v $PWD:/work paddledev/paddle:latest-gpu /bin/bash + nvidia-docker run -it -v $PWD:/work paddlepaddle/paddle:latest-gpu /bin/bash **注: 如果没有安装nvidia-docker,可以尝试以下的方法,将CUDA库和Linux设备挂载到Docker容器内:** @@ -122,7 +122,7 @@ PaddlePaddle Book是为用户和开发者制作的一个交互式的Jupyter Note export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')" export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') - docker run ${CUDA_SO} ${DEVICES} -it paddledev/paddle:latest-gpu + docker run ${CUDA_SO} ${DEVICES} -it paddlepaddle/paddle:latest-gpu **关于AVX:** diff --git a/doc/getstarted/build_and_install/docker_install_en.rst b/doc/getstarted/build_and_install/docker_install_en.rst index 9b977c9c72e36b4b47cbf56ae848ab83d9895783..5a46c598f2248c7912169a9e77b16851230c1d2e 100644 --- a/doc/getstarted/build_and_install/docker_install_en.rst +++ b/doc/getstarted/build_and_install/docker_install_en.rst @@ -1,4 +1,4 @@ -PaddlePaddle in Docker Containers +Run in Docker Containers ================================= Run PaddlePaddle in Docker container so that you don't need to care about @@ -122,7 +122,7 @@ GPU driver installed before move on. .. code-block:: bash - nvidia-docker run -it -v $PWD:/work paddledev/paddle:latest-gpu /bin/bash + nvidia-docker run -it -v $PWD:/work paddlepaddle/paddle:latest-gpu /bin/bash **NOTE: If you don't have nvidia-docker installed, try the following method to mount CUDA libs and devices into the container.** @@ -130,7 +130,7 @@ GPU driver installed before move on. 
export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')" export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') - docker run ${CUDA_SO} ${DEVICES} -it paddledev/paddle:latest-gpu + docker run ${CUDA_SO} ${DEVICES} -it paddlepaddle/paddle:latest-gpu **About AVX:** diff --git a/doc/getstarted/build_and_install/index_cn.rst b/doc/getstarted/build_and_install/index_cn.rst index 88c5142ddee994ed0c0dc520195311e97f5a549e..c9ba84c842b530162c92713046e64fdf82bd441b 100644 --- a/doc/getstarted/build_and_install/index_cn.rst +++ b/doc/getstarted/build_and_install/index_cn.rst @@ -13,7 +13,7 @@ PaddlePaddle提供pip和Docker的安装方式: pip_install_cn.rst docker_install_cn.rst - + ../../howto/dev/build_cn.md 编译流程 ++++++++ diff --git a/doc/getstarted/build_and_install/index_en.rst b/doc/getstarted/build_and_install/index_en.rst index c8b60d03578ba6a9b73134ec53b440d057e36079..32d66d63dd5b2a30d5de4a088dc80b680830cb84 100644 --- a/doc/getstarted/build_and_install/index_en.rst +++ b/doc/getstarted/build_and_install/index_en.rst @@ -13,6 +13,7 @@ You can choose either pip or Docker to complete your install: pip_install_en.rst docker_install_en.rst + ../../howto/dev/build_en.md Build from Source diff --git a/doc/getstarted/build_and_install/pip_install_cn.rst b/doc/getstarted/build_and_install/pip_install_cn.rst index 41312da48c055826186a560ef9653653e45d1047..b270e2c2f0b0cbfd6fb4b9b0750d207952f84d76 100644 --- a/doc/getstarted/build_and_install/pip_install_cn.rst +++ b/doc/getstarted/build_and_install/pip_install_cn.rst @@ -1,4 +1,4 @@ -使用pip安装PaddlePaddle +使用pip安装 ================================ PaddlePaddle可以使用常用的Python包管理工具 @@ -34,7 +34,7 @@ PaddlePaddle可以使用常用的Python包管理工具 :align: center .. csv-table:: 各个版本最新的whl包 - :header: "版本说明", "cp27-cp27mu", "cp27-cp27mu", "C-API" + :header: "版本说明", "cp27-cp27mu", "cp27-cp27m", "C-API" :widths: 1, 3, 3, 3 "cpu_avx_mkl", "`paddlepaddle-0.10.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.10.0-cp27-cp27m-linux_x86_64.whl `_", "`paddle.tgz `_" @@ -83,4 +83,4 @@ PaddlePaddle发布的安装包会尽量对齐 `manylinux1 `_ 链接中找到。 - 如果系统支持的是 linux_x86_64 而安装包是 manylinux1_x86_64 ,需要升级pip版本到最新; 如果系统支持 manylinux1_x86_64 而安装包(本地)是 linux_x86_64 ,可以重命名这个whl包为 manylinux1_x86_64 再安装。 \ No newline at end of file + 如果系统支持的是 linux_x86_64 而安装包是 manylinux1_x86_64 ,需要升级pip版本到最新; 如果系统支持 manylinux1_x86_64 而安装包(本地)是 linux_x86_64 ,可以重命名这个whl包为 manylinux1_x86_64 再安装。 diff --git a/doc/getstarted/build_and_install/pip_install_en.rst b/doc/getstarted/build_and_install/pip_install_en.rst index 4f295e14baa1465a93b8eef1b3f3b6b47eeea905..70f601a11c610e0a2b5dcc8b73d2c3ea19e195e1 100644 --- a/doc/getstarted/build_and_install/pip_install_en.rst +++ b/doc/getstarted/build_and_install/pip_install_en.rst @@ -1,4 +1,4 @@ -Install PaddlePaddle Using pip +Install Using pip ================================ You can use current widely used Python package management @@ -37,7 +37,7 @@ If the links below shows up the login form, just click "Log in as guest" to star :align: center .. 
csv-table:: whl package of each version - :header: "version", "cp27-cp27mu", "cp27-cp27mu", "C-API" + :header: "version", "cp27-cp27mu", "cp27-cp27m", "C-API" :widths: 1, 3, 3, 3 "cpu_avx_mkl", "`paddlepaddle-0.10.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.10.0-cp27-cp27m-linux_x86_64.whl `_", "`paddle.tgz `_" diff --git a/doc/howto/dev/build_cn.md b/doc/howto/dev/build_cn.md index 0b911f7b7509da4a147c65954acb7e7c38f489da..4a80a5245102fb992f513a749f6a02e1130188af 100644 --- a/doc/howto/dev/build_cn.md +++ b/doc/howto/dev/build_cn.md @@ -1,4 +1,4 @@ -# 编译PaddlePaddle和运行单元测试 +# 用Docker编译和测试PaddlePaddle ## 需要的软硬件 diff --git a/doc/howto/dev/build_en.md b/doc/howto/dev/build_en.md index d0048e3714a5861a503736879d6c0870e5906c95..91c41ef8ce3abdec5d69a9cbcebbc49b17d8f663 100644 --- a/doc/howto/dev/build_en.md +++ b/doc/howto/dev/build_en.md @@ -1,4 +1,4 @@ -# Build PaddlePaddle from Source Code and Run Unit Test +# Build using Docker ## What Developers Need diff --git a/doc/howto/dev/contribute_to_paddle_cn.md b/doc/howto/dev/contribute_to_paddle_cn.md index 699390145226ec2b65fdf5122db187e1d30d669e..3e0bf7b3973079a2063d33b6be4fe8a9dc5c07bb 100644 --- a/doc/howto/dev/contribute_to_paddle_cn.md +++ b/doc/howto/dev/contribute_to_paddle_cn.md @@ -76,18 +76,18 @@ no changes added to commit (use "git add" and/or "git commit -a") ## 构建和测试 -编译 PaddlePaddle 的源码以及生成文档需要多种开发工具。为了方便大家,我们的标准开发流程是把这些工具都装进一个Docker image,称为*开发镜像*,通常名字是 `paddle:dev`。然后所有用 `cmake && make` 的地方(比如IDE配置里)都用 `docker run paddle:dev`来代替。 +编译 PaddlePaddle 的源码以及生成文档需要多种开发工具。为了方便大家,我们的标准开发流程是把这些工具都装进一个Docker image,称为*开发镜像*,通常名字是 `paddle:latest-dev` 或者 `paddle:[version tag]-dev` 如 `paddle:0.11.0-dev`。然后所有用 `cmake && make` 的地方(比如IDE配置里)都用 `docker run paddle:latest-dev`来代替。 如要build这个开发镜像,在源码目录树的根目录中运行: ```bash -➜ docker build -t paddle:dev . +➜ docker build -t paddle:latest-dev . 
``` 随后可以用这个开发镜像开始build PaddlePaddle的源码。比如如果要build一个不依赖GPU,但是支持AVX指令集,并且包括unit tests的PaddlePaddle,可以: ```bash -➜ docker run -v $(pwd):/paddle -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "WITH_TEST=ON" paddle:dev +➜ docker run -v $(pwd):/paddle -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "WITH_TESTING=ON" paddle:latest-dev ``` 这个过程除了编译PaddlePaddle为 `./build/libpaddle.so`,并且输出一个 `./build/paddle.deb`文件之外,还会输出一个 `build/Dockerfile`。我们只需要运行下面命令把编译好的PaddlePaddle打包成一个*生产镜像*(`paddle:prod`): @@ -99,7 +99,7 @@ no changes added to commit (use "git add" and/or "git commit -a") 如果要运行所有的单元测试,可以用如下命令: ```bash -➜ docker run -it -v $(pwd):/paddle paddle:dev bash -c "cd /paddle/build && ctest" +➜ docker run -it -v $(pwd):/paddle paddle:latest-dev bash -c "cd /paddle/build && ctest" ``` 关于构建和测试的更多信息,请参见[这篇文档](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/getstarted/build_and_install/docker_install_cn.rst)。 diff --git a/doc/howto/dev/new_op_cn.md b/doc/howto/dev/new_op_cn.md index 6cfc9536f20e88571a9845a50be0341fe4d9f78b..757a5840bca4c8028e362789ec95bb03d261d2c1 100644 --- a/doc/howto/dev/new_op_cn.md +++ b/doc/howto/dev/new_op_cn.md @@ -1,17 +1,18 @@ # 如何写新的Operator - [概念简介](#概念简介) - - [实现C++类](#实现C++类) - - [定义ProtoMaker类](#定义ProtoMaker类) - - [定义Operator类](#定义Operator类) - - [定义OpKernel类](#定义OpKernel类) - - [注册Operator](#注册Operator) + - [实现C++类](#实现c类) + - [定义ProtoMaker类](#定义protomaker类) + - [定义Operator类](#定义operator类) + - [定义OpKernel类](#定义opkernel类) + - [注册Operator](#注册operator) - [编译](#编译) - - [绑定Python](#绑定Python) + - [绑定Python](#绑定python) - [实现单元测试](#实现单元测试) - - [前向Operator单测](#前向Operator单测) - - [反向Operator单测](#反向Operator单测) + - [前向Operator单测](#前向operator单测) + - [反向Operator单测](#反向operator单测) - [编译和执行](#编译和执行) + - [注意事项](#注意事项) ## 概念简介 @@ -30,8 +31,8 @@ -------------- | :---------------------- OpProtoMake定义 | `.cc`文件,Backward Op不需要定义OpProtoMake Op定义 | `.cc`文件 -Kernel实现 | CPU、GPU共享Kernel实现在`.h`文件中,否则,CPU 实现在`.cc`文件中,GPU 实现在`.cu`文件中。 -注册Op | Op注册实现在`.cc`文件;Kernel注册CPU实现在`.cc`文件中,GPU实现在`.cu`文件中 +Kernel实现 | CPU、CUDA共享Kernel实现在`.h`文件中,否则,CPU 实现在`.cc`文件中,CUDA 实现在`.cu`文件中。 +注册Op | Op注册实现在`.cc`文件;Kernel注册CPU实现在`.cc`文件中,CUDA实现在`.cu`文件中 实现新的op都添加至目录[paddle/operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators)下,文件命名以`*_op.h`(如有) 、 `*_op.cc` 、`*_op.cu`(如有)结尾。**系统会根据文件名自动构建op和其对应的Python扩展。** @@ -43,7 +44,7 @@ Kernel实现 | CPU、GPU共享Kernel实现在`.h`文件中,否则,CPU ## 实现C++类 -### 1. 定义ProtoMaker类 +### 定义ProtoMaker类 矩阵乘法的公式:$Out = X * Y$, 可见该计算由两个输入,一个输出组成。 @@ -100,7 +101,7 @@ The equation is: Out = scale*X - `AddAttr("scale", "...").SetDefault(1.0);` : 增加`scale`系数,作为参数属性,并且设置默认值为1.0。 -### 2. 定义Operator类 +### 定义Operator类 下面的点实现了MulOp的定义: @@ -149,11 +150,11 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs, 通常`OpProtoMaker`和`Op`类的定义写在`.cc`文件中,和下面将要介绍的注册函数一起放在`.cc`中 -### 3. 
定义OpKernel类 +### 定义OpKernel类 `MulKernel`继承自`framework::OpKernel`,带有下面两个模板参数: -- `typename Place`: 表示设备类型,不同设备(CPU、GPU)共享同一个Kernel时,需加该模板参数,不共享则不加,一个不共享的例子是[`OnehotCrossEntropyOpKernel`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43)。 +- `typename DeviceContext`: 表示设备类型,不同设备(CPU、CUDA)共享同一个Kernel时,需加该模板参数,不共享则不加,一个不共享的例子是[`OnehotCrossEntropyOpKernel`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43)。 - `typename T` : 表示数据类型,如`float`, `double`等。 @@ -165,7 +166,7 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs, 下面是 `MulKernel` `Compute`的实现: ```cpp - template + template class MulKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -173,33 +174,32 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs, auto* Y = context.Input("Y"); auto* Z = context.Output("Out"); Z->mutable_data(context.GetPlace()); - auto* device_context = - const_cast(context.device_context_); - math::matmul(*X, false, *Y, false, 1, Z, 0, device_context); + auto& device_context = context.template device_context(); + math::matmul(*X, false, *Y, false, 1, Z, 0, device_context); } }; ``` -需要注意:**不同设备(CPU、GPU)共享一个Op定义,是否则共享同一个`OpKernel`,取决于`Compute`调用的函数是否支持不同设备。** +需要注意:**不同设备(CPU、CUDA)共享一个Op定义,是否则共享同一个`OpKernel`,取决于`Compute`调用的函数是否支持不同设备。** -`MulOp`的CPU、GPU实现共享同一个`Kernel`。`OpKernel`不共享的例子可以参考:[`OnehotCrossEntropyOpKernel`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43)。 +`MulOp`的CPU、CUDA实现共享同一个`Kernel`。`OpKernel`不共享的例子可以参考:[`OnehotCrossEntropyOpKernel`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43)。 -为了使`OpKernel`的计算过程书写更加简单,并且CPU、GPU的代码可以复用,我们通常借助 Eigen unsupported Tensor模块来实现`Compute`接口。关于在PaddlePaddle中如何使用Eigen库,请参考[使用文档](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/dev/use_eigen_cn.md)。 +为了使`OpKernel`的计算过程书写更加简单,并且CPU、CUDA的代码可以复用,我们通常借助 Eigen unsupported Tensor模块来实现`Compute`接口。关于在PaddlePaddle中如何使用Eigen库,请参考[使用文档](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/dev/use_eigen_cn.md)。 到此,前向Op实现完成。接下来,需要在`.cc`文件中注册该op和kernel。 反向Op类的定义,反向OpKernel的定义与前向Op类似,这里不再赘述。**但需注意反向Op没有`ProtoMaker`**。 -### 4. 注册Operator +### 注册Operator - 在`.cc`文件中注册前向、反向Op类,注册CPU Kernel。 ```cpp namespace ops = paddle::operators; REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker, mul_grad, ops::MulOpGrad); - REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel); + REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel); REGISTER_OP_CPU_KERNEL(mul_grad, - ops::MulGradKernel); + ops::MulGradKernel); ``` 在上面的代码中: @@ -209,20 +209,20 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs, - `REGISTER_OP_CPU_KERNEL` :注册`ops::MulKernel`类,并特化模板参数为`paddle::platform::CPUPlace`和`float`类型,同理,注册`ops::MulGradKernel`类。 -- 在 `.cu`文件中注册GPU Kernel。 - - 请注意,如果GPU Kernel的实现基于Eigen unsupported模块,那么在 `.cu`的开始请加上宏定义 `#define EIGEN_USE_GPU`,代码示例如下: +- 在 `.cu`文件中注册CUDA Kernel。 + - 请注意,如果CUDA Kernel的实现基于Eigen unsupported模块,那么在 `.cu`的开始请加上宏定义 `#define EIGEN_USE_GPU`,代码示例如下: ```cpp // if use Eigen unsupported module before include head files - // #define EIGEN_USE_GPU + #define EIGEN_USE_GPU namespace ops = paddle::operators; - REGISTER_OP_GPU_KERNEL(mul, ops::MulKernel); - REGISTER_OP_GPU_KERNEL(mul_grad, - ops::MulGradKernel); + REGISTER_OP_CUDA_KERNEL(mul, ops::MulKernel); + REGISTER_OP_CUDA_KERNEL(mul_grad, + ops::MulGradKernel); ``` -### 5. 
编译 +### 编译 运行下面命令可以进行编译: @@ -236,71 +236,57 @@ make mul_op ## 实现单元测试 -单测包括对比前向Op不同设备(CPU、GPU)的实现、对比反向OP不同设备(CPU、GPU)的实现、反向Op的梯度测试。下面介绍介绍[`MulOp`的单元测试](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/tests/test_mul_op.py)。 +单测包括对比前向Op不同设备(CPU、CUDA)的实现、对比反向OP不同设备(CPU、CUDA)的实现、反向Op的梯度测试。下面介绍介绍[`MulOp`的单元测试](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/tests/test_mul_op.py)。 -### 前向Operator单元测试 +### 前向Operator单测 -前向Op单元测试继承自`unittest.TestCase`,并定义元类`__metaclass__ = OpTestMeta`。各项更加具体的单元测试在`OpTestMeta`里完成。测试前向Operator,需要: +Op单元测试继承自`OpTest`。各项更加具体的单元测试在`TestMulOp`里完成。测试Operator,需要: 1. 在`setUp`函数定义输入、输出,以及相关的属性参数。 2. 生成随机的输入数据。 3. 在Python脚本中实现与前向operator相同的计算逻辑,得到输出值,与operator前向计算的输出进行对比。 +4. 反向计算已经自动集成进测试框架,直接调用相应接口即可。 ```python import unittest import numpy as np - from gradient_checker import GradientChecker, create_op - from op_test_util import OpTestMeta + from op_test import OpTest - class TestMulOp(unittest.TestCase): - __metaclass__ = OpTestMeta + class TestMulOp(OpTest): def setUp(self): - self.type = "mul" + self.op_type = "mul" self.inputs = { 'X': np.random.random((32, 84)).astype("float32"), 'Y': np.random.random((84, 100)).astype("float32") } self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])} - ``` - -上面的代码首先导入依赖的包,下面是对`setUp`函数中操作的重要变量的详细解释: - -- `self.type = "mul" ` : 定义类型,与operator注册时注册的类型一致。 -- `self.inputs` : 定义输入,类型为`numpy.array`,并初始化。 -- `self.outputs` : 定义输出,并在Python脚本中完成与operator同样的计算逻辑,返回Python端的计算结果。 + def test_check_output(self): + self.check_output() -### 反向Operator单元测试 + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5) -反向Op单元测试继承自`GradientChecker`,而`GradientChecker`继承自`unittest.TestCase`,因此,**反向单元测试函数需要以`test_`开头**。 + def test_check_grad_ingore_x(self): + self.check_grad( + ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X")) -```python -class TestMulGradOp(GradientChecker): - def setUp(self): - self.op = create_op("mul") - self.inputs = { - 'X': np.random.random((32, 84)).astype("float32"), - 'Y': np.random.random((84, 100)).astype("float32") - } - - def test_check_grad_normal(self): - # mul op will enlarge the relative error - self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5) + def test_check_grad_ingore_y(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')) + ``` - def test_check_grad_ingore_x(self): - self.check_grad( - ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X")) +上面的代码首先导入依赖的包,下面是对`setUp`函数中操作的重要变量的详细解释: - def test_check_grad_ingore_y(self): - self.check_grad( - ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')) -``` +- `self.op_type = "mul" ` : 定义类型,与operator注册时注册的类型一致。 +- `self.inputs` : 定义输入,类型为`numpy.array`,并初始化。 +- `self.outputs` : 定义输出,并在Python脚本中完成与operator同样的计算逻辑,返回Python端的计算结果。 -下面解释代码中一些关键的地方: +### 反向operator单测 -- 调用`create_op("mul")`创建反向Op对应的前向Op。 +而反向测试中: - `test_check_grad_normal`中调用`check_grad`使用数值法检测梯度正确性和稳定性。 - 第一个参数`["X", "Y"]` : 指定对输入变量`X`、`Y`做梯度检测。 - 第二个参数`"Out"` : 指定前向网络最终的输出目标变量`Out`。 @@ -308,7 +294,7 @@ class TestMulGradOp(GradientChecker): - `test_check_grad_ingore_x`和`test_check_grad_ingore_y`分支用来测试只需要计算一个输入梯度的情况。 -### 编译和执行单元测试 +### 编译和执行 `python/paddle/v2/framework/tests` 目录下新增的 `test_*.py` 单元测试会被自动加入工程进行编译。 @@ -328,5 +314,5 @@ ctest -R test_mul_op - 为每个Op创建单独的`*_op.h`(如有)、`*_op.cc`和`*_op.cu`(如有)。不允许一个文件中包含多个Op,这将会导致编译出错。 - 注册Op时的类型名,需要和该Op的名字一样。即不允许在`A_op.cc`里面,注册`REGISTER_OP(B, ...)`等,这将会导致单元测试出错。 -- 如果Op没有实现GPU 
Kernel,请不要创建空的`*_op.cu`,这将会导致单元测试出错。 +- 如果Op没有实现CUDA Kernel,请不要创建空的`*_op.cu`,这将会导致单元测试出错。 - 如果多个Op依赖一些共用的函数,可以创建非`*_op.*`格式的文件来存放,如`gather.h`文件。 diff --git a/doc/howto/dev/new_op_en.md b/doc/howto/dev/new_op_en.md index 1e88e1f5b4df710f1b69f0305d8d8a2921c4249a..fe86936bc12cc2fb88d653429e250f71a478dfb6 100644 --- a/doc/howto/dev/new_op_en.md +++ b/doc/howto/dev/new_op_en.md @@ -1,8 +1,8 @@ # How to write a new operator - [Background](#background) - - [Implementing C++ Types](#implementing-c++-types) - - [Defining ProtoMaker](#defining-protoMaker) + - [Implementing C++ Types](#implementing-c-types) + - [Defining ProtoMaker](#defining-protomaker) - [Defining Operator](#defining-operator) - [Registering Operator](#registering-operator) - [Compilation](#compilation) @@ -28,8 +28,8 @@ An operator can be differentiated by whether in has kernel methods. An operator -------------- | :---------------------- OpProtoMake definition | `.cc`files, Backward Op does not need an OpProtoMake interface. Op definition | `.cc` files -Kernel implementation | The kernel methods shared between CPU and GPU are defined in `.h` files. CPU-specific kernels live in `.cc` files, while GPU-specific kernels are implemented in `.cu`files. -Registering the Op | Ops are registered in `.cc` files; For Kernel registration, `.cc` files contain the CPU implementation, while `.cu` files contain the GPU implementation. +Kernel implementation | The kernel methods shared between CPU and CUDA are defined in `.h` files. CPU-specific kernels live in `.cc` files, while CUDA-specific kernels are implemented in `.cu`files. +Registering the Op | Ops are registered in `.cc` files; For Kernel registration, `.cc` files contain the CPU implementation, while `.cu` files contain the CUDA implementation. New Operator implementations are added to the list [paddle/operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators), with file names in the format `*_op.h` (if applicable), `*_op.cc`, `*_op.cu` (if applicable).** The system will use the naming scheme to automatically build operators and their corresponding Python extensions. ** @@ -41,7 +41,7 @@ Let's take matrix multiplication operator, [MulOp](https://github.com/PaddlePadd ## Implementing C++ Types -### 1. Defining Class ProtoMaker +### Defining ProtoMaker Matrix Multiplication can be written as $Out = X * Y$, meaning that the operation consists of two inputs and pne output. @@ -98,7 +98,7 @@ There are two changes in this example: - `AddAttr("scale", "...").SetDefault(1.0);` adds `scale`constant as an attribute, and sets the default value to 1.0. -### 2. Defining Operator +### Defining Operator The following code defines the interface for MulOp: @@ -147,11 +147,11 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs, Usually `OpProtoMaker` and `Op`'s type definitions are written in `.cc` files, which also include the registration methods introduced later. -### 3. Defining OpKernel +### Defining OpKernel `MulKernel` inherits `framework::OpKernel`, which includes the following templates: -- `typename Place` denotes device type. When different devices, namely the CPU and the GPU, share the same kernel, this template needs to be added. If they don't share kernels, this must not be added. An example of a non-sharing kernel is [`OnehotCrossEntropyOpKernel`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43). +- `typename DeviceContext` denotes device context type. 
When different devices, namely the CPUDeviceContext and the CUDADeviceContext, share the same kernel, this template needs to be added. If they don't share kernels, this must not be added. An example of a non-sharing kernel is [`OnehotCrossEntropyOpKernel`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43). - `typename T` denotes data type, such as `float` or `double`. @@ -163,7 +163,7 @@ Usually `OpProtoMaker` and `Op`'s type definitions are written in `.cc` files, w `MulKernel`'s implementation of `Compute` is as follows: ```cpp - template + template class MulKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -171,16 +171,15 @@ Usually `OpProtoMaker` and `Op`'s type definitions are written in `.cc` files, w auto* Y = context.Input("Y"); auto* Z = context.Output("Out"); Z->mutable_data(context.GetPlace()); - auto* device_context = - const_cast(context.device_context_); - math::matmul(*X, false, *Y, false, 1, Z, 0, device_context); + auto& device_context = context.template device_context(); + math::matmul(*X, false, *Y, false, 1, Z, 0, device_context); } }; ``` -Note that **different devices (CPU, GPU)share an Op definition; whether or not they share the same `OpKernel` depends on whether `Compute` calls functions that support both devices.** +Note that **different devices (CPU, CUDA)share an Op definition; whether or not they share the same `OpKernel` depends on whether `Compute` calls functions that support both devices.** -`MulOp`'s CPU and GPU share the same `Kernel`. A non-sharing `OpKernel` example can be seen in [`OnehotCrossEntropyOpKernel`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43). +`MulOp`'s CPU and CUDA share the same `Kernel`. A non-sharing `OpKernel` example can be seen in [`OnehotCrossEntropyOpKernel`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/cross_entropy_op.h#L43). To ease the writing of `OpKernel` compute, and for reusing code cross-device, [`Eigen-unsupported Tensor`](https://bitbucket.org/eigen/eigen/src/default/unsupported/Eigen/CXX11/src/Tensor/README.md?fileviewer=file-view-default) module is used to implement `Compute` interface. To learn about how the Eigen library is used in PaddlePaddle, please see [usage document](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/dev/use_eigen_cn.md). @@ -189,16 +188,16 @@ This concludes the forward implementation of an operator. Next its operation and The definition of its corresponding backward operator, if applicable, is similar to that of an forward operator. **Note that a backward operator does not include a `ProtoMaker`**. -### 4. Registering Operator +### Registering Operator - In `.cc` files, register forward and backward operator classes and the CPU kernel. ```cpp namespace ops = paddle::operators; REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker, mul_grad, ops::MulOpGrad); - REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel); + REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel); REGISTER_OP_CPU_KERNEL(mul_grad, - ops::MulGradKernel); + ops::MulGradKernel); ``` In that code block, @@ -208,20 +207,20 @@ The definition of its corresponding backward operator, if applicable, is similar - `REGISTER_OP_CPU_KERNEL` registers `ops::MulKernel` class and specialized template types `paddle::platform::CPUPlace` and `float`, which also registers `ops::MulGradKernel`. 
-- Registering GPU Kernel in `.cu` files - - Note that if GPU Kernel is implemented using the `Eigen unsupported` module, then on top of `.cu`, a macro definition `#define EIGEN_USE_GPU` is needed, such as +- Registering CUDA Kernel in `.cu` files + - Note that if CUDA Kernel is implemented using the `Eigen unsupported` module, then on top of `.cu`, a macro definition `#define EIGEN_USE_GPU` is needed, such as ```cpp // if use Eigen unsupported module before include head files #define EIGEN_USE_GPU namespace ops = paddle::operators; - REGISTER_OP_GPU_KERNEL(mul, ops::MulKernel); - REGISTER_OP_GPU_KERNEL(mul_grad, - ops::MulGradKernel); + REGISTER_OP_CUDA_KERNEL(mul, ops::MulKernel); + REGISTER_OP_CUDA_KERNEL(mul_grad, + ops::MulGradKernel); ``` -### 5. Compilation +### Compilation Run the following commands to compile. @@ -253,62 +252,51 @@ A forward operator unit test inherits `unittest.TestCase` and defines metaclass 2. Generating random input data. -3. Implementing the same computation logic in a Python script: +3. Implementing the same computation logic in a Python script. + +4. Call check gradient function to check the backward operator. ```python import unittest import numpy as np - from gradient_checker import GradientChecker, create_op - from op_test_util import OpTestMeta + from op_test import OpTest - class TestMulOp(unittest.TestCase): - __metaclass__ = OpTestMeta + class TestMulOp(OpTest): def setUp(self): - self.type = "mul" + self.op_type = "mul" self.inputs = { 'X': np.random.random((32, 84)).astype("float32"), 'Y': np.random.random((84, 100)).astype("float32") } self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])} + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5) + + def test_check_grad_ingore_x(self): + self.check_grad( + ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')) ``` Get its output, and compare it with the forward operator's own output. The code above first loads required packages. In addition, we have -- `self.type = "mul" ` defines the type that is identical to what the operator's registered type. +- `self.op_type = "mul" ` defines the type that is identical to what the operator's registered type. - `self.inputs` defines input, with type `numpy.array` and initializes it. - `self.outputs` defines output and completes the same operator computation in the Python script, and returns its result from the Python script. ### Testing Backward Operators -A backward operator unit test inherits `GradientChecker`, which inherits `unittest.TestCase`. As a result, **a backward operator unit test needs to be have the prefix `test_`**. 
- -```python -class TestMulGradOp(GradientChecker): - def setUp(self): - self.op = create_op("mul") - self.inputs = { - 'X': np.random.random((32, 84)).astype("float32"), - 'Y': np.random.random((84, 100)).astype("float32") - } - - def test_check_grad_normal(self): - # mul op will enlarge the relative error - self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5) - - def test_check_grad_ingore_x(self): - self.check_grad( - ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X")) - - def test_check_grad_ingore_y(self): - self.check_grad( - ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')) -``` - -Some key points in the code above include: +Some key points in checking gradient above include: -- `create_op("mul")` creates the backward operator's corresponding forward operator. - `test_normal` calls `check_grad` to validate scaling tests' correctness and stability through numeric methods. - The first variable `["X", "Y"]` appoints `X` and `Y` to be scale tested. - The second variable `"Out"` points to the network's final output target `Out`. @@ -338,5 +326,5 @@ ctest -R test_mul_op - Every `*_op.h` (if applicable), `*_op.cc`, and `*_op.cu` (if applicable) must be created for a unique Op. Compiling will fail if multiple operators are included per file. - The type with which an operator is registered needs to be identical to the Op's name. Registering `REGISTER_OP(B, ...)` in `A_op.cc` will cause unit testing failures. -- If the operator does not implement a GPU kernel, please refrain from creating an empty `*_op.cu` file, or else unit tests will fail. +- If the operator does not implement a CUDA kernel, please refrain from creating an empty `*_op.cu` file, or else unit tests will fail. - If multiple operators rely on some shared methods, a file NOT named `*_op.*` can be created to store them, such as `gather.h`. diff --git a/doc/howto/dev/write_docs_cn.rst b/doc/howto/dev/write_docs_cn.rst index 61f3a223547b352cf7929615cf3682b29b9a738f..1bc947c260d7adb75ee5a2bb10e6b91bc0be2d4c 100644 --- a/doc/howto/dev/write_docs_cn.rst +++ b/doc/howto/dev/write_docs_cn.rst @@ -3,12 +3,64 @@ ################## PaddlePaddle的文档包括英文文档 ``doc`` 和中文文档 ``doc_cn`` 两个部分。文档都是通过 `cmake`_ 驱动 `sphinx`_ 编译生成,生成后的文档分别存储在编译目录的 ``doc`` 和 ``doc_cn`` 两个子目录下。 - +也可以利用PaddlePaddle 工具来编译文档,这个情况下所有的文件会存在整理过的的文件目录 .ppo_workspace/content 下 如何构建文档 ============ -PaddlePaddle的文档构建有两种方式。 +PaddlePaddle的文档构建有三种方式。 + + +使用PaddlePaddle.org工具 +-------------- +这个是目前推荐的使用方法。除了可以自动编译文档,也可以直接在网页预览文档。 + +文件工具是使用Docker,需要在系统里先安装好Docker工具包。Docker安装请参考Docker的官网。安装好Docker之后及可用以下命令启动工具 + +.. code-block:: bash + + mkdir paddlepaddle # Create paddlepaddle working directory + cd paddlepaddle + + # Clone the content repositories + git clone https://github.com/PaddlePaddle/Paddle.git + git clone https://github.com/PaddlePaddle/book.git + git clone https://github.com/PaddlePaddle/models.git + git clone https://github.com/PaddlePaddle/Mobile.git + + # Please specify the working directory through -v + docker run -it -p 8000:8000 -v `pwd`:/var/content paddlepaddle/paddlepaddle.org:latest + +注意: PaddlePaddle.org 会在 -v (volume) 指定的内容存储库运行命令 +之后再用网页连到http://localhost:8000就可以在网页上生成需要的文档 +编译后的文件将被存储在工作目录 /.ppo_workspace/content。 + +如果不想使用 Docker,你还可以通过运行Django框架直接激活工具的服务器。使用下面的命令来运行它。 + +.. 
code-block:: bash + + mkdir paddlepaddle # Create paddlepaddle working directory + cd paddlepaddle + + # Clone the content repositories and PaddlePaddle.org + git clone https://github.com/PaddlePaddle/Paddle.git + git clone https://github.com/PaddlePaddle/book.git + git clone https://github.com/PaddlePaddle/models.git + git clone https://github.com/PaddlePaddle/Mobile.git + git clone https://github.com/PaddlePaddle/PaddlePaddle.org.git + + # Please specify the PaddlePaddle working directory. In the current setting, it should be pwd + export CONTENT_DIR= + export ENV='' + cd PaddlePaddle.org/portal/ + pip install -r requirements.txt + python manage.py runserver + +工具服务器将读取环境变量 CONTENT_DIR 搜索代码库。请指定的PaddlePaddle工作目录给环境变量 CONTENT_DIR。 +之后再用网页连到http://localhost:8000就可以在网页上生成需要的文档。 +编译后的文件将被存储在工作目录 /.ppo_workspace/content。 + +想了解更多PaddlePaddle.org工具的详细信息,可以 `点击这里 `_ 。 使用Docker构建 -------------- @@ -47,17 +99,12 @@ PaddlePaddle的文档构建有两种方式。 PaddlePaddle文档使用 `sphinx`_ 自动生成,用户可以参考sphinx教程进行书写。 -如何更新文档主题 -================ - -PaddlePaddle文档主题在 `TO_YOUR_PADDLE_CLONE_PATH/doc_theme` 文件夹下,包含所有和前端网页设计相关的文件。 - -如何更新doc.paddlepaddle.org +如何更新www.paddlepaddle.org ============================ -更新的文档以PR的形式提交到github中,提交方式参见 `贡献文档 `_ 。 -目前PaddlePaddle的develop分支的文档是自动触发更新的,用户可以分别查看最新的 `中文文档 `_ 和 -`英文文档 `_ 。 +更新的文档以PR的形式提交到github中,提交方式参见 `贡献文档 `_ 。 +目前PaddlePaddle的develop分支的文档是自动触发更新的,用户可以分别查看最新的 `中文文档 `_ 和 +`英文文档 `_ 。 .. _cmake: https://cmake.org/ diff --git a/doc/howto/dev/write_docs_en.rst b/doc/howto/dev/write_docs_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..b3ef07eb1d0012827df8e6a4f27c5fa643649492 --- /dev/null +++ b/doc/howto/dev/write_docs_en.rst @@ -0,0 +1,80 @@ +################## +Contribute Documentation +################## + +PaddlePaddle supports English documentation ``doc`` and Chinese documentation ``doc_cn``. +Both are compiled by `cmake`_ and `sphinx`_ , the compiled documentations will be stored under ``doc`` and ``doc_cn`` directories. +When using the PaddlePaddle.org to compile documentations, the compiled documentations will be stored under a consolidated directory: .ppo_workspace/content + +How to Build Documentations +============ + +We recommend using PaddlePaddle.org tool to build documentation + + +Use PaddlePaddle.org tool +-------------- +This is the recommended method to build documentation. It can compile documentation and preview the documentation in a web browser. + +The tool uses Docker, please install it on your system. Please check Docker official website on how to install Docker. You may use the following commands to activate the tool + +.. code-block:: bash + + mkdir paddlepaddle # Create paddlepaddle working directory + cd paddlepaddle + + # Clone the content repositories. You may only clone the contents you need + git clone https://github.com/PaddlePaddle/Paddle.git + git clone https://github.com/PaddlePaddle/book.git + git clone https://github.com/PaddlePaddle/models.git + git clone https://github.com/PaddlePaddle/Mobile.git + + # Please specify the working directory through -v + docker run -it -p 8000:8000 -v `pwd`:/var/content paddlepaddle/paddlepaddle.org:latest + +Note: PaddlePaddle.org will read the content repos specified in the -v (volume) flag of the docker run command +Use a web browser and navigate to http://localhost:8000, click the buttons to compile the documentation +The compiled documentations will be stored in /.ppo_workspace/content + + +If you don't wish to use Docker, you can also activate the tool through Django. 
Use the following commands to set up + +.. code-block:: bash + + mkdir paddlepaddle # Create paddlepaddle working directory + cd paddlepaddle + + # Clone the content repositories and PaddlePaddle.org + git clone https://github.com/PaddlePaddle/Paddle.git + git clone https://github.com/PaddlePaddle/book.git + git clone https://github.com/PaddlePaddle/models.git + git clone https://github.com/PaddlePaddle/Mobile.git + git clone https://github.com/PaddlePaddle/PaddlePaddle.org.git + + # Please specify the PaddlePaddle working directory. In the current setting, it should be pwd + export CONTENT_DIR= + export ENV='' + cd PaddlePaddle.org/portal/ + pip install -r requirements.txt + python manage.py runserver + +Use a web browser and navigate to http://localhost:8000, then click the buttons to compile the documentation. +The compiled documentation will be stored in /.ppo_workspace/content. + +If you want to learn more about PaddlePaddle.org, please `click here `_ . + +How to Write Documentation +============ + +PaddlePaddle uses `sphinx`_ to compile documentation. Please check the sphinx official website for more details. + + +How to update www.paddlepaddle.org +============================ + +Please create PRs and submit them to GitHub; for details, please check `Contribute Code `_ . +The PaddlePaddle develop branch will update the documentation once the PR is merged. Users may check the latest `Chinese Docs `_ and +`English Docs `_ . + +.. _cmake: https://cmake.org/ +.. _sphinx: http://www.sphinx-doc.org/en/1.4.8/ diff --git a/doc/howto/index_cn.rst b/doc/howto/index_cn.rst index 76d3e0a0092f89005605a23e14e712530112a5ac..991b9e2596a3b499846b963152c838d66260265d 100644 --- a/doc/howto/index_cn.rst +++ b/doc/howto/index_cn.rst @@ -19,7 +19,7 @@ .. toctree:: :maxdepth: 1 - dev/build_cn.rst + dev/contribute_to_paddle_cn.md dev/write_docs_cn.rst 模型配置 diff --git a/doc/howto/index_en.rst b/doc/howto/index_en.rst index 1b6034be4edffd2cbc822018b733b9a3836ea84a..61bf25ccd12eeedffc747fdd4ce84fa4adde07ee 100644 --- a/doc/howto/index_en.rst +++ b/doc/howto/index_en.rst @@ -18,9 +18,9 @@ Development .. toctree:: :maxdepth: 1 - dev/build_en.rst dev/new_layer_en.rst dev/contribute_to_paddle_en.md + dev/write_docs_en.rst Configuration ------------- diff --git a/doc/howto/optimization/cpu_profiling.md b/doc/howto/optimization/cpu_profiling.md index 32d89a7c183d57e0e69039dfb2c78703d9866f7c..1775374cf6e518586c28bbd8e04946c74df7e4c5 100644 --- a/doc/howto/optimization/cpu_profiling.md +++ b/doc/howto/optimization/cpu_profiling.md @@ -1,42 +1,52 @@ -此教程会介绍如何使用Python的cProfile包,与Python库yep,google perftools来运行性能分析(Profiling)与调优。 +This tutorial introduces techniques we use to profile and tune the +CPU performance of PaddlePaddle. We will use Python packages +`cProfile` and `yep`, and Google's `perftools`. -运行性能分析可以让开发人员科学的,有条不紊的对程序进行性能优化。性能分析是性能调优的基础。因为在程序实际运行中,真正的瓶颈可能和程序员开发过程中想象的瓶颈相去甚远。 +Profiling is the process that reveals performance bottlenecks, +which could be very different from what's in the developers' minds. +Performance tuning is done to fix these bottlenecks. Performance optimization +repeats the steps of profiling and tuning alternately. -性能优化的步骤,通常是循环重复若干次『性能分析 --> 寻找瓶颈 ---> 调优瓶颈 --> 性能分析确认调优效果』。其中性能分析是性能调优的至关重要的量化指标。 +PaddlePaddle users program AI applications by calling the Python API, which calls +into `libpaddle.so`, written in C++. In this tutorial, we focus on +the profiling and tuning of -Paddle提供了Python语言绑定。用户使用Python进行神经网络编程,训练,测试。Python解释器通过`pybind`和`swig`调用Paddle的动态链接库,进而调用Paddle C++部分的代码。所以Paddle的性能分析与调优分为两个部分: +1. 
the Python code and +1. the mixture of Python and C++ code. -* Python代码的性能分析 -* Python与C++混合代码的性能分析 +## Profiling the Python Code +### Generate the Performance Profiling File -## Python代码的性能分析 - -### 生成性能分析文件 - -Python标准库中提供了性能分析的工具包,[cProfile](https://docs.python.org/2/library/profile.html)。生成Python性能分析的命令如下: +We can use Python standard +package, [`cProfile`](https://docs.python.org/2/library/profile.html), +to generate Python profiling file. For example: ```bash python -m cProfile -o profile.out main.py ``` -其中`-o`标识了一个输出的文件名,用来存储本次性能分析的结果。如果不指定这个文件,`cProfile`会打印一些统计信息到`stdout`。这不方便我们进行后期处理(进行`sort`, `split`, `cut`等等)。 - -### 查看性能分析文件 +where `main.py` is the program we are going to profile, `-o` specifies +the output file. Without `-o`, `cProfile` would outputs to standard +output. -当main.py运行完毕后,性能分析结果文件`profile.out`就生成出来了。我们可以使用[cprofilev](https://github.com/ymichael/cprofilev)来查看性能分析结果。`cprofilev`是一个Python的第三方库。使用它会开启一个HTTP服务,将性能分析结果以网页的形式展示出来。 +### Look into the Profiling File -使用`pip install cprofilev`安装`cprofilev`工具。安装完成后,使用如下命令开启HTTP服务 +`cProfile` generates `profile.out` after `main.py` completes. We can +use [`cprofilev`](https://github.com/ymichael/cprofilev) to look into +the details: ```bash cprofilev -a 0.0.0.0 -p 3214 -f profile.out main.py ``` -其中`-a`标识HTTP服务绑定的IP。使用`0.0.0.0`允许外网访问这个HTTP服务。`-p`标识HTTP服务的端口。`-f`标识性能分析的结果文件。`main.py`标识被性能分析的源文件。 +where `-a` specifies the HTTP IP, `-p` specifies the port, `-f` +specifies the profiling file, and `main.py` is the source file. -访问对应网址,即可显示性能分析的结果。性能分析结果格式如下: +Open the Web browser and points to the local IP and the specifies +port, we will see the output like the following: -```text +``` ncalls tottime percall cumtime percall filename:lineno(function) 1 0.284 0.284 29.514 29.514 main.py:1() 4696 0.128 0.000 15.748 0.003 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/executor.py:20(run) @@ -44,23 +54,23 @@ cprofilev -a 0.0.0.0 -p 3214 -f profile.out main.py 1 0.144 0.144 6.534 6.534 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/__init__.py:14() ``` -每一列的含义是: +where each line corresponds to Python function, and the meaning of +each column is as follows: -| 列名 | 含义 | +| column | meaning | | --- | --- | -| ncalls | 函数的调用次数 | -| tottime | 函数实际使用的总时间。该时间去除掉本函数调用其他函数的时间 | -| percall | tottime的每次调用平均时间 | -| cumtime | 函数总时间。包含这个函数调用其他函数的时间 | -| percall | cumtime的每次调用平均时间 | -| filename:lineno(function) | 文件名, 行号,函数名 | +| ncalls | the number of calls into a function | +| tottime | the total execution time of the function, not including the + execution time of other functions called by the function | +| percall | tottime divided by ncalls | +| cumtime | the total execution time of the function, including the execution time of other functions being called | +| percall | cumtime divided by ncalls | +| filename:lineno(function) | where the function is defined | +### Identify Performance Bottlenecks -### 寻找性能瓶颈 - -通常`tottime`和`cumtime`是寻找瓶颈的关键指标。这两个指标代表了某一个函数真实的运行时间。 - -将性能分析结果按照tottime排序,效果如下: +Usually, `tottime` and the related `percall` time is what we want to +focus on. 
We can sort above profiling file by tottime: ```text 4696 12.040 0.003 12.040 0.003 {built-in method run} @@ -68,12 +78,15 @@ cprofilev -a 0.0.0.0 -p 3214 -f profile.out main.py 107991 0.676 0.000 1.519 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:219(__init__) 4697 0.626 0.000 2.291 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:428(sync_with_cpp) 1 0.618 0.618 0.618 0.618 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/__init__.py:1() - ``` -可以看到最耗时的函数是C++端的`run`函数。这需要联合我们第二节`Python与C++混合代码的性能分析`来进行调优。而`sync_with_cpp`函数的总共耗时很长,每次调用的耗时也很长。于是我们可以点击`sync_with_cpp`的详细信息,了解其调用关系。 +We can see that the most time-consuming function is the `built-in +method run`, which is a C++ function in `libpaddle.so`. We will +explain how to profile C++ code in the next section. At this +moment, let's look into the third function `sync_with_cpp`, which is a +Python function. We can click it to understand more about it: -```text +``` Called By: Ordered by: internal time @@ -92,72 +105,93 @@ Called: List reduced from 4497 to 2 due to restriction <'sync_with_cpp'> ``` -通常观察热点函数间的调用关系,和对应行的代码,就可以了解到问题代码在哪里。当我们做出性能修正后,再次进行性能分析(profiling)即可检查我们调优后的修正是否能够改善程序的性能。 +The lists of the callers of `sync_with_cpp` might help us understand +how to improve the function definition. +## Profiling Python and C++ Code +### Generate the Profiling File -## Python与C++混合代码的性能分析 +To profile a mixture of Python and C++ code, we can use a Python +package, `yep`, that can work with Google's `perftools`, which is a +commonly-used profiler for C/C++ code. -### 生成性能分析文件 - -C++的性能分析工具非常多。常见的包括`gprof`, `valgrind`, `google-perftools`。但是调试Python中使用的动态链接库与直接调试原始二进制相比增加了很多复杂度。幸而Python的一个第三方库`yep`提供了方便的和`google-perftools`交互的方法。于是这里使用`yep`进行Python与C++混合代码的性能分析 - -使用`yep`前需要安装`google-perftools`与`yep`包。ubuntu下安装命令为 +In Ubuntu systems, we can install `yep` and `perftools` by running the +following commands: ```bash +apt update apt install libgoogle-perftools-dev pip install yep ``` -安装完毕后,我们可以通过 +Then we can run the following command ```bash python -m yep -v main.py ``` -生成性能分析文件。生成的性能分析文件为`main.py.prof`。 +to generate the profiling file. The default filename is +`main.py.prof`. + +Please be aware of the `-v` command line option, which prints the +analysis results after generating the profiling file. By examining the + the print result, we'd know that if we stripped debug +information from `libpaddle.so` at build time. The following hints +help make sure that the analysis results are readable: -命令行中的`-v`指定在生成性能分析文件之后,在命令行显示分析结果。我们可以在命令行中简单的看一下生成效果。因为C++与Python不同,编译时可能会去掉调试信息,运行时也可能因为多线程产生混乱不可读的性能分析结果。为了生成更可读的性能分析结果,可以采取下面几点措施: +1. Use GCC command line option `-g` when building `libpaddle.so` so to + include the debug information. The standard building system of + PaddlePaddle is CMake, so you might want to set + `CMAKE_BUILD_TYPE=RelWithDebInfo`. -1. 编译时指定`-g`生成调试信息。使用cmake的话,可以将CMAKE_BUILD_TYPE指定为`RelWithDebInfo`。 -2. 编译时一定要开启优化。单纯的`Debug`编译性能会和`-O2`或者`-O3`有非常大的差别。`Debug`模式下的性能测试是没有意义的。 -3. 运行性能分析的时候,先从单线程开始,再开启多线程,进而多机。毕竟如果单线程调试更容易。可以设置`OMP_NUM_THREADS=1`这个环境变量关闭openmp优化。 +1. Use GCC command line option `-O2` or `-O3` to generate optimized + binary code. It doesn't make sense to profile `libpaddle.so` + without optimization, because it would anyway run slowly. -### 查看性能分析文件 +1. Profiling the single-threaded binary file before the + multi-threading version, because the latter often generates tangled + profiling analysis result. 
You might want to set environment + variable `OMP_NUM_THREADS=1` to prevents OpenMP from automatically + starting multiple threads. -在运行完性能分析后,会生成性能分析结果文件。我们可以使用[pprof](https://github.com/google/pprof)来显示性能分析结果。注意,这里使用了用`Go`语言重构后的`pprof`,因为这个工具具有web服务界面,且展示效果更好。 +### Examining the Profiling File -安装`pprof`的命令和一般的`Go`程序是一样的,其命令如下: +The tool we used to examine the profiling file generated by +`perftools` is [`pprof`](https://github.com/google/pprof), which +provides a Web-based GUI like `cprofilev`. + +We can rely on the standard Go toolchain to retrieve the source code +of `pprof` and build it: ```bash go get github.com/google/pprof ``` -进而我们可以使用如下命令开启一个HTTP服务: +Then we can use it to profile `main.py.prof` generated in the previous +section: ```bash pprof -http=0.0.0.0:3213 `which python` ./main.py.prof ``` -这行命令中,`-http`指开启HTTP服务。`which python`会产生当前Python二进制的完整路径,进而指定了Python可执行文件的路径。`./main.py.prof`输入了性能分析结果。 - -访问对应的网址,我们可以查看性能分析的结果。结果如下图所示: +Where `-http` specifies the IP and port of the HTTP service. +Directing our Web browser to the service, we would see something like +the following: ![result](./pprof_1.png) +### Identifying the Performance Bottlenecks -### 寻找性能瓶颈 - -与寻找Python代码的性能瓶颈类似,寻找Python与C++混合代码的性能瓶颈也是要看`tottime`和`cumtime`。而`pprof`展示的调用图也可以帮助我们发现性能中的问题。 - -例如下图中, +Similar to how we work with `cprofilev`, we'd focus on `tottime` and +`cumtime`. ![kernel_perf](./pprof_2.png) -在一次训练中,乘法和乘法梯度的计算占用2%-4%左右的计算时间。而`MomentumOp`占用了17%左右的计算时间。显然,`MomentumOp`的性能有问题。 - -在`pprof`中,对于性能的关键路径都做出了红色标记。先检查关键路径的性能问题,再检查其他部分的性能问题,可以更有次序的完成性能的优化。 - -## 总结 +We can see that the execution time of multiplication and the computing +of the gradient of multiplication takes 2% to 4% of the total running +time, and `MomentumOp` takes about 17%. Obviously, we'd want to +optimize `MomentumOp`. -至此,两种性能分析的方式都介绍完毕了。希望通过这两种性能分析的方式,Paddle的开发人员和使用人员可以有次序的,科学的发现和解决性能问题。 +`pprof` would mark performance critical parts of the program in +red. It's a good idea to follow the hints. 
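As a quick complement to the `cProfile`/`cprofilev` workflow above, the same `profile.out` file can also be inspected non-interactively with Python's standard `pstats` module, which is convenient on headless machines where opening the `cprofilev` or `pprof` web UI is awkward. The snippet below is only a minimal sketch: it assumes `profile.out` was produced by `python -m cProfile -o profile.out main.py` as shown earlier, and uses `sync_with_cpp` merely as an example of a suspected hot function.

```python
# Minimal sketch: read the cProfile output without a web UI.
# Assumes profile.out exists (python -m cProfile -o profile.out main.py).
import pstats

stats = pstats.Stats("profile.out")
stats.strip_dirs()                    # shorten long site-packages paths
stats.sort_stats("tottime")           # same key used above to find hot functions
stats.print_stats(10)                 # top 10 functions by self time
stats.print_callers("sync_with_cpp")  # who calls a suspected hot function
```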
diff --git a/doc/howto/optimization/cpu_profiling_cn.md b/doc/howto/optimization/cpu_profiling_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..14eba0e2f34b115f5cd24920b5b1af07ec953d00 --- /dev/null +++ b/doc/howto/optimization/cpu_profiling_cn.md @@ -0,0 +1,155 @@ +此教程会介绍如何使用Python的cProfile包、Python库yep、Google perftools来进行性能分析 (profiling) 与调优(performance tuning)。 + +Profling 指发现性能瓶颈。系统中的瓶颈可能和程序员开发过程中想象的瓶颈相去甚远。Tuning 指消除瓶颈。性能优化的过程通常是不断重复地 profiling 和 tuning。 + +PaddlePaddle 用户一般通过调用 Python API 编写深度学习程序。大部分 Python API 调用用 C++ 写的 libpaddle.so。所以 PaddlePaddle 的性能分析与调优分为两个部分: + +* Python 代码的性能分析 +* Python 与 C++ 混合代码的性能分析 + + +## Python代码的性能分析 + +### 生成性能分析文件 + +Python标准库中提供了性能分析的工具包,[cProfile](https://docs.python.org/2/library/profile.html)。生成Python性能分析的命令如下: + +```bash +python -m cProfile -o profile.out main.py +``` + +其中 `main.py` 是我们要分析的程序,`-o`标识了一个输出的文件名,用来存储本次性能分析的结果。如果不指定这个文件,`cProfile`会打印到标准输出。 + +### 查看性能分析文件 + +`cProfile` 在main.py 运行完毕后输出`profile.out`。我们可以使用[`cprofilev`](https://github.com/ymichael/cprofilev)来查看性能分析结果。`cprofilev`是一个Python的第三方库。使用它会开启一个HTTP服务,将性能分析结果以网页的形式展示出来: + +```bash +cprofilev -a 0.0.0.0 -p 3214 -f profile.out main.py +``` + +其中`-a`标识HTTP服务绑定的IP。使用`0.0.0.0`允许外网访问这个HTTP服务。`-p`标识HTTP服务的端口。`-f`标识性能分析的结果文件。`main.py`标识被性能分析的源文件。 + +用Web浏览器访问对应网址,即可显示性能分析的结果: + +``` + ncalls tottime percall cumtime percall filename:lineno(function) + 1 0.284 0.284 29.514 29.514 main.py:1() + 4696 0.128 0.000 15.748 0.003 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/executor.py:20(run) + 4696 12.040 0.003 12.040 0.003 {built-in method run} + 1 0.144 0.144 6.534 6.534 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/__init__.py:14() +``` + +每一列的含义是: + +| 列名 | 含义 | +| --- | --- | +| ncalls | 函数的调用次数 | +| tottime | 函数实际使用的总时间。该时间去除掉本函数调用其他函数的时间 | +| percall | tottime的每次调用平均时间 | +| cumtime | 函数总时间。包含这个函数调用其他函数的时间 | +| percall | cumtime的每次调用平均时间 | +| filename:lineno(function) | 文件名, 行号,函数名 | + + +### 寻找性能瓶颈 + +通常`tottime`和`cumtime`是寻找瓶颈的关键指标。这两个指标代表了某一个函数真实的运行时间。 + +将性能分析结果按照tottime排序,效果如下: + +```text + 4696 12.040 0.003 12.040 0.003 {built-in method run} + 300005 0.874 0.000 1.681 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/dataset/mnist.py:38(reader) + 107991 0.676 0.000 1.519 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:219(__init__) + 4697 0.626 0.000 2.291 0.000 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:428(sync_with_cpp) + 1 0.618 0.618 0.618 0.618 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/__init__.py:1() +``` + +可以看到最耗时的函数是C++端的`run`函数。这需要联合我们第二节`Python`与`C++`混合代码的性能分析来进行调优。而`sync_with_cpp`函数的总共耗时很长,每次调用的耗时也很长。于是我们可以点击`sync_with_cpp`的详细信息,了解其调用关系。 + +```text +Called By: + + Ordered by: internal time + List reduced from 4497 to 2 due to restriction <'sync_with_cpp'> + +Function was called by... 
+ ncalls tottime cumtime +/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:428(sync_with_cpp) <- 4697 0.626 2.291 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:562(sync_with_cpp) +/home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:562(sync_with_cpp) <- 4696 0.019 2.316 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:487(clone) + 1 0.000 0.001 /home/yuyang/perf_test/.env/lib/python2.7/site-packages/paddle/v2/fluid/framework.py:534(append_backward) + + +Called: + + Ordered by: internal time + List reduced from 4497 to 2 due to restriction <'sync_with_cpp'> +``` + +通常观察热点函数间的调用关系,和对应行的代码,就可以了解到问题代码在哪里。当我们做出性能修正后,再次进行性能分析(profiling)即可检查我们调优后的修正是否能够改善程序的性能。 + + + +## Python与C++混合代码的性能分析 + +### 生成性能分析文件 + +C++的性能分析工具非常多。常见的包括`gprof`, `valgrind`, `google-perftools`。但是调试Python中使用的动态链接库与直接调试原始二进制相比增加了很多复杂度。幸而Python的一个第三方库`yep`提供了方便的和`google-perftools`交互的方法。于是这里使用`yep`进行Python与C++混合代码的性能分析 + +使用`yep`前需要安装`google-perftools`与`yep`包。ubuntu下安装命令为 + +```bash +apt update +apt install libgoogle-perftools-dev +pip install yep +``` + +安装完毕后,我们可以通过 + +```bash +python -m yep -v main.py +``` + +生成性能分析文件。生成的性能分析文件为`main.py.prof`。 + +命令行中的`-v`指定在生成性能分析文件之后,在命令行显示分析结果。我们可以在命令行中简单的看一下生成效果。因为C++与Python不同,编译时可能会去掉调试信息,运行时也可能因为多线程产生混乱不可读的性能分析结果。为了生成更可读的性能分析结果,可以采取下面几点措施: + +1. 编译时指定`-g`生成调试信息。使用cmake的话,可以将CMAKE_BUILD_TYPE指定为`RelWithDebInfo`。 +2. 编译时一定要开启优化。单纯的`Debug`编译性能会和`-O2`或者`-O3`有非常大的差别。`Debug`模式下的性能测试是没有意义的。 +3. 运行性能分析的时候,先从单线程开始,再开启多线程,进而多机。毕竟单线程调试更容易。可以设置`OMP_NUM_THREADS=1`这个环境变量关闭openmp优化。 + +### 查看性能分析文件 + +在运行完性能分析后,会生成性能分析结果文件。我们可以使用[`pprof`](https://github.com/google/pprof)来显示性能分析结果。注意,这里使用了用`Go`语言重构后的`pprof`,因为这个工具具有web服务界面,且展示效果更好。 + +安装`pprof`的命令和一般的`Go`程序是一样的,其命令如下: + +```bash +go get github.com/google/pprof +``` + +进而我们可以使用如下命令开启一个HTTP服务: + +```bash +pprof -http=0.0.0.0:3213 `which python` ./main.py.prof +``` + +这行命令中,`-http`指开启HTTP服务。`which python`会产生当前Python二进制的完整路径,进而指定了Python可执行文件的路径。`./main.py.prof`输入了性能分析结果。 + +访问对应的网址,我们可以查看性能分析的结果。结果如下图所示: + +![result](./pprof_1.png) + + +### 寻找性能瓶颈 + +与寻找Python代码的性能瓶颈类似,寻找Python与C++混合代码的性能瓶颈也是要看`tottime`和`cumtime`。而`pprof`展示的调用图也可以帮助我们发现性能中的问题。 + +例如下图中, + +![kernel_perf](./pprof_2.png) + +在一次训练中,乘法和乘法梯度的计算占用2%-4%左右的计算时间。而`MomentumOp`占用了17%左右的计算时间。显然,`MomentumOp`的性能有问题。 + +在`pprof`中,对于性能的关键路径都做出了红色标记。先检查关键路径的性能问题,再检查其他部分的性能问题,可以更有次序的完成性能的优化。 diff --git a/doc/howto/read_source.md b/doc/howto/read_source.md new file mode 100644 index 0000000000000000000000000000000000000000..383acb0c8251043c3c6bbf309d2e07bf0074cd4f --- /dev/null +++ b/doc/howto/read_source.md @@ -0,0 +1,67 @@ +# PaddlePaddle Fluid Source Code Overview + +Examples: https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle/v2/fluid/tests/book + +Core: https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/framework + +Operator: https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators + +Optimizer: https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/optimizer + +Memory: https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/memory + +# Compile Time + +The following **defines** the NN. The definition goes into this [protocol buffer](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto). 
+ +```python +x = fluid.layers.data(name='x', shape=[13], dtype='float32') +y = fluid.layers.data(name='y', shape=[1], dtype='float32') + +y_predict = fluid.layers.fc(input=x, size=1, act=None) +cost = fluid.layers.square_error_cost(input=y_predict, label=y) +avg_cost = fluid.layers.mean(x=cost) + +sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) +sgd_optimizer.minimize(avg_cost) +``` + +- Variables: `x`, `y`, `y_predict`, `cost` and `avg_cost`. [Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/framework.py#L93) +- Layers: `fluid.layers.data`, `fluid.layers.fc` and `fluid.layers.mean` are layers. [Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/layers.py) + - Every Layer has one or more operators and variables/parameters + - All the operators are defined at [`paddle/operators/`](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators). Other worth-looking files: + - Base class: [`paddle/framework/operator.h`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h) + - Operator Registration: [`paddle/framework/op_registry.h`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/op_registry.h) + - Operator Lookup: [`paddle/framework/op_info.h`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/op_info.h) +- Optimizer: `fluid.optimizer.SGD`. It does the following + - Add backward operators. [[Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/backward.py), [C++](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/backward.cc)] + - Add optimizer operators. [[Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/optimizer.py), [C++](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/optimizer)] + +# Run Time + +The following **evaluates** the NN. Instantiates all the variables, operators. + +```python +place = fluid.CPUPlace() +feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) +exe = fluid.Executor(place) + +# Allocate memory. Initialize Parameter. +exe.run(fluid.default_startup_program()) + +# Allocate memory. Do computation. +exe.run(fluid.default_main_program(), + feed=feeder.feed(data), + fetch_list=[avg_cost]) +``` + +- Place: `place`. one of CPU, GPU or FPGA. [C++](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/platform/place.h) + - The device handle are at [paddle/platform/device_context.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/platform/device_context.h) +- Executor: `fluid.Executor(place)`. [[Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/executor.py), [C++](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/executor.cc)] + - Feeds the data: `feed=feeder.feed(data)` + - Evaluates all the operators + - Fetches the result: `fetch_list=[avg_cost]` +- Other worth looking files: + - Scope: [paddle/framework/scope.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/scope.h). Where all the variables live + - Variable: [paddle/framework/variable.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/variable.h). Where all the data (most likely tensors) live + - Tensor: [paddle/framework/tensor.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/tensor.h). 
Where we allocate memory through [`paddle/memory/`](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/memory) diff --git a/paddle/api/CMakeLists.txt b/paddle/api/CMakeLists.txt index d6b8464100d4497876aa3f6f7cbc666aafae4bfc..cf84568ecdf1227b0d0ed3606a4a9a6e5186af72 100644 --- a/paddle/api/CMakeLists.txt +++ b/paddle/api/CMakeLists.txt @@ -25,8 +25,18 @@ FILE(GLOB PY_PADDLE_PYTHON_FILES ${PADDLE_SOURCE_DIR}/paddle/py_paddle/*.py) SET_SOURCE_FILES_PROPERTIES(Paddle.i PROPERTIES CPLUSPLUS ON) +SET(SWIG_NEED_FLAGS + -ftls-model=global-dynamic + -Wno-parentheses-equality + -Wno-self-assign + -Wno-maybe-uninitialized + -Wno-missing-field-initializers) + FOREACH(flag ${SWIG_NEED_FLAGS}) + safe_set_cxxflag(SWIG_CXX_FLAGS ${flag}) +ENDFOREACH() + SET(CMAKE_SWIG_OUTDIR ${CMAKE_CURRENT_BINARY_DIR}) -SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-parentheses-equality -Wno-missing-field-initializers -Wno-self-assign -ftls-model=global-dynamic") +SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SWIG_CXX_FLAGS}") SET(SWIG_MODULE_swig_paddle_EXTRA_DEPS paddle_parameter diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index d267b14657be2a773d1dacfd9ac3767cddc47415..ebb083c5a477d5be91ef14be74dd9de349d07931 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -4,6 +4,16 @@ else () set(PADDLE_FLOAT_TYPE float) endif() +execute_process( + COMMAND ${GIT_EXECUTABLE} log --pretty=format:%H -1 + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR} + OUTPUT_VARIABLE PADDLE_GIT_COMMIT + RESULT_VARIABLE PADDLE_GIT_COMMIT_RESULT + ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) +if(NOT PADDLE_GIT_COMMIT) + set(PADDLE_GIT_COMMIT "no commit information") +endif() + # config.h used for C-API. It will store Paddle building configuration as a # header. Make user just include PaddleCAPI.h then can get building # configuration without explicitly set -DPADDLE_WITH_DOUBLE when building their diff --git a/paddle/capi/Main.cpp b/paddle/capi/Main.cpp index bb8249a5511c089ec2f2263ff4cc290f0a5a8fce..c038789340033fcf6dcc07a41b033a50e980c965 100644 --- a/paddle/capi/Main.cpp +++ b/paddle/capi/Main.cpp @@ -43,4 +43,11 @@ paddle_error paddle_init(int argc, char** argv) { isInit = true; return kPD_NO_ERROR; } + +paddle_error paddle_init_thread() { + if (FLAGS_use_gpu) { + hl_init(FLAGS_gpu_id); + } + return kPD_NO_ERROR; +} } diff --git a/paddle/capi/Matrix.cpp b/paddle/capi/Matrix.cpp index 30f3a766f0c65187c8f2dd4603e3d26c9b9a6a3d..cbacd1fb71c14f490ff548db714e728772292b4b 100644 --- a/paddle/capi/Matrix.cpp +++ b/paddle/capi/Matrix.cpp @@ -40,7 +40,7 @@ paddle_error paddle_matrix_destroy(paddle_matrix mat) { paddle_error paddle_matrix_set_row(paddle_matrix mat, uint64_t rowID, paddle_real* rowArray) { - if (mat == nullptr) return kPD_NULLPTR; + if (mat == nullptr || rowArray == nullptr) return kPD_NULLPTR; auto ptr = cast(mat); if (ptr->mat == nullptr) return kPD_NULLPTR; if (rowID >= ptr->mat->getHeight()) return kPD_OUT_OF_RANGE; diff --git a/paddle/capi/config.h.in b/paddle/capi/config.h.in index d205307588eb60b2e11accb9f825391f7c1453f2..0ddbd8c753c55ab95a89e1781c64b9416f7344e7 100644 --- a/paddle/capi/config.h.in +++ b/paddle/capi/config.h.in @@ -3,6 +3,9 @@ typedef @PADDLE_FLOAT_TYPE@ paddle_real; +#define __PADDLE_VERSION__ "@PADDLE_VERSION@" +#define __PADDLE_COMMIT__ "@PADDLE_GIT_COMMIT@" + // Since we only support linux and macos in compile, always use clang or // gcc 4.8+. DLL_IMPORT/DLL_EXPORT is as simple as below. 
#define PD_API __attribute__((visibility("default"))) diff --git a/paddle/capi/error.cpp b/paddle/capi/error.cpp new file mode 100644 index 0000000000000000000000000000000000000000..169b65f92104336d9ec12e2a5a6778db25080270 --- /dev/null +++ b/paddle/capi/error.cpp @@ -0,0 +1,32 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "error.h" + +const char* paddle_error_string(paddle_error err) { + switch (err) { + case kPD_NULLPTR: + return "nullptr error"; + case kPD_OUT_OF_RANGE: + return "out of range error"; + case kPD_PROTOBUF_ERROR: + return "protobuf error"; + case kPD_NOT_SUPPORTED: + return "not supported error"; + case kPD_UNDEFINED_ERROR: + return "undefined error"; + default: + return ""; + } +} diff --git a/paddle/capi/error.h b/paddle/capi/error.h index 44d8c2040d1aad698398089baeee6f13c3deeb55..9d9d0ed63a5276c6b9a8747e1ee1fce6872bdc9e 100644 --- a/paddle/capi/error.h +++ b/paddle/capi/error.h @@ -15,6 +15,8 @@ limitations under the License. */ #ifndef __PADDLE_CAPI_ERROR_H__ #define __PADDLE_CAPI_ERROR_H__ +#include "config.h" + /** * Error Type for Paddle API. */ @@ -27,4 +29,9 @@ typedef enum { kPD_UNDEFINED_ERROR = -1, } paddle_error; +/** + * Error string for Paddle API. + */ +PD_API const char* paddle_error_string(paddle_error err); + #endif diff --git a/paddle/capi/examples/model_inference/multi_thread/CMakeLists.txt b/paddle/capi/examples/model_inference/multi_thread/CMakeLists.txt index 98e411ddc02a46034e8f6ceb00657622d998c9f3..2fc8debddedeab6ae982b0df49ec2b73bc0f85f5 100644 --- a/paddle/capi/examples/model_inference/multi_thread/CMakeLists.txt +++ b/paddle/capi/examples/model_inference/multi_thread/CMakeLists.txt @@ -1,8 +1,29 @@ project(multi_thread) cmake_minimum_required(VERSION 2.8) -aux_source_directory(. 
SRC_LIST) -add_executable(${PROJECT_NAME} ${SRC_LIST}) + find_package (Threads) + +if(NOT PADDLE_ROOT) + set(PADDLE_ROOT $ENV{PADDLE_ROOT} CACHE PATH "Paddle Path") +endif() +if(PADDLE_ROOT) + include_directories(${PADDLE_ROOT}/include) + link_directories(${PADDLE_ROOT}/lib) +endif() + +set(CPU_SRCS main.c) +add_executable(${PROJECT_NAME} ${CPU_SRCS}) set_property(TARGET ${PROJECT_NAME} PROPERTY C_STANDARD 99) -target_link_libraries(${PROJECT_NAME} -lpaddle_capi_shared - ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${PROJECT_NAME} + -lpaddle_capi_shared + ${CMAKE_THREAD_LIBS_INIT}) + +find_package(CUDA QUIET) +if(CUDA_FOUND) + set(GPU_SRCS main_gpu.c) + cuda_add_executable(${PROJECT_NAME}_gpu ${GPU_SRCS}) + set_property(TARGET ${PROJECT_NAME}_gpu PROPERTY C_STANDARD 99) + target_link_libraries(${PROJECT_NAME}_gpu + -lpaddle_capi_shared + ${CMAKE_THREAD_LIBS_INIT}) +endif(CUDA_FOUND) diff --git a/paddle/capi/examples/model_inference/multi_thread/main_gpu.c b/paddle/capi/examples/model_inference/multi_thread/main_gpu.c new file mode 100644 index 0000000000000000000000000000000000000000..6fd376e0d1a2fee4f9a0f676b53c6f2891795cab --- /dev/null +++ b/paddle/capi/examples/model_inference/multi_thread/main_gpu.c @@ -0,0 +1,113 @@ +#include +#include +#include +#include "../common/common.h" + +#define CONFIG_BIN "./trainer_config.bin" +#define NUM_THREAD 4 +#define NUM_ITER 1000 + +pthread_mutex_t mutex; + +/* + * @brief It is an simple inference example that runs multi-threads on a GPU. + * Each thread holds it own local gradient_machine but shares the same + * parameters. + * If you want to run on different GPUs, you need to launch + * multi-processes or set trainer_count > 1. + */ +void* thread_main(void* gm_ptr) { + // Initialize the thread environment of Paddle. + CHECK(paddle_init_thread()); + + paddle_gradient_machine machine = (paddle_gradient_machine)(gm_ptr); + // Create input arguments. + paddle_arguments in_args = paddle_arguments_create_none(); + // Create input matrix. + paddle_matrix mat = paddle_matrix_create(/* sample_num */ 1, + /* size */ 784, + /* useGPU */ true); + // Create output arguments. + paddle_arguments out_args = paddle_arguments_create_none(); + // Create output matrix. + paddle_matrix prob = paddle_matrix_create_none(); + + // CPU buffer to cache the input and output. + paddle_real* cpu_input = (paddle_real*)malloc(784 * sizeof(paddle_real)); + paddle_real* cpu_output = (paddle_real*)malloc(10 * sizeof(paddle_real)); + for (int iter = 0; iter < NUM_ITER; ++iter) { + // There is only one input layer of this network. 
+ CHECK(paddle_arguments_resize(in_args, 1)); + CHECK(paddle_arguments_set_value(in_args, 0, mat)); + + for (int i = 0; i < 784; ++i) { + cpu_input[i] = rand() / ((float)RAND_MAX); + } + CHECK(paddle_matrix_set_value(mat, cpu_input)); + + CHECK(paddle_gradient_machine_forward(machine, + in_args, + out_args, + /* isTrain */ false)); + + CHECK(paddle_arguments_get_value(out_args, 0, prob)); + CHECK(paddle_matrix_get_value(prob, cpu_output)); + + pthread_mutex_lock(&mutex); + printf("Prob: "); + for (int i = 0; i < 10; ++i) { + printf("%.2f ", cpu_output[i]); + } + printf("\n"); + pthread_mutex_unlock(&mutex); + } + + CHECK(paddle_matrix_destroy(prob)); + CHECK(paddle_arguments_destroy(out_args)); + CHECK(paddle_matrix_destroy(mat)); + CHECK(paddle_arguments_destroy(in_args)); + CHECK(paddle_gradient_machine_destroy(machine)); + + free(cpu_input); + free(cpu_output); + + return NULL; +} + +int main() { + // Initalize Paddle + char* argv[] = {"--use_gpu=True"}; + CHECK(paddle_init(1, (char**)argv)); + + // Reading config binary file. It is generated by `convert_protobin.sh` + long size; + void* buf = read_config(CONFIG_BIN, &size); + + // Create a gradient machine for inference. + paddle_gradient_machine machine; + CHECK(paddle_gradient_machine_create_for_inference(&machine, buf, (int)size)); + CHECK(paddle_gradient_machine_randomize_param(machine)); + + // Loading parameter. Uncomment the following line and change the directory. + // CHECK(paddle_gradient_machine_load_parameter_from_disk(machine, + // "./some_where_to_params")); + srand(time(0)); + pthread_mutex_init(&mutex, NULL); + + pthread_t threads[NUM_THREAD]; + + for (int i = 0; i < NUM_THREAD; ++i) { + paddle_gradient_machine thread_local_machine; + CHECK(paddle_gradient_machine_create_shared_param( + machine, buf, size, &thread_local_machine)); + pthread_create(&threads[i], NULL, thread_main, thread_local_machine); + } + + for (int i = 0; i < NUM_THREAD; ++i) { + pthread_join(threads[i], NULL); + } + + pthread_mutex_destroy(&mutex); + + return 0; +} diff --git a/paddle/capi/main.h b/paddle/capi/main.h index 893ebcbd58dd24cf835fb2005865c94c9ba2a810..99c4e8428dbaa14d36dc2d36b2a4f16c9ec3e0d1 100644 --- a/paddle/capi/main.h +++ b/paddle/capi/main.h @@ -26,6 +26,13 @@ extern "C" { */ PD_API paddle_error paddle_init(int argc, char** argv); +/** + * Initialize the thread environment of Paddle. + * @note it is requisite for GPU runs but optional for CPU runs. + * For GPU runs, all threads will run on the same GPU devices. + */ +PD_API paddle_error paddle_init_thread(); + #ifdef __cplusplus } #endif diff --git a/paddle/cuda/include/hl_cnn.h b/paddle/cuda/include/hl_cnn.h index 89c1f48edacbe0a4432957fe066481412db7e6e1..88418062927cd0f7714e992cc2495109da45d32f 100644 --- a/paddle/cuda/include/hl_cnn.h +++ b/paddle/cuda/include/hl_cnn.h @@ -116,6 +116,7 @@ extern void hl_maxpool_backward(const int frameCnt, * @param[in] paddingW padding width. * @param[out] tgtData output data. * @param[in] tgtStride stride between output data samples. + * @param[in] excludeMode whether to consider paddings for size. * */ extern void hl_avgpool_forward(const int frameCnt, @@ -132,7 +133,8 @@ extern void hl_avgpool_forward(const int frameCnt, const int paddingH, const int paddingW, real* tgtData, - const int tgtStride); + const int tgtStride, + bool excludeMode); /** * @brief Maximum pool backward. @@ -154,6 +156,7 @@ extern void hl_avgpool_forward(const int frameCnt, * @param[in] scaleB scale. * @param[out] backGrad output grad. 
* @param[in] outStride stride between output data samples. + * @param[in] excludeMode whether to consider paddings for size. * */ extern void hl_avgpool_backward(const int frameCnt, @@ -172,7 +175,8 @@ extern void hl_avgpool_backward(const int frameCnt, real scaleA, real scaleB, real* backGrad, - const int outStride); + const int outStride, + bool excludeMode); extern void hl_maxpool3D_forward(const int frameCnt, const real* inputData, diff --git a/paddle/cuda/include/stub/hl_cnn_stub.h b/paddle/cuda/include/stub/hl_cnn_stub.h index 968ed4840ffb0623b57bd6e6d839973e109394de..706cc59a8e394b109d2b290425f4b5f51d987f28 100644 --- a/paddle/cuda/include/stub/hl_cnn_stub.h +++ b/paddle/cuda/include/stub/hl_cnn_stub.h @@ -68,7 +68,8 @@ inline void hl_avgpool_forward(const int frameCnt, const int paddingH, const int paddingW, real* tgtData, - const int tgtStride) {} + const int tgtStride, + const bool excludeMode) {} inline void hl_avgpool_backward(const int frameCnt, const real* outGrad, @@ -86,7 +87,8 @@ inline void hl_avgpool_backward(const int frameCnt, real scaleA, real scaleB, real* backGrad, - const int outStride) {} + const int outStride, + const bool excludeMode) {} inline void hl_maxpool3D_forward(const int frameCnt, const real* inputData, diff --git a/paddle/cuda/src/hl_cuda_cnn.cu b/paddle/cuda/src/hl_cuda_cnn.cu index 3699b1e8ae9d8f813439eaeaa760c4a9f6e100a0..2d1bc4f6d55fac4b74f4e58d40fe56aa61d19cf9 100644 --- a/paddle/cuda/src/hl_cuda_cnn.cu +++ b/paddle/cuda/src/hl_cuda_cnn.cu @@ -210,7 +210,8 @@ __global__ void KeAvgPoolForward(const int nthreads, const int padH, const int padW, real* tgtData, - const int tgtStride) { + const int tgtStride, + const bool excludeMode) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nthreads) { int pw = index % pooledW; @@ -224,7 +225,8 @@ __global__ void KeAvgPoolForward(const int nthreads, int wend = min(wstart + sizeX, width); hstart = max(hstart, 0); wstart = max(wstart, 0); - int pool_size = (hend - hstart) * (wend - wstart); + int poolSize = + excludeMode ? (hend - hstart) * (wend - wstart) : sizeY * sizeX; real aveval = 0; inputData += (frameNum * channels + c) * height * width; @@ -235,7 +237,7 @@ __global__ void KeAvgPoolForward(const int nthreads, } int tgtIndex = index % (pooledW * pooledH * channels) + frameNum * tgtStride; - tgtData[tgtIndex] = aveval / pool_size; + tgtData[tgtIndex] = aveval / poolSize; } } @@ -253,7 +255,8 @@ void hl_avgpool_forward(const int frameCnt, const int paddingH, const int paddingW, real* tgtData, - const int tgtStride) { + const int tgtStride, + const bool excludeMode) { int num_kernels = pooledH * pooledW * channels * frameCnt; int blocks = (num_kernels + 1024 - 1) / 1024; KeAvgPoolForward<<>>(num_kernels, @@ -270,7 +273,8 @@ void hl_avgpool_forward(const int frameCnt, paddingH, paddingW, tgtData, - tgtStride); + tgtStride, + excludeMode); CHECK_SYNC("hl_avgpool_forward failed"); } @@ -290,7 +294,8 @@ __global__ void KeAvgPoolBackward(const int nthreads, real scaleA, real scaleB, real* tgtGrad, - const int outStride) { + const int outStride, + const bool excludeMode) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nthreads) { int offsetW = index % width + padW; @@ -314,8 +319,9 @@ __global__ void KeAvgPoolBackward(const int nthreads, int wstart = pw * strideW - padW; int wend = min(wstart + sizeX, width); wstart = max(wstart, 0); - int poolsize = (hend - hstart) * (wend - wstart); - gradient += outGrad[ph * pooledW + pw] / poolsize; + int poolSize = + excludeMode ? 
(hend - hstart) * (wend - wstart) : sizeY * sizeX; + gradient += outGrad[ph * pooledW + pw] / poolSize; } } tgtGrad[index] = scaleB * tgtGrad[index] + scaleA * gradient; @@ -338,7 +344,8 @@ void hl_avgpool_backward(const int frameCnt, real scaleA, real scaleB, real* backGrad, - const int outStride) { + const int outStride, + const bool excludeMode) { int num_kernels = height * width * channels * frameCnt; int blocks = (num_kernels + 1024 - 1) / 1024; @@ -358,7 +365,8 @@ void hl_avgpool_backward(const int frameCnt, scaleA, scaleB, backGrad, - outStride); + outStride, + excludeMode); CHECK_SYNC("hl_avgpool_backward failed"); } diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 8fd2906107c490eee129fc10262df28bfa67800b..a17036c6527da3a4a32f021a57542b6b6d68a395 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -27,6 +27,18 @@ namespace paddle { namespace framework { +static std::unordered_set* g_ctrl_flow_ops_ = nullptr; +// Control Flow operators's backward is significantly different from +// computational operators. Hack Code here. +// We should design a better way to backward CtrlFlowOps. +static std::unordered_set& CtrlFlowOps() { + if (g_ctrl_flow_ops_ == nullptr) { + g_ctrl_flow_ops_ = new std::unordered_set{ + "increment", "lod_rank_table", "less_than"}; + } + return *g_ctrl_flow_ops_; +} + static inline std::unique_ptr CreateGradOp( const OperatorBase& op, const std::unordered_set& no_grad_set, std::unordered_map* grad_to_var) { @@ -178,8 +190,9 @@ static std::unique_ptr BackwardRecursive( // collect all the offset for each alias, // insert a sum operator to add all aliases to output insert_position.push_back( - {dup_op.back(), OpRegistry::CreateOp("sum", {{"X", dup_outputs}}, - {{"Out", {name}}}, {})}); + {dup_op.back(), + OpRegistry::CreateOp("sum", {{"X", dup_outputs}}, {{"Out", {name}}}, + AttributeMap{})}); } // make sure the inserted `sum` ops follow the BFS order. @@ -204,7 +217,8 @@ static std::unique_ptr BackwardRecursive( // If part of input gradient of that operator is not calculated, fill // zero variables to that input gradient. net->AppendOp(OpRegistry::CreateOp("fill_zeros_like", {{"X", {prefix}}}, - {{"Y", {grad_input}}}, {})); + {{"Y", {grad_input}}}, + AttributeMap{})); } return false; }); @@ -288,12 +302,24 @@ static void CreateGradVarInBlock( for (size_t op_index = grad_op_start_index; op_index < ops.size(); ++op_index) { std::unordered_set new_vars; + auto& ctrl_flow_ops = CtrlFlowOps(); ForEachVarName(ops[op_index]->Outputs(), [&](const std::string& grad_var_name) { - if (block_desc->HasVar(grad_var_name)) { + if (ctrl_flow_ops.find(ops[op_index]->Type()) != + ctrl_flow_ops.end()) { + if (block_desc->HasVarRecursive(grad_var_name)) { + return false; + } + } else { + if (block_desc->HasVar(grad_var_name)) { + return false; + } + } + if (grad_var_name == framework::kEmptyVarName) { return false; } auto var = block_desc->Var(grad_var_name); + VLOG(10) << "Creating Variable " << grad_var_name; new_vars.insert(var->Name()); auto it = param_name_map.find(grad_var_name); if (it == param_name_map.end()) { @@ -333,14 +359,25 @@ std::vector> MakeOpGrad( // All input gradients of forwarding operator do not need to calculate. const std::vector& inputs = op_desc->InputArgumentNames(); if (AllGradInSet(inputs, *no_grad_vars)) { + VLOG(10) << "Drop operator " << op_desc->Type(); return grad_op_descs; // empty vector } + // All output gradients of forwarding operator do not need to calculate. 
const std::vector& outputs = op_desc->OutputArgumentNames(); + if (AllGradInSet(outputs, *no_grad_vars)) { - for (const std::string& name : inputs) { - no_grad_vars->insert(GradVarName(name)); + VLOG(10) << "Drop operator " << op_desc->Type(); + // FIXME: Hack code here + auto& ctrl_flow_ops = CtrlFlowOps(); + if (ctrl_flow_ops.find(op_desc->Type()) == ctrl_flow_ops.end()) { + // Only computational op need drop input's gradient. + for (const std::string& name : inputs) { + no_grad_vars->insert(GradVarName(name)); + VLOG(10) << " Also drop " << GradVarName(name); + } } + return grad_op_descs; // empty vector } @@ -357,8 +394,9 @@ std::vector> MakeOpGrad( 0, in_name.size() - sizeof(kGradVarSuffix) / sizeof(char) + 1); std::string new_name = prefix + kZeroVarSuffix; desc->Rename(in_name, new_name); - std::unique_ptr fill_zeros_op(new OpDescBind( - "fill_zeros_like", {{"X", {prefix}}}, {{"Y", {new_name}}}, {})); + std::unique_ptr fill_zeros_op( + new OpDescBind("fill_zeros_like", {{"X", {prefix}}}, + {{"Y", {new_name}}}, AttributeMap{})); pending_fill_zeros_ops.push_back(std::move(fill_zeros_op)); } } @@ -448,8 +486,9 @@ std::vector> MakeBlockBackward( sum_op_inputs.emplace_back(new_name); next_g_name = sum_op_inputs.back(); } - std::unique_ptr sum_op(new OpDescBind( - "sum", {{"X", sum_op_inputs}}, {{"Out", {out_name}}}, {})); + std::unique_ptr sum_op( + new OpDescBind("sum", {{"X", sum_op_inputs}}, {{"Out", {out_name}}}, + AttributeMap{})); pending_sum_ops.push_back({dup_op.back(), std::move(sum_op)}); } } diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 2b858f5ea0874d7bf1a9cf38529f5d0d70cca7f2..9fe49881d5b740655432f6e83a7886878ceb17e8 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -106,15 +106,15 @@ class FcOp : public operators::NetOp { FcOp(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const AttributeMap &attrs) : NetOp(type, inputs, outputs, attrs) { - AppendOp(OpRegistry::CreateOp("mul", - {{"X", {Input("X")}}, {"Y", {Input("W")}}}, - {{"Out", {Output("mul_result")}}}, {})); + AppendOp(OpRegistry::CreateOp( + "mul", {{"X", {Input("X")}}, {"Y", {Input("W")}}}, + {{"Out", {Output("mul_result")}}}, AttributeMap{})); auto input_b = Inputs("b"); std::string before_act = "mul_result"; if (input_b.size() != 0) { AppendOp(OpRegistry::CreateOp( "rowwise_add", {{"X", {Output("mul_result")}}, {"b", {input_b[0]}}}, - {{"Out", {Output("add_result")}}}, {})); + {{"Out", {Output("add_result")}}}, AttributeMap{})); before_act = "add_result"; } else { auto out_varname = Output("add_result"); @@ -124,7 +124,7 @@ class FcOp : public operators::NetOp { } AppendOp(OpRegistry::CreateOp("sigmoid", {{"X", {Output(before_act)}}}, - {{"Out", {Output("Out")}}}, {})); + {{"Out", {Output("Out")}}}, AttributeMap{})); CompleteAddOp(false); } }; @@ -278,8 +278,9 @@ REGISTER_OPERATOR(scale, f::NoneOp); REGISTER_OP_CPU_KERNEL(scale, f::NoneKernel); TEST(Backward, simple_op_not_need_grad) { - auto fwd = f::OpRegistry::CreateOp( - "rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {}); + auto fwd = + f::OpRegistry::CreateOp("rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, + {{"Out", {"out"}}}, f::AttributeMap{}); ASSERT_NE(fwd, nullptr); auto gop = f::Backward(*fwd, {"x"}); ASSERT_EQ(gop->Output(f::GradVarName("X")), f::kEmptyVarName); @@ -296,9 +297,10 @@ TEST(Backward, net_fc_backward_normal) { {{"mul_result", {"mul_res"}}, {"add_result", {"add_re"}}, {"Out", {"out"}}}, - {}); + 
f::AttributeMap{}); ASSERT_NE(fwd, nullptr); - std::shared_ptr gop = f::Backward(*fwd, {}); + std::shared_ptr gop = + f::Backward(*fwd, std::unordered_set{}); ASSERT_TRUE(gop->IsNetOp()); auto net = static_cast(gop.get()); @@ -322,9 +324,10 @@ TEST(Backward, net_fc_backward_not_have_b) { {{"mul_result", {"mul_res"}}, {"add_result", {"add_res"}}, {"Out", {"tmp"}}}, - {}); + f::AttributeMap{}); ASSERT_NE(fwd, nullptr); - std::shared_ptr gop = f::Backward(*fwd, {}); + std::shared_ptr gop = + f::Backward(*fwd, std::unordered_set{}); ASSERT_TRUE(gop->IsNetOp()); auto net = static_cast(gop.get()); @@ -346,13 +349,13 @@ TEST(Backward, net_input_of_network_not_need_grad) { {{"mul_result", {"mul_tmp_0"}}, {"add_result", {"add_tmp_0"}}, {"Out", {"hidden0"}}}, - {})); + f::AttributeMap{})); net.AppendOp(f::OpRegistry::CreateOp( "fc", {{"X", {"hidden0"}}, {"W", {"W2"}}, {"b", {"b2"}}}, {{"mul_result", {"mul_tmp_1"}}, {"add_result", {"add_tmp_1"}}, {"Out", {"hidden1"}}}, - {})); + f::AttributeMap{})); net.CompleteAddOp(); auto bwd = Backward(net, {"x"}); // x@GRAD is not need. ASSERT_TRUE(bwd->IsNetOp()); @@ -381,12 +384,13 @@ TEST(Backward, net_input_of_network_not_need_grad) { TEST(Backward, net_shared_weight) { ops::NetOp net; net.AppendOp(f::OpRegistry::CreateOp("mul", {{"X", {"x"}}, {"Y", {"w"}}}, - {{"Out", {"out"}}}, {})); + {{"Out", {"out"}}}, f::AttributeMap{})); net.AppendOp(f::OpRegistry::CreateOp("mul", {{"X", {"out"}}, {"Y", {"w"}}}, - {{"Out", {"FinalOut"}}}, {})); + {{"Out", {"FinalOut"}}}, + f::AttributeMap{})); net.CompleteAddOp(); - auto bwd = f::Backward(net, {}); + auto bwd = f::Backward(net, std::unordered_set{}); ASSERT_TRUE(bwd->IsNetOp()); auto bwd_net = static_cast(bwd.get()); ASSERT_EQ(3UL, bwd_net->ops_.size()); @@ -394,8 +398,9 @@ TEST(Backward, net_shared_weight) { } TEST(Backward, op_all_input_are_not_need) { - auto fwd = f::OpRegistry::CreateOp( - "rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {}); + auto fwd = + f::OpRegistry::CreateOp("rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, + {{"Out", {"out"}}}, f::AttributeMap{}); auto backward = f::Backward(*fwd, {"x", "b"}); ASSERT_TRUE(backward->IsNetOp()); auto net = static_cast(backward.get()); @@ -403,8 +408,9 @@ TEST(Backward, op_all_input_are_not_need) { } TEST(Backward, op_all_output_are_not_need) { - auto fwd = f::OpRegistry::CreateOp( - "rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {}); + auto fwd = + f::OpRegistry::CreateOp("rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, + {{"Out", {"out"}}}, f::AttributeMap{}); auto backward = f::Backward(*fwd, {"out"}); ASSERT_TRUE(backward->IsNetOp()); auto net = static_cast(backward.get()); @@ -412,8 +418,9 @@ TEST(Backward, op_all_output_are_not_need) { } TEST(Backward, op_part_of_output_are_not_need) { - auto fwd = f::OpRegistry::CreateOp("many_output_op", {{"x", {"X"}}}, - {{"y", {"Y"}}, {"z", {"Z"}}}, {}); + auto fwd = + f::OpRegistry::CreateOp("many_output_op", {{"x", {"X"}}}, + {{"y", {"Y"}}, {"z", {"Z"}}}, f::AttributeMap{}); auto backward = f::Backward(*fwd, {"Z"}); ASSERT_TRUE(backward->IsNetOp()); auto net = static_cast(backward.get()); @@ -437,7 +444,7 @@ TEST(Backward, op_part_of_output_are_not_need) { TEST(Backward, op_part_of_input_are_not_need) { auto fwd = f::OpRegistry::CreateOp("mul", {{"X", {"a"}}, {"Y", {"b"}}}, - {{"Out", {"out"}}}, {}); + {{"Out", {"out"}}}, f::AttributeMap{}); auto backward = f::Backward(*fwd, {"a"}); auto &grad_mul = *backward; ASSERT_EQ(grad_mul.Type(), "mul_grad"); @@ -458,19 +465,19 @@ TEST(Backward, 
linear_net_intermediate_variable_has_no_grad) { {{"mul_result", {"mul_out1"}}, {"add_result", {"add_out1"}}, {"Out", {"out1"}}}, - {})); + f::AttributeMap{})); net.AppendOp(f::OpRegistry::CreateOp( "fc", {{"X", {"out1"}}, {"W", {"w2"}}, {"b", {"b2"}}}, {{"mul_result", {"mul_out2"}}, {"add_result", {"tmp_out2"}}, {"Out", {"out2"}}}, - {})); + f::AttributeMap{})); net.AppendOp(f::OpRegistry::CreateOp( "fc", {{"X", {"out2"}}, {"W", {"w3"}}, {"b", {"b3"}}}, {{"mul_result", {"mul_out3"}}, {"add_result", {"tmp_out3"}}, {"Out", {"out3"}}}, - {})); + f::AttributeMap{})); net.CompleteAddOp(); auto backward = f::Backward(net, {"mul_out2", "tmp_out2", "out2"}); @@ -509,7 +516,8 @@ TEST(Backward, simple_single_op) { auto target = f::VarDescBind("out"); target.SetShape({1}); - auto var_to_grad = AppendBackward(program, target, {}); + auto var_to_grad = + AppendBackward(program, target, std::unordered_set{}); ASSERT_EQ(block->AllOps().size(), 3UL); f::OpDescBind *fill_op = block->AllOps()[1]; @@ -546,7 +554,7 @@ TEST(Backward, default_attribute) { auto target = f::VarDescBind("out"); target.SetShape({1}); - AppendBackward(program, target, {}); + AppendBackward(program, target, std::unordered_set{}); ASSERT_EQ(block->AllOps().size(), 3UL); EXPECT_EQ(boost::get(op->GetAttr("x_num_col_dims")), 1); @@ -585,7 +593,8 @@ TEST(Backward, simple_mult_op) { auto target = f::VarDescBind("out3"); target.SetShape({1}); size_t forward_len = block->AllOps().size(); - auto var_to_grad = AppendBackward(program, target, {}); + auto var_to_grad = + AppendBackward(program, target, std::unordered_set{}); ASSERT_EQ(block->AllOps().size(), 6UL + 1); f::OpDescBind *fill_op = block->AllOps()[forward_len]; @@ -817,7 +826,8 @@ TEST(Backward, shared_var) { auto target = f::VarDescBind("out3"); target.SetShape({1}); size_t forward_len = block->AllOps().size(); - auto var_to_grad = AppendBackward(program, target, {}); + auto var_to_grad = + AppendBackward(program, target, std::unordered_set{}); ASSERT_EQ(block->AllOps().size(), 8UL); f::OpDescBind *fill_op = block->AllOps()[forward_len]; diff --git a/paddle/framework/block_desc.cc b/paddle/framework/block_desc.cc index 11764810e1d40e5e6eb3cd0d8e9b4b63a79855b4..6a7a07d5cf471a32822cdccf5c616d8748fd1bd7 100644 --- a/paddle/framework/block_desc.cc +++ b/paddle/framework/block_desc.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/framework/block_desc.h" +#include "paddle/framework/operator.h" #include "paddle/framework/program_desc.h" namespace paddle { @@ -42,6 +43,8 @@ bool BlockDescBind::HasVar(const std::string &name) const { } VarDescBind *BlockDescBind::FindVarRecursive(const std::string &name) const { + if (name == kEmptyVarName) return nullptr; + auto it = vars_.find(name); if (it == vars_.end()) { return Parent() == kNoneBlockIndex ? 
nullptr diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 2ffb5b7dbb27b561092856eac0de23d0c3788f75..83aa927c293676c3800ed945c175e4f3dc5629d6 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -97,6 +97,10 @@ void Executor::Run(const ProgramDescBind& pdesc, Scope* scope, int block_id, if (create_local_scope) { local_scope = &scope->NewScope(); for (auto& var : block.AllVars()) { + if (var->Name() == framework::kEmptyVarName) { + continue; + } + if (var->Persistable()) { auto* ptr = scope->Var(var->Name()); CreateTensor(ptr, var->GetType()); diff --git a/paddle/framework/lod_tensor.cc b/paddle/framework/lod_tensor.cc index a0f2906c749054c1ff9f624e47df432ec2bd6ac8..fdf6de4babff3bb3c253aaf516636882237e6faf 100644 --- a/paddle/framework/lod_tensor.cc +++ b/paddle/framework/lod_tensor.cc @@ -13,6 +13,8 @@ limitations under the License. */ #include "paddle/framework/lod_tensor.h" +#include "paddle/framework/data_type.h" +#include "paddle/framework/framework.pb.h" #include "paddle/memory/memcpy.h" #include "paddle/memory/memory.h" @@ -27,11 +29,11 @@ namespace paddle { namespace framework { -std::ostream& operator<<(std::ostream& os, const LoD& lod) { +std::ostream &operator<<(std::ostream &os, const LoD &lod) { os << "{"; - for (auto& v : lod) { + for (auto &v : lod) { os << "{"; - for (auto& i : v) { + for (auto &i : v) { os << i << ","; } os << "}"; @@ -41,7 +43,7 @@ std::ostream& operator<<(std::ostream& os, const LoD& lod) { return os; } -LoD SliceLevels(const LoD& in, size_t level_begin, size_t level_end) { +LoD SliceLevels(const LoD &in, size_t level_begin, size_t level_end) { LoD new_lod; new_lod.reserve(level_end - level_begin); for (size_t i = level_begin; i < level_end; i++) { @@ -53,7 +55,7 @@ LoD SliceLevels(const LoD& in, size_t level_begin, size_t level_end) { return new_lod; } -LoD SliceInLevel(const LoD& in, size_t level, size_t elem_begin, +LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin, size_t elem_end) { PADDLE_ENFORCE_LT(level, in.size()); PADDLE_ENFORCE_LT(elem_end, in[level].size()); @@ -64,9 +66,9 @@ LoD SliceInLevel(const LoD& in, size_t level, size_t elem_begin, res[0].assign(in[level].begin() + elem_begin, in[level].begin() + elem_end + 1); for (size_t lvl = 1; lvl < res.size(); lvl++) { - const auto& in_level = in[level + lvl]; - const auto& above_level = res[lvl - 1]; - auto& out_level = res[lvl]; + const auto &in_level = in[level + lvl]; + const auto &above_level = res[lvl - 1]; + auto &out_level = res[lvl]; out_level.assign(in_level.begin() + above_level.front(), in_level.begin() + above_level.back() + 1); } @@ -74,33 +76,33 @@ LoD SliceInLevel(const LoD& in, size_t level, size_t elem_begin, // to make the first offset equals 0, all the elements minus the first // element size_t front = res[lvl].front(); - for (auto& ele : res[lvl]) { + for (auto &ele : res[lvl]) { ele -= front; } } return res; } -LoD ToAbsOffset(const LoD& in) { +LoD ToAbsOffset(const LoD &in) { // the lowest level stores relative offsets if (in.empty() || in.size() == 1) return in; LoD result = in; for (int level = result.size() - 2; level >= 0; level--) { - for (auto& ele : result[level]) { + for (auto &ele : result[level]) { ele = result[level + 1][ele]; } } return result; } -bool operator==(const LoD& a, const LoD& b) { +bool operator==(const LoD &a, const LoD &b) { if (a.size() != b.size()) { return false; } for (size_t i = 0; i < a.size(); i++) { - const auto& a_level = a[i]; - const auto& b_level = b[i]; + const auto 
&a_level = a[i]; + const auto &b_level = b[i]; if (a_level.size() != b_level.size()) { return false; } @@ -151,7 +153,7 @@ void LoDTensor::ShrinkInLevel(size_t level, size_t elem_begin, } using LoDAndOffset = std::pair>; -LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD& lod, size_t start_idx, +LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD &lod, size_t start_idx, size_t end_idx, size_t start_level) { LoD sub_lod; @@ -170,7 +172,7 @@ LoDAndOffset GetSubLoDAndAbsoluteOffset(const LoD& lod, size_t start_idx, return LoDAndOffset{sub_lod, {start_idx, end_idx}}; } -void AppendLoD(LoD* lod, const LoD& lod_length) { +void AppendLoD(LoD *lod, const LoD &lod_length) { PADDLE_ENFORCE( lod->empty() || lod->size() == lod_length.size(), "The lod_length should has the same size with the appended lod."); @@ -178,12 +180,139 @@ void AppendLoD(LoD* lod, const LoD& lod_length) { *lod = LoD(lod_length.size(), std::vector({0})); } for (size_t i = 0; i < lod->size(); ++i) { - auto& level = (*lod)[i]; + auto &level = (*lod)[i]; for (size_t len : lod_length[i]) { level.push_back(level.back() + len); } } } +void SerializeToStream(std::ostream &os, const LoDTensor &tensor, + const platform::DeviceContext &dev_ctx) { + // TODO(typhoonzero): serialize to ostream + { // the 1st field, uint32_t version + constexpr uint32_t version = 0; + os.write(reinterpret_cast(&version), sizeof(version)); + } + { // the 2nd field, tensor description + // int32_t size + // void* protobuf message + framework::TensorDesc desc; + desc.set_data_type(framework::ToDataType(tensor.type())); + auto dims = framework::vectorize(tensor.dims()); + auto *pb_dims = desc.mutable_dims(); + pb_dims->Resize(static_cast(dims.size()), 0); + std::copy(dims.begin(), dims.end(), pb_dims->begin()); + int32_t size = desc.ByteSize(); + os.write(reinterpret_cast(&size), sizeof(size)); + auto out = desc.SerializeAsString(); + os.write(out.data(), size); + } + { // the 3rd field, tensor data + uint64_t size = tensor.memory_size(); + auto *data_ptr = tensor.data(); + PADDLE_ENFORCE(size < std::numeric_limits::max(), + "Index overflow when writing tensor"); + if (platform::is_gpu_place(tensor.place())) { +#ifdef PADDLE_WITH_CUDA + constexpr size_t kBufSize = 1024 * 1024 * 64; // 64MB + std::unique_ptr buf(new char[kBufSize]); + auto &gpu_dev_ctx = + static_cast(dev_ctx); + platform::CPUPlace cpu; + uintptr_t data = reinterpret_cast(data_ptr); + while (size != 0) { + size_t size_to_write = std::min(kBufSize, static_cast(size)); + memory::Copy(cpu, buf.get(), + boost::get(tensor.place()), + reinterpret_cast(data), size_to_write, + gpu_dev_ctx.stream()); + gpu_dev_ctx.Wait(); + os.write(buf.get(), size_to_write); + data += size_to_write; + size -= size_to_write; + } +#else + PADDLE_THROW("Unexpected branch"); +#endif + } else { + os.write(static_cast(data_ptr), + static_cast(size)); + } + } + { // the 4th field, lod information + // uint64_t lod_level + // uint64_t lod_level_1 size in byte. + // int* lod_level_1 data + // ... 
+ auto lod = tensor.lod(); + uint64_t size = lod.size(); + os.write(reinterpret_cast(&size), sizeof(size)); + + for (auto &each : lod) { + size = each.size() * sizeof(framework::LoD::value_type::value_type); + os.write(reinterpret_cast(&size), sizeof(size)); + os.write(reinterpret_cast(each.data()), + static_cast(size)); + } + } +} + +void DeserializeFromStream(std::istream &is, LoDTensor *tensor) { + uint32_t version; + is.read(reinterpret_cast(&version), sizeof(version)); + PADDLE_ENFORCE_EQ(version, 0U, "Only version 0 is supported"); + framework::TensorDesc desc; + { // int32_t size + // proto buffer + int32_t size; + is.read(reinterpret_cast(&size), sizeof(size)); + std::unique_ptr buf(new char[size]); + is.read(reinterpret_cast(buf.get()), size); + PADDLE_ENFORCE(desc.ParseFromArray(buf.get(), size), + "Cannot parse tensor desc"); + } + { // read tensor + std::vector dims; + dims.reserve(static_cast(desc.dims().size())); + std::copy(desc.dims().begin(), desc.dims().end(), std::back_inserter(dims)); + tensor->Resize(framework::make_ddim(dims)); + + void *buf; + platform::Place cpu = platform::CPUPlace(); + switch (desc.data_type()) { + case framework::FP32: + buf = tensor->mutable_data(cpu); + break; + case framework::FP64: + buf = tensor->mutable_data(cpu); + break; + case framework::INT32: + buf = tensor->mutable_data(cpu); + break; + case framework::INT64: + buf = tensor->mutable_data(cpu); + break; + default: + PADDLE_THROW("DataType %d not supported", desc.data_type()); + } + is.read(static_cast(buf), tensor->memory_size()); + } + { // read lod + uint64_t lod_level; + is.read(reinterpret_cast(&lod_level), sizeof(lod_level)); + auto &lod = *tensor->mutable_lod(); + lod.resize(lod_level); + for (uint64_t i = 0; i < lod_level; ++i) { + uint64_t size; + is.read(reinterpret_cast(&size), sizeof(size)); + std::vector tmp(size / sizeof(size_t)); + is.read(reinterpret_cast(tmp.data()), + static_cast(size)); + lod[i] = tmp; + } + } +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h index 21bdfca1111f16d5b8ea71be004ddb8da12fd03c..9411c96aea4c10ebf921cc3e3b442769c8acbefa 100644 --- a/paddle/framework/lod_tensor.h +++ b/paddle/framework/lod_tensor.h @@ -189,5 +189,14 @@ std::pair> GetSubLoDAndAbsoluteOffset( void AppendLoD(LoD* lod, const LoD& lod_length); +/* + * Serialize/Desiralize LoDTensor to std::ostream + * You can pass ofstream or ostringstream to serilize to file + * or to a in memory string. GPU tensor will be copied to CPU. 
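+ *
+ * For reference, the stream layout (summarized from the SerializeToStream
+ * implementation in lod_tensor.cc) is:
+ *   1. uint32_t version (currently 0)
+ *   2. int32_t size of the serialized TensorDesc proto, then the proto bytes
+ *   3. the raw tensor data (GPU tensors are staged through a CPU buffer)
+ *   4. uint64_t number of LoD levels, then for each level a uint64_t byte
+ *      size followed by that level's offset data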
+ */ +void SerializeToStream(std::ostream& os, const LoDTensor& tensor, + const platform::DeviceContext& dev_ctx); +void DeserializeFromStream(std::istream& is, LoDTensor* tensor); + } // namespace framework } // namespace paddle diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index 48cd131550dea5ad3f368b25c31d753efbe0dff9..7ba1e3e4e3270f4cd88e41e245f24c3cfc8aaab7 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -59,13 +59,13 @@ class CompileTimeInferShapeContext : public InferShapeContext { auto *in_var = block_.FindVarRecursive(Inputs(in)[i]); auto *out_var = block_.FindVarRecursive(Outputs(out)[j]); if (in_var->GetType() != VarDesc::LOD_TENSOR) { - VLOG(3) << "input " << in << "is not LodTensor"; + VLOG(3) << "input " << in << " is not LodTensor"; return; } PADDLE_ENFORCE_EQ(in_var->GetType(), VarDesc::LOD_TENSOR, "The %d-th output of Output(%s) must be LoDTensor.", j, out); - in_var->SetLoDLevel(out_var->GetLodLevel()); + out_var->SetLoDLevel(in_var->GetLodLevel()); } bool IsRuntime() const override; @@ -316,8 +316,8 @@ static void InitInferShapeFuncs() { for (auto &kern_pair : OperatorWithKernel::AllOpKernels()) { auto op_type = kern_pair.first; auto &op_info = info_map.at(op_type); - auto op = - static_cast(op_info.Creator()("", {}, {}, {})); + auto op = static_cast(op_info.Creator()( + "", VariableNameMap{}, VariableNameMap{}, AttributeMap{})); if (op_info.infer_shape_) { // infer_shape has been registered. continue; } @@ -466,7 +466,12 @@ DDim CompileTimeInferShapeContext::GetDim(const std::string &name) const { auto var = block_.FindVarRecursive(name); PADDLE_ENFORCE(var != nullptr, "Cannot find variable %s", name); try { - return framework::make_ddim(var->Shape()); + auto shape = var->Shape(); + if (shape.empty()) { + return framework::make_ddim({0UL}); + } else { + return framework::make_ddim(var->Shape()); + } } catch (...) { VLOG(5) << "GetDim of variable " << name << " error"; std::rethrow_exception(std::current_exception()); diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index daade439e5232f06be72bc5bb1e2285124f2c3a4..b29238432b05d81e984e1f4c269a00b01a4229cc 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -181,8 +181,8 @@ class OpKernelRegistrar : public Registrar { return 0; \ } -#define REGISTER_OP_GPU_KERNEL(op_type, ...) \ - REGISTER_OP_KERNEL(op_type, GPU, ::paddle::platform::GPUPlace, __VA_ARGS__) +#define REGISTER_OP_CUDA_KERNEL(op_type, ...) \ + REGISTER_OP_KERNEL(op_type, CUDA, ::paddle::platform::GPUPlace, __VA_ARGS__) #define REGISTER_OP_CPU_KERNEL(op_type, ...) 
\ REGISTER_OP_KERNEL(op_type, CPU, ::paddle::platform::CPUPlace, __VA_ARGS__) @@ -217,7 +217,7 @@ class OpKernelRegistrar : public Registrar { #else #define USE_OP_KERNEL(op_type) \ USE_OP_DEVICE_KERNEL(op_type, CPU); \ - USE_OP_DEVICE_KERNEL(op_type, GPU) + USE_OP_DEVICE_KERNEL(op_type, CUDA) #endif #define USE_NO_KERNEL_OP(op_type) USE_OP_ITSELF(op_type); @@ -226,9 +226,9 @@ class OpKernelRegistrar : public Registrar { USE_OP_ITSELF(op_type); \ USE_OP_DEVICE_KERNEL(op_type, CPU); -#define USE_GPU_ONLY_OP(op_type) \ - USE_OP_ITSELF(op_type); \ - USE_OP_DEVICE_KERNEL(op_type, GPU) +#define USE_CUDA_ONLY_OP(op_type) \ + USE_OP_ITSELF(op_type); \ + USE_OP_DEVICE_KERNEL(op_type, CUDA) #define USE_OP(op_type) \ USE_OP_ITSELF(op_type); \ diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index 93467ab8ac796277b47a861a427de2837fb2d3d4..e83d7547831744333d6a9c36e842d840a2a0dc03 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -22,20 +22,6 @@ limitations under the License. */ namespace paddle { namespace framework { -template <> -Eigen::DefaultDevice& ExecutionContext::GetEigenDevice< - platform::CPUPlace, Eigen::DefaultDevice>() const { - return *device_context_.GetEigenDevice(); -} - -#ifdef PADDLE_WITH_CUDA -template <> -Eigen::GpuDevice& -ExecutionContext::GetEigenDevice() const { - return *device_context_.GetEigenDevice(); -} -#endif - std::string OperatorBase::Input(const std::string& name) const { auto& ins = Inputs(name); PADDLE_ENFORCE_LE(ins.size(), 1UL, @@ -426,13 +412,10 @@ void OperatorWithKernel::Run(const Scope& scope, } kernel_iter->second->Compute(ctx); - - // throws errors if have. - dev_ctx.Finish(); } OpKernelType OperatorWithKernel::GetKernelType( const ExecutionContext& ctx) const { - return OpKernelType(IndicateDataType(ctx), ctx.device_context()); + return OpKernelType(IndicateDataType(ctx), ctx.GetPlace()); } DataType OperatorWithKernel::IndicateDataType( const ExecutionContext& ctx) const { diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 60861d92933dd100f877bec8d43f9b924f951e60..e60dbfc313f732120f6879fd6fd19ca8abc06813 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -276,17 +276,25 @@ class ExecutionContext { out_tensor->set_lod(in_tensor.lod()); } - template ::EigenDeviceType> - DeviceType& GetEigenDevice() const; - platform::Place GetPlace() const { return device_context_.GetPlace(); } + template + const DeviceContextType& device_context() const { + return *reinterpret_cast(&device_context_); + } + const platform::DeviceContext& device_context() const { return device_context_; } +#ifdef PADDLE_WITH_CUDA + const inline platform::CUDADeviceContext& cuda_device_context() const { + PADDLE_ENFORCE(platform::is_gpu_place(device_context_.GetPlace())); + return *reinterpret_cast( + &device_context_); + } +#endif + //! Get actual name vector for this input. 
const std::vector& Inputs(const std::string& name) const { return op_.Inputs(name); @@ -297,14 +305,6 @@ class ExecutionContext { return op_.Outputs(name); } -#ifdef PADDLE_WITH_CUDA - const inline platform::CUDADeviceContext& cuda_device_context() const { - PADDLE_ENFORCE(platform::is_gpu_place(device_context_.GetPlace())); - return *reinterpret_cast( - &device_context_); - } -#endif - private: const OperatorBase& op_; const Scope& scope_; diff --git a/paddle/framework/operator_test.cc b/paddle/framework/operator_test.cc index 1e19f82b341768142258ba4a5dfa246d87ba4c43..b678178454ff63e4217f0be7a9938a9ba183cda4 100644 --- a/paddle/framework/operator_test.cc +++ b/paddle/framework/operator_test.cc @@ -115,7 +115,7 @@ class OpWithKernelTest : public OperatorWithKernel { protected: void InferShape(framework::InferShapeContext* ctx) const override {} OpKernelType GetKernelType(const ExecutionContext& ctx) const override { - return OpKernelType(DataType::FP32, ctx.device_context()); + return OpKernelType(DataType::FP32, ctx.GetPlace()); } }; @@ -261,7 +261,9 @@ class OperatorClone : public paddle::framework::OperatorBase { }; TEST(Operator, Clone) { - OperatorClone a("ABC", {}, {}, {}); + OperatorClone a("ABC", paddle::framework::VariableNameMap{}, + paddle::framework::VariableNameMap{}, + paddle::framework::AttributeMap{}); auto b = a.Clone(); ASSERT_EQ(a.Type(), b->Type()); } diff --git a/paddle/framework/prune_test.cc b/paddle/framework/prune_test.cc index 5988874809f51c09b3d3d279be6c1e8d43d7a782..f21df37a292fd1e039ee8f8fa26244e26c978cae 100644 --- a/paddle/framework/prune_test.cc +++ b/paddle/framework/prune_test.cc @@ -54,7 +54,8 @@ TEST(Prune, one_operator) { f::ProgramDescBind program; f::BlockDescBind *block = program.MutableBlock(0); - AddOp("one_one", {{"input", {"a"}}}, {{"output", {"b"}}}, {}, block); + AddOp("one_one", {{"input", {"a"}}}, {{"output", {"b"}}}, f::AttributeMap{}, + block); f::ProgramDesc *pdesc = program.Proto(); f::ProgramDesc pruned; @@ -71,10 +72,14 @@ TEST(Prune, forward) { f::ProgramDescBind program; f::BlockDescBind *block = program.MutableBlock(0); - AddOp("one_one", {{"input", {"a"}}}, {{"output", {"b"}}}, {}, block); - AddOp("one_one", {{"input", {"b"}}}, {{"output", {"c"}}}, {}, block); - AddOp("one_one", {{"input", {"c"}}}, {{"output", {"d"}}}, {}, block); - AddOp("one_one", {{"input", {"d"}}}, {{"output", {"e"}}}, {}, block); + AddOp("one_one", {{"input", {"a"}}}, {{"output", {"b"}}}, f::AttributeMap{}, + block); + AddOp("one_one", {{"input", {"b"}}}, {{"output", {"c"}}}, f::AttributeMap{}, + block); + AddOp("one_one", {{"input", {"c"}}}, {{"output", {"d"}}}, f::AttributeMap{}, + block); + AddOp("one_one", {{"input", {"d"}}}, {{"output", {"e"}}}, f::AttributeMap{}, + block); f::ProgramDesc *pdesc = program.Proto(); @@ -90,11 +95,14 @@ TEST(Prune, multi_input_op) { f::ProgramDescBind program; f::BlockDescBind *block = program.MutableBlock(0); - AddOp("one_one", {{"input", {"a0"}}}, {{"output", {"b0"}}}, {}, block); - AddOp("one_one", {{"input", {"a1"}}}, {{"output", {"b1"}}}, {}, block); - AddOp("one_one", {{"input", {"a2"}}}, {{"output", {"b2"}}}, {}, block); - AddOp("three_one", {{"input", {"b0", "b1", "b2"}}}, {{"output", {"c"}}}, {}, + AddOp("one_one", {{"input", {"a0"}}}, {{"output", {"b0"}}}, f::AttributeMap{}, + block); + AddOp("one_one", {{"input", {"a1"}}}, {{"output", {"b1"}}}, f::AttributeMap{}, block); + AddOp("one_one", {{"input", {"a2"}}}, {{"output", {"b2"}}}, f::AttributeMap{}, + block); + AddOp("three_one", {{"input", {"b0", "b1", "b2"}}}, 
{{"output", {"c"}}}, + f::AttributeMap{}, block); f::ProgramDesc *pdesc = program.Proto(); pdesc->mutable_blocks(0)->mutable_ops(3)->set_is_target(true); @@ -108,9 +116,12 @@ TEST(Prune, multi_output_op) { f::ProgramDescBind program; f::BlockDescBind *block = program.MutableBlock(0); - AddOp("one_two", {{"input", {"a"}}}, {{"output", {"b", "c"}}}, {}, block); - AddOp("one_one", {{"input", {"b"}}}, {{"output", {"b1"}}}, {}, block); - AddOp("one_one", {{"input", {"c"}}}, {{"output", {"c1"}}}, {}, block); + AddOp("one_two", {{"input", {"a"}}}, {{"output", {"b", "c"}}}, + f::AttributeMap{}, block); + AddOp("one_one", {{"input", {"b"}}}, {{"output", {"b1"}}}, f::AttributeMap{}, + block); + AddOp("one_one", {{"input", {"c"}}}, {{"output", {"c1"}}}, f::AttributeMap{}, + block); f::ProgramDesc *pdesc = program.Proto(); pdesc->mutable_blocks(0)->mutable_ops(2)->set_is_target(true); @@ -124,9 +135,12 @@ TEST(Prune, multi_target) { f::ProgramDescBind program; f::BlockDescBind *block = program.MutableBlock(0); - AddOp("one_two", {{"input", {"a"}}}, {{"output", {"b", "c"}}}, {}, block); - AddOp("one_one", {{"input", {"b"}}}, {{"output", {"b1"}}}, {}, block); - AddOp("one_one", {{"input", {"c"}}}, {{"output", {"c1"}}}, {}, block); + AddOp("one_two", {{"input", {"a"}}}, {{"output", {"b", "c"}}}, + f::AttributeMap{}, block); + AddOp("one_one", {{"input", {"b"}}}, {{"output", {"b1"}}}, f::AttributeMap{}, + block); + AddOp("one_one", {{"input", {"c"}}}, {{"output", {"c1"}}}, f::AttributeMap{}, + block); f::ProgramDesc *pdesc = program.Proto(); pdesc->mutable_blocks(0)->mutable_ops(1)->set_is_target(true); diff --git a/paddle/framework/scope.cc b/paddle/framework/scope.cc index 9ad6272c99dd6a85520ae44c1331ac232bc6a9a2..656736e23846c8de50553a608c54a0bdd3272cb1 100644 --- a/paddle/framework/scope.cc +++ b/paddle/framework/scope.cc @@ -36,12 +36,9 @@ Scope& Scope::NewScope() const { } Variable* Scope::Var(const std::string& name) { - auto iter = vars_.find(name); - if (iter != vars_.end()) { - VLOG(3) << "Get existing variable " << name; - return iter->second; - } - Variable* v = new Variable(); + auto* v = FindVarLocally(name); + if (v != nullptr) return v; + v = new Variable(); vars_[name] = v; VLOG(3) << "Create variable " << name; v->name_ = &(vars_.find(name)->first); @@ -57,8 +54,10 @@ Variable* Scope::Var(std::string* name) { } Variable* Scope::FindVar(const std::string& name) const { - auto it = vars_.find(name); - if (it != vars_.end()) return it->second; + auto var = FindVarLocally(name); + if (var != nullptr) { + return var; + } return (parent_ == nullptr) ? nullptr : parent_->FindVar(name); } @@ -116,6 +115,11 @@ std::string Scope::Rename(const std::string& origin_name) const { Rename(origin_name, var_name); return var_name; } +Variable* Scope::FindVarLocally(const std::string& name) const { + auto it = vars_.find(name); + if (it != vars_.end()) return it->second; + return nullptr; +} } // namespace framework } // namespace paddle diff --git a/paddle/framework/scope.h b/paddle/framework/scope.h index c2aafb6ad825f9bd9ffef754923a15afdeaa8e5c..56e815db54b6385c4e4d87f456ed5d59113ca77b 100644 --- a/paddle/framework/scope.h +++ b/paddle/framework/scope.h @@ -76,6 +76,8 @@ class Scope { std::string Rename(const std::string& origin_name) const; private: + Variable* FindVarLocally(const std::string& name) const; + // Call Scope::NewScope for a sub-scope. 
explicit Scope(Scope const* parent) : parent_(parent) {} diff --git a/paddle/framework/shape_inference.cc b/paddle/framework/shape_inference.cc index 0af41b164f5894db17b2f86d4eba371cf05e3b41..7dac1cfd5ee0c320c67bc0b2448417d258d6862b 100644 --- a/paddle/framework/shape_inference.cc +++ b/paddle/framework/shape_inference.cc @@ -12,6 +12,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/framework/shape_inference.h" +#include "grad_op_desc_maker.h" +#include "paddle/framework/operator.h" namespace paddle { namespace framework { @@ -22,6 +24,12 @@ std::vector InferShapeContext::GetInputsDim( return GetDims(names); } +DDim InferShapeContext::GetInputsElementDim(const std::string &name, + int idx) const { + const std::vector &names = Inputs(name); + return this->GetDim(names[idx]); +} + void InferShapeContext::SetOutputsDim( const std::string &name, const std::vector &dims) { auto &names = Outputs(name); @@ -43,6 +51,9 @@ void InferShapeContext::SetDims(const std::vector &names, size_t length = names.size(); PADDLE_ENFORCE_EQ(length, dims.size()); for (size_t i = 0; i < length; ++i) { + if (names[i] == framework::kEmptyVarName) { + continue; + } SetDim(names[i], dims[i]); } } diff --git a/paddle/framework/shape_inference.h b/paddle/framework/shape_inference.h index 05dc47f06ac81f0acb6d0317cbecb3009c7dd7f0..46f2ea84b4b64292cc9026ef9864621efba79c7a 100644 --- a/paddle/framework/shape_inference.h +++ b/paddle/framework/shape_inference.h @@ -37,6 +37,7 @@ class InferShapeContext { virtual framework::DDim GetInputDim(const std::string &name) const = 0; std::vector GetInputsDim(const std::string &name) const; + DDim GetInputsElementDim(const std::string &name, int idx) const; virtual void SetOutputDim(const std::string &name, const DDim &dim) = 0; void SetOutputsDim(const std::string &name, diff --git a/paddle/function/EigenGemm.cpp b/paddle/function/EigenGemm.cpp index b3e666e860d29d89650d48a23cf44917035a02d7..644098a9e7873fb59b6343e805163e4892f060a8 100644 --- a/paddle/function/EigenGemm.cpp +++ b/paddle/function/EigenGemm.cpp @@ -21,7 +21,7 @@ template struct EigenBlasGemm { typedef Eigen::TensorMap, Eigen::Aligned> - Matrix; + EigenMatrix; static void compute(const bool transA, const bool transB, @@ -56,14 +56,13 @@ struct EigenBlasGemm { sizeB[1] = N; CHECK_EQ(N, ldb); } - Eigen::array sizeC; - sizeC[0] = M; - sizeC[1] = N; - CHECK_EQ(N, ldc); + Eigen::array sizeC = {{M, ldc}}; + Eigen::array offsetC = {{0, 0}}; + Eigen::array extentC = {{M, N}}; - const Matrix a(const_cast(A), sizeA); - const Matrix b(const_cast(B), sizeB); - Matrix c(C, sizeC); + const EigenMatrix a(const_cast(A), sizeA); + const EigenMatrix b(const_cast(B), sizeB); + EigenMatrix c(C, sizeC); typedef typename Eigen::Tensor::DimensionPair DimPair; Eigen::array dims; @@ -72,12 +71,23 @@ struct EigenBlasGemm { dims[0].second = transB ? 
1 : 0; Eigen::DefaultDevice device; - if (alpha == T(1) && beta == T(0)) { - c.device(device) = a.contract(b, dims); - } else if (alpha == T(1) && beta == T(1)) { - c.device(device) += a.contract(b, dims); + if (N == ldc) { + if (alpha == T(1) && beta == T(0)) { + c.device(device) = a.contract(b, dims); + } else if (alpha == T(1) && beta == T(1)) { + c.device(device) += a.contract(b, dims); + } else { + c.device(device) = alpha * a.contract(b, dims) + beta * c; + } } else { - c.device(device) = alpha * a.contract(b, dims) + beta * c; + if (alpha == T(1) && beta == T(0)) { + c.slice(offsetC, extentC).device(device) = a.contract(b, dims); + } else if (alpha == T(1) && beta == T(1)) { + c.slice(offsetC, extentC).device(device) += a.contract(b, dims); + } else { + c.slice(offsetC, extentC).device(device) = + alpha * a.contract(b, dims) + beta * c.slice(offsetC, extentC); + } } } }; diff --git a/paddle/gserver/activations/ActivationFunction.cpp b/paddle/gserver/activations/ActivationFunction.cpp index f5a41b66bf09a4abc5ae7b64f227ca52461408f5..57c890e4884da38e2087d89dc199e20af51495ea 100644 --- a/paddle/gserver/activations/ActivationFunction.cpp +++ b/paddle/gserver/activations/ActivationFunction.cpp @@ -24,7 +24,7 @@ limitations under the License. */ #include "paddle/utils/ClassRegistrar.h" #include "paddle/utils/Logging.h" -#ifdef PADDLE_USE_MKLDNN +#ifdef PADDLE_WITH_MKLDNN #include "MKLDNNActivation.h" #endif @@ -490,7 +490,7 @@ Error __must_check backward(Argument& act) { END_DEFINE_ACTIVATION(log) ActivationFunction* ActivationFunction::create(const std::string& type) { -#ifdef PADDLE_USE_MKLDNN +#ifdef PADDLE_WITH_MKLDNN if (!type.empty() && type.compare(0, 7, "mkldnn_") == 0) { return MKLDNNActivation::create(type); } diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.cpp b/paddle/gserver/gradientmachines/NeuralNetwork.cpp index be112b41239cace3fa9b9ee97923f8c3c7a9a98f..68bf37d59db65ddc8096e2db3391be25c37b57e6 100644 --- a/paddle/gserver/gradientmachines/NeuralNetwork.cpp +++ b/paddle/gserver/gradientmachines/NeuralNetwork.cpp @@ -20,7 +20,7 @@ limitations under the License. 
*/ #include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" -#ifdef PADDLE_USE_MKLDNN +#ifdef PADDLE_WITH_MKLDNN #include "paddle/gserver/layers/MKLDNNLayer.h" #endif @@ -307,7 +307,7 @@ void NeuralNetwork::backward(const UpdateCallback& callback) { } void NeuralNetwork::finish() { -#ifdef PADDLE_USE_MKLDNN +#ifdef PADDLE_WITH_MKLDNN FOR_EACH_R(layer, layers_) { MKLDNNLayerPtr dnnLayer = std::dynamic_pointer_cast(*layer); if (dnnLayer) { diff --git a/paddle/gserver/layers/ConvTransProjection.cpp b/paddle/gserver/layers/ConvTransProjection.cpp index 48132a3ce4cc4b50fea6d755d84d7254d2055bec..e7f081c0232d185c223fc2f48ca79dc84c7f721d 100644 --- a/paddle/gserver/layers/ConvTransProjection.cpp +++ b/paddle/gserver/layers/ConvTransProjection.cpp @@ -24,13 +24,13 @@ size_t ConvTransProjection::calOutputSize() { if (outputH_ == 0) outputH_ = configOutH_; if (outputW_ == 0) outputW_ = configOutW_; imageH_ = imageSize(outputH_, - filterH_, + (filterH_ - 1) * dilationH_ + 1, paddingH_, strideH_, /* caffeMode */ true); imageW_ = imageSize(outputW_, - filterW_, + (filterW_ - 1) * dilationW_ + 1, paddingW_, strideW_, /* caffeMode */ true); diff --git a/paddle/gserver/layers/FactorizationMachineLayer.cpp b/paddle/gserver/layers/FactorizationMachineLayer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..be26b9ba88c279036f73b0a0baaff164755fe067 --- /dev/null +++ b/paddle/gserver/layers/FactorizationMachineLayer.cpp @@ -0,0 +1,158 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "FactorizationMachineLayer.h" +#include +#include +#include "paddle/math/SparseMatrix.h" +#include "paddle/utils/Logging.h" +#include "paddle/utils/Stat.h" + +namespace paddle { + +REGISTER_LAYER(factorization_machine, FactorizationMachineLayer); + +bool FactorizationMachineLayer::init(const LayerMap& layerMap, + const ParameterMap& parameterMap) { + /* Initialize the basic parent class */ + Layer::init(layerMap, parameterMap); + + factorSize_ = config_.factor_size(); + + /* initialize the latentVectors_ */ + CHECK_EQ(inputLayers_.size(), 1UL); + size_t inputSize = inputLayers_[0]->getSize(); + CHECK_EQ(parameters_[0]->getSize(), inputSize * factorSize_); + latentVectors_ = std::unique_ptr( + new Weight(inputSize, factorSize_, parameters_[0])); + + return true; +} + +void FactorizationMachineLayer::forward(PassType passType) { + Layer::forward(passType); + + const MatrixPtr& inputV = getInputValue(0); + + size_t batchSize = inputV->getHeight(); + size_t outputSize = getSize(); + size_t inputSize = inputLayers_[0]->getSize(); + reserveOutput(batchSize, outputSize); + + MatrixPtr outV = getOutputValue(); + + Matrix::resizeOrCreate( + latentVectorsSquare_, inputSize, factorSize_, false, useGpu_); + Matrix::resizeOrCreate( + inputMulFactor_, batchSize, factorSize_, false, useGpu_); + Matrix::resizeOrCreate(tmpOut_, batchSize, factorSize_, false, useGpu_); + + REGISTER_TIMER_INFO("FmInputMulFactorTimer", getName().c_str()); + inputMulFactor_->mul(*inputV, *latentVectors_->getW()); + inputMulFactor_->square2(*tmpOut_); + outV->sumRows(*tmpOut_, 0.5, 0); + + if (dynamic_cast(inputV.get())) { + Matrix::resizeOrCreateSparseMatrix(inputSquare_, + inputV->getHeight(), + inputV->getWidth(), + inputV->getElementCnt(), + inputV->getValueType()); + inputSquare_->copyFrom(*inputV); + (dynamic_cast(inputSquare_.get()))->square2(); + } else { + Matrix::resizeOrCreate( + inputSquare_, inputV->getHeight(), inputV->getWidth(), false, useGpu_); + inputV->square2(*inputSquare_); + } + latentVectors_->getW()->square2(*latentVectorsSquare_); + tmpOut_->mul(*inputSquare_, *latentVectorsSquare_); + outV->sumRows(*tmpOut_, -0.5, 1.0); + + /* activation */ { + REGISTER_TIMER_INFO("FmFwAtvTimer", getName().c_str()); + forwardActivation(); + } +} + +void FactorizationMachineLayer::backward(const UpdateCallback& callback) { + /* Do derivation */ { backwardActivation(); } + + const MatrixPtr& inputV = getInputValue(0); + const MatrixPtr& oGrad = getOutputGrad(); + + Matrix::resizeOrCreate( + tmpSum_, 1, latentVectors_->getW()->getHeight(), false, useGpu_); + MatrixPtr tmpSumTrans = Matrix::create(tmpSum_->getRowBuf(0), + latentVectors_->getW()->getHeight(), + 1, + false, + useGpu_); + + /* Calculate the gradients of the latentVectors_ matrix */ + if (latentVectors_->getWGrad()) { + if (dynamic_cast(inputV.get())) { + Matrix::resizeOrCreateSparseMatrix(tmpInput_, + inputV->getHeight(), + inputV->getWidth(), + inputV->getElementCnt()); + + CpuSparseMatrix* sparseInputV = + dynamic_cast(inputV.get()); + CpuSparseMatrix* sparseInputSquare = + dynamic_cast(inputSquare_.get()); + CpuSparseMatrix* sparseTmpInput = + dynamic_cast(tmpInput_.get()); + sparseTmpInput->copyFrom(*sparseInputV); + + sparseTmpInput->rowScale(0, *sparseInputV, *oGrad); + latentVectors_->getWGrad()->mul( + *sparseTmpInput->getTranspose(), *inputMulFactor_, 1, 1); + sparseTmpInput->rowScale(0, *sparseInputSquare, *oGrad); + + Matrix::resizeOrCreate(negOnes_, 1, inputV->getHeight(), false, useGpu_); + negOnes_->zeroMem(); + negOnes_->add(-1); 
+ tmpSum_->mul(*negOnes_, *sparseTmpInput, 1, 0); + } else { + Matrix::resizeOrCreate( + tmpInput_, inputV->getHeight(), inputV->getWidth(), false, useGpu_); + + tmpInput_->rowScale(0, *inputV, *oGrad); + latentVectors_->getWGrad()->mul( + *tmpInput_->getTranspose(), *inputMulFactor_, 1, 1); + tmpInput_->rowScale(0, *inputSquare_, *oGrad); + + tmpSum_->sumCols(*tmpInput_, -1, 0); + } + + latentVectors_->getWGrad()->addRowScale( + 0, *latentVectors_->getW(), *tmpSumTrans); + + /* Increasing the number of gradient */ + latentVectors_->getParameterPtr()->incUpdate(callback); + } + + /* Calculate the input layers gradient */ + MatrixPtr inGrad = getInputGrad(0); + if (inGrad != NULL) { + inGrad->mul( + *inputMulFactor_, *latentVectors_->getW()->getTranspose(), 1, 1); + tmpSumTrans->sumRows(*latentVectorsSquare_, -1, 0); + inGrad->addColScale(0, *inputV, *tmpSum_); + inGrad->rowScale(0, *inGrad, *oGrad); + } +} + +} // namespace paddle diff --git a/paddle/gserver/layers/FactorizationMachineLayer.h b/paddle/gserver/layers/FactorizationMachineLayer.h new file mode 100644 index 0000000000000000000000000000000000000000..df20a49934d5dd444f127842c8fdb7c77f4ebeb1 --- /dev/null +++ b/paddle/gserver/layers/FactorizationMachineLayer.h @@ -0,0 +1,80 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "Layer.h" +#include "paddle/math/Matrix.h" +#include "paddle/utils/ThreadLocal.h" + +namespace paddle { +/** + * @brief The Factorization Machine models pairwise (order-2) feature + * interactions as the inner product of the learned latent vectors corresponding + * to each input feature. + * + * The Factorization Machine can effectively capture feature interactions + * especially when the input is sparse. While in principle FM can model higher + * order feature interaction, in practice usually only order-2 feature + * interactions are considered. The Factorization Machine Layer here only + * computes the order-2 interactions with the formula: + * + * \f[ + * y = \sum_{i=1}^{n-1}\sum_{j=i+1}^n\langle v_i, v_j \rangle x_i x_j + * \f] + * + * The detailed calculation for forward and backward can be found in this paper: + * + * Factorization machines. + * + * The config file api is factorization_machine.
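+ * + * Note: the forward pass evaluates this term with the equivalent O(k*n) form + * + * \f[ + * y = \frac{1}{2}\sum_{f=1}^{k}\left[\Big(\sum_{i=1}^{n}v_{i,f}x_i\Big)^2 - \sum_{i=1}^{n}v_{i,f}^{2}x_{i}^{2}\right] + * \f] + * + * which is what inputMulFactor_ (input * latentVectors_) and the squared input and latent matrices are used for.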
+ */ + +class FactorizationMachineLayer : public Layer { +protected: + // The latent vectors, shape: (size, factorSize_) + // Each row of the latentVectors_ matrix is the latent vector + // corresponding to one input feature dimension + std::unique_ptr<Weight> latentVectors_; + // The hyperparameter that defines the dimensionality of the factorization + size_t factorSize_; + +private: + // Store the square values of the latent vectors matrix + MatrixPtr latentVectorsSquare_; + // Store the square values of the input matrix + MatrixPtr inputSquare_; + // The result of input matrix * latent vector matrix that will be used in + // both the forward and backward steps + MatrixPtr inputMulFactor_; + // Store temporary calculation result + MatrixPtr tmpOut_; + MatrixPtr tmpSum_; + MatrixPtr tmpInput_; + // A 1 x batchSize row vector filled with -1, used to negate column sums + MatrixPtr negOnes_; + +public: + explicit FactorizationMachineLayer(const LayerConfig& config) + : Layer(config) {} + ~FactorizationMachineLayer() {} + + bool init(const LayerMap& layerMap, + const ParameterMap& parameterMap) override; + + void forward(PassType passType) override; + void backward(const UpdateCallback& callback = nullptr) override; +}; + +} // namespace paddle diff --git a/paddle/gserver/layers/HierarchicalSigmoidLayer.cpp b/paddle/gserver/layers/HierarchicalSigmoidLayer.cpp index d62a8d846e5b347aa44ce1951c043d5813a5b3ff..236f8096bdb6e024cf3c9c73eba422616a777a23 100644 --- a/paddle/gserver/layers/HierarchicalSigmoidLayer.cpp +++ b/paddle/gserver/layers/HierarchicalSigmoidLayer.cpp @@ -64,49 +64,111 @@ void HierarchicalSigmoidLayer::forward(PassType passType) { batchSize, codeLength_, /* trans */ false, - useGpu(deviceId_)); + false); Matrix::resizeOrCreate(preOutput_.grad, batchSize, codeLength_, /* trans */ false, - useGpu(deviceId_)); - + false); IVectorPtr label = getInput(*getLabelLayer()).ids; - preOutput_.value->zeroMem(); + if (useGpu_) { + Matrix::resizeOrCreate(cpuOutput_, + output_.value->getHeight(), + output_.value->getWidth(), + /* trans */ false, + false); + IVector::resizeOrCreate(cpuLabel_, label->getSize(), false); + cpuLabel_->copyFrom(*label); + cpuOutput_->copyFrom(*output_.value); + } else { + cpuOutput_ = output_.value; + cpuLabel_ = label; + } /* add the bias-vector */ if (biases_.get() != NULL) { - preOutput_.value->addByBitCode(numClasses_, *label, *biases_->getW()); + if (useGpu_) { + Matrix::resizeOrCreate(cpuBias_, + 1, + numClasses_ - 1, + /* trans */ false, + false); + cpuBias_->copyFrom(*biases_->getW()); + } else { + cpuBias_ = biases_->getW(); + } + preOutput_.value->addByBitCode(numClasses_, *cpuLabel_, *cpuBias_); } for (size_t i = 0; i < inputLayers_.size() - 1; ++i) { MatrixPtr input = getInputValue(i); + if (useGpu_) { + Matrix::resizeOrCreate(cpuInput_, + input->getHeight(), + input->getWidth(), + /* trans */ false, + false); + Matrix::resizeOrCreate(cpuWeight_, + weights_[i]->getW()->getHeight(), + weights_[i]->getW()->getWidth(), + /* trans */ false, + false); + cpuInput_->copyFrom(*input); + cpuWeight_->copyFrom(*weights_[i]->getW()); + } else { + cpuInput_ = input; + cpuWeight_ = weights_[i]->getW(); + } preOutput_.value->mulByBitCode( - numClasses_, *label, *weights_[i]->getW(), *input); + numClasses_, *cpuLabel_, *cpuWeight_, *cpuInput_); } // keep consistent with the clipping in the following softrelu preOutput_.value->clip(-40.0, 40.0); preOutput_.value->sumByBitCode(numClasses_, - *label, - *output_.value, + *cpuLabel_, + *cpuOutput_, -1); // scaleSum preOutput_.value->softrelu(*preOutput_.value); - MatrixPtr sum = -
Matrix::create(batchSize, 1, /* trans= */ false, useGpu(deviceId_)); + MatrixPtr sum = Matrix::create(batchSize, 1, /* trans= */ false, false); preOutput_.value->rowSum(*sum); - output_.value->add(*sum); + cpuOutput_->add(*sum); + if (useGpu_) { + output_.value->copyFrom(*cpuOutput_); + } else { + output_.value = cpuOutput_; + } } void HierarchicalSigmoidLayer::backward(const UpdateCallback& callback) { IVectorPtr label = getInput(*getLabelLayer()).ids; + if (useGpu_) { + IVector::resizeOrCreate(cpuLabel_, label->getSize(), false); + cpuLabel_->copyFrom(*label); + } else { + cpuLabel_ = label; + } preOutput_.grad->one(); preOutput_.grad->softreluDerivative(*preOutput_.value); - preOutput_.grad->subByBitCode(numClasses_, *label); + preOutput_.grad->subByBitCode(numClasses_, *cpuLabel_); if (biases_ && biases_->getWGrad()) { - preOutput_.grad->addByBitCodeBackward( - numClasses_, *label, *biases_->getWGrad()); - + MatrixPtr biases_grad = biases_->getWGrad(); + if (useGpu_) { + Matrix::resizeOrCreate(cpuBias_, + 1, + numClasses_ - 1, + /* trans */ false, + false); + cpuBias_->copyFrom(*biases_grad); + } else { + cpuBias_ = biases_grad; + } + preOutput_.grad->addByBitCodeBackward(numClasses_, *cpuLabel_, *cpuBias_); + if (useGpu_) { + biases_grad->copyFrom(*cpuBias_); + } else { + biases_grad = cpuBias_; + } /* Increasing the number of gradient */ biases_->getParameterPtr()->incUpdate(callback); } @@ -115,9 +177,31 @@ void HierarchicalSigmoidLayer::backward(const UpdateCallback& callback) { /* Calculate the W-gradient for the current layer */ MatrixPtr input = getInputValue(i); if (weights_[i]->getWGrad()) { + MatrixPtr weights_grad = weights_[i]->getWGrad(); + if (useGpu_) { + Matrix::resizeOrCreate(cpuInput_, + input->getHeight(), + input->getWidth(), + /* trans */ false, + false); + Matrix::resizeOrCreate(cpuWeightGrad_, + weights_grad->getHeight(), + weights_grad->getWidth(), + /* trans */ false, + false); + cpuInput_->copyFrom(*input); + cpuWeightGrad_->copyFrom(*weights_grad); + } else { + cpuInput_ = input; + cpuWeightGrad_ = weights_grad; + } preOutput_.grad->mulByBitCodeBackwardWeight( - numClasses_, *label, *weights_[i]->getWGrad(), *input); - + numClasses_, *cpuLabel_, *cpuWeightGrad_, *cpuInput_); + if (useGpu_) { + weights_grad->copyFrom(*cpuWeightGrad_); + } else { + weights_grad = cpuWeightGrad_; + } /* Increasing the number of gradient */ weights_[i]->getParameterPtr()->incUpdate(callback); } @@ -125,8 +209,30 @@ void HierarchicalSigmoidLayer::backward(const UpdateCallback& callback) { /* Calculate the input layers error */ MatrixPtr inputGrad = getInputGrad(i); if (inputGrad) { + if (useGpu_) { + Matrix::resizeOrCreate(cpuInputGrad_, + inputGrad->getHeight(), + inputGrad->getWidth(), + /* trans */ false, + false); + Matrix::resizeOrCreate(cpuWeight_, + weights_[i]->getW()->getHeight(), + weights_[i]->getW()->getWidth(), + /* trans */ false, + false); + cpuInputGrad_->copyFrom(*inputGrad); + cpuWeight_->copyFrom(*weights_[i]->getW()); + } else { + cpuInputGrad_ = inputGrad; + cpuWeight_ = weights_[i]->getW(); + } preOutput_.grad->mulByBitCodeBackwardError( - numClasses_, *label, *weights_[i]->getW(), *inputGrad); + numClasses_, *cpuLabel_, *cpuWeight_, *cpuInputGrad_); + if (useGpu_) { + inputGrad->copyFrom(*cpuInputGrad_); + } else { + inputGrad = cpuInputGrad_; + } } } } diff --git a/paddle/gserver/layers/HierarchicalSigmoidLayer.h b/paddle/gserver/layers/HierarchicalSigmoidLayer.h index 9afd40b1674680da962d6e51caa56b46279b70de..7f896e61ca26e3e22b99b65b1285384a121f7f02 100644 
--- a/paddle/gserver/layers/HierarchicalSigmoidLayer.h +++ b/paddle/gserver/layers/HierarchicalSigmoidLayer.h @@ -80,6 +80,15 @@ protected: int codeLength_; /// temporary result of output_ Argument preOutput_; + + /// The temporary variables in CPU memory. + MatrixPtr cpuWeight_; + MatrixPtr cpuWeightGrad_; + MatrixPtr cpuInput_; + MatrixPtr cpuInputGrad_; + MatrixPtr cpuBias_; + MatrixPtr cpuOutput_; + IVectorPtr cpuLabel_; }; } // namespace paddle diff --git a/paddle/gserver/layers/MKLDNNLRNLayer.cpp b/paddle/gserver/layers/MKLDNNLRNLayer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..741984bb68d3881f6ac26eaca7790190ed6e572a --- /dev/null +++ b/paddle/gserver/layers/MKLDNNLRNLayer.cpp @@ -0,0 +1,163 @@ +/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "MKLDNNLRNLayer.h" +#include "paddle/utils/Logging.h" + +using namespace mkldnn; // NOLINT +typedef memory::format format; + +namespace paddle { + +REGISTER_LAYER(mkldnn_lrn, MKLDNNLRNLayer); + +bool MKLDNNLRNLayer::init(const LayerMap& layerMap, + const ParameterMap& parameterMap) { + if (!MKLDNNLayer::init(layerMap, parameterMap)) { + return false; + } + + /* the size of inputs for norm-layer is 1 */ + CHECK_EQ(config_.inputs_size(), 1UL); + const NormConfig& conf = config_.inputs(0).norm_conf(); + localSize_ = conf.size(); + alpha_ = conf.scale(); + beta_ = conf.pow(); + + ic_ = conf.channels(); + oc_ = ic_; + iw_ = conf.img_size(); + ow_ = conf.output_x(); + ih_ = conf.has_img_size_y() ? conf.img_size_y() : conf.img_size(); + oh_ = conf.has_output_y() ? conf.output_y() : conf.output_x(); + CHECK_EQ(iw_, ow_); + CHECK_EQ(ih_, oh_); + return true; +} + +void MKLDNNLRNLayer::reshape( + int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) { + CHECK_EQ(inputLayers_.size(), 1UL); + reshapeInput(bs, ih, iw); + // ic_ and oc can not be changed + CHECK_EQ((size_t)ic, + inputLayers_[0]->getOutputValue()->getElementCnt() / bs / ih / iw) + << "Input channel can not be changed"; + oh = ih; + ow = iw; + reshapeOutput(oh, ow); + resizeOutput(bs, oc * oh * ow); +} + +void MKLDNNLRNLayer::resetFwd(std::vector<primitive>& pipeline, + std::vector<MKLDNNMatrixPtr>& inputs, + MKLDNNMatrixPtr& out) { + resetFwdBuffers(inputs[0], out); + + resetFwdPD(fwdPD_, inputs[0], out); + + resetFwdPipeline(pipeline, fwdPD_, inputs[0], out); +} + +void MKLDNNLRNLayer::resetBwd(std::vector<primitive>& pipeline, + std::vector<MKLDNNMatrixPtr>& inputs, + MKLDNNMatrixPtr& out) { + std::shared_ptr<lrn_bwd::primitive_desc> pd; + + resetBwdBuffers(inputs[0], out); + + resetBwdPD(pd, inputs[0], out); + + resetBwdPipeline(pipeline, pd, inputs[0], out); +} + +void MKLDNNLRNLayer::resetFwdBuffers(MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& out) { + resetInValue(in); + CHECK(in); + resetOutValue(out, in->getPrimitiveDesc()); +} + +void MKLDNNLRNLayer::resetFwdPD(std::shared_ptr<lrn_fwd::primitive_desc>& pd, + MKLDNNMatrixPtr in, + MKLDNNMatrixPtr out) { + prop_kind pk = passType_ == PASS_TEST ?
prop_kind::forward_scoring + : prop_kind::forward_training; + auto fwdDesc = lrn_fwd::desc(pk, + algorithm::lrn_across_channels, + in->getMemoryDesc(), + localSize_, + alpha_, + beta_, + 1.0f); + pd.reset(new lrn_fwd::primitive_desc(fwdDesc, engine_)); + // prepare workspace if necessary + workspace_ = + passType_ != PASS_TEST + ? std::make_shared<memory>(memory(pd->workspace_primitive_desc())) + : nullptr; +} + +void MKLDNNLRNLayer::resetFwdPipeline( + std::vector<primitive>& pipeline, + std::shared_ptr<lrn_fwd::primitive_desc>& pd, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& out) { + fwd_ = workspace_ + ? std::make_shared<lrn_fwd>(lrn_fwd(*pd, *in, *workspace_, *out)) + : std::make_shared<lrn_fwd>(lrn_fwd(*pd, *in, *out)); + pipeline.push_back(*fwd_); +} + +void MKLDNNLRNLayer::resetBwdBuffers(MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& out) { + CHECK(inVals_[0] && outVal_); + resetOutGrad(out, outVal_->getPrimitiveDesc()); + resetInGrad(in, inVals_[0]->getPrimitiveDesc()); +} + +void MKLDNNLRNLayer::resetBwdPD(std::shared_ptr<lrn_bwd::primitive_desc>& pd, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& out) { + pd = nullptr; + if (in == nullptr) { + return; + } + CHECK(out); + auto bwdDesc = lrn_bwd::desc(algorithm::lrn_across_channels, + in->getMemoryDesc(), + out->getMemoryDesc(), + localSize_, + alpha_, + beta_, + 1.0f); + pd.reset(new lrn_bwd::primitive_desc(bwdDesc, engine_, *fwdPD_)); +} + +void MKLDNNLRNLayer::resetBwdPipeline( + std::vector<primitive>& pipeline, + std::shared_ptr<lrn_bwd::primitive_desc>& pd, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& out) { + if (pd == nullptr) { + return; + } + CHECK(inVals_[0]); + CHECK(workspace_); + bwdData_ = std::make_shared<lrn_bwd>( + lrn_bwd(*pd, *inVals_[0], *out, *workspace_, *in)); + pipeline.push_back(*bwdData_); +} + +} // namespace paddle diff --git a/paddle/gserver/layers/MKLDNNLRNLayer.h b/paddle/gserver/layers/MKLDNNLRNLayer.h new file mode 100644 index 0000000000000000000000000000000000000000..cfe5621252c71a1de9a0a42a2a88e221e3e56972 --- /dev/null +++ b/paddle/gserver/layers/MKLDNNLRNLayer.h @@ -0,0 +1,78 @@ +/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "MKLDNNLayer.h" +#include "mkldnn.hpp" + +namespace paddle { +typedef mkldnn::lrn_forward lrn_fwd; +typedef mkldnn::lrn_backward lrn_bwd; + +/** + * @brief A subclass of MKLDNNLayer implementing the LRN (Local Response Norm) layer.
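+ * + * Descriptive note: LRN scales each activation by a factor computed from the squared activations of the localSize_ neighboring channels at the same spatial position (algorithm::lrn_across_channels in MKL-DNN); alpha_ and beta_ supply the scale and the exponent of that factor.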
+ * + * The config file api is mkldnn_lrn + */ +class MKLDNNLRNLayer : public MKLDNNLayer { +protected: + // save forward primitive_desc, which can be used in backward + std::shared_ptr<lrn_fwd::primitive_desc> fwdPD_; + // according to https://github.com/01org/mkl-dnn/blob/master/tests/gtests/ + // test_lrn_backward.cpp, lrn needs a workspace for backward + std::shared_ptr<mkldnn::memory> workspace_; + + int localSize_; + float alpha_, beta_; // scale and pow in paddle + +public: + explicit MKLDNNLRNLayer(const LayerConfig& config) : MKLDNNLayer(config) {} + + ~MKLDNNLRNLayer() {} + + bool init(const LayerMap& layerMap, + const ParameterMap& parameterMap) override; + + void reshape( + int& bs, int& ic, int& ih, int& iw, int& oc, int& oh, int& ow) override; + + void resetFwd(std::vector<mkldnn::primitive>& pipeline, + std::vector<MKLDNNMatrixPtr>& inputs, + MKLDNNMatrixPtr& out) override; + + void resetBwd(std::vector<mkldnn::primitive>& pipeline, + std::vector<MKLDNNMatrixPtr>& inputs, + MKLDNNMatrixPtr& out) override; + +protected: + void resetFwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out); + void resetFwdPD(std::shared_ptr<lrn_fwd::primitive_desc>& pd, + MKLDNNMatrixPtr in, + MKLDNNMatrixPtr out); + void resetFwdPipeline(std::vector<mkldnn::primitive>& pipeline, + std::shared_ptr<lrn_fwd::primitive_desc>& pd, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& out); + void resetBwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out); + void resetBwdPD(std::shared_ptr<lrn_bwd::primitive_desc>& pd, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& out); + void resetBwdPipeline(std::vector<mkldnn::primitive>& pipeline, + std::shared_ptr<lrn_bwd::primitive_desc>& pd, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& out); +}; + +} // namespace paddle diff --git a/paddle/gserver/layers/PoolLayer.cpp b/paddle/gserver/layers/PoolLayer.cpp index 87613a96c5b3c2da212f63e9e678bcd22308b08e..fceb389d06d8d2cb0357186bf83edda9957c6c19 100644 --- a/paddle/gserver/layers/PoolLayer.cpp +++ b/paddle/gserver/layers/PoolLayer.cpp @@ -45,6 +45,8 @@ bool PoolLayer::init(const LayerMap& layerMap, strideY_ = conf.has_stride_y() ? conf.stride_y() : conf.stride(); confPaddingY_ = conf.has_padding_y() ? conf.padding_y() : conf.padding(); outputY_ = conf.has_output_y() ? conf.output_y() : conf.output_x(); + + excludeMode_ = conf.has_exclude_mode() ? conf.exclude_mode() : true; return true; } diff --git a/paddle/gserver/layers/PoolLayer.h b/paddle/gserver/layers/PoolLayer.h index d43292ad2d4bbe1229ca59ca21bee92c9ec006a3..9df672a935868e9c61f4dd1fd47a9c309b214f12 100644 --- a/paddle/gserver/layers/PoolLayer.h +++ b/paddle/gserver/layers/PoolLayer.h @@ -38,6 +38,8 @@ protected: std::string poolType_; + bool excludeMode_; + public: explicit PoolLayer(const LayerConfig& config) : Layer(config) {} diff --git a/paddle/gserver/layers/PoolProjection.cpp b/paddle/gserver/layers/PoolProjection.cpp index d90b438448eb72e72e22e9a91a3cbcd84ac7e6cb..6a9de394cee3769784a38f5512b15f52b1ed6fa1 100644 --- a/paddle/gserver/layers/PoolProjection.cpp +++ b/paddle/gserver/layers/PoolProjection.cpp @@ -36,6 +36,8 @@ PoolProjection::PoolProjection(const ProjectionConfig& config, strideY_ = conf.has_stride_y() ? conf.stride_y() : conf.stride(); confPaddingY_ = conf.has_padding_y() ? conf.padding_y() : conf.padding(); outputY_ = conf.has_output_y() ? conf.output_y() : conf.output_x(); + + excludeMode_ = conf.has_exclude_mode() ?
conf.exclude_mode() : true; } size_t PoolProjection::getSize() { @@ -141,7 +143,8 @@ void AvgPoolProjection::forward() { outputY_, outputX_, confPaddingY_, - confPadding_); + confPadding_, + excludeMode_); } void AvgPoolProjection::backward(const UpdateCallback& callback) { @@ -166,6 +169,7 @@ void AvgPoolProjection::backward(const UpdateCallback& callback) { 1, 1, confPaddingY_, - confPadding_); + confPadding_, + excludeMode_); } } // namespace paddle diff --git a/paddle/gserver/layers/PoolProjection.h b/paddle/gserver/layers/PoolProjection.h index 9a75f465f6fbb2f2a928b0e36fcfbe0e510d7b3a..a0412714bca7a273e999e4d6bd552e833d20d69c 100644 --- a/paddle/gserver/layers/PoolProjection.h +++ b/paddle/gserver/layers/PoolProjection.h @@ -28,6 +28,7 @@ protected: int confPaddingY_, confPadding_; size_t channels_; std::string poolType_; + bool excludeMode_; public: PoolProjection(const ProjectionConfig& config, diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/gserver/tests/CMakeLists.txt index c295ea19c9ccb3d05c509a41925d2c36efdba8ef..b578a906c2027a1169a0098b93f8d0742920f99d 100644 --- a/paddle/gserver/tests/CMakeLists.txt +++ b/paddle/gserver/tests/CMakeLists.txt @@ -1,5 +1,4 @@ # gserver pacakge unittests - add_simple_unittest(test_LinearChainCRF) add_simple_unittest(test_RecurrentLayer) @@ -29,6 +28,26 @@ gserver_test(test_KmaxSeqScore) gserver_test(test_Expand) gserver_test(test_MaxPoolingWithMaskOutput) +set(PYTHON_PATH + ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d + ${PADDLE_SOURCE_DIR}/python/:${PADDLE_SOURCE_DIR}/paddle/gserver/tests) +function(gserver_test_with_python TARGET) + add_unittest_without_exec(${TARGET} ${TARGET}.cpp) + add_test(NAME ${TARGET} + COMMAND ${PYTHON_PATH} ${CMAKE_CURRENT_BINARY_DIR}/${TARGET} + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/) +endfunction() + +gserver_test_with_python(test_PyDataProvider2) +if(WITH_PYTHON) + gserver_test_with_python(test_PyDataProvider) +endif() +if(NOT MOBILE_INFERENCE) + gserver_test_with_python(test_CompareTwoNets) + # TODO(yuyang18): There is some bug in test_RecurrentGradientMachine, I will fix it. 
+ gserver_test_with_python(test_RecurrentGradientMachine) +endif() + ########## test_MKLDNN layers and activations ########## if(WITH_MKLDNN) add_unittest_without_exec(test_MKLDNN @@ -36,87 +55,43 @@ if(WITH_MKLDNN) MKLDNNTester.cpp LayerGradUtil.cpp) add_test(NAME test_MKLDNN - COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python - ${CMAKE_CURRENT_BINARY_DIR}/test_MKLDNN + COMMAND ${PYTHON_PATH} ${CMAKE_CURRENT_BINARY_DIR}/test_MKLDNN WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) endif() -############## test_PyDataProvider ######################## -if(WITH_PYTHON) - add_unittest_without_exec(test_PyDataProvider - test_PyDataProvider.cpp) - - add_test(NAME test_PyDataProvider - COMMAND .set_python_path.sh -d ./gserver/tests:${PADDLE_SOURCE_DIR}/python/ ${CMAKE_CURRENT_BINARY_DIR}/test_PyDataProvider - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) -endif() - ############### test_WarpCTCLayer ####################### if(NOT WITH_DOUBLE AND NOT MOBILE_INFERENCE) add_unittest_without_exec(test_WarpCTCLayer test_WarpCTCLayer.cpp) - add_test(NAME test_WarpCTCLayer COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_WarpCTCLayer --warpctc_dir=${WARPCTC_LIB_DIR} WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) endif() if(NOT MOBILE_INFERENCE) -################## test_Evaluator ####################### + ################## test_Evaluator ############# add_unittest(test_Evaluator test_Evaluator.cpp) -############### test_RecurrentGradientMachine ############### - # TODO(yuyang18): There is some bug in test_RecurrentGradientMachine - # I will fix it. - add_unittest_without_exec(test_RecurrentGradientMachine - test_RecurrentGradientMachine.cpp) - add_test(NAME test_RecurrentGradientMachine - COMMAND .set_python_path.sh -d - ${PADDLE_SOURCE_DIR}/python:${PADDLE_SOURCE_DIR}/paddle/gserver/tests - ${CMAKE_CURRENT_BINARY_DIR}/test_RecurrentGradientMachine - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) - -############### test_NetworkCompare ############### + ########### test_NetworkCompare ############### add_unittest_without_exec(test_NetworkCompare test_NetworkCompare.cpp) if(WITH_GPU) - add_test(NAME test_NetworkCompare - COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=true - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) + set(use_gpu true) else() - add_test(NAME test_NetworkCompare - COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=false - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) + set(use_gpu false) endif() -endif() - - -add_unittest_without_exec(test_PyDataProvider2 - test_PyDataProvider2.cpp) - -add_test(NAME test_PyDataProvider2 - COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/paddle/gserver/tests:${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_PyDataProvider2 - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle -) + add_test(NAME test_NetworkCompare + COMMAND ${PYTHON_PATH} ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=${use_gpu} + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) -################# test_CompareSparse ################## -add_unittest_without_exec(test_CompareSparse - test_CompareSparse.cpp) -if(NOT ON_TRAVIS) - add_test(NAME test_CompareSparse - COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d - ${PADDLE_SOURCE_DIR}/python:${PADDLE_SOURCE_DIR}/paddle/gserver/tests - ./.set_port.sh -p port -n 6 - ${CMAKE_CURRENT_BINARY_DIR}/test_CompareSparse - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/) + ############ 
test_CompareSparse ################ + add_unittest_without_exec(test_CompareSparse + test_CompareSparse.cpp) + if(NOT ON_TRAVIS) + add_test(NAME test_CompareSparse + COMMAND ${PYTHON_PATH} ./.set_port.sh -p port -n 6 + ${CMAKE_CURRENT_BINARY_DIR}/test_CompareSparse + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/) + endif() endif() - -################ test_CompareTwoNets ###################### -add_unittest_without_exec(test_CompareTwoNets - test_CompareTwoNets.cpp) -add_test(NAME test_CompareTwoNets - COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d - ${PADDLE_SOURCE_DIR}/python:${PADDLE_SOURCE_DIR}/paddle/gserver/tests - ${CMAKE_CURRENT_BINARY_DIR}/test_CompareTwoNets - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/) diff --git a/paddle/gserver/tests/mkldnn_simple_net.conf b/paddle/gserver/tests/mkldnn_simple_net.conf index 8bbe91e56d0ba6da06475ad16f3162ee1103ee02..0e9d6b31fa8776136b4eee29311383ae6bb21644 100644 --- a/paddle/gserver/tests/mkldnn_simple_net.conf +++ b/paddle/gserver/tests/mkldnn_simple_net.conf @@ -51,6 +51,8 @@ tmp = img_pool_layer(input=tmp, padding=1, pool_type=MaxPooling()) +tmp = img_cmrnorm_layer(input=tmp, size=5, scale=0.0001, power=0.75) + tmp = fc_layer(input=tmp, size=channels, bias_attr=False, diff --git a/paddle/gserver/tests/sequence_rnn_matched_inputs.py b/paddle/gserver/tests/sequence_rnn_matched_inputs.py index e2635b4400b13517bac716a5a0affeb16c218b09..59e8c91733c42b6f13f723321d21bca98ab78bb7 100644 --- a/paddle/gserver/tests/sequence_rnn_matched_inputs.py +++ b/paddle/gserver/tests/sequence_rnn_matched_inputs.py @@ -41,7 +41,7 @@ nonseq = embedding_layer(input=label, size=word_dim) # This hierarchical RNN is designed to be equivalent to the simple RNN in -# sequence_rnn_multi_unequalength_inputs.conf +# sequence_rnn_mixed_inputs.conf def outer_step(subseq, seq, nonseq, encoding): outer_mem = memory(name="outer_rnn_state", size=hidden_dim) diff --git a/paddle/gserver/tests/sequence_rnn_mixed_inputs.py b/paddle/gserver/tests/sequence_rnn_mixed_inputs.py index 84a66e294495c01e03dc83b38a531e482bed1292..6fe9dca6e2cb0e14fee346b8307f67b804328471 100644 --- a/paddle/gserver/tests/sequence_rnn_mixed_inputs.py +++ b/paddle/gserver/tests/sequence_rnn_mixed_inputs.py @@ -37,7 +37,7 @@ encoding = embedding_layer(input=data2, size=word_dim) # This hierarchical RNN is designed to be equivalent to the simple RNN in -# sequence_rnn_multi_unequalength_inputs.conf +# sequence_rnn_matched_inputs.conf def outer_step(subseq, seq, nonseq, encoding): outer_mem = memory(name="outer_rnn_state", size=hidden_dim) diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index cacf10692942f5eca2f6c498183f4acc00768460..a2f07937b8834e3f3fa7a6bf2ae10f29a8d84f29 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -238,9 +238,24 @@ void testProjectionConv(size_t groups, bool isDeconv) { /* caffeMode */ true); conv->set_output_x(output_x); conv->set_output_y(output_y); + LOG(INFO) << "DILATION:" << DILATION << "; output_x: " << output_x + << "; output_y: " << output_y; if (isDeconv) { + int deconv_image_x = imageSize(output_x, + (conv->filter_size() - 1) * DILATION + 1, + conv->padding(), + conv->stride(), + /* caffeMode */ true); + int deconv_image_y = imageSize(output_y, + (conv->filter_size_y() - 1) * DILATION + 1, + conv->padding_y(), + conv->stride_y(), + /* caffeMode */ true); + + LOG(INFO) << " deconv_image_x: " << deconv_image_x + << "; deconv_image_y: " << deconv_image_y; 
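+    // Descriptive note: for deconvolution the spatial mapping is inverted, so the reference image size is recovered from output_x/output_y via imageSize() and used as the expected output size below.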
conf.set_input_size(output_x * output_y * CHANNELS); - conf.set_output_size(IMAGE_SIZE * IMAGE_SIZE * NUM_FILTERS); + conf.set_output_size(deconv_image_x * deconv_image_y * NUM_FILTERS); } else { conf.set_input_size(IMAGE_SIZE * IMAGE_SIZE * CHANNELS); conf.set_output_size(output_x * output_y * NUM_FILTERS); @@ -681,12 +696,13 @@ TEST(Layer, hsigmoidLayer) { config.layerConfig.add_inputs(); config.layerConfig.add_inputs(); - // Not support GPU now - testLayerGrad(config, - "hsigmoid", - 100, - /* trans */ false, /* useGpu */ - false); + for (auto useGpu : {false, true}) { + testLayerGrad(config, + "hsigmoid", + 100, + /* trans */ false, + /* useGpu */ useGpu); + } } TEST(Layer, multi_cross) { @@ -1210,7 +1226,10 @@ void setPoolConfig(TestConfig* config, pool->set_output_y(oh); } -void testPoolLayer(const string& poolType, bool trans, bool useGpu) { +void testPoolLayer(const string& poolType, + bool trans, + bool useGpu, + bool excludeMode = true) { TestConfig config; config.inputDefs.push_back({INPUT_DATA, "layer_0", 3136, 0}); LayerInputConfig* input = config.layerConfig.add_inputs(); @@ -1218,6 +1237,7 @@ void testPoolLayer(const string& poolType, bool trans, bool useGpu) { pool->set_img_size(14); pool->set_img_size_y(14); + pool->set_exclude_mode(excludeMode); setPoolConfig(&config, pool, poolType); config.layerConfig.set_size(pool->output_x() * pool->output_y() * pool->channels()); @@ -1249,16 +1269,26 @@ void testPoolLayer2(const string& poolType, bool trans, bool useGpu) { TEST(Layer, PoolLayer) { testPoolLayer("avg-projection", /* trans= */ false, /* useGpu= */ false); + testPoolLayer("avg-projection", + /* trans= */ false, + /* useGpu= */ false, + /* excludeMode= */ false); testPoolLayer("max-projection", /* trans= */ false, /* useGpu= */ false); testPoolLayer("max-pool-with-mask", /* trans= */ false, /* useGpu= */ false); #ifdef PADDLE_WITH_CUDA testPoolLayer("avg-projection", /* trans= */ false, /* useGpu= */ true); + testPoolLayer("avg-projection", + /* trans= */ false, + /* useGpu= */ true, + /* excludeMode= */ false); testPoolLayer("max-projection", /* trans= */ false, /* useGpu= */ true); testPoolLayer("cudnn-max-pool", /* trans= */ false, /* useGpu= */ true); testPoolLayer("cudnn-avg-pool", /* trans= */ false, /* useGpu= */ true); testPoolLayer2("cudnn-max-pool", /* trans= */ false, /* useGpu= */ true); testPoolLayer2("cudnn-avg-pool", /* trans= */ false, /* useGpu= */ true); + testPoolLayer2( + "cudnn-avg-incl-pad-pool", /* trans= */ false, /* useGpu= */ true); testPoolLayer("max-pool-with-mask", /* trans= */ false, /* useGpu= */ true); #endif } @@ -2464,6 +2494,25 @@ TEST(Layer, L2DistanceLayer) { } } +void testFactorizationMachineLayer(InputType type, bool useGpu) { + const int FACTOR_SIZE = 10; + TestConfig config; + config.layerConfig.set_type("factorization_machine"); + config.layerConfig.set_factor_size(FACTOR_SIZE); + config.layerConfig.set_size(1); + config.biasSize = 0; + config.inputDefs.push_back({type, "layer_0", 128, 1280}); + config.layerConfig.add_inputs(); + testLayerGrad(config, "factorization_machine", 16, false, useGpu, false); +} + +TEST(Layer, FactorizationMachineLayer) { + for (auto useGpu : {false, true}) { + testFactorizationMachineLayer(INPUT_DATA, useGpu); + } + testFactorizationMachineLayer(INPUT_SPARSE_FLOAT_VALUE_DATA, false); +} + int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); initMain(argc, argv); diff --git a/paddle/gserver/tests/test_MKLDNN.cpp b/paddle/gserver/tests/test_MKLDNN.cpp index 
56b523f220c2a405851b89db5f63e9aa50bfaaf7..ad1dbc3ee2bfd00a94de06f1e1b2ffe64f19b417 100644 --- a/paddle/gserver/tests/test_MKLDNN.cpp +++ b/paddle/gserver/tests/test_MKLDNN.cpp @@ -272,6 +272,51 @@ TEST(MKLDNNLayer, BatchNormLayer) { testBatchNormLayer({4, 16, 8, 10}); } +struct testLRNDesc { + int bs, ic, ih, iw; + float scale, pow; + int localSize; +}; + +void getMKLDNNLRNConfig(TestConfig& cfg, const testLRNDesc& pm) { + cfg.layerConfig.set_type("mkldnn_lrn"); + cfg.layerConfig.set_active_type("relu"); + size_t layerSize = pm.ic * pm.ih * pm.iw; + cfg.inputDefs.push_back({INPUT_DATA, "layer_0", layerSize, 0}); + LayerInputConfig* input = cfg.layerConfig.add_inputs(); + NormConfig* norm = input->mutable_norm_conf(); + norm->set_channels(pm.ic); + norm->set_size(pm.localSize); + norm->set_scale(pm.scale); + norm->set_pow(pm.pow); + norm->set_blocked(0); + norm->set_img_size(pm.iw); + norm->set_img_size_y(pm.ih); + norm->set_output_x(norm->img_size()); + norm->set_output_y(norm->img_size_y()); + cfg.layerConfig.set_size(layerSize); + cfg.biasSize = 0; +} + +void testLRNLayer(const testLRNDesc& pm) { + TestConfig dnnConfig; + getMKLDNNLRNConfig(dnnConfig, pm); + // mkldnn_lrn <==> norm with cmrnorm-projection type + TestConfig refConfig = dnnConfig; + refConfig.layerConfig.set_type("norm"); + LayerInputConfig* input = refConfig.layerConfig.mutable_inputs(0); + NormConfig* norm = input->mutable_norm_conf(); + norm->set_norm_type("cmrnorm-projection"); + norm->set_scale(norm->scale() / norm->size()); + RUN_MKLDNN_TEST(dnnConfig, refConfig, pm) +} + +TEST(MKLDNNLayer, LRNLayer) { + testLRNLayer({4, 10, 12, 12, 0.001f, 0.75f, 5}); + testLRNLayer({2, 32, 6, 6, 0.001f, 0.75f, 5}); + testLRNLayer({4, 16, 8, 10, 0.01f, 0.5f, 5}); +} + struct testImageDesc { int bs, ic, ih, iw; }; diff --git a/paddle/math/Allocator.h b/paddle/math/Allocator.h index 94ef561f066a127496e2849a419835e175c526d7..17563bf5e1649361b83b896bf864b922296a5487 100644 --- a/paddle/math/Allocator.h +++ b/paddle/math/Allocator.h @@ -48,7 +48,7 @@ public: */ virtual void* alloc(size_t size) { void* ptr; -#ifdef PADDLE_USE_MKLDNN +#ifdef PADDLE_WITH_MKLDNN // refer to https://github.com/01org/mkl-dnn/blob/master/include/mkldnn.hpp // memory alignment CHECK_EQ(posix_memalign(&ptr, 4096ul, size), 0); diff --git a/paddle/math/CMakeLists.txt b/paddle/math/CMakeLists.txt index 86bb270a4372841b3e6f4676e222d2190549c153..922fb5172273da24f9c48786961a6d850b1ed7c5 100644 --- a/paddle/math/CMakeLists.txt +++ b/paddle/math/CMakeLists.txt @@ -26,8 +26,6 @@ else() endif() if(MOBILE_INFERENCE) - list(REMOVE_ITEM MATH_SOURCES - ${CMAKE_CURRENT_SOURCE_DIR}/SIMDFunctions.cpp) # Remove sparse list(REMOVE_ITEM MATH_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/CpuSparseMatrix.h diff --git a/paddle/math/CpuSparseMatrix.cpp b/paddle/math/CpuSparseMatrix.cpp index bf62229c03bb1d6e2bdf86d8c56a8157938fb832..dc6979cf5a5229fb09866189f28217889d58c2d0 100644 --- a/paddle/math/CpuSparseMatrix.cpp +++ b/paddle/math/CpuSparseMatrix.cpp @@ -260,6 +260,35 @@ void CpuSparseMatrix::printOneRow(std::ostream& os, size_t idx) const { os << ";"; } +void CpuSparseMatrix::rowScale(size_t cCol, CpuSparseMatrix& b, Matrix& c) { + CHECK(getFormat() != SPARSE_CSC) << "Not supported"; + CHECK_EQ(height_, b.getHeight()); + CHECK_EQ(width_, b.getWidth()); + real* A = getValue(); + real* B = b.getValue(); + if (b.getValueType() == FLOAT_VALUE) { + for (size_t i = 0; i < height_; i++) { + size_t start = getRowStartIdx(i); + size_t end = getRowStartIdx(i + 1); + CHECK_EQ(start, 
b.getRowStartIdx(i)); + CHECK_EQ(end, b.getRowStartIdx(i + 1)); + for (size_t j = start; j < end; j++) { + A[j] = B[j] * c.getElement(i, cCol); + } + } + } else if (b.getValueType() == NO_VALUE) { + for (size_t i = 0; i < height_; i++) { + size_t start = getRowStartIdx(i); + size_t end = getRowStartIdx(i + 1); + CHECK_EQ(start, b.getRowStartIdx(i)); + CHECK_EQ(end, b.getRowStartIdx(i + 1)); + for (size_t j = start; j < end; j++) { + A[j] = c.getElement(i, cCol); + } + } + } +} + void CpuSparseMatrix::randomizeUniform() { CHECK_LE(elementCnt_, height_ * width_); if (valueType_ == FLOAT_VALUE) { diff --git a/paddle/math/CpuSparseMatrix.h b/paddle/math/CpuSparseMatrix.h index aad1348353d558abca72ed0fa5cf943237e3ac78..522b436a2a69179d3f4f17c919d5ba024102db7b 100644 --- a/paddle/math/CpuSparseMatrix.h +++ b/paddle/math/CpuSparseMatrix.h @@ -239,6 +239,15 @@ public: const unsigned int* cols, const real* values); + /** + * @brief this_row = b_row * c_row[cCol] + * + * @param[in] cCol the column of matrix c used to scale each row of b + * @param[in] b CpuSparseMatrix + * @param[in] c Matrix + */ + void rowScale(size_t cCol, CpuSparseMatrix& b, Matrix& c); + void randomizeUniform(); void copyFrom(const GpuSparseMatrix& src, hl_stream_t stream); diff --git a/paddle/math/MathFunctions.cpp b/paddle/math/MathFunctions.cpp index ba86eacbb5d53ee43a60d2cd1dd922333a5d48f0..28ab54b450c96b4bdefdf36813595766162b1434 100644 --- a/paddle/math/MathFunctions.cpp +++ b/paddle/math/MathFunctions.cpp @@ -206,7 +206,7 @@ double dotProduct(const int n, const double* x, const double* y) { } #endif -#if defined(PADDLE_USE_MKLML) +#if defined(PADDLE_WITH_MKLML) template <> void vExp<float>(const int n, const float* a, float* r) { diff --git a/paddle/math/MathFunctions.h b/paddle/math/MathFunctions.h index f6e77029bdd75a602f88b688ca810f47ba4ee615..29fe36e3a4bd5e5d372480950a03142822262d41 100644 --- a/paddle/math/MathFunctions.h +++ b/paddle/math/MathFunctions.h @@ -15,7 +15,7 @@ limitations under the License. */ #ifndef MATHFUNCTIONS_H_ #define MATHFUNCTIONS_H_ -#ifdef PADDLE_USE_MKLML +#ifdef PADDLE_WITH_MKLML #include <mkl_cblas.h> #include <mkl_lapacke.h> #include <mkl_vml_functions.h> diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index 88e9180690606c92cf46c5b295d80f14e5d64567..1ec4336cabbc7d3073b7638b7484bf61e83a2dc5 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -28,6 +28,7 @@ limitations under the License.
*/ #include "hl_top_k.h" #include "paddle/utils/Logging.h" +#include "NEONFunctions.h" #include "paddle/function/GemmFunctor.h" #include "paddle/utils/ThreadLocal.h" @@ -1130,7 +1131,8 @@ void GpuMatrix::avgPoolForward(Matrix& inputMat, size_t outputH, size_t outputW, size_t paddingH, - size_t paddingW) { + size_t paddingW, + bool excludeMode) { CHECK(inputMat.useGpu_ == true) << "Matrix type are not equal"; real* inputData = inputMat.getData(); @@ -1153,7 +1155,8 @@ void GpuMatrix::avgPoolForward(Matrix& inputMat, paddingH, paddingW, data_, - getStride()); + getStride(), + excludeMode); } void GpuMatrix::avgPoolBackward(Matrix& outGrad, @@ -1168,7 +1171,8 @@ void GpuMatrix::avgPoolBackward(Matrix& outGrad, real scaleTargets, real scaleOutput, size_t paddingH, - size_t paddingW) { + size_t paddingW, + bool excludeMode) { CHECK(outGrad.useGpu_ == true) << "Matrix type are not equal"; real* outDiff = outGrad.getData(); @@ -1194,7 +1198,8 @@ void GpuMatrix::avgPoolBackward(Matrix& outGrad, scaleTargets, scaleOutput, data_, - outGrad.getStride()); + outGrad.getStride(), + excludeMode); } void GpuMatrix::maxPool3DForward(Matrix& inputMat, @@ -2136,7 +2141,8 @@ void CpuMatrix::avgPoolForward(Matrix& input, size_t outputH, size_t outputW, size_t paddingH, - size_t paddingW) { + size_t paddingW, + bool excludeMode) { // The main loop size_t num = input.getHeight(); size_t inLength = imgSizeH * imgSizeW; @@ -2165,7 +2171,8 @@ void CpuMatrix::avgPoolForward(Matrix& input, tgtData[ph * outputW + pw] += inData[h * imgSizeW + w]; } } - int poolSize = (hend - hstart) * (wend - wstart); + int poolSize = + excludeMode ? (hend - hstart) * (wend - wstart) : sizeY * sizeX; CHECK(poolSize); tgtData[ph * outputW + pw] /= poolSize; } @@ -2189,7 +2196,8 @@ void CpuMatrix::avgPoolBackward(Matrix& input, real scaleTargets, real scaleOutput, size_t paddingH, - size_t paddingW) { + size_t paddingW, + bool excludeMode) { size_t num = input.getHeight(); size_t channels = input.getWidth() / outputH / outputW; size_t inLength = imgSizeH * imgSizeW; @@ -2211,7 +2219,8 @@ void CpuMatrix::avgPoolBackward(Matrix& input, int wstart = pw * strideW - paddingW; int wend = std::min(wstart + sizeX, imgSizeW); wstart = std::max(wstart, 0); - int poolSize = (hend - hstart) * (wend - wstart); + int poolSize = + excludeMode ? (hend - hstart) * (wend - wstart) : sizeY * sizeX; CHECK(poolSize); for (int h = hstart; h < hend; ++h) { @@ -4157,16 +4166,36 @@ void CpuMatrix::print(std::ostream& os) const { void CpuMatrix::paramReluForward(Matrix& data, Matrix& W) { real* input = data.getData(); real* w = W.getData(); + real* output = data_; size_t numElements = data.getWidth(); size_t numSamples = data.getHeight(); size_t paraSize = W.getHeight() * W.getWidth(); CHECK(!(numElements % paraSize)); // this check from ParameterReluLayer::init + size_t partial_sum = numElements / paraSize; + if (paraSize == numElements) { + for (size_t n = 0; n < numSamples * numElements; ++n) { + output[n] = input[n] > 0 ? input[n] : input[n] * w[n % numElements]; + } + return; + } + +#if defined(__ARM_NEON__) || defined(__ARM_NEON) + for (size_t n = 0; n < numSamples; ++n) { + for (size_t i = 0; i < paraSize; i++) { + neon::prelu( + input + i * partial_sum, w[i], output + i * partial_sum, partial_sum); + } + input = input + numElements; + output = output + numElements; + } +#else for (size_t n = 0, k = 0; n < numSamples; ++n) { for (size_t i = 0; i < numElements; ++i, ++k) { - data_[k] = input[k] > 0 ? 
input[k] : input[k] * w[i / partial_sum]; + output[k] = input[k] > 0 ? input[k] : input[k] * w[i / partial_sum]; } } +#endif } void CpuMatrix::paramReluBackwardW(Matrix& oGrad, Matrix& data) { diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h index e273f1123690e31984c97185c5a8bc5e7b92c38c..c8e690e6421668bdade4e50a61882c915b2ddc7c 100644 --- a/paddle/math/Matrix.h +++ b/paddle/math/Matrix.h @@ -911,7 +911,8 @@ public: size_t outputH, size_t outputW, size_t paddingH, - size_t paddingW) { + size_t paddingW, + bool excludeMode = true) { LOG(FATAL) << "Not implemeted"; } @@ -927,9 +928,11 @@ public: real scaleTargets, real scaleOutput, size_t paddingH, - size_t paddingW) { + size_t paddingW, + bool excludeMode = true) { LOG(FATAL) << "Not implemeted"; } + /** * Pooling 3D forward operation, pick out the largest element * in the sizeX of value @@ -1458,7 +1461,8 @@ public: size_t outputH, size_t outputW, size_t paddingH, - size_t paddingW); + size_t paddingW, + bool excludeMode = true); void avgPoolBackward(Matrix& input, size_t imgSizeH, @@ -1472,7 +1476,8 @@ public: real scaleTargets, real scaleOutput, size_t paddingH, - size_t paddingW); + size_t paddingW, + bool excludeMode = true); void maxPool3DForward(Matrix& inputMat, Matrix& maxPoolIdx, @@ -1730,7 +1735,8 @@ public: size_t outputH, size_t outputW, size_t paddingH, - size_t paddingW); + size_t paddingW, + bool excludeMode = true); void avgPoolBackward(Matrix& input, size_t imgSizeH, @@ -1744,7 +1750,8 @@ public: real scaleTargets, real scaleOutput, size_t paddingH, - size_t paddingW); + size_t paddingW, + bool excludeMode = true); void maxPool3DForward(Matrix& inputMat, Matrix& maxPoolIdx, diff --git a/paddle/math/NEONFunctions.cpp b/paddle/math/NEONFunctions.cpp index 3bf47901f1069ac228fa1b877e29848d8cc130e8..0f8314942290a71dd327437b8a6da2d64fe48444 100644 --- a/paddle/math/NEONFunctions.cpp +++ b/paddle/math/NEONFunctions.cpp @@ -49,6 +49,46 @@ void relu(const float* a, float* b, int len) { } } +// b[i] = a[i] > 0.0f ? a[i] : a[i] * w +void prelu(const float* a, float w, float* b, int len) { + int offset = len % 16; + float32x4_t ma0, ma1, ma2, ma3; + + float32x4_t zero = vdupq_n_f32(0.f); + float32x4_t vw = vdupq_n_f32(w); + + for (int k = 0; k < len / 16; k++, a += 16, b += 16) { + ma0 = vld1q_f32(a); + ma1 = vld1q_f32(a + 4); + ma2 = vld1q_f32(a + 8); + ma3 = vld1q_f32(a + 12); + + uint32x4_t flag0 = vcgtq_f32(ma0, zero); + uint32x4_t flag1 = vcgtq_f32(ma1, zero); + uint32x4_t flag2 = vcgtq_f32(ma2, zero); + uint32x4_t flag3 = vcgtq_f32(ma3, zero); + + float32x4_t mul0 = vmulq_f32(ma0, vw); + float32x4_t mul1 = vmulq_f32(ma1, vw); + float32x4_t mul2 = vmulq_f32(ma2, vw); + float32x4_t mul3 = vmulq_f32(ma3, vw); + + ma0 = vbslq_f32(flag0, ma0, mul0); + ma1 = vbslq_f32(flag1, ma1, mul1); + ma2 = vbslq_f32(flag2, ma2, mul2); + ma3 = vbslq_f32(flag3, ma3, mul3); + + vst1q_f32(b, ma0); + vst1q_f32(b + 4, ma1); + vst1q_f32(b + 8, ma2); + vst1q_f32(b + 12, ma3); + } + + for (int i = 0; i < offset; i++) { + b[i] = a[i] > 0.0f ? 
a[i] : a[i] * w; + } +} + } // namespace neon } // namespace paddle diff --git a/paddle/math/NEONFunctions.h b/paddle/math/NEONFunctions.h index 69085e333547a31a341fbfde247f1e30adb957ee..d67b2f47a85a963949d23415e4f6881658203bb7 100644 --- a/paddle/math/NEONFunctions.h +++ b/paddle/math/NEONFunctions.h @@ -18,6 +18,7 @@ namespace paddle { namespace neon { void relu(const float* a, float* b, int len); +void prelu(const float* a, float w, float* b, int len); } // namespace neon } // namespace paddle diff --git a/paddle/math/SIMDFunctions.h b/paddle/math/SIMDFunctions.h index 439f11b79d134d7054f45f2d0a70fc5a6fde6c13..76909720f6aef0eea7cdf0dfe618237403d52c99 100644 --- a/paddle/math/SIMDFunctions.h +++ b/paddle/math/SIMDFunctions.h @@ -116,9 +116,11 @@ inline bool vec_check(size_t len) { } namespace internal { +#ifdef __SSE3__ void addToImpl(float* a, const float* b, size_t len); void batchAddToImpl(float* a, const float* b[], int batch, size_t len); void colMaxImpl(float* result, const float* data, int dim, int numSamples); +#endif #ifdef __AVX__ void decayL1AvxImpl(float* dst, float* src, float lambda, size_t len); void decayL1AvxImpl( diff --git a/paddle/math/float16.h b/paddle/math/float16.h new file mode 100644 index 0000000000000000000000000000000000000000..76ad3a01239e409caeefc36a3d562ed5e388dc92 --- /dev/null +++ b/paddle/math/float16.h @@ -0,0 +1,739 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include <stdint.h> + +#ifdef PADDLE_WITH_CUDA +#include <cuda.h> +#endif // PADDLE_WITH_CUDA + +#include "unsupported/Eigen/CXX11/Tensor" + +#include "paddle/platform/hostdevice.h" + +#ifdef __GNUC__ +#define PADDLE_GNUC_VER (__GNUC__ * 10 + __GNUC_MINOR__) +#else +#define PADDLE_GNUC_VER 0 +#endif // __GNUC__ + +#ifdef __clang__ +#define PADDLE_CLANG_VER (__clang_major__ * 10 + __clang_minor__) +#else +#define PADDLE_CLANG_VER 0 +#endif // __clang__ + +#if defined(__CUDACC__) && CUDA_VERSION >= 7050 +#define PADDLE_CUDA_FP16 +#include <cuda_fp16.h> +#endif + +#if defined(__arm__) || defined(__aarch64__) +#define PADDLE_ARM +#endif + +#if defined(__ARM_NEON) || defined(__ARM_NEON__) +#define PADDLE_NEON +#include <arm_neon.h> +#endif + +#if defined(PADDLE_NEON) && defined(PADDLE_ARM_FP16) && \ + (PADDLE_GNUC_VER >= 62 || PADDLE_CLANG_VER >= 37) +#define PADDLE_WITH_NATIVE_FP16 +#endif + +#ifndef PADDLE_ARM +#include <immintrin.h> +#endif // PADDLE_ARM + +#define PADDLE_ALIGN(x) __attribute__((aligned(x))) + +namespace paddle { + +// Use PADDLE_ALIGN(2) to ensure that each float16 will be allocated +// and aligned at least on a 2-byte boundary, which leads to efficient +// memory access of float16 struct and also makes float16 compatible +// with CUDA half, ARM float16_t, and Eigen::half data types.
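+// +// A minimal usage sketch (illustrative, assuming this header is included): +//   float16 h(3.14f);                 // explicit construction from float +//   float f = static_cast<float>(h);  // explicit conversion back to float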
+struct PADDLE_ALIGN(2) float16 { +public: + uint16_t x; + + // Constructors + HOSTDEVICE inline float16() : x(0) {} + + HOSTDEVICE inline float16(const float16& h) : x(h.x) {} + +#ifdef PADDLE_CUDA_FP16 + HOSTDEVICE inline explicit float16(const half& h) { +#if CUDA_VERSION >= 9000 + x = reinterpret_cast<__half_raw*>(&h)->x; +#else + x = h.x; +#endif // CUDA_VERSION >= 9000 + } +#endif // PADDLE_CUDA_FP16 + + HOSTDEVICE inline explicit float16(const Eigen::half& h) : x(h.x) {} + +#ifdef PADDLE_WITH_NATIVE_FP16 + // __fp16 is a native half precision data type for arm cpu, + // float16_t is an alias for __fp16 + HOSTDEVICE inline explicit float16(const float16_t& h) { + x = *reinterpret_cast<const uint16_t*>(&h); + } +#endif + + HOSTDEVICE inline explicit float16(float val) { +#if defined(PADDLE_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 + half tmp = __float2half(val); + x = *reinterpret_cast<uint16_t*>(&tmp); + +#elif defined(PADDLE_WITH_NATIVE_FP16) + float32x4_t tmp = vld1q_dup_f32(&val); + float16_t res = vget_lane_f16(vcvt_f16_f32(tmp), 0); + x = *reinterpret_cast<uint16_t*>(&res); + +#elif defined(__F16C__) + x = _cvtss_sh(val, 0); + +#else + // Conversion routine adapted from + // http://stackoverflow.com/questions/1659440/32-bit-to-16-bit-floating-point-conversion + Bits v, s; + v.f = val; + uint32_t sign = v.si & sigN; + v.si ^= sign; + sign >>= shiftSign; // logical shift + s.si = mulN; + s.si = s.f * v.f; // correct subnormals + v.si ^= (s.si ^ v.si) & -(minN > v.si); + v.si ^= (infN ^ v.si) & -((infN > v.si) & (v.si > maxN)); + v.si ^= (nanN ^ v.si) & -((nanN > v.si) & (v.si > infN)); + v.ui >>= shift; // logical shift + v.si ^= ((v.si - maxD) ^ v.si) & -(v.si > maxC); + v.si ^= ((v.si - minD) ^ v.si) & -(v.si > subC); + x = v.ui | sign; + +#endif + } + + HOSTDEVICE inline explicit float16(bool b) : x(b ? 0x3c00 : 0) {} + + template <class T> + HOSTDEVICE inline explicit float16(const T& val) + : x(float16(static_cast<float>(val)).x) {} + + HOSTDEVICE inline float16& operator=(const float16& rhs) { + x = rhs.x; + return *this; + } + +// Assignment operators +#ifdef PADDLE_CUDA_FP16 + HOSTDEVICE inline float16& operator=(const half& rhs) { +#if CUDA_VERSION >= 9000 + x = reinterpret_cast<__half_raw*>(&rhs)->x; +#else + x = rhs.x; +#endif + return *this; + } +#endif + + HOSTDEVICE inline float16& operator=(const Eigen::half& rhs) { + x = rhs.x; + return *this; + } + +#ifdef PADDLE_WITH_NATIVE_FP16 + HOSTDEVICE inline float16& operator=(const float16_t& rhs) { + x = *reinterpret_cast<const uint16_t*>(&rhs); + return *this; + } +#endif + + HOSTDEVICE inline float16& operator=(bool b) { + x = b ?
0x3c00 : 0; + return *this; + } + + HOSTDEVICE inline float16& operator=(int8_t val) { + x = float16(val).x; + return *this; + } + + HOSTDEVICE inline float16& operator=(uint8_t val) { + x = float16(val).x; + return *this; + } + + HOSTDEVICE inline float16& operator=(int16_t val) { + x = float16(val).x; + return *this; + } + + HOSTDEVICE inline float16& operator=(uint16_t val) { + x = float16(val).x; + return *this; + } + + HOSTDEVICE inline float16& operator=(int32_t val) { + x = float16(val).x; + return *this; + } + + HOSTDEVICE inline float16& operator=(uint32_t val) { + x = float16(val).x; + return *this; + } + + HOSTDEVICE inline float16& operator=(int64_t val) { + x = float16(val).x; + return *this; + } + + HOSTDEVICE inline float16& operator=(uint64_t val) { + x = float16(val).x; + return *this; + } + + HOSTDEVICE inline float16& operator=(float val) { + x = float16(val).x; + return *this; + } + + HOSTDEVICE inline float16& operator=(double val) { + x = float16(val).x; + return *this; + } + +// Conversion operators +#ifdef PADDLE_CUDA_FP16 + HOSTDEVICE inline explicit operator half() const { +#if CUDA_VERSION >= 9000 + __half_raw h; + h.x = x; + return half(h); +#else + half h; + h.x = x; + return h; +#endif // CUDA_VERSION >= 9000 + } +#endif // PADDLE_CUDA_FP16 + + HOSTDEVICE inline explicit operator Eigen::half() const { + Eigen::half h; + h.x = x; + return h; + } + +#ifdef PADDLE_WITH_NATIVE_FP16 + HOSTDEVICE inline explicit operator float16_t() const { + return *reinterpret_cast<const float16_t*>(this); + } +#endif + + HOSTDEVICE inline explicit operator float() const { +#if defined(PADDLE_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 + half tmp = *reinterpret_cast<const half*>(this); + return __half2float(tmp); + +#elif defined(PADDLE_WITH_NATIVE_FP16) + float16x4_t res = vld1_dup_f16(reinterpret_cast<const float16_t*>(this)); + return vgetq_lane_f32(vcvt_f32_f16(res), 0); + +#elif defined(__F16C__) + return _cvtsh_ss(this->x); + +#else + // Conversion routine adapted from + // http://stackoverflow.com/questions/1659440/32-bit-to-16-bit-floating-point-conversion + Bits v; + v.ui = this->x; + int32_t sign = v.si & sigC; + v.si ^= sign; + sign <<= shiftSign; + v.si ^= ((v.si + minD) ^ v.si) & -(v.si > subC); + v.si ^= ((v.si + maxD) ^ v.si) & -(v.si > maxC); + Bits s; + s.si = mulC; + s.f *= v.si; + int32_t mask = -(norC > v.si); + v.si <<= shift; + v.si ^= (s.si ^ v.si) & mask; + v.si |= sign; + return v.f; + +#endif + } + + HOSTDEVICE inline explicit operator bool() const { return (x & 0x7fff) != 0; } + + HOSTDEVICE inline explicit operator int8_t() const { + return static_cast<int8_t>(float(*this)); + } + + HOSTDEVICE inline explicit operator uint8_t() const { + return static_cast<uint8_t>(float(*this)); + } + + HOSTDEVICE inline explicit operator int16_t() const { + return static_cast<int16_t>(float(*this)); + } + + HOSTDEVICE inline explicit operator uint16_t() const { + return static_cast<uint16_t>(float(*this)); + } + + HOSTDEVICE inline explicit operator int32_t() const { + return static_cast<int32_t>(float(*this)); + } + + HOSTDEVICE inline explicit operator uint32_t() const { + return static_cast<uint32_t>(float(*this)); + } + + HOSTDEVICE inline explicit operator int64_t() const { + return static_cast<int64_t>(float(*this)); + } + + HOSTDEVICE inline explicit operator uint64_t() const { + return static_cast<uint64_t>(float(*this)); + } + + HOSTDEVICE inline explicit operator double() const { + return static_cast<double>(float(*this)); + } + +private: + union Bits { + float f; + int32_t si; + uint32_t ui; + }; + + static const int shift = 13; + static const int shiftSign = 16; +
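+  // Descriptive note: the constants below are float32 bit patterns used by the software conversion path: infN is float32 infinity, maxN/minN are the largest/smallest normal float16 values expressed as float32, and sigN is the float32 sign mask. +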
static const int32_t infN = 0x7F800000; + static const int32_t maxN = 0x477FE000; // max flt16 as flt32 + static const int32_t minN = 0x38800000; // min flt16 normal as flt32 + static const int32_t sigN = 0x80000000; // sign bit + + static constexpr int32_t infC = infN >> shift; + static constexpr int32_t nanN = (infC + 1) + << shift; // minimum flt16 nan as float32 + static constexpr int32_t maxC = maxN >> shift; + static constexpr int32_t minC = minN >> shift; + static constexpr int32_t sigC = sigN >> shiftSign; + + static const int32_t mulN = 0x52000000; // (1 << 23) / minN + static const int32_t mulC = 0x33800000; // minN / (1 << (23 - shift)) + static const int32_t subC = 0x003FF; // max flt32 subnormal downshifted + static const int32_t norC = 0x00400; // min flt32 normal downshifted + + static constexpr int32_t maxD = infC - maxC - 1; + static constexpr int32_t minD = minC - subC - 1; +}; + +// Arithmetic operators on GPU +// CUDA 9.0 provides built-in arithmetic operators for half while +// CUDA 7.5 and 8.0 do not. The arithmetic operators defined here are +// for users to write similar CUDA code in CUDA 7.5 and 8.0 as in +// CUDA 9.0 regarding the half data type. +#if defined(PADDLE_CUDA_FP16) && CUDA_VERSION < 9000 + +DEVICE inline half operator+(const half& a, const half& b) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 + return __hadd(a, b); +#else + float res = float(float16(a)) + float(float16(b)); + return half(float16(res)); +#endif +} + +DEVICE inline half operator-(const half& a, const half& b) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 + return __hsub(a, b); +#else + float res = float(float16(a)) - float(float16(b)); + return half(float16(res)); +#endif +} + +DEVICE inline half operator*(const half& a, const half& b) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 + return __hmul(a, b); +#else + float res = float(float16(a)) * float(float16(b)); + return half(float16(res)); +#endif +} + +DEVICE inline half operator/(const half& a, const half& b) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 + float num = __half2float(a); + float denom = __half2float(b); + return __float2half(num / denom); +#else + float res = float(float16(a)) / float(float16(b)); + return half(float16(res)); +#endif +} + +DEVICE inline half operator-(const half& a) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 + return __hneg(a); +#else + float res = -float(float16(a)); + return half(float16(res)); +#endif +} + +DEVICE inline half& operator+=(half& a, const half& b) { + a = a + b; + return a; +} + +DEVICE inline half& operator-=(half& a, const half& b) { + a = a - b; + return a; +} + +DEVICE inline half& operator*=(half& a, const half& b) { + a = a * b; + return a; +} + +DEVICE inline half& operator/=(half& a, const half& b) { + a = a / b; + return a; +} + +DEVICE inline bool operator==(const half& a, const half& b) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 + return __heq(a, b); +#else + return float(float16(a)) == float(float16(b)); +#endif +} + +DEVICE inline bool operator!=(const half& a, const half& b) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 + return __hne(a, b); +#else + return float(float16(a)) != float(float16(b)); +#endif +} + +DEVICE inline bool operator<(const half& a, const half& b) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 + return __hlt(a, b); +#else + return float(float16(a)) < float(float16(b)); +#endif +} + +DEVICE inline bool operator<=(const half& a, const half& b) { +#if defined(__CUDA_ARCH__) && 
__CUDA_ARCH__ >= 530 + return __hle(a, b); +#else + return float(float16(a)) <= float(float16(b)); +#endif +} + +DEVICE inline bool operator>(const half& a, const half& b) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 + return __hgt(a, b); +#else + return float(float16(a)) > float(float16(b)); +#endif +} + +DEVICE inline bool operator>=(const half& a, const half& b) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 + return __hge(a, b); +#else + return float(float16(a)) >= float(float16(b)); +#endif +} + +#endif // PADDLE_CUDA_FP16 + +// Arithmetic operators on ARMv8.2-A CPU +#if defined(PADDLE_WITH_NATIVE_FP16) +HOST inline float16 operator+(const float16& a, const float16& b) { + float16 res; + asm volatile( + "ld1 {v0.h}[0], [%[a_ptr]]\n" + "ld1 {v1.h}[0], [%[b_ptr]]\n" + "fadd h0, h0, h1\n" + "st1 {v0.h}[0], [%[res_ptr]]\n" + : // outputs + : // inputs + [a_ptr] "r"(&(a.x)), + [b_ptr] "r"(&(b.x)), + [res_ptr] "r"(&(res.x)) + : // clobbers + "memory", "v0", "v1"); + return res; +} + +HOST inline float16 operator-(const float16& a, const float16& b) { + float16 res; + asm volatile( + "ld1 {v0.h}[0], [%[a_ptr]]\n" + "ld1 {v1.h}[0], [%[b_ptr]]\n" + "fsub h0, h0, h1\n" + "st1 {v0.h}[0], [%[res_ptr]]\n" + : // outputs + : // inputs + [a_ptr] "r"(&(a.x)), + [b_ptr] "r"(&(b.x)), + [res_ptr] "r"(&(res.x)) + : // clobbers + "memory", "v0", "v1"); + return res; +} + +HOST inline float16 operator*(const float16& a, const float16& b) { + float16 res; + asm volatile( + "ld1 {v0.h}[0], [%[a_ptr]]\n" + "ld1 {v1.h}[0], [%[b_ptr]]\n" + "fmul h0, h0, h1\n" + "st1 {v0.h}[0], [%[res_ptr]]\n" + : // outputs + : // inputs + [a_ptr] "r"(&(a.x)), + [b_ptr] "r"(&(b.x)), + [res_ptr] "r"(&(res.x)) + : // clobbers + "memory", "v0", "v1"); + return res; +} + +HOST inline float16 operator/(const float16& a, const float16& b) { + float16 res; + asm volatile( + "ld1 {v0.h}[0], [%[a_ptr]]\n" + "ld1 {v1.h}[0], [%[b_ptr]]\n" + "fdiv h0, h0, h1\n" + "st1 {v0.h}[0], [%[res_ptr]]\n" + : // outputs + : // inputs + [a_ptr] "r"(&(a.x)), + [b_ptr] "r"(&(b.x)), + [res_ptr] "r"(&(res.x)) + : // clobbers + "memory", "v0", "v1"); + return res; +} + +HOST inline float16 operator-(const float16& a) { + float16 res; + asm volatile( + "ld1 {v0.h}[0], [%[a_ptr]]\n" + "fneg h0, h0\n" + "st1 {v0.h}[0], [%[res_ptr]]\n" + : // outputs + : // inputs + [a_ptr] "r"(&(a.x)), + [res_ptr] "r"(&(res.x)) + : // clobbers + "memory", "v0"); + return res; +} + +HOST inline float16& operator+=(float16& a, const float16& b) { + a = a + b; + return a; +} + +HOST inline float16& operator-=(float16& a, const float16& b) { + a = a - b; + return a; +} + +HOST inline float16& operator*=(float16& a, const float16& b) { + a = a * b; + return a; +} + +HOST inline float16& operator/=(float16& a, const float16& b) { + a = a / b; + return a; +} + +HOST inline bool operator==(const float16& a, const float16& b) { + uint16_t res; + asm volatile( + "ld1 {v0.h}[0], [%[a_ptr]]\n" + "ld1 {v1.h}[0], [%[b_ptr]]\n" + "fcmeq h0, h0, h1\n" + "st1 {v0.h}[0], [%[res_ptr]]\n" + : // outputs + : // inputs + [a_ptr] "r"(&(a.x)), + [b_ptr] "r"(&(b.x)), + [res_ptr] "r"(&res) + : // clobbers + "memory", "v0", "v1"); + return (res & 0xffff) != 0; +} + +HOST inline bool operator!=(const float16& a, const float16& b) { + return !(a == b); +} + +HOST inline bool operator<(const float16& a, const float16& b) { + uint16_t res; + asm volatile( + "ld1 {v1.h}[0], [%[a_ptr]]\n" + "ld1 {v0.h}[0], [%[b_ptr]]\n" + "fcmgt h0, h0, h1\n" + "st1 {v0.h}[0], [%[res_ptr]]\n" + : // outputs + 
: // inputs + [a_ptr] "r"(&(a.x)), + [b_ptr] "r"(&(b.x)), + [res_ptr] "r"(&res) + : // clobbers + "memory", "v0", "v1"); + return (res & 0xffff) != 0; +} + +HOST inline bool operator<=(const float16& a, const float16& b) { + uint16_t res; + asm volatile( + "ld1 {v1.h}[0], [%[a_ptr]]\n" + "ld1 {v0.h}[0], [%[b_ptr]]\n" + "fcmge h0, h0, h1\n" + "st1 {v0.h}[0], [%[res_ptr]]\n" + : // outputs + : // inputs + [a_ptr] "r"(&(a.x)), + [b_ptr] "r"(&(b.x)), + [res_ptr] "r"(&res) + : // clobbers + "memory", "v0", "v1"); + return (res & 0xffff) != 0; +} + +HOST inline bool operator>(const float16& a, const float16& b) { + uint16_t res; + asm volatile( + "ld1 {v0.h}[0], [%[a_ptr]]\n" + "ld1 {v1.h}[0], [%[b_ptr]]\n" + "fcmgt h0, h0, h1\n" + "st1 {v0.h}[0], [%[res_ptr]]\n" + : // outputs + : // inputs + [a_ptr] "r"(&(a.x)), + [b_ptr] "r"(&(b.x)), + [res_ptr] "r"(&res) + : // clobbers + "memory", "v0", "v1"); + return (res & 0xffff) != 0; +} + +HOST inline bool operator>=(const float16& a, const float16& b) { + uint16_t res; + asm volatile( + "ld1 {v0.h}[0], [%[a_ptr]]\n" + "ld1 {v1.h}[0], [%[b_ptr]]\n" + "fcmge h0, h0, h1\n" + "st1 {v0.h}[0], [%[res_ptr]]\n" + : // outputs + : // inputs + [a_ptr] "r"(&(a.x)), + [b_ptr] "r"(&(b.x)), + [res_ptr] "r"(&res) + : // clobbers + "memory", "v0", "v1"); + return (res & 0xffff) != 0; +} + +// Arithmetic operators, software emulated on other CPU +#else +HOSTDEVICE inline float16 operator+(const float16& a, const float16& b) { + return float16(float(a) + float(b)); +} + +HOSTDEVICE inline float16 operator-(const float16& a, const float16& b) { + return float16(float(a) - float(b)); +} + +HOSTDEVICE inline float16 operator*(const float16& a, const float16& b) { + return float16(float(a) * float(b)); +} + +HOSTDEVICE inline float16 operator/(const float16& a, const float16& b) { + return float16(float(a) / float(b)); +} + +HOSTDEVICE inline float16 operator-(const float16& a) { + float16 res; + res.x = a.x ^ 0x8000; + return res; +} + +HOSTDEVICE inline float16& operator+=(float16& a, const float16& b) { + a = float16(float(a) + float(b)); + return a; +} + +HOSTDEVICE inline float16& operator-=(float16& a, const float16& b) { + a = float16(float(a) - float(b)); + return a; +} + +HOSTDEVICE inline float16& operator*=(float16& a, const float16& b) { + a = float16(float(a) * float(b)); + return a; +} + +HOSTDEVICE inline float16& operator/=(float16& a, const float16& b) { + a = float16(float(a) / float(b)); + return a; +} + +HOSTDEVICE inline bool operator==(const float16& a, const float16& b) { + return float(a) == float(b); +} + +HOSTDEVICE inline bool operator!=(const float16& a, const float16& b) { + return float(a) != float(b); +} + +HOSTDEVICE inline bool operator<(const float16& a, const float16& b) { + return float(a) < float(b); +} + +HOSTDEVICE inline bool operator<=(const float16& a, const float16& b) { + return float(a) <= float(b); +} + +HOSTDEVICE inline bool operator>(const float16& a, const float16& b) { + return float(a) > float(b); +} + +HOSTDEVICE inline bool operator>=(const float16& a, const float16& b) { + return float(a) >= float(b); +} +#endif +} // namespace paddle diff --git a/paddle/math/tests/CMakeLists.txt b/paddle/math/tests/CMakeLists.txt index d8b7f9e3fc74040189ade83049e4a1c3348e08de..dcd2a34583417993a4bf2976f7a3bc5a10d496ac 100644 --- a/paddle/math/tests/CMakeLists.txt +++ b/paddle/math/tests/CMakeLists.txt @@ -22,6 +22,7 @@ if(WITH_GPU) link_paddle_test(test_Tensor) CUDA_ADD_EXECUTABLE(test_lazyAssign test_lazyAssign.cu) 
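(Editor's note, not part of the patch: on CPUs with neither ARMv8.2 fp16 nor x86 F16C support, the operators above emulate half arithmetic by widening both operands to float, computing in single precision, and narrowing the result back, so every result is rounded to roughly 10 mantissa bits. A standalone sketch of that widen/compute/narrow pattern, with a hypothetical `RoundToHalfPrecision` helper that only handles normal values:)

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Crude stand-in for float16 rounding: keep only 10 mantissa bits of a float,
// rounding the dropped bits to nearest-even (normal values only). This is NOT
// the float16 class from the patch, just a way to see what "emulate in float,
// then narrow" does to precision.
float RoundToHalfPrecision(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  const uint32_t drop = 13;                 // 23 - 10 mantissa bits
  const uint32_t mask = (1u << drop) - 1u;
  const uint32_t halfway = 1u << (drop - 1);
  uint32_t rem = bits & mask;
  bits &= ~mask;
  if (rem > halfway || (rem == halfway && ((bits >> drop) & 1u))) {
    bits += 1u << drop;                     // round to nearest, ties to even
  }
  float out;
  std::memcpy(&out, &bits, sizeof(out));
  return out;
}

// The software fallback pattern used above: widen to float, compute, narrow back.
float EmulatedHalfAdd(float a, float b) {
  return RoundToHalfPrecision(RoundToHalfPrecision(a) + RoundToHalfPrecision(b));
}

int main() {
  float sum = EmulatedHalfAdd(0.33333f, 0.66667f);
  std::printf("half-emulated 0.33333 + 0.66667 = %.6f\n", sum);  // ~1.0, off by <1e-3
  std::printf("error vs exact: %g\n", std::fabs(sum - 1.0f));
  return 0;
}
```

(This rounding loss is why the CPU unit tests below check most arithmetic results with `EXPECT_NEAR` and a 1e-3 tolerance rather than exact equality.)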
link_paddle_test(test_lazyAssign) + nv_test(test_float16_gpu SRCS test_float16.cu) else() compile_cu_as_cpp(test_Tensor.cu) add_unittest(test_Tensor test_Tensor.cu) @@ -33,3 +34,4 @@ add_simple_unittest(test_FPException) add_simple_unittest(test_GpuProfiler) add_simple_unittest(test_BaseMatrix) add_simple_unittest(test_Matrix) +add_simple_unittest(test_float16) diff --git a/paddle/math/tests/test_float16.cpp b/paddle/math/tests/test_float16.cpp new file mode 100644 index 0000000000000000000000000000000000000000..74cc55aa3792f5e9f86b4f56f28dad97f35996a0 --- /dev/null +++ b/paddle/math/tests/test_float16.cpp @@ -0,0 +1,119 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/math/float16.h" + +#include + +namespace paddle { + +TEST(float16, conversion_cpu) { + // Explicit conversion from Eigen::half + EXPECT_EQ(float16(Eigen::half(1.0f)).x, 0x3c00); + EXPECT_EQ(float16(Eigen::half(0.5f)).x, 0x3800); + EXPECT_EQ(float16(Eigen::half(0.33333f)).x, 0x3555); + EXPECT_EQ(float16(Eigen::half(0.0f)).x, 0x0000); + EXPECT_EQ(float16(Eigen::half(-0.0f)).x, 0x8000); + EXPECT_EQ(float16(Eigen::half(65504.0f)).x, 0x7bff); + EXPECT_EQ(float16(Eigen::half(65536.0f)).x, 0x7c00); + + // Conversion from float + EXPECT_EQ(float16(1.0f).x, 0x3c00); + EXPECT_EQ(float16(0.5f).x, 0x3800); + EXPECT_EQ(float16(0.33333f).x, 0x3555); + EXPECT_EQ(float16(0.0f).x, 0x0000); + EXPECT_EQ(float16(-0.0f).x, 0x8000); + EXPECT_EQ(float16(65504.0f).x, 0x7bff); + EXPECT_EQ(float16(65536.0f).x, 0x7c00); + + // Conversion from double + EXPECT_EQ(float16(1.0).x, 0x3c00); + EXPECT_EQ(float16(0.5).x, 0x3800); + EXPECT_EQ(float16(0.33333).x, 0x3555); + EXPECT_EQ(float16(0.0).x, 0x0000); + EXPECT_EQ(float16(-0.0).x, 0x8000); + EXPECT_EQ(float16(65504.0).x, 0x7bff); + EXPECT_EQ(float16(65536.0).x, 0x7c00); + + // Conversion from int + EXPECT_EQ(float16(-1).x, 0xbc00); + EXPECT_EQ(float16(0).x, 0x0000); + EXPECT_EQ(float16(1).x, 0x3c00); + EXPECT_EQ(float16(2).x, 0x4000); + EXPECT_EQ(float16(3).x, 0x4200); + + // Conversion from bool + EXPECT_EQ(float16(true).x, 0x3c00); + EXPECT_EQ(float16(false).x, 0x0000); + + // Default constructor + float16 v_def; + EXPECT_EQ(v_def.x, 0x0000); + + // Assignment operator + float16 v_assign; + v_assign = v_def; + EXPECT_EQ(v_assign.x, 0x0000); + v_assign = Eigen::half(1.0f); + EXPECT_EQ(v_assign.x, 0x3c00); + v_assign = 0.5f; + EXPECT_EQ(v_assign.x, 0x3800); + v_assign = 0.33333; + EXPECT_EQ(v_assign.x, 0x3555); + v_assign = -1; + EXPECT_EQ(v_assign.x, 0xbc00); + v_assign = true; + EXPECT_EQ(v_assign.x, 0x3c00); + + // Conversion operator + EXPECT_EQ(Eigen::half(float16(1.0f)).x, 0x3c00); + EXPECT_EQ(float(float16(0.5f)), 0.5f); + EXPECT_NEAR(double(float16(0.33333)), 0.33333, 0.0001); + EXPECT_EQ(int(float16(-1)), -1); + EXPECT_EQ(bool(float16(true)), true); +} + +TEST(float16, arithmetic_cpu) { + EXPECT_EQ(float(float16(1) + float16(1)), 2); + EXPECT_EQ(float(float16(5) + float16(-5)), 0); + EXPECT_NEAR(float(float16(0.33333f) + 
float16(0.66667f)), 1.0f, 0.001); + EXPECT_EQ(float(float16(3) - float16(5)), -2); + EXPECT_NEAR(float(float16(0.66667f) - float16(0.33333f)), 0.33334f, 0.001); + EXPECT_NEAR(float(float16(3.3f) * float16(2.0f)), 6.6f, 0.01); + EXPECT_NEAR(float(float16(-2.1f) * float16(-3.0f)), 6.3f, 0.01); + EXPECT_NEAR(float(float16(2.0f) / float16(3.0f)), 0.66667f, 0.001); + EXPECT_EQ(float(float16(1.0f) / float16(2.0f)), 0.5f); + EXPECT_EQ(float(-float16(512.0f)), -512.0f); + EXPECT_EQ(float(-float16(-512.0f)), 512.0f); +} + +TEST(float16, comparison_cpu) { + EXPECT_TRUE(float16(1.0f) == float16(1.0f)); + EXPECT_FALSE(float16(-1.0f) == float16(-0.5f)); + EXPECT_TRUE(float16(1.0f) != float16(0.5f)); + EXPECT_FALSE(float16(-1.0f) != float16(-1.0f)); + EXPECT_TRUE(float16(1.0f) < float16(2.0f)); + EXPECT_FALSE(float16(-1.0f) < float16(-1.0f)); + EXPECT_TRUE(float16(1.0f) <= float16(1.0f)); + EXPECT_TRUE(float16(2.0f) > float16(1.0f)); + EXPECT_FALSE(float16(-2.0f) > float16(-2.0f)); + EXPECT_TRUE(float16(2.0f) >= float16(2.0f)); + + EXPECT_TRUE(float16(0.0f) == float16(-0.0f)); + EXPECT_TRUE(float16(0.0f) <= float16(-0.0f)); + EXPECT_TRUE(float16(0.0f) >= float16(-0.0f)); + EXPECT_FALSE(float16(0.0f) < float16(-0.0f)); + EXPECT_FALSE(float16(-0.0f) < float16(0.0f)); + EXPECT_FALSE(float16(0.0f) > float16(-0.0f)); + EXPECT_FALSE(float16(-0.0f) > float16(0.0f)); +} + +} // namespace paddle diff --git a/paddle/math/tests/test_float16.cu b/paddle/math/tests/test_float16.cu new file mode 100644 index 0000000000000000000000000000000000000000..4b520feaaf552302a969d8caee8aa28cc143304b --- /dev/null +++ b/paddle/math/tests/test_float16.cu @@ -0,0 +1,213 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/math/float16.h" + +#include + +#include "paddle/utils/Logging.h" + +#define ARITHMETIC_KERNEL(op_type, sign) \ + __global__ void op_type(const half* in1, const half* in2, half* out) { \ + out[0] = in1[0] sign in2[0]; \ + } + +#define COMPOUND_KERNEL(op_type, sign) \ + __global__ void op_type(half* in1, const half* in2) { in1[0] sign in2[0]; } + +#define COMPARISON_KERNEL(op_type, sign) \ + __global__ void op_type(const half* in1, const half* in2, bool* out) { \ + out[0] = in1[0] sign in2[0]; \ + } + +#define ARITHMETIC_KERNEL_LAUNCH(op_type) \ + void Test##op_type(float v_in1, float v_in2, float v_out) { \ + LOG(INFO) << "Test " << #op_type << " on GPU!"; \ + half *in1, *in2, *out; \ + half *d_in1, *d_in2, *d_out; \ + int size = sizeof(half); \ + cudaMalloc((void**)&d_in1, size); \ + cudaMalloc((void**)&d_in2, size); \ + cudaMalloc((void**)&d_out, size); \ + in1 = (half*)malloc(size); \ + in2 = (half*)malloc(size); \ + out = (half*)malloc(size); \ + in1[0] = half(float16(v_in1)); \ + in2[0] = half(float16(v_in2)); \ + cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); \ + cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice); \ + op_type<<<1, 1>>>(d_in1, d_in2, d_out); \ + cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost); \ + EXPECT_EQ(float(float16(out[0])), v_out); \ + free(in1); \ + free(in2); \ + free(out); \ + cudaFree(d_in1); \ + cudaFree(d_in2); \ + cudaFree(d_out); \ + } + +#define COMPOUND_KERNEL_LAUNCH(op_type) \ + void Test##op_type(float v_in1, float v_in2, float v_out) { \ + LOG(INFO) << "Test " << #op_type << " on GPU!"; \ + half *in1, *in2; \ + half *d_in1, *d_in2; \ + int size = sizeof(half); \ + cudaMalloc((void**)&d_in1, size); \ + cudaMalloc((void**)&d_in2, size); \ + in1 = (half*)malloc(size); \ + in2 = (half*)malloc(size); \ + in1[0] = half(float16(v_in1)); \ + in2[0] = half(float16(v_in2)); \ + cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); \ + cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice); \ + op_type<<<1, 1>>>(d_in1, d_in2); \ + cudaMemcpy(in1, d_in1, size, cudaMemcpyDeviceToHost); \ + EXPECT_EQ(float(float16(in1[0])), v_out); \ + free(in1); \ + free(in2); \ + cudaFree(d_in1); \ + cudaFree(d_in2); \ + } + +#define COMPARISON_KERNEL_LAUNCH(op_type) \ + void Test##op_type(float v_in1, float v_in2, bool v_out) { \ + LOG(INFO) << "Test " << #op_type << " on GPU!"; \ + half *in1, *in2; \ + half *d_in1, *d_in2; \ + bool *out, *d_out; \ + int size = sizeof(half); \ + cudaMalloc((void**)&d_in1, size); \ + cudaMalloc((void**)&d_in2, size); \ + cudaMalloc((void**)&d_out, 1); \ + in1 = (half*)malloc(size); \ + in2 = (half*)malloc(size); \ + out = (bool*)malloc(1); \ + in1[0] = half(float16(v_in1)); \ + in2[0] = half(float16(v_in2)); \ + cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); \ + cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice); \ + op_type<<<1, 1>>>(d_in1, d_in2, d_out); \ + cudaMemcpy(out, d_out, 1, cudaMemcpyDeviceToHost); \ + EXPECT_EQ(out[0], v_out); \ + free(in1); \ + free(in2); \ + free(out); \ + cudaFree(d_in1); \ + cudaFree(d_in2); \ + cudaFree(d_out); \ + } + +#ifdef PADDLE_CUDA_FP16 +namespace paddle { + +#if CUDA_VERSION < 9000 +ARITHMETIC_KERNEL(Add, +) +ARITHMETIC_KERNEL(Sub, -) +ARITHMETIC_KERNEL(Mul, *) +ARITHMETIC_KERNEL(Div, /) + +ARITHMETIC_KERNEL_LAUNCH(Add) +ARITHMETIC_KERNEL_LAUNCH(Sub) +ARITHMETIC_KERNEL_LAUNCH(Mul) +ARITHMETIC_KERNEL_LAUNCH(Div) + +// Negative sign kernel +__global__ void Neg(half* in) { in[0] = -in[0]; } + +void TestNeg(float v_in, float v_out) { + LOG(INFO) << "Test Neg on 
GPU!"; + half *in, *d_in; + int size = sizeof(half); + cudaMalloc((void**)&d_in, size); + in = (half*)malloc(size); + in[0] = half(float16(v_in)); + cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice); + Neg<<<1, 1>>>(d_in); + cudaMemcpy(in, d_in, size, cudaMemcpyDeviceToHost); + EXPECT_EQ(float(float16(in[0])), v_out); + free(in); + cudaFree(d_in); +} + +COMPOUND_KERNEL(AddAssign, +=) +COMPOUND_KERNEL(SubAssign, -=) +COMPOUND_KERNEL(MulAssign, *=) +COMPOUND_KERNEL(DivAssign, /=) + +COMPOUND_KERNEL_LAUNCH(AddAssign) +COMPOUND_KERNEL_LAUNCH(SubAssign) +COMPOUND_KERNEL_LAUNCH(MulAssign) +COMPOUND_KERNEL_LAUNCH(DivAssign) + +COMPARISON_KERNEL(Equal, ==) +COMPARISON_KERNEL(NotEqual, !=) +COMPARISON_KERNEL(Less, <) +COMPARISON_KERNEL(LessEqual, <=) +COMPARISON_KERNEL(Greater, >) +COMPARISON_KERNEL(GreaterEqual, >=) + +COMPARISON_KERNEL_LAUNCH(Equal) +COMPARISON_KERNEL_LAUNCH(NotEqual) +COMPARISON_KERNEL_LAUNCH(Less) +COMPARISON_KERNEL_LAUNCH(LessEqual) +COMPARISON_KERNEL_LAUNCH(Greater) +COMPARISON_KERNEL_LAUNCH(GreaterEqual) + +TEST(float16, arithmetic_on_gpu) { + TestAdd(1, 2, 3); + TestSub(2, 1, 1); + TestMul(2, 3, 6); + TestDiv(6, 2, 3); + TestNeg(1, -1); +} + +TEST(float16, compound_on_gpu) { + TestAddAssign(1, 2, 3); + TestSubAssign(2, 1, 1); + TestMulAssign(2, 3, 6); + TestDivAssign(6, 2, 3); +} + +TEST(float16, comparision_on_gpu) { + TestEqual(1, 1, true); + TestEqual(1, 2, false); + TestNotEqual(2, 3, true); + TestNotEqual(2, 2, false); + TestLess(3, 4, true); + TestLess(3, 3, false); + TestLessEqual(3, 3, true); + TestLessEqual(3, 2, false); + TestGreater(4, 3, true); + TestGreater(4, 4, false); + TestGreaterEqual(4, 4, true); + TestGreaterEqual(4, 5, false); +} +#endif // CUDA_VERSION + +TEST(float16, conversion_on_gpu) { + // Explicit conversion to and from cuda half + EXPECT_EQ(float16(half(float16(1.0f))).x, 0x3c00); + EXPECT_EQ(float16(half(float16(0.5f))).x, 0x3800); + EXPECT_EQ(float16(half(float16(0.33333f))).x, 0x3555); + EXPECT_EQ(float16(half(float16(0.0f))).x, 0x0000); + EXPECT_EQ(float16(half(float16(-0.0f))).x, 0x8000); + EXPECT_EQ(float16(half(float16(65504.0f))).x, 0x7bff); + EXPECT_EQ(float16(half(float16(65536.0f))).x, 0x7c00); + + // Assignment operator + float16 v_assign; + v_assign = half(float16(1.0f)); + EXPECT_EQ(v_assign.x, 0x3c00); +} + +} // namespace paddle +#endif // PADDLE_CUDA_FP16 diff --git a/paddle/math/tests/test_matrixCompare.cpp b/paddle/math/tests/test_matrixCompare.cpp index 7e5a1db44a5302e3b4e5d2768755824666e880ba..afb8d9d599b15a0b6d19b7ecca5e91b623695dea 100644 --- a/paddle/math/tests/test_matrixCompare.cpp +++ b/paddle/math/tests/test_matrixCompare.cpp @@ -244,7 +244,7 @@ TEST(Matrix, unary) { LOG(WARNING) << "This version of PaddlePaddle was not built with LAPACK" << "support so we cannot test matrix inverse. 
To test " << "matrix inverse, please install LAPACKE " - << "and MKL/Openblas/ATLAS, and re-build PaddlePaddle."; + << "and MKL/Openblas, and re-build PaddlePaddle."; #endif } } diff --git a/paddle/memory/detail/system_allocator.cc b/paddle/memory/detail/system_allocator.cc index 6b4e46f56a0c9c9836c5b353ec9c554454ab0491..6a815a1b57db1d833781ca224f34e4559af9b9a5 100644 --- a/paddle/memory/detail/system_allocator.cc +++ b/paddle/memory/detail/system_allocator.cc @@ -43,7 +43,7 @@ void* CPUAllocator::Alloc(size_t& index, size_t size) { void* p; -#ifdef PADDLE_USE_MKLDNN +#ifdef PADDLE_WITH_MKLDNN // refer to https://github.com/01org/mkl-dnn/blob/master/include/mkldnn.hpp // memory alignment PADDLE_ENFORCE_EQ(posix_memalign(&p, 4096ul, size), 0); @@ -83,7 +83,7 @@ void* GPUAllocator::Alloc(size_t& index, size_t size) { paddle::platform::GpuMemoryUsage(available, capacity); // Reserve memory for page tables, etc. - size_t reserving = capacity - paddle::platform::GpuMaxAllocSize(); + size_t reserving = 0.05 * capacity + paddle::platform::GpuMinChunkSize(); size_t usable = available > reserving ? available - reserving : 0; // If remaining size no less than expected size, using general diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc index 5eb1c44eb6fc45db31ef44bf79e74b79193e08aa..9cafdfda75d0511227ef648d50a8635320a81d32 100644 --- a/paddle/memory/memory.cc +++ b/paddle/memory/memory.cc @@ -64,35 +64,52 @@ BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) { int gpu_num = platform::GetCUDADeviceCount(); as = new BuddyAllocator*[gpu_num]; for (int gpu = 0; gpu < gpu_num; gpu++) { - platform::SetDeviceId(gpu); - as[gpu] = new BuddyAllocator(new detail::GPUAllocator, - platform::GpuMinChunkSize(), - platform::GpuMaxChunkSize()); + as[gpu] = nullptr; } + } + platform::SetDeviceId(gpu_id); + if (!as[gpu_id]) { + as[gpu_id] = new BuddyAllocator(new detail::GPUAllocator, + platform::GpuMinChunkSize(), + platform::GpuMaxChunkSize()); VLOG(10) << "\n\nNOTE: each GPU device use " << FLAGS_fraction_of_gpu_memory_to_use * 100 << "% of GPU memory.\n" - << "You can set environment variable '" - << platform::kEnvFractionGpuMemoryToUse + << "You can set GFlags environment variable '" + << "FLAGS_fraction_of_gpu_memory_to_use" << "' to change the fraction of GPU usage.\n\n"; } - platform::SetDeviceId(gpu_id); return as[gpu_id]; } template <> -void* Alloc(platform::GPUPlace place, size_t size) { - return GetGPUBuddyAllocator(place.device)->Alloc(size); +size_t Used(platform::GPUPlace place) { + return GetGPUBuddyAllocator(place.device)->Used(); } template <> -void Free(platform::GPUPlace place, void* p) { - GetGPUBuddyAllocator(place.device)->Free(p); +void* Alloc(platform::GPUPlace place, size_t size) { + auto* buddy_allocator = GetGPUBuddyAllocator(place.device); + auto* ptr = buddy_allocator->Alloc(size); + if (ptr == nullptr) { + int cur_dev = platform::GetCurrentDeviceId(); + platform::SetDeviceId(place.device); + size_t avail, total; + platform::GpuMemoryUsage(avail, total); + LOG(WARNING) << "Cannot allocate " << size << " bytes in GPU " + << place.device << ", available " << avail << " bytes"; + LOG(WARNING) << "total " << total; + LOG(WARNING) << "GpuMinChunkSize " << platform::GpuMinChunkSize(); + LOG(WARNING) << "GpuMaxChunkSize " << platform::GpuMaxChunkSize(); + LOG(WARNING) << "GPU memory used: " << Used(place); + platform::SetDeviceId(cur_dev); + } + return ptr; } template <> -size_t Used(platform::GPUPlace place) { - return GetGPUBuddyAllocator(place.device)->Used(); +void 
Free(platform::GPUPlace place, void* p) { + GetGPUBuddyAllocator(place.device)->Free(p); } #endif diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index 495d4a5662401a34f7daffb85441390cfdbe77e3..d79f19e670dce605b50dab883576cbf6fb2db3ac 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -138,7 +138,7 @@ function(op_library TARGET) if ("${TARGET}" STREQUAL "nccl_op") set(pybind_flag 1) # It's enough to just adding one operator to pybind - file(APPEND ${pybind_file} "USE_GPU_ONLY_OP(ncclAllReduce);\n") + file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(ncclAllReduce);\n") endif() # reduce_op contains several operators @@ -191,6 +191,7 @@ set(DEPS_OPS sum_op pool_op maxout_op + unpool_op pool_with_index_op conv_op conv_transpose_op @@ -207,7 +208,27 @@ set(DEPS_OPS adagrad_op sgd_op hierarchical_sigmoid_op) + save_op + load_op + send_op + recv_op) +if(WITH_DISTRIBUTE) +add_subdirectory(detail) +op_library(send_op SRCS send_op.cc DEPS sendrecvop_grpc grpc++_unsecure grpc_unsecure gpr cares zlib_target protobuf) +set_source_files_properties( + send_op.cc + PROPERTIES + COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor") + +op_library(recv_op SRCS recv_op.cc DEPS sendrecvop_grpc grpc++_unsecure grpc_unsecure gpr cares zlib_target protobuf) +set_source_files_properties( + recv_op.cc + PROPERTIES + COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor") + +cc_test(test_send_recv SRCS send_recv_op_test.cc DEPS send_op recv_op sum_op executor) +endif() op_library(cond_op SRCS cond_op.cc DEPS framework_proto tensor operator net_op) op_library(cross_entropy_op DEPS cross_entropy) @@ -220,6 +241,7 @@ op_library(adagrad_op DEPS selected_rows_functor) op_library(conv_op DEPS vol2col) op_library(pool_op DEPS pooling) op_library(maxout_op DEPS maxouting) +op_library(unpool_op DEPS unpooling) op_library(pool_with_index_op DEPS pooling) op_library(lod_rank_table_op SRCS lod_rank_table_op.cc DEPS lod_rank_table) op_library(lod_tensor_to_array_op SRCS lod_tensor_to_array_op.cc DEPS lod_rank_table_op) @@ -237,6 +259,10 @@ op_library(conv_transpose_op DEPS vol2col) op_library(gru_op DEPS sequence2batch gru_compute) op_library(recurrent_op SRCS recurrent_op.cc DEPS executor) +# FIXME(typhoonzero): save/load depends lodtensor serialization functions +op_library(save_op DEPS lod_tensor) +op_library(load_op DEPS lod_tensor) + list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS}) foreach(src ${GENERAL_OPS}) op_library(${src}) @@ -244,6 +270,8 @@ endforeach() set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library") + + cc_test(gather_test SRCS gather_test.cc DEPS tensor) cc_test(net_op_test SRCS net_op_test.cc DEPS net_op) cc_test(scatter_test SRCS scatter_test.cc DEPS tensor) diff --git a/paddle/operators/accuracy_op.cc b/paddle/operators/accuracy_op.cc index 2785a8c6fb62527db4d203788be88ebead068a19..76da21c4726a1245241c1cf61860f9c8b62ea452 100644 --- a/paddle/operators/accuracy_op.cc +++ b/paddle/operators/accuracy_op.cc @@ -57,7 +57,7 @@ class AccuracyOp : public framework::OperatorWithKernel { const framework::ExecutionContext &ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("Out")->type()), - ctx.device_context()); + ctx.GetPlace()); } }; diff --git a/paddle/operators/accuracy_op.cu b/paddle/operators/accuracy_op.cu index d2dcab4e548b99c6beecfaa570ac31804fd07d82..539a93530206c93a37791a9ccb2fb104af17f940 100644 --- 
a/paddle/operators/accuracy_op.cu +++ b/paddle/operators/accuracy_op.cu @@ -104,5 +104,6 @@ class AccuracyOpCUDAKernel : public framework::OpKernel { // FIXME(typhoonzero): types of T is for inference data. // label data is always int64 -REGISTER_OP_GPU_KERNEL(accuracy, paddle::operators::AccuracyOpCUDAKernel, - paddle::operators::AccuracyOpCUDAKernel); +REGISTER_OP_CUDA_KERNEL(accuracy, + paddle::operators::AccuracyOpCUDAKernel, + paddle::operators::AccuracyOpCUDAKernel); diff --git a/paddle/operators/accuracy_op.h b/paddle/operators/accuracy_op.h index d060e6edddb31ecc1a4d27836f80b8ac5fa7d36d..04104a695fac6a967ad94780e31ba3fdd2ca2eda 100644 --- a/paddle/operators/accuracy_op.h +++ b/paddle/operators/accuracy_op.h @@ -21,7 +21,7 @@ namespace operators { using Tensor = framework::Tensor; -template +template class AccuracyKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc index 154c618e8e7c4650b7f22684d3357de9c52a416c..63490f0ec9f4852a3ead574b9d52c807d8ba6d89 100644 --- a/paddle/operators/activation_op.cc +++ b/paddle/operators/activation_op.cc @@ -44,9 +44,9 @@ class SigmoidOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("X", "Input of Sigmoid operator"); AddOutput("Y", "Output of Sigmoid operator"); AddComment(R"DOC( -Sigmoid Activation Operator. +Sigmoid Activation Operator -$y = 1 / (1 + e^{-x})$ +$$y = \frac{1}{1 + e^{-x}}$$ )DOC"); } @@ -60,9 +60,9 @@ class LogSigmoidOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("X", "Input of LogSigmoid operator"); AddOutput("Y", "Output of LogSigmoid operator"); AddComment(R"DOC( -Logsigmoid Activation Operator. +Logsigmoid Activation Operator -$y = \log(1 / (1 + e^{-x}))$ +$$y = \log \frac{1}{1 + e^{-x}}$$ )DOC"); } @@ -506,6 +506,22 @@ It is recommended to use the defaults for this activation. } }; +class SwishOpMaker : public framework::OpProtoAndCheckerMaker { + public: + SwishOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "Input of Swish operator"); + AddOutput("Y", "Output of Swish operator"); + AddAttr("beta", "Constant beta of swish operator").SetDefault(1.0f); + AddComment(R"DOC( +Swish Activation Operator. 
+ +$$y = \frac{x}{1 + e^{- \beta x}}$$ + +)DOC"); + } +}; + } // namespace operators } // namespace paddle @@ -592,16 +608,20 @@ REGISTER_OP(thresholded_relu, ops::ActivationOp, ops::ThresholdedReluOpMaker, REGISTER_OP(hard_sigmoid, ops::ActivationOp, ops::HardSigmoidOpMaker, hard_sigmoid_grad, ops::ActivationOpGrad); -#define REGISTER_ACTIVATION_CPU_KERNEL(act_type, functor, grad_functor) \ - REGISTER_OP_CPU_KERNEL( \ - act_type, \ - ops::ActivationKernel>, \ - ops::ActivationKernel>); \ - REGISTER_OP_CPU_KERNEL( \ - act_type##_grad, ops::ActivationGradKernel>, \ - ops::ActivationGradKernel>, \ + ops::ActivationKernel>); \ + REGISTER_OP_CPU_KERNEL( \ + act_type##_grad, \ + ops::ActivationGradKernel>, \ + ops::ActivationGradKernel>); FOR_EACH_KERNEL_FUNCTOR(REGISTER_ACTIVATION_CPU_KERNEL); diff --git a/paddle/operators/activation_op.cu b/paddle/operators/activation_op.cu index 97737857ab25dfa92163b64a750fd7a7d9ea0ac3..856d3fc35dafe6b22c25c55dfda2dc4973072615 100644 --- a/paddle/operators/activation_op.cu +++ b/paddle/operators/activation_op.cu @@ -17,16 +17,17 @@ namespace ops = paddle::operators; -#define REGISTER_ACTIVATION_GPU_KERNEL(act_type, functor, grad_functor) \ - REGISTER_OP_GPU_KERNEL( \ - act_type, \ - ops::ActivationKernel>, \ - ops::ActivationKernel>); \ - REGISTER_OP_GPU_KERNEL( \ - act_type##_grad, ops::ActivationGradKernel>, \ - ops::ActivationGradKernel>, \ + ops::ActivationKernel>); \ + REGISTER_OP_CUDA_KERNEL( \ + act_type##_grad, \ + ops::ActivationGradKernel>, \ + ops::ActivationGradKernel>); -FOR_EACH_KERNEL_FUNCTOR(REGISTER_ACTIVATION_GPU_KERNEL); +FOR_EACH_KERNEL_FUNCTOR(REGISTER_ACTIVATION_CUDA_KERNEL); diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h index 8cd3bfbbd3f8f3210f94aef3a1586c8295730c1d..75eefca8b8c7ba8831a2f90c83718d00b83fba30 100644 --- a/paddle/operators/activation_op.h +++ b/paddle/operators/activation_op.h @@ -19,7 +19,7 @@ namespace paddle { namespace operators { -template +template class ActivationKernel : public framework::OpKernel { public: @@ -32,18 +32,19 @@ class ActivationKernel auto x = framework::EigenVector::Flatten(*X); auto y = framework::EigenVector::Flatten(*Y); - auto place = context.GetEigenDevice(); + auto* place = + context.template device_context().eigen_device(); Functor functor; auto attrs = functor.GetAttrs(); for (auto& attr : attrs) { *attr.second = context.Attr(attr.first); } - functor(place, x, y); + functor(*place, x, y); } }; -template +template class ActivationGradKernel : public framework::OpKernel { public: @@ -59,13 +60,14 @@ class ActivationGradKernel auto x = framework::EigenVector::Flatten(*X); auto y = framework::EigenVector::Flatten(*Y); auto dx = framework::EigenVector::Flatten(*dX); - auto place = context.GetEigenDevice(); + auto* place = + context.template device_context().eigen_device(); Functor functor; auto attrs = functor.GetAttrs(); for (auto& attr : attrs) { *attr.second = context.Attr(attr.first); } - functor(place, x, y, dy, dx); + functor(*place, x, y, dy, dx); } }; @@ -700,6 +702,35 @@ struct HardSigmoidGradFunctor : public BaseActivationFunctor { } }; +template +struct SwishFunctor : public BaseActivationFunctor { + float beta; + typename BaseActivationFunctor::AttrPair GetAttrs() { + return {{"beta", &beta}}; + } + + template + void operator()(Device d, X x, Y y) const { + y.device(d) = x / (static_cast(1) + (static_cast(-beta) * x).exp()); + } +}; + +template +struct SwishGradFunctor : public BaseActivationFunctor { + float beta; + typename 
BaseActivationFunctor::AttrPair GetAttrs() { + return {{"beta", &beta}}; + } + + template + void operator()(Device d, X x, Y y, dY dy, dX dx) const { + auto temp1 = static_cast(1) / + (static_cast(1) + (static_cast(-beta) * x).exp()); + auto temp2 = temp1 * (static_cast(1) - (beta * y)); + dx.device(d) = dy * ((beta * y) + temp2); + } +}; + } // namespace operators } // namespace paddle @@ -730,4 +761,5 @@ struct HardSigmoidGradFunctor : public BaseActivationFunctor { __macro(elu, ELUFunctor, ELUGradFunctor); \ __macro(hard_shrink, HardShrinkFunctor, HardShrinkGradFunctor); \ __macro(hard_sigmoid, HardSigmoidFunctor, HardSigmoidGradFunctor); \ + __macro(swish, SwishFunctor, SwishGradFunctor); \ __macro(thresholded_relu, ThresholdedReluFunctor, ThresholdedReluGradFunctor); diff --git a/paddle/operators/adadelta_op.cc b/paddle/operators/adadelta_op.cc index 16a7794d5b7bf1d56cd9f5874454c41cab43b41f..507811e7b59b9426c599570ead9b42f8d02380fd 100644 --- a/paddle/operators/adadelta_op.cc +++ b/paddle/operators/adadelta_op.cc @@ -92,12 +92,12 @@ for gradient descent. Adadelta updates are as follows: -$$avgSquaredGradOut = \rho * avgSquaredGrad + (1 - \rho) * grad * grad \break -paramUpdate = - $\sqrt{((avgSquaredUpdate + \epsilon) / - (avgSquaredGrad_out + \epsilon))}$ * grad \break -avgSquaredUpdateOut = \rho * avgSquaredUpdate + (1 - \rho) * - {(paramUpdate)}^2 \break -paramOut = param + paramUpdate$$ +$$ +avg\_squared\_grad\_out = \rho * avg\_squared\_grad + (1 - \rho) * grad * grad \\ +param\_update = - \sqrt{\frac{avg\_squared\_update + \epsilon}{avg\_squared\_grad\_out + \epsilon}} * grad \\ +avg\_squared\_update\_out = \rho * avg\_squared\_update + (1 - \rho) * {param\_update}^2 \\ +param\_out = param + param\_update +$$ )DOC"); } @@ -109,5 +109,5 @@ paramOut = param + paramUpdate$$ namespace ops = paddle::operators; REGISTER_OP_WITHOUT_GRADIENT(adadelta, ops::AdadeltaOp, ops::AdadeltaOpMaker); REGISTER_OP_CPU_KERNEL( - adadelta, ops::AdadeltaOpKernel, - ops::AdadeltaOpKernel); + adadelta, ops::AdadeltaOpKernel, + ops::AdadeltaOpKernel); diff --git a/paddle/operators/adadelta_op.cu b/paddle/operators/adadelta_op.cu index 9fb61852071f11670b8bc51321bb0881de196777..eee2d0a2f55f877bc5c87c72bca07bfd9485e517 100644 --- a/paddle/operators/adadelta_op.cu +++ b/paddle/operators/adadelta_op.cu @@ -16,6 +16,6 @@ #include "paddle/operators/adadelta_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( - adadelta, ops::AdadeltaOpKernel, - ops::AdadeltaOpKernel); +REGISTER_OP_CUDA_KERNEL( + adadelta, ops::AdadeltaOpKernel, + ops::AdadeltaOpKernel); diff --git a/paddle/operators/adadelta_op.h b/paddle/operators/adadelta_op.h index a8c5f0c8aa20ce506f5279fa696079ba64034bd5..819d0845dbdafab95d993a455013300fa71495e2 100644 --- a/paddle/operators/adadelta_op.h +++ b/paddle/operators/adadelta_op.h @@ -19,7 +19,7 @@ limitations under the License. 
*/ namespace paddle { namespace operators { -template +template class AdadeltaOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -51,7 +51,7 @@ class AdadeltaOpKernel : public framework::OpKernel { framework::EigenVector::Flatten(*avg_squared_grad_out_tensor); auto avg_squared_update_out = framework::EigenVector::Flatten(*avg_squared_update_out_tensor); - auto place = ctx.GetEigenDevice(); + auto& place = *ctx.template device_context().eigen_device(); avg_squared_grad_out.device(place) = rho * avg_squared_grad + (1 - rho) * grad.square(); diff --git a/paddle/operators/adagrad_op.cc b/paddle/operators/adagrad_op.cc index d6686e3ef3165976cf4c077a7a0f213082aa7716..5d007163161cd4bf4a9fd46eda57f7984c6a414f 100644 --- a/paddle/operators/adagrad_op.cc +++ b/paddle/operators/adagrad_op.cc @@ -80,8 +80,8 @@ Adaptive Gradient Algorithm (Adagrad). The update is done as follows: -$$momentOut = moment + grad * grad \break -paramOut = param - learningRate * grad / ($\sqrt{momentOut}$ + \epsilon) \break +$$moment\_out = moment + grad * grad \\ +param\_out = param - \frac{learning\_rate * grad}{\sqrt{moment\_out} + \epsilon} $$ The original paper(http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf) @@ -100,8 +100,8 @@ size_t FindPos(const std::vector& rows, int64_t value) { } // namespace template -struct SparseAdagradFunctor { - void operator()(const platform::DeviceContext& context, +struct SparseAdagradFunctor { + void operator()(const platform::CPUDeviceContext& context, const framework::SelectedRows& grad, const framework::Tensor& learning_rate, T epsilon, framework::Tensor* moment, framework::Tensor* param) { @@ -120,7 +120,7 @@ struct SparseAdagradFunctor { {static_cast(merge_rows.size()), grad_width}), context.GetPlace()); - math::SetConstant constant_functor; + math::SetConstant constant_functor; constant_functor(context, grad_merge->mutable_value(), 0.0); auto* grad_merge_data = grad_merge->mutable_value()->data(); @@ -144,9 +144,9 @@ struct SparseAdagradFunctor { auto gs = framework::EigenVector::Flatten(*(grad_square->mutable_value())); auto gm = framework::EigenVector::Flatten(grad_merge->value()); - gs.device(*context.GetEigenDevice()) = gm * gm; + gs.device(*context.eigen_device()) = gm * gm; - math::SelectedRowsAddToTensor functor; + math::SelectedRowsAddToTensor functor; functor(context, *grad_square, moment); // 3. 
update parameter @@ -164,13 +164,13 @@ struct SparseAdagradFunctor { } }; -template struct SparseAdagradFunctor; -template struct SparseAdagradFunctor; +template struct SparseAdagradFunctor; +template struct SparseAdagradFunctor; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_WITHOUT_GRADIENT(adagrad, ops::AdagradOp, ops::AdagradOpMaker); REGISTER_OP_CPU_KERNEL( - adagrad, ops::AdagradOpKernel, - ops::AdagradOpKernel); + adagrad, ops::AdagradOpKernel, + ops::AdagradOpKernel); diff --git a/paddle/operators/adagrad_op.cu b/paddle/operators/adagrad_op.cu index 1c870214b29dbfcabb7414317b1214d6bef369cb..585b2d92894af65b8ed15a596f0377fdcf564cfa 100644 --- a/paddle/operators/adagrad_op.cu +++ b/paddle/operators/adagrad_op.cu @@ -72,8 +72,8 @@ __global__ void SparseAdagradFunctorKernel(const T* grad, const int64_t* rows, } // namespace template -struct SparseAdagradFunctor { - void operator()(const platform::DeviceContext& context, +struct SparseAdagradFunctor { + void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& grad, const framework::Tensor& learning_rate, T epsilon, framework::Tensor* moment, framework::Tensor* param) { @@ -92,7 +92,7 @@ struct SparseAdagradFunctor { {static_cast(merge_rows.size()), grad_width}), context.GetPlace()); - math::SetConstant constant_functor; + math::SetConstant constant_functor; constant_functor(context, grad_merge->mutable_value(), 0.0); auto* grad_merge_data = grad_merge->mutable_value()->data(); @@ -119,9 +119,9 @@ struct SparseAdagradFunctor { auto gs = framework::EigenVector::Flatten(*(grad_square->mutable_value())); auto gm = framework::EigenVector::Flatten(grad_merge->value()); - gs.device(*context.GetEigenDevice()) = gm * gm; + gs.device(*context.eigen_device()) = gm * gm; - math::SelectedRowsAddToTensor functor; + math::SelectedRowsAddToTensor functor; functor(context, *grad_square, moment); // 3. update parameter @@ -139,13 +139,13 @@ struct SparseAdagradFunctor { } }; -template struct SparseAdagradFunctor; -template struct SparseAdagradFunctor; +template struct SparseAdagradFunctor; +template struct SparseAdagradFunctor; } // namespace operators } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( - adagrad, ops::AdagradOpKernel, - ops::AdagradOpKernel); +REGISTER_OP_CUDA_KERNEL( + adagrad, ops::AdagradOpKernel, + ops::AdagradOpKernel); diff --git a/paddle/operators/adagrad_op.h b/paddle/operators/adagrad_op.h index 4d4a6434c7c472d8ceb01edfc4050fbb009d6c9f..0d77dbcbacd4efb6c1900e57b5c4ea9e9b136771 100644 --- a/paddle/operators/adagrad_op.h +++ b/paddle/operators/adagrad_op.h @@ -19,15 +19,15 @@ limitations under the License. 
*/ namespace paddle { namespace operators { -template +template struct SparseAdagradFunctor { - void operator()(const platform::DeviceContext& context, + void operator()(const DeviceContext& context, const framework::SelectedRows& grad, const framework::Tensor& learning_rate, T epsilon, framework::Tensor* moment, framework::Tensor* param); }; -template +template class AdagradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -52,11 +52,11 @@ class AdagradOpKernel : public framework::OpKernel { auto param_out = framework::EigenVector::Flatten(*param_out_tensor); auto moment_out = framework::EigenVector::Flatten(*moment_out_tensor); - auto place = ctx.GetEigenDevice(); + auto* place = ctx.template device_context().eigen_device(); - moment_out.device(place) = moment + grad * grad; + moment_out.device(*place) = moment + grad * grad; Eigen::DSizes m_dsize(moment_out_tensor->numel()); - param_out.device(place) = + param_out.device(*place) = param - lr.broadcast(m_dsize) * grad / (moment_out.sqrt() + epsilon); } else if (grad_var->IsType()) { auto* param_tensor = ctx.Input("Param"); @@ -65,8 +65,9 @@ class AdagradOpKernel : public framework::OpKernel { auto* moment_tensor = ctx.Input("Moment"); PADDLE_ENFORCE_EQ(moment_tensor, moment_out_tensor); - SparseAdagradFunctor functor; - functor(ctx.device_context(), *ctx.Input("Grad"), + SparseAdagradFunctor functor; + functor(ctx.template device_context(), + *ctx.Input("Grad"), *ctx.Input("LearningRate"), epsilon, moment_out_tensor, param_out_tensor); } else { diff --git a/paddle/operators/adam_op.cc b/paddle/operators/adam_op.cc index 03faa2a7c5a486cb0d2b6f2f10d140eeb4c6c04e..cf6ef6dd53979b23de125014b8d5150d8ce4c053 100644 --- a/paddle/operators/adam_op.cc +++ b/paddle/operators/adam_op.cc @@ -112,11 +112,13 @@ adaptive estimates of lower-order moments. 
Adam updates: -$$moment_1_{out} = \beta_1 * moment_1 + (1 - \beta_1) * grad \break -moment_2_{out} = \beta_2 * moment_2 + (1 - \beta_2) * grad * grad \break -learningRate = learningRate * - $\sqrt{(1 - \beta_2_{pow})}$ / (1 - \beta_1_{pow}) \break -paramOut = param - learningRate * moment_1/ ($\sqrt{(moment_2)} + \epsilon)$$ +$$ +moment\_1\_out = \beta_1 * moment\_1 + (1 - \beta_1) * grad \\ +moment\_2_\out = \beta_2 * moment\_2 + (1 - \beta_2) * grad * grad \\ +learning\_rate = learning\_rate * + \frac{\sqrt{1 - \beta_{2\_pow}}}{1 - \beta_{1\_pow}} \\ +param\_out = param - learning\_rate * \frac{moment\_1}{\sqrt{moment\_2} + \epsilon} +$$ )DOC"); } @@ -126,6 +128,6 @@ paramOut = param - learningRate * moment_1/ ($\sqrt{(moment_2)} + \epsilon)$$ namespace ops = paddle::operators; REGISTER_OP_WITHOUT_GRADIENT(adam, ops::AdamOp, ops::AdamOpMaker); -REGISTER_OP_CPU_KERNEL(adam, - ops::AdamOpKernel, - ops::AdamOpKernel); +REGISTER_OP_CPU_KERNEL( + adam, ops::AdamOpKernel, + ops::AdamOpKernel); diff --git a/paddle/operators/adam_op.cu b/paddle/operators/adam_op.cu index 6e34f7818ce20c75692fe21776721ce200b7a147..c135b3737899a1ae92041b4759698ddc30c20e12 100644 --- a/paddle/operators/adam_op.cu +++ b/paddle/operators/adam_op.cu @@ -16,6 +16,6 @@ #include "paddle/operators/adam_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(adam, - ops::AdamOpKernel, - ops::AdamOpKernel); +REGISTER_OP_CUDA_KERNEL( + adam, ops::AdamOpKernel, + ops::AdamOpKernel); diff --git a/paddle/operators/adam_op.h b/paddle/operators/adam_op.h index 7f7fa1da1c0d8d81d1bcb18a1bf542838eddccf7..45157842a6f92348909498f83d304d53b36c7d47 100644 --- a/paddle/operators/adam_op.h +++ b/paddle/operators/adam_op.h @@ -19,7 +19,7 @@ limitations under the License. */ namespace paddle { namespace operators { -template +template class AdamOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -52,17 +52,17 @@ class AdamOpKernel : public framework::OpKernel { auto param_out = framework::EigenVector::Flatten(*param_out_tensor); auto moment1_out = framework::EigenVector::Flatten(*moment1_out_tensor); auto moment2_out = framework::EigenVector::Flatten(*moment2_out_tensor); - auto place = ctx.GetEigenDevice(); + auto* place = ctx.template device_context().eigen_device(); - moment1_out.device(place) = beta1 * moment1 + (1 - beta1) * grad; - moment2_out.device(place) = beta2 * moment2 + (1 - beta2) * grad.square(); + moment1_out.device(*place) = beta1 * moment1 + (1 - beta1) * grad; + moment2_out.device(*place) = beta2 * moment2 + (1 - beta2) * grad.square(); // All of these are tensors of 1 element auto lr_t = lr * (1 - beta2_pow).sqrt() / (1 - beta1_pow); // Eigen does not support automatic broadcast // Get dimensions of moment vector to broadcast lr_t Eigen::DSizes m_dsize(moment1_out_tensor->numel()); - param_out.device(place) = + param_out.device(*place) = param - lr_t.broadcast(m_dsize) * (moment1_out / (moment2_out.sqrt() + epsilon)); diff --git a/paddle/operators/adamax_op.cc b/paddle/operators/adamax_op.cc index d5bbc672e18f392d6a91383b919fefc4b2d8ff0e..49ce497bb710de24b198fb4b5f56ff6d277c6f52 100644 --- a/paddle/operators/adamax_op.cc +++ b/paddle/operators/adamax_op.cc @@ -107,10 +107,12 @@ Adam algorithm based on the infinity norm. 
Adamax updates: -$$momentOut = \beta_1 * moment + (1 - \beta_1) * grad \break -infNormOut = max(\beta_2 * infNorm + \epsilon, |grad|) \break -learningRate = learningRate /(1 - \beta_1_{pow}) \break -paramOut = param - learningRate * momentPut / infNormOut$$ +$$ +moment\_out = \beta_1 * moment + (1 - \beta_1) * grad \\ +inf\_norm\_out = max(\beta_2 * inf\_norm + \epsilon, |grad|) \\ +learning\_rate = \frac{learning\_rate}{1 - \beta_{1\_pow}} \\ +param\_out = param - learning\_rate * \frac{moment\_out}{inf\_norm\_out} +$$ The original paper does not have an epsilon attribute. However, it is added here for numerical stability to prevent the @@ -125,6 +127,6 @@ division by 0 error. namespace ops = paddle::operators; REGISTER_OP_WITHOUT_GRADIENT(adamax, ops::AdamaxOp, ops::AdamaxOpMaker); -REGISTER_OP_CPU_KERNEL(adamax, - ops::AdamaxOpKernel, - ops::AdamaxOpKernel); +REGISTER_OP_CPU_KERNEL( + adamax, ops::AdamaxOpKernel, + ops::AdamaxOpKernel); diff --git a/paddle/operators/adamax_op.cu b/paddle/operators/adamax_op.cu index 057ef39025aa23704457ef7bbe54934d06cdc87f..2d143905c4819dbf5f94391bdcf093971849e7a3 100644 --- a/paddle/operators/adamax_op.cu +++ b/paddle/operators/adamax_op.cu @@ -16,6 +16,6 @@ #include "paddle/operators/adamax_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(adamax, - ops::AdamaxOpKernel, - ops::AdamaxOpKernel); +REGISTER_OP_CUDA_KERNEL( + adamax, ops::AdamaxOpKernel, + ops::AdamaxOpKernel); diff --git a/paddle/operators/adamax_op.h b/paddle/operators/adamax_op.h index bf36ed78604dd88c537db51fbeb38f43d0c46173..172c179c5fabf5ca106bf11479aff2d94a4e21d2 100644 --- a/paddle/operators/adamax_op.h +++ b/paddle/operators/adamax_op.h @@ -19,7 +19,7 @@ limitations under the License. */ namespace paddle { namespace operators { -template +template class AdamaxOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -51,14 +51,14 @@ class AdamaxOpKernel : public framework::OpKernel { auto moment_out = framework::EigenVector::Flatten(*moment_out_tensor); auto inf_norm_out = framework::EigenVector::Flatten(*inf_norm_out_tensor); - auto place = ctx.GetEigenDevice(); + auto* place = ctx.template device_context().eigen_device(); - moment_out.device(place) = beta1 * moment + (1 - beta1) * grad; - inf_norm_out.device(place) = + moment_out.device(*place) = beta1 * moment + (1 - beta1) * grad; + inf_norm_out.device(*place) = grad.abs().cwiseMax((beta2 * inf_norm) + epsilon); auto lr_t = lr / (1 - beta1_pow); Eigen::DSizes m_dsize(moment_out_tensor->numel()); - param_out.device(place) = + param_out.device(*place) = param - lr_t.broadcast(m_dsize) * (moment_out / inf_norm_out); } }; diff --git a/paddle/operators/auc_op.h b/paddle/operators/auc_op.h index e5ac57b038ac32ed35bce35e477ede0cdb5da813..b80509e2a99a2a255dff2a98d950257588a21d29 100644 --- a/paddle/operators/auc_op.h +++ b/paddle/operators/auc_op.h @@ -25,7 +25,7 @@ template using EigenVector = framework::EigenVector; -template +template class AucKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { diff --git a/paddle/operators/batch_norm_op.cc b/paddle/operators/batch_norm_op.cc index f884e6efa917ce3f8554dce0e248f2b29273e3f3..94a972b7ab56f41f8b6a203b6bf0330a69f84e54 100644 --- a/paddle/operators/batch_norm_op.cc +++ b/paddle/operators/batch_norm_op.cc @@ -62,13 +62,14 @@ class BatchNormOp : public framework::OperatorWithKernel { const auto x_dims = ctx->GetInputDim("X"); const TensorFormat 
tensor_format = StringToTensorFormat(ctx->Attrs().Get("tensor_format")); + + PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5, + "Input X must have 2 to 5 dimensions."); + const int C = (tensor_format == TensorFormat::NCHW ? x_dims[1] : x_dims[x_dims.size() - 1]); - PADDLE_ENFORCE(x_dims.size() >= 3 && x_dims.size() <= 5, - "Input X must have 3 to 5 dimensions."); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1UL); PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], C); PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1UL); @@ -134,7 +135,8 @@ The required data format for this layer is one of the following: }; template -class BatchNormKernel : public framework::OpKernel { +class BatchNormKernel + : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { const float epsilon = ctx.Attr("epsilon"); @@ -146,8 +148,8 @@ class BatchNormKernel : public framework::OpKernel { const auto *x = ctx.Input("X"); const auto &x_dims = x->dims(); - PADDLE_ENFORCE(x_dims.size() >= 3 && x_dims.size() <= 5, - "The Input dim size should be between 3 and 5"); + PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5, + "The Input dim size should be between 2 and 5"); const int N = x_dims[0]; const int C = (tensor_format == TensorFormat::NCHW ? x_dims[1] @@ -317,12 +319,12 @@ class BatchNormGradOp : public framework::OperatorWithKernel { PADDLE_THROW("can't find Y@GRAD"); } return framework::OpKernelType(framework::ToDataType(t->type()), - ctx.device_context()); + ctx.GetPlace()); } }; template -class BatchNormGradKernel +class BatchNormGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { @@ -339,8 +341,8 @@ class BatchNormGradKernel // Get the size for each dimension. // NCHW [batch_size, in_channels, in_height, in_width] const auto &x_dims = x->dims(); - PADDLE_ENFORCE(x_dims.size() >= 3 && x_dims.size() <= 5, - "The Input dim size should be between 3 and 5"); + PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5, + "The Input dim size should be between 2 and 5"); const int N = x_dims[0]; const int C = (tensor_format == TensorFormat::NCHW ? x_dims[1] @@ -435,8 +437,9 @@ class BatchNormGradKernel namespace ops = paddle::operators; REGISTER_OP(batch_norm, ops::BatchNormOp, ops::BatchNormOpMaker, batch_norm_grad, ops::BatchNormGradOp); -REGISTER_OP_CPU_KERNEL(batch_norm, - ops::BatchNormKernel); +REGISTER_OP_CPU_KERNEL( + batch_norm, + ops::BatchNormKernel); REGISTER_OP_CPU_KERNEL( batch_norm_grad, - ops::BatchNormGradKernel); + ops::BatchNormGradKernel); diff --git a/paddle/operators/batch_norm_op.cu.cc b/paddle/operators/batch_norm_op.cu.cc index 726d1ea1b8d7ced93f94bb0e5bb4df9e43b0ac7b..c7adc3d80ed25d129cec41a0fd3d22fd42aba363 100644 --- a/paddle/operators/batch_norm_op.cu.cc +++ b/paddle/operators/batch_norm_op.cu.cc @@ -29,18 +29,26 @@ void ExtractNCWHD(const framework::DDim &dims, const TensorFormat &tensor_format, int *N, int *C, int *H, int *W, int *D) { *N = dims[0]; - *C = tensor_format == TensorFormat::NCHW ? dims[1] : dims[dims.size() - 1]; - *H = tensor_format == TensorFormat::NCHW ? dims[2] : dims[1]; - *W = dims.size() > 3 - ? (tensor_format == TensorFormat::NCHW ? dims[3] : dims[2]) - : 1; - *D = dims.size() > 4 - ? (tensor_format == TensorFormat::NCHW ? dims[4] : dims[3]) - : 1; + if (dims.size() == 2) { + *C = dims[1]; + *H = 1; + *W = 1; + *D = 1; + } else { + *C = tensor_format == TensorFormat::NCHW ? 
dims[1] : dims[dims.size() - 1]; + *H = tensor_format == TensorFormat::NCHW ? dims[2] : dims[1]; + *W = dims.size() > 3 + ? (tensor_format == TensorFormat::NCHW ? dims[3] : dims[2]) + : 1; + *D = dims.size() > 4 + ? (tensor_format == TensorFormat::NCHW ? dims[4] : dims[3]) + : 1; + } } template -class BatchNormKernel : public framework::OpKernel { +class BatchNormKernel + : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), @@ -56,8 +64,8 @@ class BatchNormKernel : public framework::OpKernel { // NCHW [batch_size, in_channels, in_height, in_width] const auto *x = ctx.Input("X"); const auto &x_dims = x->dims(); - PADDLE_ENFORCE(x_dims.size() >= 3 && x_dims.size() <= 5, - "The Input dim size should be between 3 and 5"); + PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5, + "The Input dim size should be between 2 and 5"); int N, C, H, W, D; ExtractNCWHD(x_dims, tensor_format, &N, &C, &H, &W, &D); @@ -114,11 +122,12 @@ class BatchNormKernel : public framework::OpKernel { saved_mean->mutable_data(ctx.GetPlace()); saved_variance->mutable_data(ctx.GetPlace()); - math::SetConstant functor; - functor(ctx.device_context(), saved_mean, 0); - functor(ctx.device_context(), saved_variance, 0); + auto &dev_ctx = ctx.template device_context(); + math::SetConstant functor; + functor(dev_ctx, saved_mean, 0); + functor(dev_ctx, saved_variance, 0); - auto handle = ctx.cuda_device_context().cudnn_handle(); + auto handle = dev_ctx.cudnn_handle(); // Now, depending on whether we are running test or not, we have two paths. if (is_test) { @@ -164,7 +173,7 @@ class BatchNormKernel : public framework::OpKernel { }; template -class BatchNormGradKernel +class BatchNormGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { @@ -180,8 +189,8 @@ class BatchNormGradKernel const auto &x_dims = x->dims(); - PADDLE_ENFORCE(x_dims.size() >= 3 && x_dims.size() <= 5, - "The Input dim size should be between 3 and 5"); + PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5, + "The Input dim size should be between 2 and 5"); int N, C, H, W, D; ExtractNCWHD(x_dims, tensor_format, &N, &C, &H, &W, &D); @@ -237,11 +246,12 @@ class BatchNormGradKernel const void *saved_mean_data = saved_mean->template data(); const void *saved_var_data = saved_var->template data(); + auto &dev_ctx = ctx.template device_context(); CUDNN_ENFORCE(platform::dynload::cudnnBatchNormalizationBackward( - ctx.cuda_device_context().cudnn_handle(), mode_, - CudnnDataType::kOne(), CudnnDataType::kZero(), - CudnnDataType::kOne(), CudnnDataType::kZero(), data_desc_, - x->template data(), data_desc_, d_y->template data(), data_desc_, + dev_ctx.cudnn_handle(), mode_, CudnnDataType::kOne(), + CudnnDataType::kZero(), CudnnDataType::kOne(), + CudnnDataType::kZero(), data_desc_, x->template data(), + data_desc_, d_y->template data(), data_desc_, d_x->template mutable_data(ctx.GetPlace()), bn_param_desc_, scale->template data(), d_scale->template mutable_data(ctx.GetPlace()), @@ -259,8 +269,9 @@ class BatchNormGradKernel } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(batch_norm, - ops::BatchNormKernel); -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( + batch_norm, + ops::BatchNormKernel); +REGISTER_OP_CUDA_KERNEL( batch_norm_grad, - ops::BatchNormGradKernel); + ops::BatchNormGradKernel); diff --git a/paddle/operators/batch_norm_op.h 
b/paddle/operators/batch_norm_op.h index 4e80134a1acf3b4d66154453dd0ed709133d1c7c..8d99b6864776e81b30e87c09028b336309cf2838 100644 --- a/paddle/operators/batch_norm_op.h +++ b/paddle/operators/batch_norm_op.h @@ -34,13 +34,13 @@ inline TensorFormat StringToTensorFormat(const std::string& str) { } } -template +template class BatchNormKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override; }; -template +template class BatchNormGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override; diff --git a/paddle/operators/bilinear_tensor_product_op.cc b/paddle/operators/bilinear_tensor_product_op.cc index c88b2c9beb4497b617078c8ac5582d2f246f43fd..217fd523667777f7d250295d2a036867dac94f04 100644 --- a/paddle/operators/bilinear_tensor_product_op.cc +++ b/paddle/operators/bilinear_tensor_product_op.cc @@ -159,9 +159,12 @@ REGISTER_OP(bilinear_tensor_product, ops::BilinearTensorProductOp, ops::BilinearTensorProductOpGrad); REGISTER_OP_CPU_KERNEL( bilinear_tensor_product, - ops::BilinearTensorProductKernel, - ops::BilinearTensorProductKernel); + ops::BilinearTensorProductKernel, + ops::BilinearTensorProductKernel); REGISTER_OP_CPU_KERNEL( bilinear_tensor_product_grad, - ops::BilinearTensorProductGradKernel, - ops::BilinearTensorProductGradKernel); + ops::BilinearTensorProductGradKernel, + ops::BilinearTensorProductGradKernel); diff --git a/paddle/operators/bilinear_tensor_product_op.cu b/paddle/operators/bilinear_tensor_product_op.cu index 858d2668d01379afe8082cd1eda32a2a5d09bd18..0f48010716f086a64c0b6a35b76e06a42430ab84 100644 --- a/paddle/operators/bilinear_tensor_product_op.cu +++ b/paddle/operators/bilinear_tensor_product_op.cu @@ -16,11 +16,15 @@ limitations under the License. 
*/ #include "paddle/operators/bilinear_tensor_product_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( bilinear_tensor_product, - ops::BilinearTensorProductKernel, - ops::BilinearTensorProductKernel); -REGISTER_OP_GPU_KERNEL( + ops::BilinearTensorProductKernel, + ops::BilinearTensorProductKernel); +REGISTER_OP_CUDA_KERNEL( bilinear_tensor_product_grad, - ops::BilinearTensorProductGradKernel, - ops::BilinearTensorProductGradKernel); + ops::BilinearTensorProductGradKernel, + ops::BilinearTensorProductGradKernel); diff --git a/paddle/operators/bilinear_tensor_product_op.h b/paddle/operators/bilinear_tensor_product_op.h index 1113a4c6f357edb4f6b14b73c6eec9c6cca24ce5..ba9a2c5ce3c024a82e864a399ad90281d8dcdb20 100644 --- a/paddle/operators/bilinear_tensor_product_op.h +++ b/paddle/operators/bilinear_tensor_product_op.h @@ -27,7 +27,7 @@ template using EigenMatrix = framework::EigenMatrix; -template +template class BilinearTensorProductKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -46,7 +46,8 @@ class BilinearTensorProductKernel : public framework::OpKernel { int out_dim = weight_dims[0]; auto x_dim = weight_dims[1]; auto y_dim = weight_dims[2]; - auto place = ctx.GetEigenDevice(); + auto& place = *ctx.template device_context().eigen_device(); + auto& dev_ctx = ctx.template device_context(); // Create the intermediate variable to caculate the result of // Input(X) multiplied by Input(Weight_i), the formula is: @@ -60,9 +61,9 @@ class BilinearTensorProductKernel : public framework::OpKernel { auto output_col_vec = output_mat.chip(i, 1); Tensor weight_mat = weight->Slice(i, i + 1).Resize(framework::make_ddim({x_dim, y_dim})); - math::gemm(ctx.device_context(), CblasNoTrans, CblasNoTrans, - batch_size, y_dim, x_dim, 1, x->data(), - weight_mat.data(), 0, left_mul.data()); + math::gemm(dev_ctx, CblasNoTrans, CblasNoTrans, + batch_size, y_dim, x_dim, 1, x->data(), + weight_mat.data(), 0, left_mul.data()); output_col_vec.device(place) = (left_mul_mat * y_mat).sum(Eigen::DSizes(1)); } @@ -74,7 +75,7 @@ class BilinearTensorProductKernel : public framework::OpKernel { } }; -template +template class BilinearTensorProductGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -96,8 +97,8 @@ class BilinearTensorProductGradKernel : public framework::OpKernel { auto x_mat = EigenMatrix::From(*x); auto y_mat = EigenMatrix::From(*y); auto d_out_mat = EigenMatrix::From(*d_out); - auto place = ctx.GetEigenDevice(); - + auto& place = *ctx.template device_context().eigen_device(); + auto& dev_ctx = ctx.template device_context(); // Create the intermediate variable to caculate the Output(Y@Grad). Tensor x_scale; x_scale.mutable_data(framework::make_ddim({batch_size, x_dim}), @@ -110,18 +111,18 @@ class BilinearTensorProductGradKernel : public framework::OpKernel { ctx.GetPlace()); auto y_scale_mat = EigenMatrix::From(y_scale); - math::SetConstant set_zero; + math::SetConstant set_zero; // Set Output(X@Grad) be zero. if (d_x) { d_x->mutable_data(ctx.GetPlace()); - set_zero(ctx.device_context(), d_x, static_cast(0)); + set_zero(dev_ctx, d_x, static_cast(0)); } // Set Output(Y@Grad) be zero. if (d_y) { d_y->mutable_data(ctx.GetPlace()); - set_zero(ctx.device_context(), d_y, static_cast(0)); + set_zero(dev_ctx, d_y, static_cast(0)); } // Caculate the Output(X@Grad) and Output(Y@Grad). 
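The change above is one instance of the device-context migration applied throughout this patch: kernels are templated on a DeviceContext type instead of a Place, and both the math functors and the Eigen device are obtained from that context. Below is a minimal sketch of the pattern, assuming the Paddle headers of this branch; the operator name my_op and the kernel MyZeroKernel are hypothetical placeholders, not part of the patch.

#include "paddle/framework/op_registry.h"
#include "paddle/operators/math/math_function.h"

namespace paddle {
namespace operators {

template <typename DeviceContext, typename T>
class MyZeroKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* out = ctx.Output<framework::Tensor>("Out");
    out->mutable_data<T>(ctx.GetPlace());
    // One typed context serves both the math functors and Eigen expressions.
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    math::SetConstant<DeviceContext, T> set_zero;
    set_zero(dev_ctx, out, static_cast<T>(0));
    // Eigen expressions now use dev_ctx.eigen_device() rather than
    // ctx.GetEigenDevice<Place>().
  }
};

}  // namespace operators
}  // namespace paddle

// Registration names the device context as well:
// REGISTER_OP_CPU_KERNEL(my_op,
//     paddle::operators::MyZeroKernel<paddle::platform::CPUDeviceContext, float>);
// REGISTER_OP_CUDA_KERNEL(my_op,
//     paddle::operators::MyZeroKernel<paddle::platform::CUDADeviceContext, float>);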
@@ -137,18 +138,18 @@ class BilinearTensorProductGradKernel : public framework::OpKernel { output_vec.reshape(Eigen::DSizes(batch_size, 1)) .broadcast(bcast_for_x) * y_mat; - math::gemm(ctx.device_context(), CblasNoTrans, CblasTrans, - batch_size, x_dim, y_dim, 1, y_scale.data(), - weight_i.data(), 1, d_x->data()); + math::gemm( + dev_ctx, CblasNoTrans, CblasTrans, batch_size, x_dim, y_dim, 1, + y_scale.data(), weight_i.data(), 1, d_x->data()); } if (d_y) { x_scale_mat.device(place) = output_vec.reshape(Eigen::DSizes(batch_size, 1)) .broadcast(bcast_for_y) * x_mat; - math::gemm(ctx.device_context(), CblasNoTrans, CblasNoTrans, - batch_size, y_dim, x_dim, 1, x_scale.data(), - weight_i.data(), 1, d_y->data()); + math::gemm( + dev_ctx, CblasNoTrans, CblasNoTrans, batch_size, y_dim, x_dim, 1, + x_scale.data(), weight_i.data(), 1, d_y->data()); } } } @@ -165,9 +166,9 @@ class BilinearTensorProductGradKernel : public framework::OpKernel { output_vec.reshape(Eigen::DSizes(batch_size, 1)) .broadcast(bcast_for_weight) * x_mat; - math::gemm(ctx.device_context(), CblasTrans, CblasNoTrans, - x_dim, y_dim, batch_size, 1, x_scale.data(), - y->data(), 0, d_weight_i.data()); + math::gemm(dev_ctx, CblasTrans, CblasNoTrans, x_dim, + y_dim, batch_size, 1, x_scale.data(), + y->data(), 0, d_weight_i.data()); } } diff --git a/paddle/operators/cast_op.cc b/paddle/operators/cast_op.cc index 3082a53ccfbe4f8666cfdfc2efed6b46ffdfede9..d641b8fc9fea81d1e364ae05de98ed7760a32648 100644 --- a/paddle/operators/cast_op.cc +++ b/paddle/operators/cast_op.cc @@ -68,10 +68,11 @@ class CastOpGradMaker : public framework::SingleGradOpDescMaker { } // namespace paddle namespace ops = paddle::operators; -using CPU = paddle::platform::CPUPlace; +using CPU = paddle::platform::CPUDeviceContext; REGISTER_OP_WITH_KERNEL(cast, ops::CastOpGradMaker, ops::CastOpInferShape, ops::CastOpProtoMaker); REGISTER_OP_CPU_KERNEL(cast, ops::CastOpKernel, ops::CastOpKernel, ops::CastOpKernel, - ops::CastOpKernel); + ops::CastOpKernel, + ops::CastOpKernel); diff --git a/paddle/operators/cast_op.cu b/paddle/operators/cast_op.cu index fb75ddbabfefd8d00420d8c96f958abcb8fdce62..91e6fb391c637cc0d70a401d8d834451059ef6df 100644 --- a/paddle/operators/cast_op.cu +++ b/paddle/operators/cast_op.cu @@ -16,7 +16,8 @@ template using CastOpKernel = - paddle::operators::CastOpKernel; + paddle::operators::CastOpKernel; -REGISTER_OP_GPU_KERNEL(cast, CastOpKernel, CastOpKernel, - CastOpKernel, CastOpKernel); +REGISTER_OP_CUDA_KERNEL(cast, CastOpKernel, CastOpKernel, + CastOpKernel, CastOpKernel, + CastOpKernel); diff --git a/paddle/operators/cast_op.h b/paddle/operators/cast_op.h index 850dc8e3498351e54d41fcd2b6596c6fe668df14..a6773f13a8deb443b022c6045f1b3b976b3e6607 100644 --- a/paddle/operators/cast_op.h +++ b/paddle/operators/cast_op.h @@ -27,13 +27,13 @@ struct CastOpTransformFunctor { HOSTDEVICE OutT operator()(InT in) const { return static_cast(in); } }; -template +template struct CastOpFunctor { const framework::Tensor* in_; framework::Tensor* out_; - const platform::DeviceContext& ctx_; + const DeviceContext& ctx_; CastOpFunctor(const framework::Tensor* in, framework::Tensor* out, - const platform::DeviceContext& ctx) + const DeviceContext& ctx) : in_(in), out_(out), ctx_(ctx) {} template @@ -42,13 +42,13 @@ struct CastOpFunctor { auto numel = in_->numel(); auto* in_end = in_begin + numel; auto* out_begin = out_->mutable_data(ctx_.GetPlace()); - platform::Transform trans; + platform::Transform trans; trans(ctx_, in_begin, in_end, out_begin, 
CastOpTransformFunctor()); } }; -template +template class CastOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -56,7 +56,8 @@ class CastOpKernel : public framework::OpKernel { auto* out = context.Output("Out"); framework::VisitDataType( static_cast(context.Attr("out_dtype")), - CastOpFunctor(in, out, context.device_context())); + CastOpFunctor( + in, out, context.template device_context())); } }; diff --git a/paddle/operators/chunk_eval_op.cc b/paddle/operators/chunk_eval_op.cc index 309660b01fe7052de2f9300acdf00779d0228221..94127ab33e51d5529b63b5e3696032ef8adcf03e 100644 --- a/paddle/operators/chunk_eval_op.cc +++ b/paddle/operators/chunk_eval_op.cc @@ -58,9 +58,10 @@ class ChunkEvalOpMaker : public framework::OpProtoAndCheckerMaker { framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Inference", - "(Tensor, default: Tensor). Predictions from the network."); + "(Tensor, default: Tensor). " + "Predictions from the network."); AddInput("Label", - "(Tensor, default: Tensor). The true tag sequences."); + "(Tensor, default: Tensor). The true tag sequences."); AddOutput("Precision", "(float). The evaluated precision (called positive predictive " "value) of chunks on the given mini-batch."); @@ -84,7 +85,7 @@ class ChunkEvalOpMaker : public framework::OpProtoAndCheckerMaker { .SetDefault(std::vector{}); AddComment(R"DOC( For some basics of chunking, please refer to -‘Chunking with Support Vector Mechines ’. +‘Chunking with Support Vector Machines ’. CheckEvalOp computes the precision, recall, and F1-score of chunk detection, @@ -97,7 +98,7 @@ Here is a NER example of labeling for these tagging schemes: IOE: I-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O E-LOC IOBES: B-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O S-LOC -There are three chunk types(named entity types) including PER(person), ORG(orgnazation) +There are three chunk types(named entity types) including PER(person), ORG(organization) and LOC(LOCATION), and we can see that the labels have the form -. 
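As a concrete (hypothetical) illustration of the metrics this operator reports: if a mini-batch contains 8 labeled chunks, the network predicts 10 chunks, and 6 predictions match a labeled chunk exactly (same type and same boundaries), then precision = 6/10 = 0.6, recall = 6/8 = 0.75, and F1 = 2 * 0.6 * 0.75 / (0.6 + 0.75) ≈ 0.667.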
Since the calculations actually use label ids rather than labels, extra attention diff --git a/paddle/operators/chunk_eval_op.h b/paddle/operators/chunk_eval_op.h index 81aa07817b673b2ff85a35a51cc43742b7ad7fed..9cd758a8253914515437b480e17a94d5d6b21fd2 100644 --- a/paddle/operators/chunk_eval_op.h +++ b/paddle/operators/chunk_eval_op.h @@ -23,7 +23,7 @@ namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; -template +template class ChunkEvalKernel : public framework::OpKernel { public: struct Segment { @@ -35,10 +35,10 @@ class ChunkEvalKernel : public framework::OpKernel { } }; - void GetSegments(const int* label, int length, std::vector& segments, - int num_chunk_types, int num_tag_types, int other_chunk_type, - int tag_begin, int tag_inside, int tag_end, - int tag_single) const { + void GetSegments(const int64_t* label, int length, + std::vector& segments, int num_chunk_types, + int num_tag_types, int other_chunk_type, int tag_begin, + int tag_inside, int tag_end, int tag_single) const { segments.clear(); segments.reserve(length); int chunk_start = 0; @@ -152,8 +152,8 @@ class ChunkEvalKernel : public framework::OpKernel { auto* recall = context.Output("Recall"); auto* f1 = context.Output("F1-Score"); - const int* inference_data = inference->data(); - const int* label_data = label->data(); + const int64_t* inference_data = inference->data(); + const int64_t* label_data = label->data(); T* precision_data = precision->mutable_data(context.GetPlace()); T* racall_data = recall->mutable_data(context.GetPlace()); T* f1_data = f1->mutable_data(context.GetPlace()); @@ -179,7 +179,7 @@ class ChunkEvalKernel : public framework::OpKernel { ((*precision_data) + (*racall_data)); } - void EvalOneSeq(const int* output, const int* label, int length, + void EvalOneSeq(const int64_t* output, const int64_t* label, int length, std::vector& output_segments, std::vector& label_segments, int64_t& num_output_segments, int64_t& num_label_segments, diff --git a/paddle/operators/clip_by_norm_op.cc b/paddle/operators/clip_by_norm_op.cc index d9fc532e39500fa397be80396b075e866bad9362..0b7975a63f7d364bf9b0ce529e2dd72d9f3cd2e9 100644 --- a/paddle/operators/clip_by_norm_op.cc +++ b/paddle/operators/clip_by_norm_op.cc @@ -47,15 +47,19 @@ class ClipByNormOpMaker : public framework::OpProtoAndCheckerMaker { "(Tensor) The output of clip_by_norm op with shape as input(X)"); AddAttr("max_norm", "(float) The maximum norm value."); AddComment(R"DOC( -ClipByNorm operator limits the L2 norm of the input 'X' within 'max_norm'. -If the L2 norm of 'X' is less than or equal to 'max_norm', 'Out' will be -the same as 'X'. If the L2 norm of 'X' is greater than 'max_norm', 'X' will -be linearly scaled to make the L2 norm of 'Out' equal to 'max_norm', as -shown in the following formula: +ClipByNorm Operator. -'Out' = 'max_norm' * 'X' / norm('X'), +This operator limits the L2 norm of the input $X$ within $max\_norm$. +If the L2 norm of $X$ is less than or equal to $max\_norm$, $Out$ will be +the same as $X$. If the L2 norm of $X$ is greater than $max\_norm$, $X$ will +be linearly scaled to make the L2 norm of $Out$ equal to $max\_norm$, as +shown in the following formula: -where norm('X') represents the L2 norm of 'X'. +$$ +Out = \frac{max\_norm * X}{norm(X)}, +$$ + +where $norm(X)$ represents the L2 norm of $X$. 
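For example, with illustrative numbers: if $X = [3, 4]$ then $norm(X) = 5$; with $max\_norm = 2$ the whole tensor is scaled by $2 / 5$, giving $Out = [1.2, 1.6]$, while with $max\_norm = 10 \ge norm(X)$ the output is simply $Out = X$.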
)DOC"); } }; @@ -67,4 +71,5 @@ namespace ops = paddle::operators; REGISTER_OP_WITHOUT_GRADIENT(clip_by_norm, ops::ClipByNormOp, ops::ClipByNormOpMaker); REGISTER_OP_CPU_KERNEL( - clip_by_norm, ops::ClipByNormKernel); + clip_by_norm, + ops::ClipByNormKernel); diff --git a/paddle/operators/clip_by_norm_op.cu b/paddle/operators/clip_by_norm_op.cu index 2593a24ebbf56ecd286a726e527d2414247576e8..acd75438230715420470b81f7a5e5953bd8b8abe 100644 --- a/paddle/operators/clip_by_norm_op.cu +++ b/paddle/operators/clip_by_norm_op.cu @@ -15,5 +15,6 @@ #include "paddle/operators/clip_by_norm_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( - clip_by_norm, ops::ClipByNormKernel); +REGISTER_OP_CUDA_KERNEL( + clip_by_norm, + ops::ClipByNormKernel); diff --git a/paddle/operators/clip_by_norm_op.h b/paddle/operators/clip_by_norm_op.h index b26476cae9b5b2fa290bc9186b9a64c48ba703d6..d8db1566b0e8c9c351d3b6d6aca1d22d991fe76e 100644 --- a/paddle/operators/clip_by_norm_op.h +++ b/paddle/operators/clip_by_norm_op.h @@ -26,7 +26,7 @@ template using EigenVector = framework::EigenVector; -template +template class ClipByNormKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -38,7 +38,8 @@ class ClipByNormKernel : public framework::OpKernel { auto x = EigenVector::Flatten(*input); auto out = EigenVector::Flatten(*output); auto x_norm = x.square().sum().sqrt(); - auto place = context.GetEigenDevice(); + auto& place = + *context.template device_context().eigen_device(); auto temp = (x_norm <= max_norm).template cast().eval(); auto scaling = temp + (static_cast(1) - temp) * max_norm / x_norm; diff --git a/paddle/operators/clip_op.cc b/paddle/operators/clip_op.cc index 3e9066ceb2a4a4dc19fdf5ef02bb7fadaab4bfff..6092212de4635e2ada81f8383a0ccf64a8116158 100644 --- a/paddle/operators/clip_op.cc +++ b/paddle/operators/clip_op.cc @@ -52,7 +52,11 @@ class ClipOpMaker : public framework::OpProtoAndCheckerMaker { Clip Operator. The clip operator limits the value of given input within an interval. The interval is -specified with arguments 'min' and 'max'. 
+specified with arguments 'min' and 'max': + +$$ +Out = \min(\max(X, min), max) +$$ )DOC"); } @@ -79,7 +83,7 @@ class ClipOpGrad : public framework::OperatorWithKernel { namespace ops = paddle::operators; REGISTER_OP(clip, ops::ClipOp, ops::ClipOpMaker, clip_grad, ops::ClipOpGrad); -REGISTER_OP_CPU_KERNEL(clip, - ops::ClipKernel); -REGISTER_OP_CPU_KERNEL(clip_grad, - ops::ClipGradKernel); +REGISTER_OP_CPU_KERNEL( + clip, ops::ClipKernel); +REGISTER_OP_CPU_KERNEL( + clip_grad, ops::ClipGradKernel); diff --git a/paddle/operators/clip_op.cu b/paddle/operators/clip_op.cu index ca9701298fdae3fabe234925edaf9e4d775cc66e..bb7dcc671a46758a6bd09e8035cf8d3f5e464b3b 100644 --- a/paddle/operators/clip_op.cu +++ b/paddle/operators/clip_op.cu @@ -15,7 +15,7 @@ #include "paddle/operators/clip_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(clip, - ops::ClipKernel); -REGISTER_OP_GPU_KERNEL(clip_grad, - ops::ClipGradKernel); +REGISTER_OP_CUDA_KERNEL( + clip, ops::ClipKernel); +REGISTER_OP_CUDA_KERNEL( + clip_grad, ops::ClipGradKernel); diff --git a/paddle/operators/clip_op.h b/paddle/operators/clip_op.h index ac702e9935201ba5263a80ebeb1ab22fa0bd1340..0c40797410950641d3d509a4980d5c4bdbd75cff 100644 --- a/paddle/operators/clip_op.h +++ b/paddle/operators/clip_op.h @@ -55,7 +55,7 @@ class ClipGradFunctor { T max_; }; -template +template class ClipKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -66,13 +66,13 @@ class ClipKernel : public framework::OpKernel { T* out_data = out->mutable_data(context.GetPlace()); const T* x_data = x->data(); int64_t numel = x->numel(); - Transform trans; - trans(context.device_context(), x_data, x_data + numel, out_data, - ClipFunctor(min, max)); + Transform trans; + trans(context.template device_context(), x_data, + x_data + numel, out_data, ClipFunctor(min, max)); } }; -template +template class ClipGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -86,9 +86,9 @@ class ClipGradKernel : public framework::OpKernel { auto* d_x_data = d_x->mutable_data(context.GetPlace()); const T* d_out_data = d_out->data(); const T* x_data = x->data(); - Transform trans; - trans(context.device_context(), d_out_data, d_out_data + numel, x_data, - d_x_data, ClipGradFunctor(min, max)); + Transform trans; + trans(context.template device_context(), d_out_data, + d_out_data + numel, x_data, d_x_data, ClipGradFunctor(min, max)); } } }; diff --git a/paddle/operators/compare_op.cu b/paddle/operators/compare_op.cu index 6ac8c124b9b2e7c808808ecc8802a2e5aeaa5b5d..596a878bcf9f5b81c87c3bd419a2f46c0a450635 100644 --- a/paddle/operators/compare_op.cu +++ b/paddle/operators/compare_op.cu @@ -14,10 +14,10 @@ #include "paddle/operators/compare_op.h" -REGISTER_LOGICAL_KERNEL(less_than, GPU, paddle::operators::LessThanFunctor); -REGISTER_LOGICAL_KERNEL(less_equal, GPU, paddle::operators::LessEqualFunctor); -REGISTER_LOGICAL_KERNEL(greater_than, GPU, +REGISTER_LOGICAL_KERNEL(less_than, CUDA, paddle::operators::LessThanFunctor); +REGISTER_LOGICAL_KERNEL(less_equal, CUDA, paddle::operators::LessEqualFunctor); +REGISTER_LOGICAL_KERNEL(greater_than, CUDA, paddle::operators::GreaterThanFunctor); -REGISTER_LOGICAL_KERNEL(greater_equal, GPU, +REGISTER_LOGICAL_KERNEL(greater_equal, CUDA, paddle::operators::GreaterEqualFunctor); -REGISTER_LOGICAL_KERNEL(equal, GPU, paddle::operators::EqualFunctor); +REGISTER_LOGICAL_KERNEL(equal, CUDA, 
paddle::operators::EqualFunctor); diff --git a/paddle/operators/compare_op.h b/paddle/operators/compare_op.h index afdf3ab3e098b4e7f4c996471617d97ec49264b1..a56536e155531ac9ea3d17256210bdb9f4212181 100644 --- a/paddle/operators/compare_op.h +++ b/paddle/operators/compare_op.h @@ -59,7 +59,7 @@ struct EqualFunctor { } }; -template +template class CompareOpKernel : public framework::OpKernel { public: @@ -69,24 +69,23 @@ class CompareOpKernel auto* y = context.Input("Y"); auto* out = context.Output("Out"); Functor binary_func; - platform::Transform trans; - trans(context.device_context(), x->data(), x->data() + x->numel(), - y->data(), out->mutable_data(context.GetPlace()), - binary_func); + platform::Transform trans; + trans(context.template device_context(), x->data(), + x->data() + x->numel(), y->data(), + out->mutable_data(context.GetPlace()), binary_func); } }; } // namespace operators } // namespace paddle -#define REGISTER_LOGICAL_KERNEL(op_type, dev, functor) \ - REGISTER_OP_##dev##_KERNEL( \ - op_type, \ - ::paddle::operators::CompareOpKernel<::paddle::platform::dev##Place, \ - functor>, \ - ::paddle::operators::CompareOpKernel<::paddle::platform::dev##Place, \ - functor>, \ - ::paddle::operators::CompareOpKernel<::paddle::platform::dev##Place, \ - functor>, \ - ::paddle::operators::CompareOpKernel<::paddle::platform::dev##Place, \ - functor>); +#define REGISTER_LOGICAL_KERNEL(op_type, dev, functor) \ + REGISTER_OP_##dev##_KERNEL( \ + op_type, ::paddle::operators::CompareOpKernel< \ + ::paddle::platform::dev##DeviceContext, functor>, \ + ::paddle::operators::CompareOpKernel< \ + ::paddle::platform::dev##DeviceContext, functor>, \ + ::paddle::operators::CompareOpKernel< \ + ::paddle::platform::dev##DeviceContext, functor>, \ + ::paddle::operators::CompareOpKernel< \ + ::paddle::platform::dev##DeviceContext, functor>); diff --git a/paddle/operators/concat_op.cc b/paddle/operators/concat_op.cc index 5f052689251bc023df635d41c1e64a660a0aa488..cf522d6921ee746d03d8082b8fc4d051f4d504e6 100644 --- a/paddle/operators/concat_op.cc +++ b/paddle/operators/concat_op.cc @@ -25,7 +25,7 @@ class ConcatOp : public framework::OperatorWithKernel { void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE_GE(ctx->Inputs("X").size(), 1UL, - "Inputs(X) of ConcatOp should be empty.") + "Inputs(X) of ConcatOp should be empty."); PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of ConcatOp should not be null."); @@ -41,14 +41,18 @@ class ConcatOp : public framework::OperatorWithKernel { for (size_t j = 0; j < in_zero_dims_size; j++) { if (j == axis) { out_dims[axis] += ins[i][j]; - continue; + } else { + PADDLE_ENFORCE_EQ(out_dims[j], ins[i][j], + "Input tensors should have the same " + "elements except the specify axis."); } - PADDLE_ENFORCE_EQ(out_dims[j], ins[i][j], - "Input tensors should have the same " - "elements except the specify axis.") } } + if (out_dims[axis] < 0) { + out_dims[axis] = -1; + } ctx->SetOutputDim("Out", out_dims); + ctx->ShareLoD("X", /*->*/ "Out"); } }; diff --git a/paddle/operators/concat_op.cu.cc b/paddle/operators/concat_op.cu.cc index ede832ddcd486729db56bba016683b33875f8837..7b46452d3d5db58799923a3dc76bb9df3471d9e7 100644 --- a/paddle/operators/concat_op.cu.cc +++ b/paddle/operators/concat_op.cu.cc @@ -14,7 +14,8 @@ limitations under the License. 
*/ #include "paddle/operators/concat_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(concat, - ops::ConcatKernel); -REGISTER_OP_GPU_KERNEL( - concat_grad, ops::ConcatGradKernel); +REGISTER_OP_CUDA_KERNEL( + concat, ops::ConcatKernel); +REGISTER_OP_CUDA_KERNEL( + concat_grad, + ops::ConcatGradKernel); diff --git a/paddle/operators/concat_op.h b/paddle/operators/concat_op.h index c113f19fb5cf806709bff845ee0f1078b34014bb..de4011585af81363368a096a5c361ff3f7aeecdb 100644 --- a/paddle/operators/concat_op.h +++ b/paddle/operators/concat_op.h @@ -21,7 +21,7 @@ limitations under the License. */ namespace paddle { namespace operators { -template +template class ConcatKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -43,7 +43,7 @@ class ConcatKernel : public framework::OpKernel { } }; -template +template class ConcatGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { diff --git a/paddle/operators/conditional_block_op.cc b/paddle/operators/conditional_block_op.cc index d5b124682d755ffb39f32c9f001a3cf113a01a2c..03c58a7eab8b2071a3a0b75ac0c665e32ef39876 100644 --- a/paddle/operators/conditional_block_op.cc +++ b/paddle/operators/conditional_block_op.cc @@ -142,9 +142,9 @@ class ConditionalBlockGradOp : public ConditionalOp { continue; } auto new_in_grad_name = cur_scope.Rename(in_grad_name); - auto assign = - framework::OpRegistry::CreateOp("assign", {{"X", {new_in_grad_name}}}, - {{"Out", {out_grad_name}}}, {}); + auto assign = framework::OpRegistry::CreateOp( + "assign", {{"X", {new_in_grad_name}}}, {{"Out", {out_grad_name}}}, + framework::AttributeMap{}); assign->Run(cur_scope, dev_ctx); cur_scope.Rename(new_in_grad_name, in_grad_name); } diff --git a/paddle/operators/conv_cudnn_op.cc b/paddle/operators/conv_cudnn_op.cc index 0dd8c13b2ad6ff206066ccb98a4c009e4c3b4fd0..008bf01885ecddd1fee76a33c43370d07a8988a2 100644 --- a/paddle/operators/conv_cudnn_op.cc +++ b/paddle/operators/conv_cudnn_op.cc @@ -57,18 +57,20 @@ REGISTER_OP(conv2d_cudnn, ops::ConvOp, ops::CudnnConv2DOpMaker, REGISTER_OP(conv3d_cudnn, ops::ConvOp, ops::CudnnConv3DOpMaker, conv3d_cudnn_grad, ops::ConvOpGrad); -REGISTER_OP_CPU_KERNEL(conv2d_cudnn, - ops::GemmConvKernel, - ops::GemmConvKernel); +REGISTER_OP_CPU_KERNEL( + conv2d_cudnn, + ops::GemmConvKernel, + ops::GemmConvKernel); REGISTER_OP_CPU_KERNEL( conv2d_cudnn_grad, - ops::GemmConvGradKernel, - ops::GemmConvGradKernel); + ops::GemmConvGradKernel, + ops::GemmConvGradKernel); -REGISTER_OP_CPU_KERNEL(conv3d_cudnn, - ops::GemmConvKernel, - ops::GemmConvKernel); +REGISTER_OP_CPU_KERNEL( + conv3d_cudnn, + ops::GemmConvKernel, + ops::GemmConvKernel); REGISTER_OP_CPU_KERNEL( conv3d_cudnn_grad, - ops::GemmConvGradKernel, - ops::GemmConvGradKernel); + ops::GemmConvGradKernel, + ops::GemmConvGradKernel); diff --git a/paddle/operators/conv_cudnn_op.cu.cc b/paddle/operators/conv_cudnn_op.cu.cc index a9763d424801cfced5fe4c4718a335a24b81cfdc..3da0a9001aafbb5b2c4b9a91c4527d9437ac38a1 100644 --- a/paddle/operators/conv_cudnn_op.cu.cc +++ b/paddle/operators/conv_cudnn_op.cu.cc @@ -28,7 +28,8 @@ using ScopedFilterDescriptor = platform::ScopedFilterDescriptor; using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor; using DataLayout = platform::DataLayout; -static constexpr size_t kCONV_CUDNN_WORKSPACE_LIMIT_BYTES = 1024 * 1024 * 1024; +static constexpr size_t kCONV_CUDNN_WORKSPACE_LIMIT_BYTES = + static_cast(1024) * 1024 * 1024; template 
class CudnnConvOpKernel : public framework::OpKernel { @@ -44,7 +45,8 @@ class CudnnConvOpKernel : public framework::OpKernel { std::vector paddings = ctx.Attr>("paddings"); std::vector dilations = ctx.Attr>("dilations"); int groups = ctx.Attr("groups"); - int user_workspace_size = ctx.Attr("workspace_size_MB"); + int64_t user_workspace_size = + static_cast(ctx.Attr("workspace_size_MB")); const T* input_data = input->data(); const T* filter_data = filter->data(); @@ -63,7 +65,7 @@ class CudnnConvOpKernel : public framework::OpKernel { cudnnConvolutionDescriptor_t cudnn_conv_desc = conv_desc.descriptor(paddings, strides, dilations); -#if CUDNN_VERSION_MIN(7, 0, 0) +#if CUDNN_VERSION_MIN(7, 0, 1) // cudnn 7 can support groups, no need to do it mannually // FIXME(typhoonzero): find a better way to disable groups // rather than setting it to 1. @@ -116,7 +118,8 @@ class CudnnConvOpKernel : public framework::OpKernel { } // ------------------- cudnn conv algorithm --------------------- cudnnConvolutionFwdAlgo_t algo; - auto handle = ctx.cuda_device_context().cudnn_handle(); + auto& dev_ctx = ctx.template device_context(); + auto handle = dev_ctx.cudnn_handle(); PADDLE_ENFORCE(platform::dynload::cudnnGetConvolutionForwardAlgorithm( handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc, @@ -163,7 +166,8 @@ class CudnnConvGradOpKernel : public framework::OpKernel { std::vector paddings = ctx.Attr>("paddings"); std::vector dilations = ctx.Attr>("dilations"); int groups = ctx.Attr("groups"); - int user_workspace_size = ctx.Attr("workspace_size_MB"); + int64_t user_workspace_size = + static_cast(ctx.Attr("workspace_size_MB")); // ------------------- cudnn descriptors --------------------- ScopedTensorDescriptor input_desc; @@ -180,7 +184,7 @@ class CudnnConvGradOpKernel : public framework::OpKernel { cudnnConvolutionDescriptor_t cudnn_conv_desc = conv_desc.descriptor(paddings, strides, dilations); -#if CUDNN_VERSION_MIN(7, 0, 0) +#if CUDNN_VERSION_MIN(7, 0, 1) // cudnn 7 can support groups, no need to do it mannually // FIXME(typhoonzero): find a better way to disable groups // rather than setting it to 1. 
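The widened integer types above guard the workspace-size arithmetic against 32-bit overflow. A small sketch of the failure mode, using a hypothetical workspace_size_MB value on a 64-bit build:

#include <cstdint>
#include <cstddef>

int main() {
  // workspace_size_MB = 4096 would overflow a plain int:
  //   4096 * 1024 * 1024 = 4294967296 > 2147483647 (INT_MAX).
  int64_t user_workspace_size = 4096;
  size_t workspace_size_limit = user_workspace_size * 1024 * 1024;  // 4 GiB, exact
  return workspace_size_limit > 0 ? 0 : 1;
}

The default 1 GiB limit gets the same treatment: writing it as static_cast<size_t>(1024) * 1024 * 1024 keeps every intermediate product in a 64-bit unsigned type.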
@@ -235,7 +239,8 @@ class CudnnConvGradOpKernel : public framework::OpKernel { workspace_size_limit = user_workspace_size * 1024 * 1024; } - auto handle = ctx.cuda_device_context().cudnn_handle(); + auto& dev_ctx = ctx.template device_context(); + auto handle = dev_ctx.cudnn_handle(); if (input_grad) { PADDLE_ENFORCE( platform::dynload::cudnnGetConvolutionBackwardDataAlgorithm( @@ -310,16 +315,16 @@ class CudnnConvGradOpKernel : public framework::OpKernel { } // namespace operators } // namespace paddle -REGISTER_OP_GPU_KERNEL(conv2d_cudnn, - paddle::operators::CudnnConvOpKernel, - paddle::operators::CudnnConvOpKernel); -REGISTER_OP_GPU_KERNEL(conv2d_cudnn_grad, - paddle::operators::CudnnConvGradOpKernel, - paddle::operators::CudnnConvGradOpKernel); - -REGISTER_OP_GPU_KERNEL(conv3d_cudnn, - paddle::operators::CudnnConvOpKernel, - paddle::operators::CudnnConvOpKernel); -REGISTER_OP_GPU_KERNEL(conv3d_cudnn_grad, - paddle::operators::CudnnConvGradOpKernel, - paddle::operators::CudnnConvGradOpKernel); +REGISTER_OP_CUDA_KERNEL(conv2d_cudnn, + paddle::operators::CudnnConvOpKernel, + paddle::operators::CudnnConvOpKernel); +REGISTER_OP_CUDA_KERNEL(conv2d_cudnn_grad, + paddle::operators::CudnnConvGradOpKernel, + paddle::operators::CudnnConvGradOpKernel); + +REGISTER_OP_CUDA_KERNEL(conv3d_cudnn, + paddle::operators::CudnnConvOpKernel, + paddle::operators::CudnnConvOpKernel); +REGISTER_OP_CUDA_KERNEL(conv3d_cudnn_grad, + paddle::operators::CudnnConvGradOpKernel, + paddle::operators::CudnnConvGradOpKernel); diff --git a/paddle/operators/conv_op.cc b/paddle/operators/conv_op.cc index 7a36a9b21aa6a1b415ac5a232e65eda8051c87f8..7ef805fd44bf94d3279ffa50f86993b3f2b64412 100644 --- a/paddle/operators/conv_op.cc +++ b/paddle/operators/conv_op.cc @@ -97,7 +97,7 @@ Conv2DOpMaker::Conv2DOpMaker(framework::OpProto* proto, .SetDefault({0, 0}); AddAttr( "groups", - "(int default:1), the group size of convolution operator. " + "(int default:1), the groups number of the convolution operator. " "According to grouped convolution in Alex Krizhevsky's Deep CNN paper: " "when group=2, the first half of the filters is only connected to the " "first half of the input channels, while the second half of the filters " @@ -112,23 +112,29 @@ Conv2DOpMaker::Conv2DOpMaker(framework::OpProto* proto, Convolution Operator. The convolution operation calculates the output based on the input, filter -and strides, paddings, groups, dilations parameters. The size of each dimension of the +and strides, paddings, dilations, groups parameters. The size of each dimension of the parameters is checked in the infer-shape. -Input(Input, Filter) and output(Output) are in NCHW format. Where N is batch +Input(Input) and Output(Output) are in NCHW format. Where N is batch size, C is the number of channels, H is the height of the feature, and W is -the width of the feature. Parameters(ksize, strides, paddings, dilations) are two elements. -These two elements represent height and width, respectively. +the width of the feature. +Filters(Input) is MCHW format. Where M is the number of output image channels, C is +the number of input image channels, H is the height of the filter, and W +is the width of the filter. +Parameters(strides, paddings, dilations) are two elements. These two elements represent +height and width, respectively. The input(X) size and output(Out) size may be different. 
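To make the shape arithmetic in the example below concrete (illustrative numbers only): with $H_{in} = 32$, paddings[0] = 1, dilations[0] = 1, $H_f = 3$ and strides[0] = 1, the formula gives $H_{out} = (32 + 2 \cdot 1 - (1 \cdot (3 - 1) + 1)) / 1 + 1 = 32$, i.e. a same-size output; raising strides[0] to 2 gives $H_{out} = 31 / 2 + 1 = 16$ with integer division.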
Example: Input: - Input shape: (N, C_in, H_in, W_in) - Filter shape: (C_out, C_in, H_f, W_f) + Input shape: $(N, C_{in}, H_{in}, W_{in})$ + Filter shape: $(C_{out}, C_{in}, H_f, W_f)$ Output: - Output shape: (N, C_out, H_out, W_out) - where - H_out = (H_in + 2 * paddings[0] - (dilations[0]*(filter_size[0] - 1) + 1)) / strides[0] + 1; - W_out = (W_in + 2 * paddings[1] - (dilations[1]*(filter_size[1] - 1) + 1)) / strides[1] + 1; + Output shape: $(N, C_{out}, H_{out}, W_{out})$ + Where +$$ + H_{out}= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]}+ 1 \\ + W_{out}= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]}+ 1 +$$ )DOC"); } @@ -165,7 +171,7 @@ Conv3DOpMaker::Conv3DOpMaker(framework::OpProto* proto, .SetDefault({0, 0, 0}); AddAttr( "groups", - "(int default:1), the group size of convolution operator. " + "(int default:1), the groups number of the convolution operator. " "According to grouped convolution in Alex Krizhevsky's Deep CNN paper: " "when group=2, the first half of the filters is only connected to the " "first half of the input channels, while the second half of the filters " @@ -174,32 +180,37 @@ Conv3DOpMaker::Conv3DOpMaker(framework::OpProto* proto, AddAttr>("dilations", "(vector default:{1, 1, 1}), the " "dilations(d_dilation, h_dilation, w_dilation) of " - "convolution operator. Currently, conv3d doesn't " - "support dilation.") + "convolution operator.") .SetDefault({1, 1, 1}); AddComment(R"DOC( Convolution3D Operator. The convolution operation calculates the output based on the input, filter -and strides, paddings, groups parameters. The size of each dimension of the +and strides, paddings, dilations, groups parameters. The size of each dimension of the parameters is checked in the infer-shape. -Input(Input, Filter) and output(Output) are in NCDHW format. Where N is batch +Input(Input) and output(Output) are in NCDHW format, where N is batch size, C is the number of channels,D is the depth of the feature, H is the height of -the feature, and W is the width of the feature. Parameters(ksize, strides, paddings) -are three elements. These three elements represent depth, height and width, respectively. +the feature, and W is the width of the feature. +Filters(Input) is MCDHW format, where M is the number of output image channels, +C is the number of input image channels, D is the depth of the filter, +H is the height of the filter, and W is the width of the filter. +Parameters(strides, paddings, dilations) are three elements. These three elements +represent depth, height and width, respectively. The input(X) size and output(Out) size may be different. 
Example: Input: - Input shape: (N, C_in, D_in, H_in, W_in) - Filter shape: (C_out, C_in, D_f, H_f, W_f) + Input shape: $(N, C_{in}, D_{in}, H_{in}, W_{in})$ + Filter shape: $(C_{out}, C_{in}, D_f, H_f, W_f)$ Output: - Output shape: (N, C_out, D_out, H_out, W_out) - where - D_out = (D_in - filter_size[0] + 2 * paddings[0]) / strides[0] + 1; - H_out = (H_in - filter_size[1] + 2 * paddings[1]) / strides[1] + 1; - W_out = (W_in - filter_size[2] + 2 * paddings[2]) / strides[2] + 1; + Output shape: $(N, C_{out}, D_{out}, H_{out}, W_{out})$ + Where + $$ + D_{out}= \frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{ strides[0]}+ 1 \\ + H_{out}= \frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{ strides[1]}+ 1 \\ + W_{out}= \frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{ strides[2]}+ 1 + $$ )DOC"); } @@ -224,16 +235,18 @@ namespace ops = paddle::operators; REGISTER_OP(conv3d, ops::ConvOp, ops::Conv3DOpMaker, conv3d_grad, ops::ConvOpGrad); -REGISTER_OP_CPU_KERNEL(conv2d, - ops::GemmConvKernel, - ops::GemmConvKernel); REGISTER_OP_CPU_KERNEL( - conv2d_grad, ops::GemmConvGradKernel, - ops::GemmConvGradKernel); + conv2d, ops::GemmConvKernel, + ops::GemmConvKernel); +REGISTER_OP_CPU_KERNEL( + conv2d_grad, + ops::GemmConvGradKernel, + ops::GemmConvGradKernel); -REGISTER_OP_CPU_KERNEL(conv3d, - ops::GemmConvKernel, - ops::GemmConvKernel); REGISTER_OP_CPU_KERNEL( - conv3d_grad, ops::GemmConvGradKernel, - ops::GemmConvGradKernel); + conv3d, ops::GemmConvKernel, + ops::GemmConvKernel); +REGISTER_OP_CPU_KERNEL( + conv3d_grad, + ops::GemmConvGradKernel, + ops::GemmConvGradKernel); diff --git a/paddle/operators/conv_op.cu.cc b/paddle/operators/conv_op.cu.cc index 546451234a1ed1a4d3119cb175c6d37ae3f0aac1..38615a8befab91633423b7cd8536253a0d049ac3 100644 --- a/paddle/operators/conv_op.cu.cc +++ b/paddle/operators/conv_op.cu.cc @@ -16,16 +16,18 @@ namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(conv2d, - ops::GemmConvKernel, - ops::GemmConvKernel); -REGISTER_OP_GPU_KERNEL( - conv2d_grad, ops::GemmConvGradKernel, - ops::GemmConvGradKernel); +REGISTER_OP_CUDA_KERNEL( + conv2d, ops::GemmConvKernel, + ops::GemmConvKernel); +REGISTER_OP_CUDA_KERNEL( + conv2d_grad, + ops::GemmConvGradKernel, + ops::GemmConvGradKernel); -REGISTER_OP_GPU_KERNEL(conv3d, - ops::GemmConvKernel, - ops::GemmConvKernel); -REGISTER_OP_GPU_KERNEL( - conv3d_grad, ops::GemmConvGradKernel, - ops::GemmConvGradKernel); +REGISTER_OP_CUDA_KERNEL( + conv3d, ops::GemmConvKernel, + ops::GemmConvKernel); +REGISTER_OP_CUDA_KERNEL( + conv3d_grad, + ops::GemmConvGradKernel, + ops::GemmConvGradKernel); diff --git a/paddle/operators/conv_op.h b/paddle/operators/conv_op.h index 09bff0a68db82aa723dc08aa83c775910e17c5b8..d2de4e80f751d4938ac9cad60871b470fccf225c 100644 --- a/paddle/operators/conv_op.h +++ b/paddle/operators/conv_op.h @@ -72,7 +72,7 @@ class ConvOpGrad : public framework::OperatorWithKernel { void InferShape(framework::InferShapeContext* ctx) const override; }; -template +template class GemmConvKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -141,9 +141,10 @@ class GemmConvKernel : public framework::OpKernel { int in_step = static_cast(input->dims()[1]) / groups; int out_step = static_cast(output->dims()[1]) / groups; - math::Vol2ColFunctor vol2col; - math::Im2ColFunctor im2col; + math::Vol2ColFunctor vol2col; + math::Im2ColFunctor im2col; + auto& dev_ctx = context.template device_context(); for (int i = 0; i < 
batch_size; i++) { Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape); Tensor out_batch = output->Slice(i, i + 1).Resize(output_matrix_shape); @@ -157,27 +158,26 @@ class GemmConvKernel : public framework::OpKernel { col_matrix.Resize(col_matrix_shape); } else if (data_dim == 2U) { // im2col - im2col(context.device_context(), in_slice, dilations, strides, + im2col(dev_ctx, in_slice, dilations, strides, std::vector{paddings[0], paddings[1], paddings[0], paddings[1]}, &col); } else if (data_dim == 3U) { // vol2col - vol2col(context.device_context(), in_slice, dilations, strides, - paddings, &col); + vol2col(dev_ctx, in_slice, dilations, strides, paddings, &col); } // gemm Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step); Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step); - math::matmul(context.device_context(), filter_slice, false, - col_matrix, false, T(1.0), &out_slice, T(0.0)); + math::matmul(dev_ctx, filter_slice, false, col_matrix, + false, T(1.0), &out_slice, T(0.0)); } } } }; -template +template class GemmConvGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -256,14 +256,19 @@ class GemmConvGradKernel : public framework::OpKernel { col_matrix.Resize(col_matrix_shape); } - math::SetConstant set_zero; + math::SetConstant set_zero; + auto& dev_ctx = context.template device_context(); if (input_grad) { input_grad->mutable_data(context.GetPlace()); - set_zero(context.device_context(), input_grad, static_cast(0)); - math::Col2VolFunctor col2vol; - math::Col2ImFunctor col2im; + // if is_expand is false, the operation of set_zero is unnecessary, + // because math::matmul will reset input_grad. + if (is_expand) { + set_zero(dev_ctx, input_grad, static_cast(0)); + } + math::Col2VolFunctor col2vol; + math::Col2ImFunctor col2im; for (int i = 0; i < batch_size; i++) { Tensor out_grad_batch = @@ -282,18 +287,17 @@ class GemmConvGradKernel : public framework::OpKernel { col_matrix.ShareDataWith(in_grad_slice); col_matrix.Resize(col_matrix_shape); } - math::matmul(context.device_context(), filter_slice, true, - out_grad_slice, false, T(1.0), &col_matrix, - T(0.0)); + math::matmul(dev_ctx, filter_slice, true, + out_grad_slice, false, T(1.0), + &col_matrix, T(0.0)); if (is_expand && data_dim == 2U) { - col2im(context.device_context(), col, dilations, strides, + col2im(dev_ctx, col, dilations, strides, std::vector{paddings[0], paddings[1], paddings[0], paddings[1]}, &in_grad_slice); } else if (is_expand && data_dim == 3U) { - col2vol(context.device_context(), col, dilations, strides, paddings, - &in_grad_slice); + col2vol(dev_ctx, col, dilations, strides, paddings, &in_grad_slice); } } } @@ -303,9 +307,9 @@ class GemmConvGradKernel : public framework::OpKernel { filter_grad->mutable_data(context.GetPlace()); Tensor filter_grad_ = *filter_grad; filter_grad_.Resize(filter_matrix_shape); - set_zero(context.device_context(), filter_grad, static_cast(0)); - math::Im2ColFunctor im2col; - math::Vol2ColFunctor vol2col; + set_zero(dev_ctx, filter_grad, static_cast(0)); + math::Im2ColFunctor im2col; + math::Vol2ColFunctor vol2col; for (int i = 0; i < batch_size; i++) { Tensor out_grad_batch = output_grad->Slice(i, i + 1).Resize(output_matrix_shape); @@ -321,21 +325,20 @@ class GemmConvGradKernel : public framework::OpKernel { col_matrix.ShareDataWith(col); col_matrix.Resize(col_matrix_shape); } else if (data_dim == 2U) { - im2col(context.device_context(), in_slice, dilations, strides, + 
im2col(dev_ctx, in_slice, dilations, strides, std::vector{paddings[0], paddings[1], paddings[0], paddings[1]}, &col); } else if (data_dim == 3U) { - vol2col(context.device_context(), in_slice, dilations, strides, - paddings, &col); + vol2col(dev_ctx, in_slice, dilations, strides, paddings, &col); } // gemm Tensor filter_grad_slice = filter_grad_.Slice(g * out_step, (g + 1) * out_step); - math::matmul(context.device_context(), out_grad_slice, - false, col_matrix, true, T(1.0), - &filter_grad_slice, T(1.0)); + math::matmul(dev_ctx, out_grad_slice, false, + col_matrix, true, T(1.0), + &filter_grad_slice, T(1.0)); } } } diff --git a/paddle/operators/conv_shift_op.cu b/paddle/operators/conv_shift_op.cu index 95e13c38a8dd234f49393d2d4808607a447b0d4c..f7ca82ce2635f9ef9d7e9a062d148448e61c163c 100644 --- a/paddle/operators/conv_shift_op.cu +++ b/paddle/operators/conv_shift_op.cu @@ -111,7 +111,8 @@ __global__ void ConvShiftDy(const T *x, const T *dout, int x_width, int y_width, } // namespace template -class ConvShiftKernel : public framework::OpKernel { +class ConvShiftKernel + : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { const Tensor *X = context.Input("X"); @@ -132,7 +133,8 @@ class ConvShiftKernel : public framework::OpKernel { dim3 grid_dim(num_x_blocks, batch_size); - auto stream = context.cuda_device_context().stream(); + auto stream = + context.template device_context().stream(); ConvShiftForward<<>>( x_data, y_data, x_width, y_width, y_half_width, batch_size, out_data); @@ -140,7 +142,7 @@ class ConvShiftKernel : public framework::OpKernel { }; template -class ConvShiftGradKernel +class ConvShiftGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { @@ -159,8 +161,9 @@ class ConvShiftGradKernel int y_width = Y->dims()[1]; int y_half_width = (y_width - 1) / 2; - auto &device_ctx = context.cuda_device_context(); - math::SetConstant zero; + auto &device_ctx = + context.template device_context(); + math::SetConstant zero; const int x_per_block = 256; int num_x_blocks = DivUp(x_width, x_per_block); @@ -186,8 +189,9 @@ class ConvShiftGradKernel } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(conv_shift, - ops::ConvShiftKernel); -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( + conv_shift, + ops::ConvShiftKernel); +REGISTER_OP_CUDA_KERNEL( conv_shift_grad, - ops::ConvShiftGradKernel); + ops::ConvShiftGradKernel); diff --git a/paddle/operators/conv_shift_op.h b/paddle/operators/conv_shift_op.h index 5a160b0f1696c70868fc48d219b38cde2018e8a3..1a70b38a0d8cb82ad1f818148306b7ec5f334744 100644 --- a/paddle/operators/conv_shift_op.h +++ b/paddle/operators/conv_shift_op.h @@ -18,13 +18,13 @@ namespace paddle { namespace operators { -template +template class ConvShiftKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override; }; -template +template class ConvShiftGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override; diff --git a/paddle/operators/conv_transpose_cudnn_op.cc b/paddle/operators/conv_transpose_cudnn_op.cc index 0192178ce3a0a47196232f0723baec8324bea60b..4cb6a2ccffc76066ea0868f76ba2a3bfb9e5e450 100644 --- a/paddle/operators/conv_transpose_cudnn_op.cc +++ b/paddle/operators/conv_transpose_cudnn_op.cc @@ -61,12 +61,13 @@ REGISTER_OP(conv2d_transpose_cudnn, ops::ConvTransposeOp, REGISTER_OP_CPU_KERNEL( 
conv2d_transpose_cudnn, - ops::GemmConvTransposeKernel, - ops::GemmConvTransposeKernel); + ops::GemmConvTransposeKernel, + ops::GemmConvTransposeKernel); REGISTER_OP_CPU_KERNEL( conv2d_transpose_cudnn_grad, - ops::GemmConvTransposeGradKernel, - ops::GemmConvTransposeGradKernel); + ops::GemmConvTransposeGradKernel, + ops::GemmConvTransposeGradKernel); REGISTER_OP(conv3d_transpose_cudnn, ops::ConvTransposeOp, ops::CudnnConv3DTransposeOpMaker, conv3d_transpose_cudnn_grad, @@ -74,9 +75,10 @@ REGISTER_OP(conv3d_transpose_cudnn, ops::ConvTransposeOp, REGISTER_OP_CPU_KERNEL( conv3d_transpose_cudnn, - ops::GemmConvTransposeKernel, - ops::GemmConvTransposeKernel); + ops::GemmConvTransposeKernel, + ops::GemmConvTransposeKernel); REGISTER_OP_CPU_KERNEL( conv3d_transpose_cudnn_grad, - ops::GemmConvTransposeGradKernel, - ops::GemmConvTransposeGradKernel); + ops::GemmConvTransposeGradKernel, + ops::GemmConvTransposeGradKernel); diff --git a/paddle/operators/conv_transpose_cudnn_op.cu.cc b/paddle/operators/conv_transpose_cudnn_op.cu.cc index 494904fe524ae30a5032e489a0c5f20179d8e8ce..f0297f6c40c132c28b50184997d657451f26362b 100644 --- a/paddle/operators/conv_transpose_cudnn_op.cu.cc +++ b/paddle/operators/conv_transpose_cudnn_op.cu.cc @@ -83,7 +83,8 @@ class CudnnConvTransposeOpKernel : public framework::OpKernel { } // ------------------- cudnn conv algorithm --------------------- cudnnConvolutionBwdDataAlgo_t algo; - auto handle = ctx.cuda_device_context().cudnn_handle(); + auto& dev_ctx = ctx.template device_context(); + auto handle = dev_ctx.cudnn_handle(); // Get the algorithm PADDLE_ENFORCE(platform::dynload::cudnnGetConvolutionBackwardDataAlgorithm( handle, cudnn_filter_desc, cudnn_input_desc, cudnn_conv_desc, @@ -165,7 +166,8 @@ class CudnnConvTransposeGradOpKernel : public framework::OpKernel { workspace_size_limit = user_workspace_size * 1024 * 1024; } - auto handle = ctx.cuda_device_context().cudnn_handle(); + auto& dev_ctx = ctx.template device_context(); + auto handle = dev_ctx.cudnn_handle(); if (input_grad) { // choose backward algorithm for data PADDLE_ENFORCE(platform::dynload::cudnnGetConvolutionForwardAlgorithm( @@ -234,16 +236,16 @@ class CudnnConvTransposeGradOpKernel : public framework::OpKernel { namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(conv2d_transpose_cudnn, - ops::CudnnConvTransposeOpKernel, - ops::CudnnConvTransposeOpKernel); -REGISTER_OP_GPU_KERNEL(conv2d_transpose_cudnn_grad, - ops::CudnnConvTransposeGradOpKernel, - ops::CudnnConvTransposeGradOpKernel); - -REGISTER_OP_GPU_KERNEL(conv3d_transpose_cudnn, - ops::CudnnConvTransposeOpKernel, - ops::CudnnConvTransposeOpKernel); -REGISTER_OP_GPU_KERNEL(conv3d_transpose_cudnn_grad, - ops::CudnnConvTransposeGradOpKernel, - ops::CudnnConvTransposeGradOpKernel); +REGISTER_OP_CUDA_KERNEL(conv2d_transpose_cudnn, + ops::CudnnConvTransposeOpKernel, + ops::CudnnConvTransposeOpKernel); +REGISTER_OP_CUDA_KERNEL(conv2d_transpose_cudnn_grad, + ops::CudnnConvTransposeGradOpKernel, + ops::CudnnConvTransposeGradOpKernel); + +REGISTER_OP_CUDA_KERNEL(conv3d_transpose_cudnn, + ops::CudnnConvTransposeOpKernel, + ops::CudnnConvTransposeOpKernel); +REGISTER_OP_CUDA_KERNEL(conv3d_transpose_cudnn_grad, + ops::CudnnConvTransposeGradOpKernel, + ops::CudnnConvTransposeGradOpKernel); diff --git a/paddle/operators/conv_transpose_op.cc b/paddle/operators/conv_transpose_op.cc index 3e55ef036a7fb976117054574d1347fa943acd55..ca063e94bbe64817567a298c3b1ad9306667536d 100644 --- a/paddle/operators/conv_transpose_op.cc +++ 
b/paddle/operators/conv_transpose_op.cc @@ -39,7 +39,7 @@ void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const { "ConvTransposeOp input dimension and strides dimension should " "be consistent."); PADDLE_ENFORCE_EQ(paddings.size(), strides.size(), - "ConvTransposeOp paddings dimension and Conv strides " + "ConvTransposeOp paddings dimension and strides " "dimension should be the same."); PADDLE_ENFORCE_EQ(in_dims[1], filter_dims[0], "In ConvTransposeOp, The input channel should be the same " @@ -62,24 +62,25 @@ Conv2DTransposeOpMaker::Conv2DTransposeOpMaker( "The format of input tensor is NCHW. Where N is batch size, C is the " "number of input channels, H is the height of the feature, and " "W is the width of the feature."); - AddInput("Filter", - "(Tensor) The filter tensor of convolution transpose operator. " - "The format of the filter tensor is CMHW, where C is the number of " - "output image channels, M is the number of input image channels, " - "H is the height of the filter, and W is the width of the filter. " - "We enforce groups number == 1 and padding == 0 in " - "the convolution transpose scenario."); + AddInput( + "Filter", + "(Tensor) The filter tensor of convolution transpose operator. " + "The format of the filter tensor is MCHW, where M is the number of " + "input feature channels, C is the number of " + "output feature channels," + "H is the height of the filter, and W is the width of the filter. " + "We enforce groups number == 1 in the convolution transpose scenario."); AddOutput("Output", "(Tensor) The output tensor of convolution transpose operator. " "The format of output tensor is also NCHW."); AddAttr>( "strides", - "(vector defalut:{1, 1}), the strides(h_stride, w_stride) of " + "(vector default:{1, 1}), the strides(h_stride, w_stride) of " "convolution transpose operator.") .SetDefault({1, 1}); AddAttr>( "paddings", - "(vector defalut:{0, 0}), the paddings(h_pad, w_pad) of convolution " + "(vector default:{0, 0}), the paddings(h_pad, w_pad) of convolution " "transpose operator.") .SetDefault({0, 0}); AddComment(R"DOC( @@ -88,21 +89,26 @@ Convolution2D Transpose Operator. The convolution transpose operation calculates the output based on the input, filter and strides, paddings, groups parameters. The size of each dimension of the parameters is checked in the infer-shape. - -Input(Input, Filter) and output(Output) are in NCHW format. Where N is batch -size, C is the number of channels, H is the height of the feature, and -W is the width of the feature. Parameters(ksize, strides, paddings) are two elements. -These two elements represent height and width, respectively. +Input(Input) and output(Output) are in NCHW format. Where N is batchsize, C is the +number of channels, H is the height of the feature, and W is the width of the feature. +Filter(Input) is in MCHW format. Where M is the number of input feature channels, +C is the number of output feature channels, H is the height of the filter, +and W is the width of the filter. +Parameters(strides, paddings) are two elements. These two elements represent height +and width, respectively. The input(X) size and output(Out) size may be different. 
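To make the transposed-convolution shape arithmetic in the example below concrete (illustrative numbers only): with $H_{in} = 16$, strides[0] = 2, paddings[0] = 1 and $H_f = 4$, the formula gives $H_{out} = (16 - 1) \cdot 2 - 2 \cdot 1 + 4 = 32$, the familiar upsample-by-two configuration.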
+ Example: Input: - Input shape: (N, C_in, H_in, W_in) - Filter shape: (C_in, C_out, H_f, W_f) + Input shape: $(N, C_{in}, H_{in}, W_{in})$ + Filter shape: $(C_{in}, C_{out}, H_f, W_f)$ Output: - Output shape: (N, C_out, H_out, W_out) - where - H_out = (H_in - 1) * strides[0] - 2 * paddings[0] + filter_size[0]; - W_out = (W_in - 1) * strides[1] - 2 * paddings[1] + filter_size[1]; + Output shape: $(N, C_{out}, H_{out}, W_{out})$ + Where + $$ + H_{out} = (H_{in} - 1) * strides[0] - 2 * paddings[0] + H_f \\ + W_{out} = (W_{in} - 1) * strides[1] - 2 * paddings[1] + W_f + $$ )DOC"); } @@ -117,8 +123,9 @@ Conv3DTransposeOpMaker::Conv3DTransposeOpMaker( "W is the width of the feature."); AddInput("Filter", "(Tensor) The filter tensor of convolution transpose operator." - "The format of the filter tensor is CMDHW, where C is the number of " - "output image channels, M is the number of input image channels, D " + "The format of the filter tensor is MCDHW, where M is the number of " + "input feature channels, C is the number of " + "output feature channels, D " "is the depth of the filter, H is the height of the filter, and " "W is the width of the filter." "We enforce groups number == 1 and padding == 0 in " @@ -130,12 +137,12 @@ Conv3DTransposeOpMaker::Conv3DTransposeOpMaker( "the number of channels, D is the depth of the feature, H is the " "height of the feature, and W is the width of the feature."); AddAttr>("strides", - "(vector defalut:{1, 1, 1}), the " + "(vector default:{1, 1, 1}), the " "strides{d_stride, h_stride, w_stride} of " "convolution transpose operator.") .SetDefault({1, 1, 1}); AddAttr>("paddings", - "(vector defalut:{0, 0, 0}), paddings(d_pad, " + "(vector default:{0, 0, 0}), paddings(d_pad, " "h_pad, w_pad) of convolution transpose operator.") .SetDefault({0, 0, 0}); AddComment(R"DOC( @@ -144,23 +151,28 @@ Convolution3D Transpose Operator. The convolution transpose operation calculates the output based on the input, filter and strides, paddings, groups parameters. The size of each dimension of the parameters is checked in the infer-shape. - -Input(Input, Filter) and output(Output) are in NCDHW format. Where N is batch -size, C is the number of channels, D is the depth of the feature, -H is the height of the feature, and W is the width of the feature. -Parameters(ksize, strides, paddings) are three elements. -These three elements represent depth, height and width, respectively. +Input(Input) and output(Output) are in NCDHW format. Where N is batch size, C is the +number of channels, D is the depth of the feature, H is the height of the feature, +and W is the width of the feature. +Filter(Input) is in MCDHW format. Where M is the number of input feature channels, +C is the number of output feature channels, D is the depth of the filter,H is the +height of the filter, and W is the width of the filter. +Parameters(strides, paddings) are three elements. These three elements represent +depth, height and width, respectively. The input(X) size and output(Out) size may be different. 
-Example: + +Example: Input: - Input shape: (N, C_in, D_in, H_in, W_in) - Filter shape: (C_in, C_out, D_f, H_f, W_f) + Input shape: $(N, C_{in}, D_{in}, H_{in}, W_{in})$ + Filter shape: $(C_{in}, C_{out}, D_f, H_f, W_f)$ Output: - Output shape: (N, C_out, D_out, H_out, W_out) - where - D_out = (D_in - 1) * strides[0] - 2 * paddings[0] + filter_size[0]; - H_out = (H_in - 1) * strides[1] - 2 * paddings[1] + filter_size[1]; - W_out = (W_in - 1) * strides[2] - 2 * paddings[2] + filter_size[2]; + Output shape: $(N, C_{out}, D_{out}, H_{out}, W_{out})$ + Where + $$ + D_{out} = (D_{in} - 1) * strides[0] - 2 * paddings[0] + D_f \\ + H_{out} = (H_{in} - 1) * strides[1] - 2 * paddings[1] + H_f \\ + W_{out} = (W_{in} - 1) * strides[2] - 2 * paddings[2] + W_f + $$ )DOC"); } @@ -185,21 +197,23 @@ REGISTER_OP(conv2d_transpose, ops::ConvTransposeOp, ops::Conv2DTransposeOpMaker, REGISTER_OP_CPU_KERNEL( conv2d_transpose, - ops::GemmConvTransposeKernel, - ops::GemmConvTransposeKernel); + ops::GemmConvTransposeKernel, + ops::GemmConvTransposeKernel); REGISTER_OP_CPU_KERNEL( conv2d_transpose_grad, - ops::GemmConvTransposeGradKernel, - ops::GemmConvTransposeGradKernel); + ops::GemmConvTransposeGradKernel, + ops::GemmConvTransposeGradKernel); REGISTER_OP(conv3d_transpose, ops::ConvTransposeOp, ops::Conv3DTransposeOpMaker, conv3d_transpose_grad, ops::ConvTransposeOpGrad); REGISTER_OP_CPU_KERNEL( conv3d_transpose, - ops::GemmConvTransposeKernel, - ops::GemmConvTransposeKernel); + ops::GemmConvTransposeKernel, + ops::GemmConvTransposeKernel); REGISTER_OP_CPU_KERNEL( conv3d_transpose_grad, - ops::GemmConvTransposeGradKernel, - ops::GemmConvTransposeGradKernel); + ops::GemmConvTransposeGradKernel, + ops::GemmConvTransposeGradKernel); diff --git a/paddle/operators/conv_transpose_op.cu.cc b/paddle/operators/conv_transpose_op.cu.cc index 4165eb0c7b048b83bbd94c57b971530043b66545..b91ebd7922f2e101df8d6ef5892a62ec5a10cf99 100644 --- a/paddle/operators/conv_transpose_op.cu.cc +++ b/paddle/operators/conv_transpose_op.cu.cc @@ -16,20 +16,24 @@ namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( conv2d_transpose, - ops::GemmConvTransposeKernel, - ops::GemmConvTransposeKernel); -REGISTER_OP_GPU_KERNEL( + ops::GemmConvTransposeKernel, + ops::GemmConvTransposeKernel); +REGISTER_OP_CUDA_KERNEL( conv2d_transpose_grad, - ops::GemmConvTransposeGradKernel, - ops::GemmConvTransposeGradKernel); + ops::GemmConvTransposeGradKernel, + ops::GemmConvTransposeGradKernel); -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( conv3d_transpose, - ops::GemmConvTransposeKernel, - ops::GemmConvTransposeKernel); -REGISTER_OP_GPU_KERNEL( + ops::GemmConvTransposeKernel, + ops::GemmConvTransposeKernel); +REGISTER_OP_CUDA_KERNEL( conv3d_transpose_grad, - ops::GemmConvTransposeGradKernel, - ops::GemmConvTransposeGradKernel); + ops::GemmConvTransposeGradKernel, + ops::GemmConvTransposeGradKernel); diff --git a/paddle/operators/conv_transpose_op.h b/paddle/operators/conv_transpose_op.h index 0fc0735788c499c2d520c0cc689e1ce07ba67ce8..1171b0435fd2b1abe541043e8283a8fc09dc13c7 100644 --- a/paddle/operators/conv_transpose_op.h +++ b/paddle/operators/conv_transpose_op.h @@ -52,7 +52,7 @@ class ConvTransposeOpGrad : public framework::OperatorWithKernel { void InferShape(framework::InferShapeContext* ctx) const override; }; -template +template class GemmConvTransposeKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -63,7 +63,6 @@ class GemmConvTransposeKernel 
: public framework::OpKernel { std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); - // TODO(Zhuoyuan): Paddings can be added in future. // groups will alway be disabled in conv2dtranspose. const int batch_size = static_cast(input->dims()[0]); @@ -110,11 +109,12 @@ class GemmConvTransposeKernel : public framework::OpKernel { filter.Resize(filter_matrix_shape); output->mutable_data(context.GetPlace()); - math::SetConstant set_zero; - set_zero(context.device_context(), output, static_cast(0)); + math::SetConstant set_zero; + auto& dev_ctx = context.template device_context(); + set_zero(dev_ctx, output, static_cast(0)); - math::Col2ImFunctor col2im; - math::Col2VolFunctor col2vol; + math::Col2ImFunctor col2im; + math::Col2VolFunctor col2vol; std::vector dilations({1, 1, 1}); // convolution transpose: gemm + col2im or col2vol (similar to conv-backward @@ -128,29 +128,27 @@ class GemmConvTransposeKernel : public framework::OpKernel { // col_matrix = filter * input_batch // of shape (c * k_h * k_w, h * w) or (c * k_d * k_h * k_w, d * h * w) - math::matmul(context.device_context(), filter, true, - input_batch, false, static_cast(1.0), - &col_matrix, static_cast(0.0)); + math::matmul(dev_ctx, filter, true, input_batch, false, + static_cast(1.0), &col_matrix, + static_cast(0.0)); if (data_dim == 2U) { // col2im: col_matrix -> dy // from (c * k_h * k_w, h * w) to (c, o_h, o_w) - col2im(context.device_context(), col, - std::vector{dilations[0], dilations[1]}, strides, - std::vector{paddings[0], paddings[1], paddings[0], - paddings[1]}, + col2im(dev_ctx, col, std::vector{dilations[0], dilations[1]}, + strides, std::vector{paddings[0], paddings[1], paddings[0], + paddings[1]}, &output_batch); } else if (data_dim == 3U) { // col2vol: col_matrix -> dy // from (c * k_d * k_h * k_w, d * h * w) to (c, o_d, o_h, o_w) - col2vol(context.device_context(), col, dilations, strides, paddings, - &output_batch); + col2vol(dev_ctx, col, dilations, strides, paddings, &output_batch); } } } }; -template +template class GemmConvTransposeGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -207,6 +205,7 @@ class GemmConvTransposeGradKernel : public framework::OpKernel { // convolution transpose grad on input: // im2col + gemm (similar to conv-forward) // input need to compute gradient + auto& dev_ctx = context.template device_context(); if (input_grad || filter_grad) { Tensor col; col.mutable_data(col_shape, context.GetPlace()); @@ -218,19 +217,18 @@ class GemmConvTransposeGradKernel : public framework::OpKernel { col_matrix.Resize(col_matrix_shape); Tensor filter_grad_; - math::SetConstant set_zero; + math::SetConstant set_zero; - math::Im2ColFunctor im2col; - math::Vol2ColFunctor vol2col; + math::Im2ColFunctor im2col; + math::Vol2ColFunctor vol2col; std::vector dilations({1, 1, 1}); if (input_grad) { input_grad->mutable_data(context.GetPlace()); - set_zero(context.device_context(), input_grad, static_cast(0)); } if (filter_grad) { // filter size (m, c, k_h, k_w) filter_grad->mutable_data(context.GetPlace()); - set_zero(context.device_context(), filter_grad, static_cast(0)); + set_zero(dev_ctx, filter_grad, static_cast(0)); filter_grad_ = *filter_grad; filter_grad_.Resize(filter_matrix_shape); } @@ -243,7 +241,7 @@ class GemmConvTransposeGradKernel : public framework::OpKernel { if (data_dim == 2U) { // im2col: dy -> col matrix // from (c, o_h, o_w) to (c * k_h * k_w, h * w) - 
im2col(context.device_context(), output_grad_batch, + im2col(dev_ctx, output_grad_batch, std::vector{dilations[0], dilations[1]}, strides, std::vector{paddings[0], paddings[1], paddings[0], paddings[1]}, @@ -251,8 +249,8 @@ class GemmConvTransposeGradKernel : public framework::OpKernel { } else if (data_dim == 3U) { // vol2col: dy -> col_matrix // from (c, o_d, o_h, o_w) to (c * k_d * k_h * k_w, d * h * w) - vol2col(context.device_context(), output_grad_batch, dilations, - strides, paddings, &col); + vol2col(dev_ctx, output_grad_batch, dilations, strides, paddings, + &col); } if (input_grad) { @@ -264,9 +262,9 @@ class GemmConvTransposeGradKernel : public framework::OpKernel { // or // (m, c * k_d * k_h * k_w) * (c * k_d * k_h * k_w, d * h * w) -> (m, // d, h, w) - math::matmul(context.device_context(), filter, false, - col_matrix, false, static_cast(1.0), - &input_grad_batch, static_cast(0.0)); + math::matmul( + dev_ctx, filter, false, col_matrix, false, static_cast(1.0), + &input_grad_batch, static_cast(0.0)); } if (filter_grad) { // input batch @@ -276,9 +274,9 @@ class GemmConvTransposeGradKernel : public framework::OpKernel { // or // (m, d * h * w) * (d * h * w, c * k_d * k_h * k_w) -> (m, c * k_d * // k_h * k_w) - math::matmul(context.device_context(), in_batch, false, - col_matrix, true, static_cast(1.0), - &filter_grad_, static_cast(1.0)); + math::matmul(dev_ctx, in_batch, false, col_matrix, + true, static_cast(1.0), + &filter_grad_, static_cast(1.0)); } } } diff --git a/paddle/operators/cos_sim_op.cc b/paddle/operators/cos_sim_op.cc index 312264ccd48d1405a247a2c864d9f5897c897bea..440c427cba9396ec6d0ebf7814d671e45f45412d 100644 --- a/paddle/operators/cos_sim_op.cc +++ b/paddle/operators/cos_sim_op.cc @@ -155,7 +155,8 @@ class CosSimOpGrad : public framework::OperatorWithKernel { namespace ops = paddle::operators; REGISTER_OP(cos_sim, ops::CosSimOp, ops::CosSimOpMaker, cos_sim_grad, ops::CosSimOpGrad); -REGISTER_OP_CPU_KERNEL(cos_sim, - ops::CosSimKernel); REGISTER_OP_CPU_KERNEL( - cos_sim_grad, ops::CosSimGradKernel); + cos_sim, ops::CosSimKernel); +REGISTER_OP_CPU_KERNEL( + cos_sim_grad, + ops::CosSimGradKernel); diff --git a/paddle/operators/cos_sim_op.cu b/paddle/operators/cos_sim_op.cu index 0cb8fd26de47a4a464db98664263544e3e503d63..1cb01f5945f691747bac609ca4a93e2d15cde5bf 100644 --- a/paddle/operators/cos_sim_op.cu +++ b/paddle/operators/cos_sim_op.cu @@ -16,7 +16,8 @@ #include "paddle/operators/cos_sim_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(cos_sim, - ops::CosSimKernel); -REGISTER_OP_GPU_KERNEL( - cos_sim_grad, ops::CosSimGradKernel); +REGISTER_OP_CUDA_KERNEL( + cos_sim, ops::CosSimKernel); +REGISTER_OP_CUDA_KERNEL( + cos_sim_grad, + ops::CosSimGradKernel); diff --git a/paddle/operators/cos_sim_op.h b/paddle/operators/cos_sim_op.h index 62a4e484eceeabc4cc26e68ac54a50be1ac95df7..fecb5a79b2397dd73d991a1a87efcf84d60ef882 100644 --- a/paddle/operators/cos_sim_op.h +++ b/paddle/operators/cos_sim_op.h @@ -27,7 +27,7 @@ template using EigenVector = framework::EigenVector; -template +template class CosSimKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -51,7 +51,8 @@ class CosSimKernel : public framework::OpKernel { auto y_norm = EigenVector::Flatten(*out_y_norm); // compute - auto place = context.GetEigenDevice(); + auto& place = + *context.template device_context().eigen_device(); auto row_along = Eigen::array({{1}}); x_norm.device(place) = x.square().sum(row_along).sqrt(); 
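The hunks above and below all apply the same mechanical migration: kernels take a DeviceContext type (rather than a Place) as their first template parameter, fetch the context with ctx.template device_context<DeviceContext>(), and obtain the Eigen device from it. A minimal sketch of the resulting kernel shape (illustrative only; ExampleKernel is hypothetical, the exact template arguments are assumptions, and the snippet compiles only inside the Paddle source tree):

    template <typename DeviceContext, typename T>
    class ExampleKernel : public framework::OpKernel<T> {
     public:
      void Compute(const framework::ExecutionContext& ctx) const override {
        // Replaces the old ctx.device_context() and ctx.GetEigenDevice<Place>().
        auto& dev_ctx = ctx.template device_context<DeviceContext>();
        auto& place = *dev_ctx.eigen_device();
        // Math functors now receive dev_ctx; Eigen expressions run on place.
      }
    };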
y_norm.device(place) = y.square().sum(row_along).sqrt(); @@ -66,7 +67,7 @@ class CosSimKernel : public framework::OpKernel { } }; -template +template class CosSimGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -96,7 +97,8 @@ class CosSimGradKernel : public framework::OpKernel { auto z_bcast = z.broadcast(bcast_cols); auto dz_bcast = dz.broadcast(bcast_cols); auto x_snorm_bcast = x_norm.square().eval().broadcast(bcast_cols); - auto place = context.GetEigenDevice(); + auto& place = + *context.template device_context().eigen_device(); if (rows_x == rows_y) { auto y_snorm_bcast = y_norm.square().eval().broadcast(bcast_cols); auto norm_prod_bcast = (x_norm * y_norm).eval().broadcast(bcast_cols); diff --git a/paddle/operators/crf_decoding_op.cc b/paddle/operators/crf_decoding_op.cc index f418f489c0ff471464a23380598e9f4c8da16ca9..1ce189fa6ebba3712467572c55d599975bbe7534 100644 --- a/paddle/operators/crf_decoding_op.cc +++ b/paddle/operators/crf_decoding_op.cc @@ -36,17 +36,18 @@ class CRFDecodingOpMaker : public framework::OpProtoAndCheckerMaker { "w. See more details in comments of the linear_chain_crf operator."); AddInput( "Label", - "(LoDTensor, LoDTensor). The ground truth with shape " + "(LoDTensor, LoDTensor). The ground truth with shape " "[N x 1]. This input is optional. See more details in the operator's " "comments.") .AsDispensable(); - AddOutput("ViterbiPath", - "(LoDTensor, LoDTensor). The decoding results. What to " - "return changes depending on whether the Input(Label) (the groud " - "truth) is given. See more details in the operator's comment."); + AddOutput( + "ViterbiPath", + "(LoDTensor, LoDTensor). The decoding results. What to " + "return changes depending on whether the Input(Label) (the ground " + "truth) is given. See more details in the operator's comment."); AddComment(R"DOC( The crf_decoding operator reads the emission feature weights and the transition -freature weights learned by the linear_chain_crf operator. It implements the +feature weights learned by the linear_chain_crf operator. It implements the Viterbi algorithm which is a dynamic programming algorithm for finding the most likely sequence of hidden states, called the Viterbi path, that results in a sequence of observed tags. @@ -60,14 +61,14 @@ operator. When Input(Label) is given, the crf_decoding operator returns a row vector with shape [N x 1] whose values are fixed to be 0, indicating an incorrect -prediction, or 1 indicating a tag is correctly predicted. Such an ouput is the +prediction, or 1 indicating a tag is correctly predicted. Such an output is the input to chunk_eval operator. 2. Input(Label) is not given: This is the standard decoding process. -The crf_decoding operator returns a row vecotr with shape [N x 1] whose values +The crf_decoding operator returns a row vector with shape [N x 1] whose values range from 0 to maximum tag number - 1. Each element indicates an index of a predicted tag. 
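A self-contained sketch of the Viterbi recursion that this DOC block and the memo-table comments in crf_decoding_op.h describe (illustrative only; the real kernel additionally handles start/stop transition weights, int64 output paths, and LoD-segmented sequences):

    #include <vector>

    // x[k][v]: emission score of tag v at position k; w[u][v]: transition u -> v.
    // alpha[k][v] plays the role of the memo table mentioned in the kernel comments.
    std::vector<int> Viterbi(const std::vector<std::vector<double>>& x,
                             const std::vector<std::vector<double>>& w) {
      int len = x.size(), tags = x[0].size();
      std::vector<std::vector<double>> alpha(len, std::vector<double>(tags));
      std::vector<std::vector<int>> track(len, std::vector<int>(tags, 0));
      alpha[0] = x[0];
      for (int k = 1; k < len; ++k) {
        for (int v = 0; v < tags; ++v) {
          double best = alpha[k - 1][0] + w[0][v];
          int arg = 0;
          for (int u = 1; u < tags; ++u) {
            double s = alpha[k - 1][u] + w[u][v];
            if (s > best) { best = s; arg = u; }
          }
          alpha[k][v] = best + x[k][v];
          track[k][v] = arg;
        }
      }
      // Backtrack from the best final tag.
      std::vector<int> path(len);
      int v = 0;
      for (int t = 1; t < tags; ++t)
        if (alpha[len - 1][t] > alpha[len - 1][v]) v = t;
      for (int k = len - 1; k >= 0; --k) {
        path[k] = v;
        v = track[k][v];
      }
      return path;
    }

    int main() {
      // Two positions, three tags, zero transition scores: expect tags {1, 2}.
      std::vector<std::vector<double>> x = {{0.1, 0.9, 0.0}, {0.2, 0.1, 0.7}};
      std::vector<std::vector<double>> w(3, std::vector<double>(3, 0.0));
      std::vector<int> path = Viterbi(x, w);
      return (path[0] == 1 && path[1] == 2) ? 0 : 1;
    }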
)DOC"); @@ -134,5 +135,6 @@ namespace ops = paddle::operators; REGISTER_OP_WITHOUT_GRADIENT(crf_decoding, ops::CRFDecodingOp, ops::CRFDecodingOpMaker); REGISTER_OP_CPU_KERNEL( - crf_decoding, ops::CRFDecodingOpKernel, - ops::CRFDecodingOpKernel); + crf_decoding, + ops::CRFDecodingOpKernel, + ops::CRFDecodingOpKernel); diff --git a/paddle/operators/crf_decoding_op.h b/paddle/operators/crf_decoding_op.h index 526e0c5dcb2649b35ee28f5153c8472ca7a0af7b..f6827b7b1128251b2bb7e0a6a032389e5adc1371 100644 --- a/paddle/operators/crf_decoding_op.h +++ b/paddle/operators/crf_decoding_op.h @@ -24,7 +24,7 @@ using framework::LoDTensor; using framework::LoD; using framework::Tensor; -template +template class CRFDecodingOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -43,9 +43,9 @@ class CRFDecodingOpKernel : public framework::OpKernel { const size_t level = 0; const size_t seq_num = lod[level].size() - 1; - int* path = decoded_path->mutable_data(platform::CPUPlace()); - math::SetConstant()(ctx.device_context(), - decoded_path, 0); + int64_t* path = decoded_path->mutable_data(platform::CPUPlace()); + math::SetConstant()( + ctx.template device_context(), decoded_path, 0); for (size_t i = 0; i < seq_num; ++i) { int start_pos = static_cast(lod[level][i]); int end_pos = static_cast(lod[level][i + 1]); @@ -57,7 +57,7 @@ class CRFDecodingOpKernel : public framework::OpKernel { if (label) { PADDLE_ENFORCE_EQ(label->NumLevels(), 1UL, "The Input(Label) should be a sequence."); - const int* label_value = label->data(); + const int64_t* label_value = label->data(); size_t batch_size = emission_weights->dims()[0]; for (size_t i = 0; i < batch_size; ++i) { path[i] = label_value[i] == path[i] ? 1 : 0; @@ -76,7 +76,7 @@ class CRFDecodingOpKernel : public framework::OpKernel { const T* x = emission_weights.data(); const T* w = transition_weights.data(); - int* path = decoded_path->data(); + int64_t* path = decoded_path->data(); // alpha is a memo table. 
An element alpha(k, v) records the score of the // best sequence of tags from position 1 to position k with v being the end diff --git a/paddle/operators/crop_op.cc b/paddle/operators/crop_op.cc index 6752eb8c1c72150b0b1cf5595211ca1d01ef2bf4..7c2a0ac7a705e5aac3d181545f8dfc8881e811f2 100644 --- a/paddle/operators/crop_op.cc +++ b/paddle/operators/crop_op.cc @@ -133,5 +133,5 @@ class CropOpGrad : public framework::OperatorWithKernel { namespace ops = paddle::operators; REGISTER_OP(crop, ops::CropOp, ops::CropOpMaker, crop_grad, ops::CropOpGrad); REGISTER_OP_CPU_KERNEL(crop, ops::CropKernel); -REGISTER_OP_CPU_KERNEL(crop_grad, - ops::CropGradKernel); +REGISTER_OP_CPU_KERNEL( + crop_grad, ops::CropGradKernel); diff --git a/paddle/operators/crop_op.cu b/paddle/operators/crop_op.cu index f8ee18a1d6e894cbb2d71dd4b6b459abeb076817..90fd83ca10b750896a9fe144d3c30fabb2f54e0a 100644 --- a/paddle/operators/crop_op.cu +++ b/paddle/operators/crop_op.cu @@ -16,6 +16,6 @@ #include "paddle/operators/crop_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(crop, ops::CropKernel); -REGISTER_OP_GPU_KERNEL(crop_grad, - ops::CropGradKernel); +REGISTER_OP_CUDA_KERNEL(crop, ops::CropKernel); +REGISTER_OP_CUDA_KERNEL( + crop_grad, ops::CropGradKernel); diff --git a/paddle/operators/crop_op.h b/paddle/operators/crop_op.h index 2e72583d68d0acf0e2f5044637dba55de3b57209..d531a19c783d2768d24142bb7b974ccfc2b39350 100644 --- a/paddle/operators/crop_op.h +++ b/paddle/operators/crop_op.h @@ -49,7 +49,7 @@ class CropKernel : public framework::OpKernel { } }; -template +template void CropGradFunction(const framework::ExecutionContext& context) { auto* d_x = context.Output(framework::GradVarName("X")); if (d_x != nullptr) { @@ -63,12 +63,13 @@ void CropGradFunction(const framework::ExecutionContext& context) { } auto d_x_tensor = EigenTensor::From(*d_x); auto d_out_tensor = EigenTensor::From(*d_out); - d_x_tensor.device(context.GetEigenDevice()) = + d_x_tensor.device( + *context.template device_context().eigen_device()) = d_out_tensor.pad(paddings, 0); } } -template +template class CropGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -76,22 +77,22 @@ class CropGradKernel : public framework::OpKernel { context.Input(framework::GradVarName("Out"))->dims().size(); switch (rank) { case 1: - CropGradFunction(context); + CropGradFunction(context); break; case 2: - CropGradFunction(context); + CropGradFunction(context); break; case 3: - CropGradFunction(context); + CropGradFunction(context); break; case 4: - CropGradFunction(context); + CropGradFunction(context); break; case 5: - CropGradFunction(context); + CropGradFunction(context); break; case 6: - CropGradFunction(context); + CropGradFunction(context); break; default: PADDLE_THROW( diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/cross_entropy_op.cc index 1e82742eaf86711fe4f9d02d517ad1853131cf67..2b06012b690c6725fd150cd99e992912655dc9c6 100644 --- a/paddle/operators/cross_entropy_op.cc +++ b/paddle/operators/cross_entropy_op.cc @@ -95,6 +95,7 @@ class CrossEntropyGradientOp : public framework::OperatorWithKernel { "Input(Label) should be 1."); } ctx->SetOutputDim(framework::GradVarName("X"), x_dims); + ctx->ShareLoD("X", framework::GradVarName("X")); } protected: diff --git a/paddle/operators/cross_entropy_op.cu b/paddle/operators/cross_entropy_op.cu index 6212e39dfde33c5943958adbd1a0a052262e119e..05469645880fa466a2a3324ad1b7a8b9d681c440 100644 --- 
a/paddle/operators/cross_entropy_op.cu +++ b/paddle/operators/cross_entropy_op.cu @@ -53,8 +53,9 @@ class CrossEntropyOpCUDAKernel : public framework::OpKernel { Tensor* y = ctx.Output("Y"); y->mutable_data(ctx.GetPlace()); - math::CrossEntropyFunctor()( - ctx.device_context(), y, x, label, ctx.Attr("soft_label")); + math::CrossEntropyFunctor()( + ctx.template device_context(), y, x, label, + ctx.Attr("soft_label")); } }; @@ -80,15 +81,17 @@ class CrossEntropyGradientOpCUDAKernel : public framework::OpKernel { int block = 512; int grid = (batch_size * class_num + block - 1) / block; - auto stream = ctx.cuda_device_context().stream(); + + auto& dev_ctx = ctx.template device_context(); + auto stream = dev_ctx.stream(); if (ctx.Attr("soft_label")) { auto* label_data = label->data(); SoftCrossEntropyGradientKernel<<>>( dx_data, dy_data, x_data, label_data, batch_size, class_num); } else { - math::SetConstant functor; - functor(ctx.device_context(), dx, 0); + math::SetConstant functor; + functor(dev_ctx, dx, 0); auto* label_data = label->data(); grid = (batch_size + block - 1) / block; CrossEntropyGradientKernel<<>>( @@ -101,8 +104,8 @@ class CrossEntropyGradientOpCUDAKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(cross_entropy, ops::CrossEntropyOpCUDAKernel, - ops::CrossEntropyOpCUDAKernel); -REGISTER_OP_GPU_KERNEL(cross_entropy_grad, - ops::CrossEntropyGradientOpCUDAKernel, - ops::CrossEntropyGradientOpCUDAKernel); +REGISTER_OP_CUDA_KERNEL(cross_entropy, ops::CrossEntropyOpCUDAKernel, + ops::CrossEntropyOpCUDAKernel); +REGISTER_OP_CUDA_KERNEL(cross_entropy_grad, + ops::CrossEntropyGradientOpCUDAKernel, + ops::CrossEntropyGradientOpCUDAKernel); diff --git a/paddle/operators/cross_entropy_op.h b/paddle/operators/cross_entropy_op.h index 37db0a930a6aea0ba333395ca9c5b9d231c07b32..5623d2ded16daaf51dd26c9d9a8c04a0ae5be5ec 100644 --- a/paddle/operators/cross_entropy_op.h +++ b/paddle/operators/cross_entropy_op.h @@ -37,8 +37,9 @@ class CrossEntropyOpKernel : public framework::OpKernel { Tensor* y = ctx.Output("Y"); y->mutable_data(ctx.GetPlace()); - math::CrossEntropyFunctor()( - ctx.device_context(), y, x, labels, ctx.Attr("soft_label")); + math::CrossEntropyFunctor()( + ctx.template device_context(), y, x, labels, + ctx.Attr("soft_label")); } }; @@ -61,7 +62,8 @@ class CrossEntropyGradientOpKernel : public framework::OpKernel { auto lbl_mat = EigenMatrix::From(*label); auto dx_mat = EigenMatrix::From(*dx); - dx_mat.device(ctx.GetEigenDevice()) = + dx_mat.device(*ctx.template device_context() + .eigen_device()) = -(lbl_mat * dy_mat.broadcast(Eigen::DSizes(1, class_num)) / x_mat); } else { @@ -70,8 +72,8 @@ class CrossEntropyGradientOpKernel : public framework::OpKernel { const T* x_data = x->data(); const int64_t* label_data = label->data(); - math::SetConstant functor; - functor(ctx.device_context(), dx, 0); + math::SetConstant functor; + functor(ctx.template device_context(), dx, 0); for (int64_t i = 0; i < batch_size; ++i) { PADDLE_ASSERT(label_data[i] >= 0 || label_data[i] < class_num); diff --git a/paddle/operators/decayed_adagrad_op.cc b/paddle/operators/decayed_adagrad_op.cc index 640b4e77448d1b64bcf7375f26c07ff1d2bdeaa3..fd29c7270b0442da740a74f83fdfeed8f47f830d 100644 --- a/paddle/operators/decayed_adagrad_op.cc +++ b/paddle/operators/decayed_adagrad_op.cc @@ -99,4 +99,4 @@ REGISTER_OP_WITHOUT_GRADIENT(decayed_adagrad, ops::DecayedAdagradOp, ops::DecayedAdagradOpMaker); REGISTER_OP_CPU_KERNEL( decayed_adagrad, - 
ops::DecayedAdagradOpKernel); + ops::DecayedAdagradOpKernel); diff --git a/paddle/operators/decayed_adagrad_op.cu b/paddle/operators/decayed_adagrad_op.cu index 6fce77fe4ec6b76cb7b0259aab6a3d55d2edb36c..282b90f275ad1542d5941e001dbf646348fc01b6 100644 --- a/paddle/operators/decayed_adagrad_op.cu +++ b/paddle/operators/decayed_adagrad_op.cu @@ -16,6 +16,6 @@ #include "paddle/operators/decayed_adagrad_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( decayed_adagrad, - ops::DecayedAdagradOpKernel); + ops::DecayedAdagradOpKernel); diff --git a/paddle/operators/decayed_adagrad_op.h b/paddle/operators/decayed_adagrad_op.h index 0fe0fc5acd66c9824a864618b69097c5c063ea3f..fec9705cfc1e14e5423e23d6afb218c6c051f5a1 100644 --- a/paddle/operators/decayed_adagrad_op.h +++ b/paddle/operators/decayed_adagrad_op.h @@ -19,7 +19,7 @@ limitations under the License. */ namespace paddle { namespace operators { -template +template class DecayedAdagradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -43,7 +43,7 @@ class DecayedAdagradOpKernel : public framework::OpKernel { auto param_out = framework::EigenVector::Flatten(*param_out_tensor); auto moment_out = framework::EigenVector::Flatten(*moment_out_tensor); - auto place = ctx.GetEigenDevice(); + auto& place = *ctx.template device_context().eigen_device(); moment_out.device(place) = decay * moment + (1 - decay) * grad * grad; Eigen::DSizes m_dsize(moment_out_tensor->numel()); diff --git a/paddle/operators/detail/CMakeLists.txt b/paddle/operators/detail/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..f6bdc63cc2cfae526fe911ee4d989675452d5c5d --- /dev/null +++ b/paddle/operators/detail/CMakeLists.txt @@ -0,0 +1 @@ +grpc_library(sendrecvop_grpc SRCS recv_impl.cc send_impl.cc PROTO send_recv.proto DEPS lod_tensor selected_rows) diff --git a/paddle/operators/detail/recv_impl.cc b/paddle/operators/detail/recv_impl.cc new file mode 100644 index 0000000000000000000000000000000000000000..89dc5045221156eed7aa9411bc96ad86f91136d2 --- /dev/null +++ b/paddle/operators/detail/recv_impl.cc @@ -0,0 +1,44 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "send_recv_impl.h" + +namespace paddle { +namespace operators { +namespace detail { + +Status SendRecvServerImpl::SendVariable(ServerContext *context, + const VariableMessage *in_var, + VariableMessage *out_var) { + framework::LoDTensor t; + // TODO(typhoonzero): desirealize in_tensor and run pserver network. + std::istringstream iss(in_var->serialized()); + framework::DeserializeFromStream(iss, &t); + lodtensor_queue_.Push(std::move(t)); + // Block util the sub graph is done. + t = lodtensor_return_queue_.Pop(); + std::ostringstream oss; + // FIXME(typhoonzero): get context from op. 
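+  // The Pop() above returned only after the pserver sub-graph, which consumes
+  // lodtensor_queue_ via Get() and answers via Push() on
+  // lodtensor_return_queue_, produced a result; that result is serialized
+  // into out_var below.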
+ framework::SerializeToStream(oss, t, platform::CPUDeviceContext()); + std::string *varname = out_var->mutable_varname(); + *varname = in_var->varname(); + std::string *serialized = out_var->mutable_serialized(); + *serialized = oss.str(); + + return Status::OK; +} + +} // namespace detail +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/detail/send_impl.cc b/paddle/operators/detail/send_impl.cc new file mode 100644 index 0000000000000000000000000000000000000000..da1ddf75d2afb85670c5ea0c9884376415f28208 --- /dev/null +++ b/paddle/operators/detail/send_impl.cc @@ -0,0 +1,54 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "send_recv_impl.h" + +namespace paddle { +namespace operators { +namespace detail { + +bool RPCClient::SendVariable(const framework::Scope& scope, + const std::string& inname, + const std::string& outname) { + ClientContext context; + VariableMessage msg, out_msg; + // FIXME(typhoonzero): pass device context to here. + auto ctx = platform::CPUDeviceContext(); + auto* var = scope.FindVar(inname); + PADDLE_ENFORCE(var); + // TODO(typhoonzero): support SelectedRows + PADDLE_ENFORCE(var->IsType(), + "Only support LoDTensor, %s has wrong type", inname); + const framework::LoDTensor& tensor = var->Get(); + std::ostringstream oss; + framework::SerializeToStream(oss, tensor, ctx); + msg.set_varname(inname); + msg.set_serialized(oss.str()); + Status status = stub_->SendVariable(&context, msg, &out_msg); + if (!status.ok()) { + return false; + } + std::istringstream iss(out_msg.serialized()); + framework::LoDTensor ret_tensor; + framework::DeserializeFromStream(iss, &ret_tensor); + auto* outvar = scope.FindVar(outname); + framework::LoDTensor* out_tensor = outvar->GetMutable(); + // FIXME(typhoonzero): do not copy. + framework::CopyFrom(ret_tensor, ctx.GetPlace(), ctx, out_tensor); + return true; +} + +} // namespace detail +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/detail/send_recv.proto b/paddle/operators/detail/send_recv.proto new file mode 100644 index 0000000000000000000000000000000000000000..07ff9d2c621a2dfb51792821a0d3fc398c315835 --- /dev/null +++ b/paddle/operators/detail/send_recv.proto @@ -0,0 +1,35 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +syntax = "proto3"; + +package sendrecv; + +service SendRecvService { + // For parameter server round-robin like hashing, do not split tensors. + // Send and recv only one tensor + rpc SendVariable(VariableMessage) returns (VariableMessage) {} +} + +// VariableMessage is serialized paddle variable message. +// It can be: +// Tensor +// LoDTensor +// SelectedRows +message VariableMessage { + string varname = 1; + bytes serialized = 2; +} + +message VoidMessage {} diff --git a/paddle/operators/detail/send_recv_impl.h b/paddle/operators/detail/send_recv_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..b9a5340a8636db7b5d6ec7b21368632d3916b4aa --- /dev/null +++ b/paddle/operators/detail/send_recv_impl.h @@ -0,0 +1,87 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include "paddle/framework/data_type.h" +#include "paddle/framework/lod_tensor.h" +#include "paddle/framework/scope.h" +#include "paddle/framework/selected_rows.h" +#include "paddle/operators/detail/simple_block_queue.h" + +// #include +// #include +// #include +// #include +#include "paddle/operators/detail/send_recv.grpc.pb.h" +#include "paddle/operators/detail/send_recv.pb.h" + +#include + +using grpc::Channel; +using grpc::Server; +using grpc::ServerContext; +using grpc::ServerReader; +using grpc::ServerBuilder; + +using grpc::ClientContext; +using grpc::ClientReader; +using grpc::ClientReaderWriter; +using grpc::ClientWriter; +using grpc::Status; +using sendrecv::SendRecvService; +using sendrecv::VariableMessage; +using sendrecv::VoidMessage; + +namespace paddle { +namespace operators { +namespace detail { + +class SendRecvServerImpl final : public SendRecvService::Service { + public: + explicit SendRecvServerImpl() {} + + Status SendVariable(ServerContext *context, const VariableMessage *in_var, + VariableMessage *out_var) override; + + const framework::LoDTensor Get() { return this->lodtensor_queue_.Pop(); } + + void Push(const framework::LoDTensor &tensor) { + this->lodtensor_return_queue_.Push(tensor); + } + + private: + SimpleBlockQueue lodtensor_queue_; + SimpleBlockQueue lodtensor_return_queue_; + SimpleBlockQueue selected_rows_queue_; + SimpleBlockQueue selected_rows_return_queue_; +}; + +// RPCClient is a class to send tensors to pserver sub-network +// using different hashing methods. 
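A rough usage sketch for the two sides declared in this header (illustrative only and not part of the patch: ServeLoop and RunPserverSubGraph are hypothetical names, and the snippet compiles only against the headers added here):

    // Pserver side: drain requests, run the sub-graph, hand results back so
    // the blocked SendVariable() handler can reply to the trainer.
    void ServeLoop(paddle::operators::detail::SendRecvServerImpl* service) {
      for (;;) {
        paddle::framework::LoDTensor in = service->Get();           // blocks until a trainer sends
        paddle::framework::LoDTensor out = RunPserverSubGraph(in);  // hypothetical step
        service->Push(out);  // lets the pending SendVariable() serialize a reply
      }
    }

On the trainer side, RPCClient::SendVariable(scope, inname, outname) serializes the LoDTensor held by inname, round-trips it through this service, and copies the reply into outname, as implemented in send_impl.cc above.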
+class RPCClient { + public: + RPCClient(std::shared_ptr channel) + : stub_(SendRecvService::NewStub(channel)) {} + + bool SendVariable(const framework::Scope &scope, const std::string &inname, + const std::string &outname); + + private: + std::unique_ptr stub_; +}; + +} // namespace detail +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/detail/simple_block_queue.h b/paddle/operators/detail/simple_block_queue.h new file mode 100644 index 0000000000000000000000000000000000000000..44899217579532af2c1d2e6074ec0e08231e7b86 --- /dev/null +++ b/paddle/operators/detail/simple_block_queue.h @@ -0,0 +1,52 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include +#include +#include + +namespace paddle { +namespace operators { +namespace detail { + +template +class SimpleBlockQueue { + private: + std::mutex mutex_; + std::condition_variable condition_; + std::deque queue_; + + public: + void Push(T const& value) { + { + std::unique_lock lock(this->mutex_); + queue_.push_front(value); + } + this->condition_.notify_one(); + } + + T Pop() { + std::unique_lock lock(this->mutex_); + this->condition_.wait(lock, [=] { return !this->queue_.empty(); }); + T rc(std::move(this->queue_.back())); + this->queue_.pop_back(); + return rc; + } +}; + +} // namespace detail +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/dropout_op.cc b/paddle/operators/dropout_op.cc index 932c0bf8fbf6ffdc466516bb7c8578abf0f57209..acd526ae8047292ce6c6756f174c80053dca0d9f 100644 --- a/paddle/operators/dropout_op.cc +++ b/paddle/operators/dropout_op.cc @@ -100,6 +100,8 @@ namespace ops = paddle::operators; REGISTER_OP(dropout, ops::DropoutOp, ops::DropoutOpMaker, dropout_grad, ops::DropoutOpGrad); REGISTER_OP_CPU_KERNEL( - dropout, ops::CPUDropoutKernel); + dropout, + ops::CPUDropoutKernel); REGISTER_OP_CPU_KERNEL( - dropout_grad, ops::DropoutGradKernel); + dropout_grad, + ops::DropoutGradKernel); diff --git a/paddle/operators/dropout_op.cu b/paddle/operators/dropout_op.cu index db3578b9bf4c081e431f202f0828ec6392c924b2..10c670751d026ef92e01aad7da31a8f59b8514c0 100644 --- a/paddle/operators/dropout_op.cu +++ b/paddle/operators/dropout_op.cu @@ -58,7 +58,7 @@ class GPUDropoutKernel : public framework::OpKernel { auto X = EigenMatrix::Reshape(*x, 1); auto Y = EigenMatrix::Reshape(*y, 1); - auto place = context.GetEigenDevice(); + auto& place = *context.template device_context().eigen_device(); if (!context.Attr("is_test")) { auto* mask = context.Output("Mask"); auto* mask_data = mask->mutable_data(context.GetPlace()); @@ -80,7 +80,9 @@ class GPUDropoutKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( - dropout, ops::GPUDropoutKernel); -REGISTER_OP_GPU_KERNEL( - dropout_grad, ops::DropoutGradKernel); +REGISTER_OP_CUDA_KERNEL( + dropout, + ops::GPUDropoutKernel); +REGISTER_OP_CUDA_KERNEL( + dropout_grad, + ops::DropoutGradKernel); diff 
--git a/paddle/operators/dropout_op.h b/paddle/operators/dropout_op.h index d9a130fdc040f745b058c39221f0bb9661473388..84ad39f0bb639975365d427aa205411ef79ecd46 100644 --- a/paddle/operators/dropout_op.h +++ b/paddle/operators/dropout_op.h @@ -25,7 +25,7 @@ template using EigenMatrix = framework::EigenMatrix; -template +template class CPUDropoutKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -55,13 +55,14 @@ class CPUDropoutKernel : public framework::OpKernel { } else { auto X = EigenMatrix::Reshape(*x, 1); auto Y = EigenMatrix::Reshape(*y, 1); - auto place = context.GetEigenDevice(); + auto& place = + *context.template device_context().eigen_device(); Y.device(place) = X * dropout_prob; } } }; -template +template class DropoutGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -77,7 +78,8 @@ class DropoutGradKernel : public framework::OpKernel { auto dX = EigenMatrix::Reshape(*grad_x, 1); auto dY = EigenMatrix::Reshape(*grad_y, 1); - auto place = context.GetEigenDevice(); + auto& place = + *context.template device_context().eigen_device(); dX.device(place) = dY * M; } }; diff --git a/paddle/operators/elementwise_add_op.cc b/paddle/operators/elementwise_add_op.cc index 432b9ba6f72f8dd11c666d5473c570bde60de995..a62eeeeb95fef77c00258403ca1cae11c2db7173 100644 --- a/paddle/operators/elementwise_add_op.cc +++ b/paddle/operators/elementwise_add_op.cc @@ -34,13 +34,13 @@ REGISTER_OP(elementwise_add, ops::ElementwiseOp, ops::ElementwiseAddOpMaker, elementwise_add_grad, ops::ElementwiseOpGrad); REGISTER_OP_CPU_KERNEL( elementwise_add, - ops::ElementwiseAddKernel, - ops::ElementwiseAddKernel, - ops::ElementwiseAddKernel, - ops::ElementwiseAddKernel); + ops::ElementwiseAddKernel, + ops::ElementwiseAddKernel, + ops::ElementwiseAddKernel, + ops::ElementwiseAddKernel); REGISTER_OP_CPU_KERNEL( elementwise_add_grad, - ops::ElementwiseAddGradKernel, - ops::ElementwiseAddGradKernel, - ops::ElementwiseAddGradKernel, - ops::ElementwiseAddGradKernel); + ops::ElementwiseAddGradKernel, + ops::ElementwiseAddGradKernel, + ops::ElementwiseAddGradKernel, + ops::ElementwiseAddGradKernel); diff --git a/paddle/operators/elementwise_add_op.cu b/paddle/operators/elementwise_add_op.cu index 7591428ac7c2f74f25f0f7d818eafcf59c8e4a4f..78642bb4246e7328dd3e2d902aca88615d598ddf 100644 --- a/paddle/operators/elementwise_add_op.cu +++ b/paddle/operators/elementwise_add_op.cu @@ -17,15 +17,16 @@ namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( elementwise_add, - ops::ElementwiseAddKernel, - ops::ElementwiseAddKernel, - ops::ElementwiseAddKernel, - ops::ElementwiseAddKernel); -REGISTER_OP_GPU_KERNEL( + ops::ElementwiseAddKernel, + ops::ElementwiseAddKernel, + ops::ElementwiseAddKernel, + ops::ElementwiseAddKernel); +REGISTER_OP_CUDA_KERNEL( elementwise_add_grad, - ops::ElementwiseAddGradKernel, - ops::ElementwiseAddGradKernel, - ops::ElementwiseAddGradKernel, - ops::ElementwiseAddGradKernel); + ops::ElementwiseAddGradKernel, + ops::ElementwiseAddGradKernel, + ops::ElementwiseAddGradKernel, + ops::ElementwiseAddGradKernel); diff --git a/paddle/operators/elementwise_add_op.h b/paddle/operators/elementwise_add_op.h index f04fe3ec6069ab1bf227be6a3a5c10ee908e4824..069bdaf0ab7469b0a814ca5f68b444b9ce4904f1 100644 --- a/paddle/operators/elementwise_add_op.h +++ b/paddle/operators/elementwise_add_op.h @@ -19,11 +19,48 @@ namespace paddle { namespace 
operators { -template +template +struct AddFunctor { + inline HOSTDEVICE T operator()(T a, T b) const { return a + b; } +}; + +template class ElementwiseAddKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - ElementwiseCompute(ctx); + using Tensor = framework::Tensor; + + auto* x = ctx.Input("X"); + auto* y = ctx.Input("Y"); + auto* z = ctx.Output("Out"); + z->mutable_data(ctx.GetPlace()); + TransformFunctor, T, DeviceContext> functor( + x, y, z, ctx.template device_context(), AddFunctor()); + + auto x_dims = x->dims(); + auto y_dims = y->dims(); + PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(), + "Rank of first input must >= rank of second input."); + + if (x_dims == y_dims) { + functor.Run(); + return; + } + + int axis = ctx.Attr("axis"); + axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis); + PADDLE_ENFORCE(axis >= 0 && axis < x_dims.size(), + "Axis should be in range [0, x_dims)"); + + int pre, n, post; + get_mid_dims(x_dims, y_dims, axis, pre, n, post); + if (post == 1) { + functor.RunRowWise(n, pre); + return; + } else { + functor.RunMidWise(n, pre, post); + return; + } } }; @@ -100,11 +137,11 @@ struct ElementwiseAddBroadCast2GradFunctor { } }; -template +template class ElementwiseAddGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - ElementwiseGradCompute, + ElementwiseGradCompute, ElementwiseAddOneGradFunctor, ElementwiseAddBroadCastGradFunctor, ElementwiseAddBroadCast2GradFunctor>(ctx); diff --git a/paddle/operators/elementwise_div_op.cc b/paddle/operators/elementwise_div_op.cc index 7a325199bd07e44042a4e8b3aae0ab93fae1c351..1c3e9e70eef0c1adfb89cf1a58437092f8d536d7 100644 --- a/paddle/operators/elementwise_div_op.cc +++ b/paddle/operators/elementwise_div_op.cc @@ -35,13 +35,13 @@ REGISTER_OP(elementwise_div, ops::ElementwiseOp, ops::ElementwiseDivOpMaker, elementwise_div_grad, ops::ElementwiseOpGrad); REGISTER_OP_CPU_KERNEL( elementwise_div, - ops::ElementwiseDivKernel, - ops::ElementwiseDivKernel, - ops::ElementwiseDivKernel, - ops::ElementwiseDivKernel); + ops::ElementwiseDivKernel, + ops::ElementwiseDivKernel, + ops::ElementwiseDivKernel, + ops::ElementwiseDivKernel); REGISTER_OP_CPU_KERNEL( elementwise_div_grad, - ops::ElementwiseDivGradKernel, - ops::ElementwiseDivGradKernel, - ops::ElementwiseDivGradKernel, - ops::ElementwiseDivGradKernel); + ops::ElementwiseDivGradKernel, + ops::ElementwiseDivGradKernel, + ops::ElementwiseDivGradKernel, + ops::ElementwiseDivGradKernel); diff --git a/paddle/operators/elementwise_div_op.cu b/paddle/operators/elementwise_div_op.cu index de4d0c33442a1fcfe0dd4c16df7ceeec737fbc6d..502c52893667e246a19bb04c8bf3ed3df3265f2d 100644 --- a/paddle/operators/elementwise_div_op.cu +++ b/paddle/operators/elementwise_div_op.cu @@ -17,15 +17,16 @@ namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( elementwise_div, - ops::ElementwiseDivKernel, - ops::ElementwiseDivKernel, - ops::ElementwiseDivKernel, - ops::ElementwiseDivKernel); -REGISTER_OP_GPU_KERNEL( + ops::ElementwiseDivKernel, + ops::ElementwiseDivKernel, + ops::ElementwiseDivKernel, + ops::ElementwiseDivKernel); +REGISTER_OP_CUDA_KERNEL( elementwise_div_grad, - ops::ElementwiseDivGradKernel, - ops::ElementwiseDivGradKernel, - ops::ElementwiseDivGradKernel, - ops::ElementwiseDivGradKernel); + ops::ElementwiseDivGradKernel, + ops::ElementwiseDivGradKernel, + ops::ElementwiseDivGradKernel, + ops::ElementwiseDivGradKernel); diff 
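The broadcasting path in the new ElementwiseAddKernel above splits x's shape around the axis at which y is aligned into (pre, n, post), then dispatches to RunRowWise when post == 1 and to RunMidWise otherwise. A small self-contained sketch of that decomposition (illustrative; the patch's get_mid_dims does the same thing on framework::DDim):

    #include <cassert>
    #include <vector>

    // Split x_dims into (pre, n, post) so that y_dims lines up starting at `axis`.
    void GetMidDims(const std::vector<int>& x_dims, const std::vector<int>& y_dims,
                    int axis, int* pre, int* n, int* post) {
      *pre = 1; *n = 1; *post = 1;
      for (int i = 0; i < axis; ++i) *pre *= x_dims[i];
      for (size_t i = 0; i < y_dims.size(); ++i) *n *= y_dims[i];
      for (size_t i = axis + y_dims.size(); i < x_dims.size(); ++i) *post *= x_dims[i];
    }

    int main() {
      int pre, n, post;
      // x: [2, 3, 4], y: [3], axis = 1  ->  pre = 2, n = 3, post = 4
      GetMidDims({2, 3, 4}, {3}, 1, &pre, &n, &post);
      assert(pre == 2 && n == 3 && post == 4);  // post != 1, so RunMidWise is used
      return 0;
    }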
--git a/paddle/operators/elementwise_div_op.h b/paddle/operators/elementwise_div_op.h index 8946ff3d25c2aff3dc3aa69368f0083371cd2fef..d91313db4225d8fe051856345367a15867bdf215 100644 --- a/paddle/operators/elementwise_div_op.h +++ b/paddle/operators/elementwise_div_op.h @@ -19,11 +19,11 @@ namespace paddle { namespace operators { -template +template class ElementwiseDivKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - ElementwiseCompute(ctx); + ElementwiseCompute(ctx); } }; @@ -102,11 +102,11 @@ struct ElementwiseDivBroadCast2GradFunctor { } }; -template +template class ElementwiseDivGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - ElementwiseGradCompute, + ElementwiseGradCompute, ElementwiseDivGradFunctor, ElementwiseDivBroadCastGradFunctor, ElementwiseDivBroadCast2GradFunctor>(ctx); diff --git a/paddle/operators/elementwise_mul_op.cc b/paddle/operators/elementwise_mul_op.cc index 8851267a524f51773a9f86ff83943cea4cb042aa..aadb95cbe35fe565cf1009f0f9765def921d0906 100644 --- a/paddle/operators/elementwise_mul_op.cc +++ b/paddle/operators/elementwise_mul_op.cc @@ -36,13 +36,13 @@ REGISTER_OP(elementwise_mul, ops::ElementwiseOp, ops::ElementwiseMulOpMaker, elementwise_mul_grad, ops::ElementwiseOpGrad); REGISTER_OP_CPU_KERNEL( elementwise_mul, - ops::ElementwiseMulKernel, - ops::ElementwiseMulKernel, - ops::ElementwiseMulKernel, - ops::ElementwiseMulKernel); + ops::ElementwiseMulKernel, + ops::ElementwiseMulKernel, + ops::ElementwiseMulKernel, + ops::ElementwiseMulKernel); REGISTER_OP_CPU_KERNEL( elementwise_mul_grad, - ops::ElementwiseMulGradKernel, - ops::ElementwiseMulGradKernel, - ops::ElementwiseMulGradKernel, - ops::ElementwiseMulGradKernel); + ops::ElementwiseMulGradKernel, + ops::ElementwiseMulGradKernel, + ops::ElementwiseMulGradKernel, + ops::ElementwiseMulGradKernel); diff --git a/paddle/operators/elementwise_mul_op.cu b/paddle/operators/elementwise_mul_op.cu index b0dfdee1ccef56c6cda06ae6759017294fa5115c..089451b3e1288b3adc689a3c7d9fea2bc5243407 100644 --- a/paddle/operators/elementwise_mul_op.cu +++ b/paddle/operators/elementwise_mul_op.cu @@ -17,15 +17,16 @@ namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( elementwise_mul, - ops::ElementwiseMulKernel, - ops::ElementwiseMulKernel, - ops::ElementwiseMulKernel, - ops::ElementwiseMulKernel); -REGISTER_OP_GPU_KERNEL( + ops::ElementwiseMulKernel, + ops::ElementwiseMulKernel, + ops::ElementwiseMulKernel, + ops::ElementwiseMulKernel); +REGISTER_OP_CUDA_KERNEL( elementwise_mul_grad, - ops::ElementwiseMulGradKernel, - ops::ElementwiseMulGradKernel, - ops::ElementwiseMulGradKernel, - ops::ElementwiseMulGradKernel); + ops::ElementwiseMulGradKernel, + ops::ElementwiseMulGradKernel, + ops::ElementwiseMulGradKernel, + ops::ElementwiseMulGradKernel); diff --git a/paddle/operators/elementwise_mul_op.h b/paddle/operators/elementwise_mul_op.h index 4469b07eaa08a3b011a88e58f1d645dd30b10ced..16fa5ec4b3a369805acb401bae5407072101af8d 100644 --- a/paddle/operators/elementwise_mul_op.h +++ b/paddle/operators/elementwise_mul_op.h @@ -18,11 +18,11 @@ namespace paddle { namespace operators { -template +template class ElementwiseMulKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - ElementwiseCompute(ctx); + ElementwiseCompute(ctx); } }; @@ -101,11 +101,11 @@ struct ElementwiseMulBroadCast2GradFunctor { } }; -template 
+template class ElementwiseMulGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - ElementwiseGradCompute, + ElementwiseGradCompute, ElementwiseMulGradFunctor, ElementwiseMulBroadCastGradFunctor, ElementwiseMulBroadCast2GradFunctor>(ctx); diff --git a/paddle/operators/elementwise_op.h b/paddle/operators/elementwise_op.h index 56e5eb69bc382a2c15d88b759fa6987f02c6cabb..ea533503e4916cae7e1157ed34da9629dcff3513 100644 --- a/paddle/operators/elementwise_op.h +++ b/paddle/operators/elementwise_op.h @@ -35,7 +35,7 @@ class ElementwiseOp : public framework::OperatorWithKernel { auto x_dim = ctx->GetInputDim("X"); auto y_dim = ctx->GetInputDim("Y"); PADDLE_ENFORCE_GE(x_dim.size(), y_dim.size(), - "Rank of first input must >= rank of second input.") + "Rank of first input must >= rank of second input."); ctx->SetOutputDim("Out", x_dim); ctx->ShareLoD("X", /*->*/ "Out"); } @@ -120,7 +120,7 @@ class ElementwiseOpGrad : public framework::OperatorWithKernel { auto out_dims = ctx->GetInputDim(framework::GradVarName("Out")); PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(), - "Rank of first input must >= rank of second input.") + "Rank of first input must >= rank of second input."); auto x_grad_name = framework::GradVarName("X"); auto y_grad_name = framework::GradVarName("Y"); diff --git a/paddle/operators/elementwise_op_function.h b/paddle/operators/elementwise_op_function.h index 488a35aafc8600bb8bb252fc3a5161c72a2f6df1..7ebfc7df8c117edd7bcf14cc5ae6ba3dc1302c03 100644 --- a/paddle/operators/elementwise_op_function.h +++ b/paddle/operators/elementwise_op_function.h @@ -16,6 +16,11 @@ #include "paddle/framework/eigen.h" #include "paddle/framework/op_registry.h" #include "paddle/framework/operator.h" +#include "paddle/platform/transform.h" + +#ifdef __NVCC__ +#include +#endif #include "paddle/operators/math/math_function.h" @@ -54,18 +59,173 @@ inline void get_mid_dims(const framework::DDim& x_dims, } } +template +class RowwiseTransformIterator; +template +class MidWiseTransformIterator; + +template +class RowwiseTransformIterator { + public: + RowwiseTransformIterator(const T* ptr, int n) : ptr_(ptr), i_(0), n_(n) {} + + RowwiseTransformIterator& operator++() { + ++i_; + if (UNLIKELY(i_ == n_)) { + i_ = 0; + } + return *this; + } + + bool operator==(const RowwiseTransformIterator& + rhs) const { + return (ptr_ + i_) == &(*rhs); + } + + bool operator!=(const RowwiseTransformIterator& + rhs) const { + return (ptr_ + i_) != &(*rhs); + } + + const T& operator*() { return ptr_[i_]; } + + private: + const T* ptr_; + int i_; + int64_t n_; +}; + +template +class MidWiseTransformIterator { + public: + MidWiseTransformIterator(const T* ptr, int n, int post) + : ptr_(ptr), i_(0), j_(0), n_(n), post_(post) {} + + MidWiseTransformIterator& operator++() { + ++j_; + i_ = j_ / post_; + if (UNLIKELY(i_ == n_)) { + j_ = 0; + i_ = 0; + } + return *this; + } + + bool operator==(const MidWiseTransformIterator& + rhs) const { + return (ptr_ + i_) == &(*rhs); + } + + bool operator!=(const MidWiseTransformIterator& + rhs) const { + return (ptr_ + i_) != &(*rhs); + } + + const T& operator*() { return ptr_[i_]; } + + private: + const T* ptr_; + int i_; + int64_t j_; + int64_t n_; + int post_; +}; + +#ifdef __NVCC__ +template +class RowwiseTransformIterator + : public thrust::iterator_adaptor< + RowwiseTransformIterator, const T*> { + public: + typedef thrust::iterator_adaptor< + RowwiseTransformIterator, const T*> + super_t; + HOSTDEVICE 
RowwiseTransformIterator(const T* x, int n) + : super_t(x), begin_(x), n_(n){}; + friend class thrust::iterator_core_access; + + private: + unsigned int n_; + const T* begin_; + HOSTDEVICE typename super_t::reference dereference() const { + return *(begin_ + (this->base() - begin_) % n_); + } +}; + +template +class MidWiseTransformIterator + : public thrust::iterator_adaptor< + MidWiseTransformIterator, const T*> { + public: + typedef thrust::iterator_adaptor< + MidWiseTransformIterator, const T*> + super_t; + HOSTDEVICE MidWiseTransformIterator(const T* x, int n, int post) + : super_t(x), begin_(x), n_(n), post_(post){}; + friend class thrust::iterator_core_access; + + private: + unsigned int post_; + unsigned int n_; + const T* begin_; + HOSTDEVICE typename super_t::reference dereference() const { + return *(begin_ + (((this->base() - begin_) / post_) % n_)); + } +}; +#endif + +template +class TransformFunctor { + public: + TransformFunctor(const framework::Tensor* x, const framework::Tensor* y, + framework::Tensor* z, const DeviceContext& ctx, Functor func) + : x_(x->data()), + y_(y->data()), + z_(z->mutable_data(ctx.GetPlace())), + nx_(x->numel()), + ctx_(ctx), + func_(func) {} + + inline void Run() const { + platform::Transform trans; + trans(ctx_, x_, x_ + nx_, y_, z_, func_); + } + + inline void RunRowWise(int n, int pre) const { + platform::Transform trans; + trans(ctx_, x_, x_ + nx_, RowwiseTransformIterator(y_, n), + z_, func_); + } + + inline void RunMidWise(int n, int pre, int post) const { + platform::Transform trans; + trans(ctx_, x_, x_ + nx_, + MidWiseTransformIterator(y_, n, post), z_, func_); + } + + private: + const T* x_; + const T* y_; + T* z_; + int64_t nx_; + const DeviceContext& ctx_; + Functor func_; +}; + #define EIGEN_FUNCTOR(name, eigen_op) \ struct Eigen##name##Functor { \ - template \ + template \ inline void Run(const framework::Tensor* x, const framework::Tensor* y, \ framework::Tensor* z, \ const framework::ExecutionContext& ctx) { \ auto x_e = framework::EigenVector::Flatten(*x); \ auto y_e = framework::EigenVector::Flatten(*y); \ auto z_e = framework::EigenVector::Flatten(*z); \ - z_e.device(ctx.GetEigenDevice()) = eigen_op(x_e, y_e); \ + z_e.device( \ + *ctx.template device_context().eigen_device()) = \ + eigen_op(x_e, y_e); \ } \ - template \ + template \ inline void RunBroadCast(const framework::Tensor* x, \ const framework::Tensor* y, framework::Tensor* z, \ const framework::ExecutionContext& ctx, int pre, \ @@ -76,9 +236,11 @@ inline void get_mid_dims(const framework::DDim& x_dims, auto y_bcast = y_e.reshape(Eigen::DSizes(1, n)) \ .broadcast(Eigen::DSizes(pre, 1)) \ .reshape(Eigen::DSizes(x_e.size())); \ - z_e.device(ctx.GetEigenDevice()) = eigen_op(x_e, y_bcast); \ + z_e.device( \ + *ctx.template device_context().eigen_device()) = \ + eigen_op(x_e, y_bcast); \ } \ - template \ + template \ inline void RunBroadCast2(const framework::Tensor* x, \ const framework::Tensor* y, \ framework::Tensor* z, \ @@ -90,11 +252,13 @@ inline void get_mid_dims(const framework::DDim& x_dims, auto y_bcast = y_e.reshape(Eigen::DSizes(1, n, 1)) \ .broadcast(Eigen::DSizes(pre, 1, post)) \ .reshape(Eigen::DSizes(x_e.size())); \ - z_e.device(ctx.GetEigenDevice()) = eigen_op(x_e, y_bcast); \ + z_e.device( \ + *ctx.template device_context().eigen_device()) = \ + eigen_op(x_e, y_bcast); \ } \ } -template +template void ElementwiseCompute(const framework::ExecutionContext& ctx) { using Tensor = framework::Tensor; @@ -106,11 +270,11 @@ void ElementwiseCompute(const 
framework::ExecutionContext& ctx) { auto x_dims = x->dims(); auto y_dims = y->dims(); PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(), - "Rank of first input must >= rank of second input.") + "Rank of first input must >= rank of second input."); if (x_dims == y_dims) { functor f; - f.template Run(x, y, z, ctx); + f.template Run(x, y, z, ctx); return; } @@ -123,11 +287,11 @@ void ElementwiseCompute(const framework::ExecutionContext& ctx) { get_mid_dims(x_dims, y_dims, axis, pre, n, post); if (post == 1) { functor f; - f.template RunBroadCast(x, y, z, ctx, pre, n); + f.template RunBroadCast(x, y, z, ctx, pre, n); return; } else { functor f; - f.template RunBroadCast2(x, y, z, ctx, pre, n, post); + f.template RunBroadCast2(x, y, z, ctx, pre, n, post); return; } } @@ -144,8 +308,9 @@ EIGEN_FUNCTOR(Mul, EIGEN_MUL); #define EIGEN_DIV(x, y) ((x) / (y)) EIGEN_FUNCTOR(Div, EIGEN_DIV); -template +template void ElementwiseGradCompute(const framework::ExecutionContext& ctx) { using Tensor = framework::Tensor; @@ -154,7 +319,7 @@ void ElementwiseGradCompute(const framework::ExecutionContext& ctx) { auto* out = ctx.Input("Out"); auto* dout = ctx.Input(framework::GradVarName("Out")); - auto place = ctx.GetEigenDevice(); + auto& place = *ctx.template device_context().eigen_device(); auto x_dims = x->dims(); auto y_dims = y->dims(); diff --git a/paddle/operators/elementwise_sub_op.cc b/paddle/operators/elementwise_sub_op.cc index 95d7979e39bfe7b484acb7771d1bd078014293a2..3e4d19361ead0100e45e50880d402e3d2b8557ff 100644 --- a/paddle/operators/elementwise_sub_op.cc +++ b/paddle/operators/elementwise_sub_op.cc @@ -34,13 +34,13 @@ REGISTER_OP(elementwise_sub, ops::ElementwiseOp, ops::ElementwiseSubOpMaker, elementwise_sub_grad, ops::ElementwiseOpGrad); REGISTER_OP_CPU_KERNEL( elementwise_sub, - ops::ElementwiseSubKernel, - ops::ElementwiseSubKernel, - ops::ElementwiseSubKernel, - ops::ElementwiseSubKernel); + ops::ElementwiseSubKernel, + ops::ElementwiseSubKernel, + ops::ElementwiseSubKernel, + ops::ElementwiseSubKernel); REGISTER_OP_CPU_KERNEL( elementwise_sub_grad, - ops::ElementwiseSubGradKernel, - ops::ElementwiseSubGradKernel, - ops::ElementwiseSubGradKernel, - ops::ElementwiseSubGradKernel); + ops::ElementwiseSubGradKernel, + ops::ElementwiseSubGradKernel, + ops::ElementwiseSubGradKernel, + ops::ElementwiseSubGradKernel); diff --git a/paddle/operators/elementwise_sub_op.cu b/paddle/operators/elementwise_sub_op.cu index ec23bec35feae26f5463c575b1ab6f58d417e100..0b2f0f7d4d98f1336087f9fc3fc485ed8d805b5f 100644 --- a/paddle/operators/elementwise_sub_op.cu +++ b/paddle/operators/elementwise_sub_op.cu @@ -17,15 +17,16 @@ namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( elementwise_sub, - ops::ElementwiseSubKernel, - ops::ElementwiseSubKernel, - ops::ElementwiseSubKernel, - ops::ElementwiseSubKernel); -REGISTER_OP_GPU_KERNEL( + ops::ElementwiseSubKernel, + ops::ElementwiseSubKernel, + ops::ElementwiseSubKernel, + ops::ElementwiseSubKernel); +REGISTER_OP_CUDA_KERNEL( elementwise_sub_grad, - ops::ElementwiseSubGradKernel, - ops::ElementwiseSubGradKernel, - ops::ElementwiseSubGradKernel, - ops::ElementwiseSubGradKernel); + ops::ElementwiseSubGradKernel, + ops::ElementwiseSubGradKernel, + ops::ElementwiseSubGradKernel, + ops::ElementwiseSubGradKernel); diff --git a/paddle/operators/elementwise_sub_op.h b/paddle/operators/elementwise_sub_op.h index 3f40c1c5bcea5e8473765b039de4ee2a16054f0c..731a30c5e30d3f9bbdbabd62e5d9a77559500b06 100644 --- a/paddle/operators/elementwise_sub_op.h 
+++ b/paddle/operators/elementwise_sub_op.h @@ -18,11 +18,11 @@ namespace paddle { namespace operators { -template +template class ElementwiseSubKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - ElementwiseCompute(ctx); + ElementwiseCompute(ctx); } }; @@ -101,11 +101,11 @@ struct ElementwiseSubBroadCast2GradFunctor { } }; -template +template class ElementwiseSubGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - ElementwiseGradCompute, + ElementwiseGradCompute, ElementwiseSubOneGradFunctor, ElementwiseSubBroadCastGradFunctor, ElementwiseSubBroadCast2GradFunctor>(ctx); diff --git a/paddle/operators/expand_op.cc b/paddle/operators/expand_op.cc index 282775fcda45fe3bbd72bf04a7ae828f2c840ab7..8b3cddbb944de250d5754a2be64dd8e7ec53003a 100644 --- a/paddle/operators/expand_op.cc +++ b/paddle/operators/expand_op.cc @@ -130,7 +130,8 @@ class ExpandGradOp : public framework::OperatorWithKernel { namespace ops = paddle::operators; REGISTER_OP(expand, ops::ExpandOp, ops::ExpandOpMaker, expand_grad, ops::ExpandGradOp); -REGISTER_OP_CPU_KERNEL(expand, - ops::ExpandKernel); REGISTER_OP_CPU_KERNEL( - expand_grad, ops::ExpandGradKernel); + expand, ops::ExpandKernel); +REGISTER_OP_CPU_KERNEL( + expand_grad, + ops::ExpandGradKernel); diff --git a/paddle/operators/expand_op.cu b/paddle/operators/expand_op.cu index 6744562b6c21dd8bfeb7e4cb6b809dc7913aa3a5..99ee584d0859f9bf688899cc9b346d221415518c 100644 --- a/paddle/operators/expand_op.cu +++ b/paddle/operators/expand_op.cu @@ -17,7 +17,8 @@ #include "paddle/operators/expand_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(expand, - ops::ExpandKernel); -REGISTER_OP_GPU_KERNEL( - expand_grad, ops::ExpandGradKernel); +REGISTER_OP_CUDA_KERNEL( + expand, ops::ExpandKernel); +REGISTER_OP_CUDA_KERNEL( + expand_grad, + ops::ExpandGradKernel); diff --git a/paddle/operators/expand_op.h b/paddle/operators/expand_op.h index 4d7996ad1e744fead1329c35ce6ea43bf0683ce6..14ef8b0912860f7ec39535997c39d6d4c4970650 100644 --- a/paddle/operators/expand_op.h +++ b/paddle/operators/expand_op.h @@ -56,7 +56,7 @@ template using EigenTensor = framework::EigenTensor; -template +template class ExpandKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -83,12 +83,13 @@ class ExpandKernel : public framework::OpKernel { auto x = EigenTensor::From(*in0); out0->mutable_data(context.GetPlace()); auto y = EigenTensor::From(*out0); - auto place = context.GetEigenDevice(); + auto& place = + *context.template device_context().eigen_device(); y.device(place) = x.broadcast(bcast_dims); } }; -template +template class ExpandGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -164,7 +165,8 @@ class ExpandGradKernel : public framework::OpKernel { reduce_dims[i] = reduce_dims_vec[i]; } auto out_grad = EigenVector::Flatten(*in0); - x_grad.device(context.GetEigenDevice()) = + x_grad.device( + *context.template device_context().eigen_device()) = out_grad.reshape(reshape_dims).sum(reduce_dims).reshape(x.dimensions()); } }; diff --git a/paddle/operators/fill_constant_batch_size_like_op.cc b/paddle/operators/fill_constant_batch_size_like_op.cc index 892922cd3aaec8bf8194320c5c3a0dd0365bb589..7fb74e2b950338fbd05515f844959862504eddce 100644 --- a/paddle/operators/fill_constant_batch_size_like_op.cc +++ 
b/paddle/operators/fill_constant_batch_size_like_op.cc @@ -100,8 +100,11 @@ REGISTER_OPERATOR(fill_constant_batch_size_like, ops::FillConstantBatchSizeLikeOpMaker); REGISTER_OP_CPU_KERNEL( fill_constant_batch_size_like, - ops::FillConstantBatchSizeLikeOpKernel, - ops::FillConstantBatchSizeLikeOpKernel, - ops::FillConstantBatchSizeLikeOpKernel, - ops::FillConstantBatchSizeLikeOpKernel, + ops::FillConstantBatchSizeLikeOpKernel, + ops::FillConstantBatchSizeLikeOpKernel, + ops::FillConstantBatchSizeLikeOpKernel); diff --git a/paddle/operators/fill_constant_batch_size_like_op.cu.cc b/paddle/operators/fill_constant_batch_size_like_op.cu.cc index 9e7a1eeab863c962ca72908e561e12a04d5021c5..2e0e15f36bb2e0ffd33dc6d1d25965d0cbe33186 100644 --- a/paddle/operators/fill_constant_batch_size_like_op.cu.cc +++ b/paddle/operators/fill_constant_batch_size_like_op.cu.cc @@ -16,10 +16,13 @@ #include "paddle/framework/op_registry.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( fill_constant_batch_size_like, - ops::FillConstantBatchSizeLikeOpKernel, - ops::FillConstantBatchSizeLikeOpKernel, - ops::FillConstantBatchSizeLikeOpKernel, - ops::FillConstantBatchSizeLikeOpKernel, + ops::FillConstantBatchSizeLikeOpKernel, + ops::FillConstantBatchSizeLikeOpKernel, + ops::FillConstantBatchSizeLikeOpKernel); diff --git a/paddle/operators/fill_constant_batch_size_like_op.h b/paddle/operators/fill_constant_batch_size_like_op.h index 339d97a30a5819ab488e83990651ba99212239ec..66da9d0307e36db3726f30518c8c57a923e54388 100644 --- a/paddle/operators/fill_constant_batch_size_like_op.h +++ b/paddle/operators/fill_constant_batch_size_like_op.h @@ -19,7 +19,7 @@ limitations under the License. */ namespace paddle { namespace operators { -template +template class FillConstantBatchSizeLikeOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -27,8 +27,9 @@ class FillConstantBatchSizeLikeOpKernel : public framework::OpKernel { out->mutable_data(ctx.GetPlace()); auto value = ctx.Attr("value"); - math::SetConstant setter; - setter(ctx.device_context(), out, static_cast(value)); + math::SetConstant setter; + setter(ctx.template device_context(), out, + static_cast(value)); } }; diff --git a/paddle/operators/fill_op.cc b/paddle/operators/fill_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..382e161c5d83ba560411b1f231aa896028b709b8 --- /dev/null +++ b/paddle/operators/fill_op.cc @@ -0,0 +1,111 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#include "paddle/framework/data_type.h" +#include "paddle/framework/op_registry.h" +#include "paddle/operators/detail/safe_ref.h" + +namespace paddle { +namespace operators { + +struct FillOpVisitor { + FillOpVisitor(framework::LoDTensor *tensor, const std::vector &value) + : tensor_(tensor), value_(value) {} + + template + void operator()() const { + platform::CPUPlace cpu; + auto *data = tensor_->mutable_data(cpu); + std::transform(value_.data(), value_.data() + tensor_->numel(), data, + [](float dat) { return static_cast(dat); }); + } + + framework::LoDTensor *tensor_; + const std::vector &value_; +}; + +class FillOp : public framework::OperatorBase { + public: + FillOp(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + void Run(const framework::Scope &scope, + const platform::DeviceContext &dev_ctx) const override { + auto &out = + detail::Ref(detail::Ref(scope.FindVar(Output("Out")), + "Cannot find variable %s", Output("Out")) + .GetMutable()); + out.Resize(framework::make_ddim(Attr>("shape"))); + auto dtype = static_cast(Attr("dtype")); + platform::CPUPlace cpu; + auto force_cpu = Attr("force_cpu"); + out.mutable_data(force_cpu ? cpu : dev_ctx.GetPlace(), + framework::ToTypeIndex(dtype)); + + framework::LoDTensor tensor; + + if (force_cpu || platform::is_cpu_place(dev_ctx.GetPlace())) { + tensor.ShareDataWith(out); + } else { + // Always make tensor in CPU memory. + tensor.Resize(out.dims()); + tensor.mutable_data(cpu, framework::ToTypeIndex(dtype)); + } + + framework::VisitDataType( + dtype, FillOpVisitor(&tensor, Attr>("value"))); + + if (!force_cpu && platform::is_gpu_place(dev_ctx.GetPlace())) { + // Copy tensor to out + framework::CopyFrom(tensor, dev_ctx.GetPlace(), dev_ctx, &out); + } + } +}; + +class FillOpMaker : public framework::OpProtoAndCheckerMaker { + public: + FillOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddComment(R"DOC(Fill operator + +Fill an tensor with `value` and `shape`. The type of the tensor is specify by +`dtype`. +)DOC"); + AddOutput("Out", "(LoDTensor) The output tensor."); + AddAttr>( + "value", "The float values of tensor, which are flatten in row major"); + AddAttr>("shape", "The shape of output tensor"); + AddAttr("dtype", "The data type of output tensor, Default is float") + .SetDefault(framework::DataType::FP32); + AddAttr("force_cpu", + "Whether the output tensor must be at CPU memory or not. 
" + "Default is false.") + .SetDefault(false); + } +}; + +class FillOpInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *context) const override { + context->SetOutputDim( + "Out", + framework::make_ddim(context->Attrs().Get>("shape"))); + } +}; + +} // namespace operators +} // namespace paddle +namespace ops = paddle::operators; +REGISTER_OPERATOR(fill, ops::FillOp, ops::FillOpInferShape, ops::FillOpMaker); diff --git a/paddle/operators/fill_zeros_like_op.cc b/paddle/operators/fill_zeros_like_op.cc index 95fb5932b8b555e1357adc9fdfb7b6e6db7da71d..720c11f5f12a8dea971fe82db6afe8f6b0d9ee1a 100644 --- a/paddle/operators/fill_zeros_like_op.cc +++ b/paddle/operators/fill_zeros_like_op.cc @@ -54,8 +54,9 @@ namespace ops = paddle::operators; REGISTER_OP_WITHOUT_GRADIENT(fill_zeros_like, ops::FillZerosLikeOp, ops::FillZerosLikeOpMaker); REGISTER_OP_CPU_KERNEL( - fill_zeros_like, ops::FillZerosLikeKernel, - ops::FillZerosLikeKernel, - ops::FillZerosLikeKernel, - ops::FillZerosLikeKernel, - ops::FillZerosLikeKernel); + fill_zeros_like, + ops::FillZerosLikeKernel, + ops::FillZerosLikeKernel, + ops::FillZerosLikeKernel, + ops::FillZerosLikeKernel, + ops::FillZerosLikeKernel); diff --git a/paddle/operators/fill_zeros_like_op.cu.cc b/paddle/operators/fill_zeros_like_op.cu.cc index 1501a17441072223ba0e8cf5b6c8cdd5e903a467..9f412306bb5f08497990f0e0385f695d838c2400 100644 --- a/paddle/operators/fill_zeros_like_op.cu.cc +++ b/paddle/operators/fill_zeros_like_op.cu.cc @@ -16,9 +16,10 @@ #include "paddle/framework/op_registry.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( - fill_zeros_like, ops::FillZerosLikeKernel, - ops::FillZerosLikeKernel, - ops::FillZerosLikeKernel, - ops::FillZerosLikeKernel, - ops::FillZerosLikeKernel); +REGISTER_OP_CUDA_KERNEL( + fill_zeros_like, + ops::FillZerosLikeKernel, + ops::FillZerosLikeKernel, + ops::FillZerosLikeKernel, + ops::FillZerosLikeKernel, + ops::FillZerosLikeKernel); diff --git a/paddle/operators/fill_zeros_like_op.h b/paddle/operators/fill_zeros_like_op.h index 7e7d78eea2bce427d6ad4dfb77bcb4ace35cd287..a6e2941f52150de7886717303d2cb2f10b7eef7b 100644 --- a/paddle/operators/fill_zeros_like_op.h +++ b/paddle/operators/fill_zeros_like_op.h @@ -19,15 +19,16 @@ limitations under the License. 
*/ namespace paddle { namespace operators { -template +template class FillZerosLikeKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* out = context.Output("Y"); out->mutable_data(context.GetPlace()); - math::SetConstant setter; - setter(context.device_context(), out, static_cast(0)); + math::SetConstant setter; + setter(context.template device_context(), out, + static_cast(0)); } }; diff --git a/paddle/operators/ftrl_op.cc b/paddle/operators/ftrl_op.cc index cb7ae6919623f10a6c4ec98c0e942c1590ac9a7a..b14913ff213c84051b5a945f4a470cea4039a289 100644 --- a/paddle/operators/ftrl_op.cc +++ b/paddle/operators/ftrl_op.cc @@ -135,5 +135,5 @@ The paper that proposed Follow The Regularized Leader (FTRL): namespace ops = paddle::operators; REGISTER_OP_WITHOUT_GRADIENT(ftrl, ops::FTRLOp, ops::FTRLOpMaker); -REGISTER_OP_CPU_KERNEL(ftrl, - ops::FTRLOpKernel); +REGISTER_OP_CPU_KERNEL( + ftrl, ops::FTRLOpKernel); diff --git a/paddle/operators/ftrl_op.cu b/paddle/operators/ftrl_op.cu index 97b36dade6f531df49615ae2d44d565eadba7154..abbbe7adbe6bd14f55f7f941c5e6740fada24910 100644 --- a/paddle/operators/ftrl_op.cu +++ b/paddle/operators/ftrl_op.cu @@ -15,5 +15,5 @@ specific language governing permissions and limitations under the License. */ #include "paddle/operators/ftrl_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(ftrl, - ops::FTRLOpKernel); +REGISTER_OP_CUDA_KERNEL( + ftrl, ops::FTRLOpKernel); diff --git a/paddle/operators/ftrl_op.h b/paddle/operators/ftrl_op.h index b040162f8d1d8998aa13021c10a25fe57135c1e9..4eea04cd8d61bb34fc612e0ca1765a664e329ca9 100644 --- a/paddle/operators/ftrl_op.h +++ b/paddle/operators/ftrl_op.h @@ -24,7 +24,7 @@ template using EigenVector = framework::EigenVector; -template +template class FTRLOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -53,7 +53,7 @@ class FTRLOpKernel : public framework::OpKernel { auto p_out = EigenVector::Flatten(*param_out); auto s_acc_out = EigenVector::Flatten(*sq_accum_out); auto l_acc_out = EigenVector::Flatten(*lin_accum_out); - auto place = ctx.GetEigenDevice(); + auto& place = *ctx.template device_context().eigen_device(); Eigen::DSizes grad_dsize(grad->numel()); diff --git a/paddle/operators/gather.cu.h b/paddle/operators/gather.cu.h index 8d04ecd284226c7b4c6cdd5531915fee2d94ce61..c806aa5f05ad214abb3484935d82b67880a1db7a 100644 --- a/paddle/operators/gather.cu.h +++ b/paddle/operators/gather.cu.h @@ -20,7 +20,7 @@ namespace paddle { namespace operators { using framework::Tensor; -using platform::Place; +using platform::DeviceContext; #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ diff --git a/paddle/operators/gather_op.cu b/paddle/operators/gather_op.cu index 92219d6a433e6db0bb9886ed8670cbafaa843ff8..b37f0576e276b2aa995f01de635ec153a0db36aa 100644 --- a/paddle/operators/gather_op.cu +++ b/paddle/operators/gather_op.cu @@ -49,7 +49,8 @@ class GatherGradOpCUDAKernel : public framework::OpKernel { dX->mutable_data(ctx.GetPlace()); auto dxt = framework::EigenVector::Flatten(*dX); - auto place = ctx.GetEigenDevice(); + auto &place = *ctx.template device_context() + .eigen_device(); dxt.device(place) = dxt.constant(static_cast(0)); GPUScatterAssign(ctx.device_context(), *dO, *Index, dX); @@ -60,5 +61,5 @@ class GatherGradOpCUDAKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; 
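The gather gradient kernel above already shows both halves of the migration this patch applies to every operator: the Eigen device is now obtained from a typed device context via ctx.template device_context().eigen_device() instead of the removed GetEigenDevice call, and the GPU registrations that follow are renamed from REGISTER_OP_GPU_KERNEL to REGISTER_OP_CUDA_KERNEL. A minimal sketch of a hypothetical kernel written against the post-patch conventions is given below; the op name my_scale, its scale attribute, and the reconstructed template arguments are illustrative assumptions, not part of the patch.

// Sketch only: a hypothetical "my_scale" op illustrating the DeviceContext
// conventions used throughout this patch. Template arguments are reconstructed;
// the operator and maker definitions are omitted.
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"

namespace paddle {
namespace operators {

template <typename DeviceContext, typename T>
class MyScaleKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* x = ctx.Input<framework::Tensor>("X");
    auto* out = ctx.Output<framework::Tensor>("Out");
    out->mutable_data<T>(ctx.GetPlace());
    // New style: fetch the Eigen device from the typed device context.
    auto& place =
        *ctx.template device_context<DeviceContext>().eigen_device();
    auto x_e = framework::EigenVector<T>::Flatten(*x);
    auto out_e = framework::EigenVector<T>::Flatten(*out);
    out_e.device(place) = x_e * static_cast<T>(ctx.Attr<float>("scale"));
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
// CPU kernels are now keyed by a device context type rather than a place.
REGISTER_OP_CPU_KERNEL(
    my_scale, ops::MyScaleKernel<paddle::platform::CPUDeviceContext, float>);
// The GPU macro is renamed; the matching line would live in the .cu file:
// REGISTER_OP_CUDA_KERNEL(
//     my_scale, ops::MyScaleKernel<paddle::platform::CUDADeviceContext, float>);

The point of keying kernels on a DeviceContext rather than a bare Place appears to be that a kernel can read the Eigen device (and, on GPU, the stream) straight from the context, which is what the eigen_device() calls in the hunks above and below rely on.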
-REGISTER_OP_GPU_KERNEL(gather, ops::GatherOpCUDAKernel); -REGISTER_OP_GPU_KERNEL(gather_grad, ops::GatherGradOpCUDAKernel); +REGISTER_OP_CUDA_KERNEL(gather, ops::GatherOpCUDAKernel); +REGISTER_OP_CUDA_KERNEL(gather_grad, ops::GatherGradOpCUDAKernel); diff --git a/paddle/operators/gather_op.h b/paddle/operators/gather_op.h index 8276ed0d3d8b676aafab45fae70942e78b72b8e6..1a1ba0c41aef95d3dc8cc929db72770a7bd08b18 100644 --- a/paddle/operators/gather_op.h +++ b/paddle/operators/gather_op.h @@ -53,7 +53,8 @@ class GatherGradientOpKernel : public framework::OpKernel { dX->mutable_data(ctx.GetPlace()); auto dxt = framework::EigenVector::Flatten(*dX); - auto place = ctx.GetEigenDevice(); + auto &place = *ctx.template device_context() + .eigen_device(); dxt.device(place) = dxt.constant(static_cast(0)); ScatterAssign(ctx.device_context(), *dO, *Index, dX); diff --git a/paddle/operators/gaussian_random_op.cu b/paddle/operators/gaussian_random_op.cu index 315560bf1ba8a66b9a3b7d79510d202885e845d6..ffce6f713816abe7d1f207f141a1b0933574e2ff 100644 --- a/paddle/operators/gaussian_random_op.cu +++ b/paddle/operators/gaussian_random_op.cu @@ -60,5 +60,5 @@ class GPUGaussianRandomKernel : public framework::OpKernel { } // namespace operators } // namespace paddle -REGISTER_OP_GPU_KERNEL(gaussian_random, - paddle::operators::GPUGaussianRandomKernel); +REGISTER_OP_CUDA_KERNEL(gaussian_random, + paddle::operators::GPUGaussianRandomKernel); diff --git a/paddle/operators/gru_op.cc b/paddle/operators/gru_op.cc index 5aa03f8916a67222fb0ca5781533766063e52683..311e7edcf1519bc706a51e4d9242a1ebee5168ca 100644 --- a/paddle/operators/gru_op.cc +++ b/paddle/operators/gru_op.cc @@ -213,8 +213,9 @@ class GRUGradOp : public framework::OperatorWithKernel { namespace ops = paddle::operators; REGISTER_OP(gru, ops::GRUOp, ops::GRUOpMaker, gru_grad, ops::GRUGradOp); -REGISTER_OP_CPU_KERNEL(gru, ops::GRUKernel, - ops::GRUKernel); -REGISTER_OP_CPU_KERNEL(gru_grad, - ops::GRUGradKernel, - ops::GRUGradKernel); +REGISTER_OP_CPU_KERNEL( + gru, ops::GRUKernel, + ops::GRUKernel); +REGISTER_OP_CPU_KERNEL( + gru_grad, ops::GRUGradKernel, + ops::GRUGradKernel); diff --git a/paddle/operators/gru_op.cu.cc b/paddle/operators/gru_op.cu.cc index 0ceff94ec3ddaadbd5f0ca4f5a4eebe6cb8ee3a9..458630ca6187ec89638046d8eea63c31eca518f2 100644 --- a/paddle/operators/gru_op.cu.cc +++ b/paddle/operators/gru_op.cu.cc @@ -15,8 +15,9 @@ #include "paddle/operators/gru_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(gru, ops::GRUKernel, - ops::GRUKernel); -REGISTER_OP_GPU_KERNEL(gru_grad, - ops::GRUGradKernel, - ops::GRUGradKernel); +REGISTER_OP_CUDA_KERNEL( + gru, ops::GRUKernel, + ops::GRUKernel); +REGISTER_OP_CUDA_KERNEL( + gru_grad, ops::GRUGradKernel, + ops::GRUGradKernel); diff --git a/paddle/operators/gru_op.h b/paddle/operators/gru_op.h index 1b18368e0e16365682520b62a7f6adab0cbb527f..6d02dff578846904beeb58c5161d27c7c2ed5d70 100644 --- a/paddle/operators/gru_op.h +++ b/paddle/operators/gru_op.h @@ -27,16 +27,16 @@ namespace operators { using LoDTensor = framework::LoDTensor; using Tensor = framework::Tensor; -template -inline void ReorderInitState(const platform::DeviceContext& ctx, +template +inline void ReorderInitState(const DeviceContext& ctx, const framework::Tensor& src, const size_t* index, framework::Tensor* dst, bool indexed_src) { - math::CopyMatrixRowsFunctor row_shuffle; + math::CopyMatrixRowsFunctor row_shuffle; dst->mutable_data(src.dims(), ctx.GetPlace()); row_shuffle(ctx, src, index, *dst, indexed_src); } -template 
+template class GRUKernel : public framework::OpKernel { public: void BatchCompute(const framework::ExecutionContext& context) const { @@ -60,19 +60,19 @@ class GRUKernel : public framework::OpKernel { auto hidden_dims = hidden->dims(); bool is_reverse = context.Attr("is_reverse"); - math::LoDTensor2BatchFunctor to_batch; - auto& dev_ctx = context.device_context(); + math::LoDTensor2BatchFunctor to_batch; + auto& dev_ctx = context.template device_context(); to_batch(dev_ctx, *input, *batch_gate, true, is_reverse); if (bias) { - math::RowwiseAdd add_bias; + math::RowwiseAdd add_bias; add_bias(dev_ctx, *batch_gate, *bias, batch_gate); } int frame_size = hidden_dims[1]; math::hl_gru_value gru_value; - gru_value.gateWeight = const_cast(weight_data); - gru_value.stateWeight = + gru_value.gate_weight = const_cast(weight_data); + gru_value.state_weight = const_cast(weight_data + 2 * frame_size * frame_size); Tensor ordered_h0; const size_t* order = batch_gate->lod()[2].data(); @@ -80,11 +80,12 @@ class GRUKernel : public framework::OpKernel { // Since the batch computing for GRU reorders the input sequences // according to their length. The initialized cell state also needs // to reorder. - ReorderInitState(context.device_context(), *h0, order, - &ordered_h0, true); - gru_value.prevOutValue = ordered_h0.data(); + ReorderInitState( + context.template device_context(), *h0, order, + &ordered_h0, true); + gru_value.prev_out_value = ordered_h0.data(); } else { - gru_value.prevOutValue = nullptr; + gru_value.prev_out_value = nullptr; } auto batch_starts = batch_gate->lod()[0]; size_t num_batch = batch_starts.size() - 1; @@ -96,17 +97,17 @@ class GRUKernel : public framework::OpKernel { Tensor gate_t = batch_gate->Slice(bstart, bend); Tensor reset_hidden_prev_t = batch_reset_hidden_prev->Slice(bstart, bend); Tensor hidden_t = batch_hidden->Slice(bstart, bend); - gru_value.outputValue = hidden_t.data(); - gru_value.gateValue = gate_t.data(); - gru_value.resetOutputValue = reset_hidden_prev_t.data(); - math::GRUUnitFunctor::compute( + gru_value.output_value = hidden_t.data(); + gru_value.gate_value = gate_t.data(); + gru_value.reset_output_value = reset_hidden_prev_t.data(); + math::GRUUnitFunctor::compute( dev_ctx, gru_value, frame_size, cur_batch_size, math::ActiveType(context.Attr("activation")), math::ActiveType(context.Attr("gate_activation"))); - gru_value.prevOutValue = gru_value.outputValue; + gru_value.prev_out_value = gru_value.output_value; } - math::Batch2LoDTensorFunctor to_seq; + math::Batch2LoDTensorFunctor to_seq; batch_hidden->set_lod(batch_gate->lod()); to_seq(dev_ctx, *batch_hidden, *hidden); } @@ -116,7 +117,7 @@ class GRUKernel : public framework::OpKernel { } }; -template +template class GRUGradKernel : public framework::OpKernel { public: void BatchCompute(const framework::ExecutionContext& context) const { @@ -141,14 +142,14 @@ class GRUGradKernel : public framework::OpKernel { auto hidden_dims = hidden->dims(); int frame_size = hidden_dims[1]; - math::LoDTensor2BatchFunctor to_batch; + math::LoDTensor2BatchFunctor to_batch; LoDTensor batch_hidden_grad, batch_gate_grad, batch_reset_hidden_prev_grad; batch_hidden_grad.mutable_data(hidden_dims, context.GetPlace()); batch_gate_grad.mutable_data(gate_dims, context.GetPlace()); batch_reset_hidden_prev_grad.mutable_data(hidden_dims, context.GetPlace()); - math::SetConstant zero; - auto& dev_ctx = context.device_context(); + math::SetConstant zero; + auto& dev_ctx = context.template device_context(); zero(dev_ctx, &batch_hidden_grad, 
static_cast(0.0)); zero(dev_ctx, &batch_gate_grad, static_cast(0.0)); zero(dev_ctx, &batch_reset_hidden_prev_grad, static_cast(0.0)); @@ -156,12 +157,13 @@ class GRUGradKernel : public framework::OpKernel { Tensor ordered_h0, ordered_h0_grad; const size_t* order = batch_gate->lod()[2].data(); if (h0) { - ReorderInitState(context.device_context(), *h0, order, - &ordered_h0, true); + ReorderInitState(dev_ctx, *h0, order, &ordered_h0, + true); } if (h0_grad) { ordered_h0_grad.mutable_data(h0_grad->dims(), context.GetPlace()); - zero(context.device_context(), &ordered_h0_grad, static_cast(0.0)); + zero(context.template device_context(), &ordered_h0_grad, + static_cast(0.0)); } bool is_reverse = context.Attr("is_reverse"); @@ -169,20 +171,20 @@ class GRUGradKernel : public framework::OpKernel { to_batch(dev_ctx, *hidden_grad, batch_hidden_grad, false, is_reverse); math::hl_gru_value gru_value; - gru_value.gateWeight = const_cast(weight_data); - gru_value.stateWeight = + gru_value.gate_weight = const_cast(weight_data); + gru_value.state_weight = const_cast(weight_data + 2 * frame_size * frame_size); math::hl_gru_grad gru_grad; if (weight_grad) { - gru_grad.gateWeightGrad = + gru_grad.gate_weight_grad = weight_grad->mutable_data(context.GetPlace()); zero(dev_ctx, weight_grad, static_cast(0.0)); - gru_grad.stateWeightGrad = + gru_grad.state_weight_grad = weight_grad->data() + 2 * frame_size * frame_size; } else { - gru_grad.gateWeightGrad = nullptr; - gru_grad.stateWeightGrad = nullptr; + gru_grad.gate_weight_grad = nullptr; + gru_grad.state_weight_grad = nullptr; } auto batch_starts = batch_hidden_grad.lod()[0]; @@ -193,48 +195,48 @@ class GRUGradKernel : public framework::OpKernel { int cur_batch_size = bend - bstart; Tensor gate_t = batch_gate->Slice(bstart, bend); - gru_value.gateValue = gate_t.data(); + gru_value.gate_value = gate_t.data(); Tensor reset_hidden_prev_t = batch_reset_hidden_prev->Slice(bstart, bend); - gru_value.resetOutputValue = reset_hidden_prev_t.data(); + gru_value.reset_output_value = reset_hidden_prev_t.data(); Tensor hidden_grad_t = batch_hidden_grad.Slice(bstart, bend); - gru_grad.outputGrad = hidden_grad_t.data(); + gru_grad.output_grad = hidden_grad_t.data(); Tensor gate_grad_t = batch_gate_grad.Slice(bstart, bend); - gru_grad.gateGrad = gate_grad_t.data(); + gru_grad.gate_grad = gate_grad_t.data(); Tensor reset_hidden_prev_grad_t = batch_reset_hidden_prev_grad.Slice(bstart, bend); - gru_grad.resetOutputGrad = reset_hidden_prev_grad_t.data(); + gru_grad.reset_output_grad = reset_hidden_prev_grad_t.data(); if (n == 0) { - gru_value.prevOutValue = h0 ? ordered_h0.data() : nullptr; - gru_grad.prevOutGrad = + gru_value.prev_out_value = h0 ? ordered_h0.data() : nullptr; + gru_grad.prev_out_grad = h0 && h0_grad ? 
ordered_h0_grad.data() : nullptr; } else { int bstart_pre = static_cast(batch_starts[n - 1]); Tensor hidden_prev_t = batch_hidden->Slice(bstart_pre, bstart); - gru_value.prevOutValue = hidden_prev_t.data(); + gru_value.prev_out_value = hidden_prev_t.data(); Tensor hidden_prev_grad_t = batch_hidden_grad.Slice(bstart_pre, bstart); - gru_grad.prevOutGrad = hidden_prev_grad_t.data(); + gru_grad.prev_out_grad = hidden_prev_grad_t.data(); } - math::GRUUnitGradFunctor::compute( + math::GRUUnitGradFunctor::compute( dev_ctx, gru_value, gru_grad, frame_size, cur_batch_size, math::ActiveType(context.Attr("activation")), math::ActiveType(context.Attr("gate_activation"))); } if (input_grad) { input_grad->mutable_data(context.GetPlace()); - math::Batch2LoDTensorFunctor to_seq; + math::Batch2LoDTensorFunctor to_seq; batch_gate_grad.set_lod(batch_gate->lod()); to_seq(dev_ctx, batch_gate_grad, *input_grad); } if (bias_grad) { bias_grad->mutable_data(context.GetPlace()); - math::ColwiseSum col_sum; + math::ColwiseSum col_sum; col_sum(dev_ctx, batch_gate_grad, bias_grad); } if (h0 && h0_grad) { - ReorderInitState(context.device_context(), ordered_h0_grad, - order, h0_grad, false); + ReorderInitState(dev_ctx, ordered_h0_grad, order, + h0_grad, false); } } diff --git a/paddle/operators/gru_unit_op.cc b/paddle/operators/gru_unit_op.cc index 877c969103cfc17e1b170449d1922d9c7db2a58b..705de87be5b67fbc343a89eeba2282941b264c8a 100644 --- a/paddle/operators/gru_unit_op.cc +++ b/paddle/operators/gru_unit_op.cc @@ -201,9 +201,10 @@ class GRUUnitGradOp : public framework::OperatorWithKernel { namespace ops = paddle::operators; REGISTER_OP(gru_unit, ops::GRUUnitOp, ops::GRUUnitOpMaker, gru_unit_grad, ops::GRUUnitGradOp); -REGISTER_OP_CPU_KERNEL(gru_unit, - ops::GRUUnitKernel, - ops::GRUUnitKernel); REGISTER_OP_CPU_KERNEL( - gru_unit_grad, ops::GRUUnitGradKernel, - ops::GRUUnitGradKernel); + gru_unit, ops::GRUUnitKernel, + ops::GRUUnitKernel); +REGISTER_OP_CPU_KERNEL( + gru_unit_grad, + ops::GRUUnitGradKernel, + ops::GRUUnitGradKernel); diff --git a/paddle/operators/gru_unit_op.cu b/paddle/operators/gru_unit_op.cu index 821c8c6421771bd99474b0b2f8aa2acf04697779..7c752db494b59c3ec2af093332777ce6655fb477 100644 --- a/paddle/operators/gru_unit_op.cu +++ b/paddle/operators/gru_unit_op.cu @@ -16,9 +16,10 @@ #include "paddle/operators/gru_unit_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(gru_unit, - ops::GRUUnitKernel, - ops::GRUUnitKernel); -REGISTER_OP_GPU_KERNEL( - gru_unit_grad, ops::GRUUnitGradKernel, - ops::GRUUnitGradKernel); +REGISTER_OP_CUDA_KERNEL( + gru_unit, ops::GRUUnitKernel, + ops::GRUUnitKernel); +REGISTER_OP_CUDA_KERNEL( + gru_unit_grad, + ops::GRUUnitGradKernel, + ops::GRUUnitGradKernel); diff --git a/paddle/operators/gru_unit_op.h b/paddle/operators/gru_unit_op.h index 3398c0934e250cfc292776d08773204bb9b4d87e..8fe60c750da0a42089dc38190d2dda3d08e5ba06 100644 --- a/paddle/operators/gru_unit_op.h +++ b/paddle/operators/gru_unit_op.h @@ -34,7 +34,7 @@ using EigenVector = framework::EigenVector; enum GRUActivationType { identity = 0, sigmoid = 1, tanh = 2, relu = 3 }; -template +template class GRUUnitKernel : public framework::OpKernel { public: template @@ -71,7 +71,8 @@ class GRUUnitKernel : public framework::OpKernel { auto g = EigenMatrix::From(*gate); auto r_h_p = EigenMatrix::From(*reset_hidden_prev); auto h = EigenMatrix::From(*hidden); - auto place = context.GetEigenDevice(); + auto& place = + *context.template device_context().eigen_device(); // calculate unactivated gate outputs if 
(bias) { @@ -86,10 +87,10 @@ class GRUUnitKernel : public framework::OpKernel { const T* weight_data = weight->data(); T* gate_data = gate->data(); T* reset_hidden_prev_data = reset_hidden_prev->data(); - math::gemm(context.device_context(), false, false, batch_size, - 2 * frame_size, frame_size, 1, hidden_prev_data, - frame_size, weight_data, frame_size * 2, 1, gate_data, - frame_size * 3); + math::gemm( + context.template device_context(), false, false, + batch_size, 2 * frame_size, frame_size, 1, hidden_prev_data, frame_size, + weight_data, frame_size * 2, 1, gate_data, frame_size * 3); // calculate activited gate Eigen::array extents({{batch_size, frame_size}}); @@ -102,11 +103,11 @@ class GRUUnitKernel : public framework::OpKernel { g.slice(r_offsets, extents), g.slice(r_offsets, extents)); auto r = g.slice(r_offsets, extents); // reset gate r_h_p.device(place) = r * h_p; // reset previous hidden state - math::gemm(context.device_context(), false, false, batch_size, - frame_size, frame_size, 1, reset_hidden_prev_data, - frame_size, weight_data + frame_size * frame_size * 2, - frame_size, 1, gate_data + frame_size * 2, - frame_size * 3); + math::gemm( + context.template device_context(), false, false, + batch_size, frame_size, frame_size, 1, reset_hidden_prev_data, + frame_size, weight_data + frame_size * frame_size * 2, frame_size, 1, + gate_data + frame_size * 2, frame_size * 3); Eigen::array c_offsets({{0, frame_size * 2}}); ActCompute(context.Attr("activation"), place, @@ -118,7 +119,7 @@ class GRUUnitKernel : public framework::OpKernel { } }; -template +template class GRUUnitGradKernel : public framework::OpKernel { public: template @@ -166,7 +167,8 @@ class GRUUnitGradKernel : public framework::OpKernel { auto d_h = EigenMatrix::From(*hidden_grad); auto d_g = EigenMatrix::From(gate_grad); auto d_r_h_p = EigenMatrix::From(reset_hidden_prev_grad); - auto place = context.GetEigenDevice(); + auto& place = + *context.template device_context().eigen_device(); int batch_size = input->dims()[0]; int frame_size = hidden_prev->dims()[1]; @@ -186,11 +188,11 @@ class GRUUnitGradKernel : public framework::OpKernel { ActGradCompute(context.Attr("activation"), place, c, c, d_g.slice(c_offsets, extents), d_h * u); // backward for reset_hidden_prev - math::gemm(context.device_context(), false, true, batch_size, - frame_size, frame_size, 1, - gate_grad_data + frame_size * 2, frame_size * 3, - weight_data + frame_size * frame_size * 2, frame_size, - 0, reset_hidden_prev_grad_data, frame_size); + math::gemm( + context.template device_context(), false, true, + batch_size, frame_size, frame_size, 1, gate_grad_data + frame_size * 2, + frame_size * 3, weight_data + frame_size * frame_size * 2, frame_size, + 0, reset_hidden_prev_grad_data, frame_size); // backward for unactivated reset gate ActGradCompute(context.Attr("gate_activation"), place, r, r, d_g.slice(r_offsets, extents), d_r_h_p * h_p); @@ -198,17 +200,18 @@ class GRUUnitGradKernel : public framework::OpKernel { if (weight_grad) { T* weight_grad_data = weight_grad->mutable_data(context.GetPlace()); // backward for state_weight - math::gemm( - context.device_context(), true, false, frame_size, frame_size, - batch_size, 1, reset_hidden_prev_data, frame_size, - gate_grad_data + frame_size * 2, frame_size * 3, 0, + math::gemm( + context.template device_context(), true, false, + frame_size, frame_size, batch_size, 1, reset_hidden_prev_data, + frame_size, gate_grad_data + frame_size * 2, frame_size * 3, 0, weight_grad_data + frame_size * frame_size * 
2, frame_size); // backward for update_gate_weight and reset_gate_weight - math::gemm(context.device_context(), true, false, frame_size, - frame_size * 2, batch_size, 1, hidden_prev_data, - frame_size, gate_grad_data, frame_size * 3, 0, - weight_grad_data, frame_size * 2); + math::gemm( + context.template device_context(), true, false, + frame_size, frame_size * 2, batch_size, 1, hidden_prev_data, + frame_size, gate_grad_data, frame_size * 3, 0, weight_grad_data, + frame_size * 2); } // backward for hidden_prev if (hidden_prev_grad) { @@ -216,10 +219,11 @@ class GRUUnitGradKernel : public framework::OpKernel { hidden_prev_grad->mutable_data(context.GetPlace()); auto d_h_p = EigenMatrix::From(*hidden_prev_grad); d_h_p.device(place) = d_r_h_p * r + d_h * (u.constant(T(1)) - u); - math::gemm(context.device_context(), false, true, batch_size, - frame_size, frame_size * 2, 1, gate_grad_data, - frame_size * 3, weight_data, frame_size * 2, 1, - hidden_prev_grad_data, frame_size); + math::gemm( + context.template device_context(), false, true, + batch_size, frame_size, frame_size * 2, 1, gate_grad_data, + frame_size * 3, weight_data, frame_size * 2, 1, hidden_prev_grad_data, + frame_size); } // backward for input if (input_grad) { diff --git a/paddle/operators/hinge_loss_op.cc b/paddle/operators/hinge_loss_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..373b4d99b47f2a8ab06c7584a25acee59b6f3e3b --- /dev/null +++ b/paddle/operators/hinge_loss_op.cc @@ -0,0 +1,114 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/hinge_loss_op.h" + +namespace paddle { +namespace operators { + +class HingeLossOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Logits"), + "Input(Logits) must be initialized."); + PADDLE_ENFORCE(ctx->HasInput("Labels"), + "Input(Labels) must be initialized."); + + auto pred_dims = ctx->GetInputDim("Logits"); + auto label_dims = ctx->GetInputDim("Labels"); + + PADDLE_ENFORCE_EQ(pred_dims, label_dims); + PADDLE_ENFORCE_EQ(pred_dims.size(), 2, + "The rank of Input(Logits) must be 2 and the shape is " + "[batch_size, 1]."); + PADDLE_ENFORCE_EQ(pred_dims[1], 1, + "Each row of Input(Logits) contains a real value, " + "so the 2nd dimension of Input(Logits) must be 1."); + + ctx->SetOutputDim("Loss", {pred_dims[0], 1}); + ctx->ShareLoD("Logits", "Loss"); + } +}; + +template +class HingeLossOpMaker : public framework::OpProtoAndCheckerMaker { + public: + HingeLossOpMaker(framework::OpProto* proto, + framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("Logits", + "The input value (Logits) of Hinge loss op." + "Logits is a 2-D tensor with shape [batch_size, 1]."); + AddInput("Labels", + "The target value (Labels) of Hinge loss op." 
+ "Labels is a 2-D tensor with shape [batch_size, 1]."); + AddOutput("Loss", + "The output tensor with shape [batch_size, 1] " + "which represents the hinge loss."); + AddComment(R"DOC( +HingeLoss Operator. + +Let x be a logit (prediction) and y be the actual label. The logit can +take any values from (-inf, inf), but the labels should be either -1 or 1. +Then, the hinge loss is computed as follows: + +$$ +L_(x, y) = max(1 - y.x, 0) +$$ + +Note that the labels passed as input will have values as either 0 or 1. + +)DOC"); + } +}; + +class HingeLossGradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Logits"), + "Input(Logits) should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Labels"), + "Input(Labels) should not be null."); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Loss")), + "Input(Loss@GRAD) should not be null."); + PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("Logits")), + "Input(Logits@GRAD) should not be null."); + + auto pred_dims = ctx->GetInputDim("Logits"); + auto lab_dims = ctx->GetInputDim("Labels"); + auto loss_grad_dims = ctx->GetInputDim(framework::GradVarName("Loss")); + + PADDLE_ENFORCE_EQ(loss_grad_dims, pred_dims); + + auto pred_grad_name = framework::GradVarName("Logits"); + ctx->SetOutputDim(pred_grad_name, pred_dims); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(hinge_loss, ops::HingeLossOp, ops::HingeLossOpMaker, + hinge_loss_grad, ops::HingeLossGradOp); +REGISTER_OP_CPU_KERNEL( + hinge_loss, + ops::HingeLossKernel); +REGISTER_OP_CPU_KERNEL( + hinge_loss_grad, + ops::HingeLossGradKernel); diff --git a/paddle/operators/hinge_loss_op.cu b/paddle/operators/hinge_loss_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..31a5bde292ebcab899ad05a813c685963dd5bc25 --- /dev/null +++ b/paddle/operators/hinge_loss_op.cu @@ -0,0 +1,24 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#define EIGEN_USE_GPU +#include "paddle/operators/hinge_loss_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + hinge_loss, + ops::HingeLossKernel); +REGISTER_OP_CUDA_KERNEL( + hinge_loss_grad, + ops::HingeLossGradKernel); diff --git a/paddle/operators/hinge_loss_op.h b/paddle/operators/hinge_loss_op.h new file mode 100644 index 0000000000000000000000000000000000000000..91369cfb8a5d4f40be9e6249b50079ba2b550003 --- /dev/null +++ b/paddle/operators/hinge_loss_op.h @@ -0,0 +1,71 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +template +class HingeLossKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* pred = context.Input("Logits"); + auto* label = context.Input("Labels"); + auto* loss = context.Output("Loss"); + auto& place = + *context.template device_context().eigen_device(); + + auto x = framework::EigenVector::Flatten(*pred); + auto y = framework::EigenVector::Flatten(*label); + loss->mutable_data(context.GetPlace()); + auto l = framework::EigenVector::Flatten(*loss); + l.device(place) = + (static_cast(1) - x * (static_cast(2) * y - static_cast(1))) + .cwiseMax(static_cast(0)); + } +}; + +template +class HingeLossGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* pred = context.Input("Logits"); + auto* label = context.Input("Labels"); + auto* dloss = + context.Input(framework::GradVarName("Loss")); + auto* dpred = + context.Output(framework::GradVarName("Logits")); + auto& place = + *context.template device_context().eigen_device(); + + auto x = framework::EigenVector::Flatten(*pred); + auto y = framework::EigenVector::Flatten(*label); + auto dl = framework::EigenVector::Flatten(*dloss); + + if (dpred) { + dpred->mutable_data(context.GetPlace()); + auto dx = framework::EigenVector::Flatten(*dpred); + auto alt_labels = static_cast(2) * y - static_cast(1); + dx.device(place) = + dl * ((x * alt_labels) < static_cast(1)).template cast() * + (-alt_labels); + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/huber_loss_op.cc b/paddle/operators/huber_loss_op.cc index 938803d5b36177c782fe40bc34fd92504e5bbf7b..11828d083a55f0a38cf3b8513b7395bbb5592581 100644 --- a/paddle/operators/huber_loss_op.cc +++ b/paddle/operators/huber_loss_op.cc @@ -124,8 +124,9 @@ class HuberLossGradOp : public framework::OperatorWithKernel { namespace ops = paddle::operators; REGISTER_OP(huber_loss, ops::HuberLossOp, ops::HuberLossOpMaker, huber_loss_grad, ops::HuberLossGradOp); -REGISTER_OP_CPU_KERNEL(huber_loss, - ops::HuberLossKernel); +REGISTER_OP_CPU_KERNEL( + huber_loss, + ops::HuberLossKernel); REGISTER_OP_CPU_KERNEL( huber_loss_grad, - ops::HuberLossGradKernel); + ops::HuberLossGradKernel); diff --git a/paddle/operators/huber_loss_op.cu b/paddle/operators/huber_loss_op.cu index 317321dc6c495f6e9a8808d841c71bfa26b754d0..d49a4d9d4236c402f2559c5a0a5de097c2edc61f 100644 --- a/paddle/operators/huber_loss_op.cu +++ b/paddle/operators/huber_loss_op.cu @@ -16,8 +16,9 @@ #include "paddle/operators/huber_loss_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(huber_loss, - ops::HuberLossKernel); -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( + huber_loss, + ops::HuberLossKernel); +REGISTER_OP_CUDA_KERNEL( huber_loss_grad, - ops::HuberLossGradKernel); + ops::HuberLossGradKernel); diff --git a/paddle/operators/huber_loss_op.h b/paddle/operators/huber_loss_op.h index 
4e7bc5543226e19fe0d6190171cdd9c2b3d2d985..4dd20e8b080ab8bd2e61830241d64ee8546a80ec 100644 --- a/paddle/operators/huber_loss_op.h +++ b/paddle/operators/huber_loss_op.h @@ -41,7 +41,7 @@ struct HuberLossForward { T delta; }; -template +template class HuberLossKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -50,7 +50,8 @@ class HuberLossKernel : public framework::OpKernel { auto* out0 = context.Output("Residual"); auto* out1 = context.Output("Out"); auto delta = static_cast(context.Attr("delta")); - auto place = context.GetEigenDevice(); + auto& place = + *context.template device_context().eigen_device(); auto x = EigenVector::Flatten(*in0); auto y = EigenVector::Flatten(*in1); @@ -85,7 +86,7 @@ struct HuberLossBackward { T delta; }; -template +template class HuberLossGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -94,7 +95,8 @@ class HuberLossGradKernel : public framework::OpKernel { auto* out0 = context.Output(framework::GradVarName("X")); auto* out1 = context.Output(framework::GradVarName("Y")); auto delta = static_cast(context.op().Attr("delta")); - auto place = context.GetEigenDevice(); + auto& place = + *context.template device_context().eigen_device(); auto residual = EigenVector::Flatten(*in0); auto out_grad = EigenVector::Flatten(*in1); diff --git a/paddle/operators/increment_op.cc b/paddle/operators/increment_op.cc index 35efb12932f1d61fdb511b4ee2cdab3891507c61..54911267e36dfdbc62d533f40f0b754e7d2cb7bf 100644 --- a/paddle/operators/increment_op.cc +++ b/paddle/operators/increment_op.cc @@ -61,6 +61,8 @@ class IncrementOp : public framework::OperatorBase { out.Resize(x.dims()); out.mutable_data(x.place(), x.type()); float value = Attr("step"); + VLOG(10) << Output("Out") << " increase " << Input("X") << " with " + << value; framework::VisitDataType(framework::ToDataType(out.type()), IncrementFunctor(x, &out, value)); } diff --git a/paddle/operators/l1_norm_op.cc b/paddle/operators/l1_norm_op.cc index 02ebf022968e95d0b20598d3c935fb51177c8841..c0b51202c6bb708a682568175c56583394961535 100644 --- a/paddle/operators/l1_norm_op.cc +++ b/paddle/operators/l1_norm_op.cc @@ -69,7 +69,8 @@ $$Out = \sum{|X|}$$ namespace ops = paddle::operators; REGISTER_OP(l1_norm, ops::L1NormOp, ops::L1NormOpMaker, l1_norm_grad, ops::L1NormGradOp); -REGISTER_OP_CPU_KERNEL(l1_norm, - ops::L1NormKernel); REGISTER_OP_CPU_KERNEL( - l1_norm_grad, ops::L1NormGradKernel); + l1_norm, ops::L1NormKernel); +REGISTER_OP_CPU_KERNEL( + l1_norm_grad, + ops::L1NormGradKernel); diff --git a/paddle/operators/l1_norm_op.cu b/paddle/operators/l1_norm_op.cu index 1c206e04ccbb5f4c2cb9d45aef7bac17c62d55c5..fd725f86f6c98c5aff844546361d8599ea3527ab 100644 --- a/paddle/operators/l1_norm_op.cu +++ b/paddle/operators/l1_norm_op.cu @@ -16,7 +16,8 @@ #include "paddle/operators/l1_norm_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(l1_norm, - ops::L1NormKernel); -REGISTER_OP_GPU_KERNEL( - l1_norm_grad, ops::L1NormGradKernel); +REGISTER_OP_CUDA_KERNEL( + l1_norm, ops::L1NormKernel); +REGISTER_OP_CUDA_KERNEL( + l1_norm_grad, + ops::L1NormGradKernel); diff --git a/paddle/operators/l1_norm_op.h b/paddle/operators/l1_norm_op.h index 3c60dc3dc7415f34ed9d238e6f41b197ec404883..ae3878f2b7b079027a9e9145cefa9eae6b22ffbc 100644 --- a/paddle/operators/l1_norm_op.h +++ b/paddle/operators/l1_norm_op.h @@ -20,7 +20,7 @@ namespace paddle { namespace operators { // Out = sum(abs(X)) 
-template +template class L1NormKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { @@ -30,14 +30,15 @@ class L1NormKernel : public framework::OpKernel { auto x = framework::EigenVector::Flatten(*X); auto out = framework::EigenScalar::From(*Out); - auto place = context.GetEigenDevice(); + auto &place = + *context.template device_context().eigen_device(); out.device(place) = x.abs().sum(); } }; // dX = dout * sign(X) -template +template class L1NormGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { @@ -52,7 +53,8 @@ class L1NormGradKernel : public framework::OpKernel { auto x_eigen = framework::EigenVector::Flatten(*x); auto d_out_eigen = framework::EigenVector::Flatten(*d_out); auto dx_eigen = framework::EigenVector::Flatten(*dx); - auto place = context.GetEigenDevice(); + auto &place = + *context.template device_context().eigen_device(); Eigen::DSizes x_dsize(x->numel()); dx_eigen.device(place) = d_out_eigen.broadcast(x_dsize) * x_eigen.sign(); diff --git a/paddle/operators/linear_chain_crf_op.cc b/paddle/operators/linear_chain_crf_op.cc index 8e079a14e0a15e8ff803b6087e6b0b02083479ef..896e3657d4406c5a1fe07f1712abb2ff0370fd3c 100644 --- a/paddle/operators/linear_chain_crf_op.cc +++ b/paddle/operators/linear_chain_crf_op.cc @@ -261,9 +261,10 @@ REGISTER_OP(linear_chain_crf, ops::LinearChainCRFOp, ops::LinearChainCRFOpMaker, linear_chain_crf_grad, ops::LinearChainCRFGradOp); REGISTER_OP_CPU_KERNEL( linear_chain_crf, - ops::LinearChainCRFOpKernel, - ops::LinearChainCRFOpKernel); + ops::LinearChainCRFOpKernel, + ops::LinearChainCRFOpKernel); REGISTER_OP_CPU_KERNEL( linear_chain_crf_grad, - ops::LinearChainCRFGradOpKernel, - ops::LinearChainCRFGradOpKernel); + ops::LinearChainCRFGradOpKernel, + ops::LinearChainCRFGradOpKernel); diff --git a/paddle/operators/linear_chain_crf_op.cu b/paddle/operators/linear_chain_crf_op.cu index 6fc8995f4c2ce05f89ffb58129695113f89159fa..3b105ec3414b5d63946331319d0f47a38e7908cc 100644 --- a/paddle/operators/linear_chain_crf_op.cu +++ b/paddle/operators/linear_chain_crf_op.cu @@ -16,11 +16,12 @@ limitations under the License. 
*/ namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( linear_chain_crf, - ops::LinearChainCRFOpKernel, - ops::LinearChainCRFOpKernel); -REGISTER_OP_GPU_KERNEL( + ops::LinearChainCRFOpKernel, + ops::LinearChainCRFOpKernel); +REGISTER_OP_CUDA_KERNEL( linear_chain_crf_grad, - ops::LinearChainCRFGradOpKernel, - ops::LinearChainCRFGradOpKernel); + ops::LinearChainCRFGradOpKernel, + ops::LinearChainCRFGradOpKernel); diff --git a/paddle/operators/linear_chain_crf_op.h b/paddle/operators/linear_chain_crf_op.h index 014bbfa7580011e38a2f546e30d1e584965a7815..694584e79c3a1e818814a4a2145f52d8db7cf10a 100644 --- a/paddle/operators/linear_chain_crf_op.h +++ b/paddle/operators/linear_chain_crf_op.h @@ -50,7 +50,7 @@ template using EigenMatrix = framework::EigenMatrix; -template +template class LinearChainCRFOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -137,7 +137,8 @@ class LinearChainCRFOpKernel : public framework::OpKernel { framework::make_ddim({static_cast(batch_size), 1}), platform::CPUPlace()); - auto place = ctx.GetEigenDevice(); + auto& place = *ctx.template device_context() + .eigen_device(); auto x = EigenMatrix::From(*emission_weights); auto x_row_max = EigenMatrix::From(emission_row_max); x_row_max.device(place) = @@ -287,7 +288,7 @@ class LinearChainCRFOpKernel : public framework::OpKernel { } }; -template +template class LinearChainCRFGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -359,8 +360,7 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel { emission_grad->mutable_data(platform::CPUPlace()); if (transition_grad) { transition_grad->mutable_data(platform::CPUPlace()); - math::SetConstant()(ctx.device_context(), - transition_grad, 0.); + math::set_constant(ctx.device_context(), transition_grad, 0.); } // Now, all the inputs and outputs should be on the CPU memory. 
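The linear_chain_crf gradient hunk above replaces the place-templated math::SetConstant functor call with the math::set_constant helper, which selects the device from its runtime DeviceContext argument rather than from a template parameter. A small sketch of the two zero-fill styles as this patch uses them; the reconstructed template arguments, headers, and the wrapper function names are assumptions for illustration.

// Sketch only: both forms appear in this patch (e.g. fill_zeros_like and the
// linear_chain_crf gradient); template arguments are reconstructed here.
#include "paddle/framework/op_registry.h"
#include "paddle/operators/math/math_function.h"

namespace paddle {
namespace operators {

// Functor form: the caller is already templated on DeviceContext, so the
// device is fixed at compile time.
template <typename DeviceContext, typename T>
void ZeroWithFunctor(const framework::ExecutionContext& ctx,
                     framework::Tensor* out) {
  math::SetConstant<DeviceContext, T> setter;
  setter(ctx.template device_context<DeviceContext>(), out, static_cast<T>(0));
}

// Helper form: set_constant dispatches on the runtime device context, which
// suits code that only holds a base-class DeviceContext reference.
void ZeroWithHelper(const platform::DeviceContext& dev_ctx,
                    framework::Tensor* out) {
  math::set_constant(dev_ctx, out, 0.0f);
}

}  // namespace operators
}  // namespace paddle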
@@ -384,10 +384,10 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel { Tensor one_seq_beta = beta.Slice(start_pos, end_pos); Tensor one_seq_emission_grad = emission_grad->Slice(start_pos, end_pos); - BackwardOneSequence(ctx.device_context(), ll_grad[i], - one_seq_emission_exps, *transition_exps, - one_seq_alpha, one_seq_label, &one_seq_beta, - transition_grad, &one_seq_emission_grad); + BackwardOneSequence( + ctx.template device_context(), ll_grad[i], + one_seq_emission_exps, *transition_exps, one_seq_alpha, one_seq_label, + &one_seq_beta, transition_grad, &one_seq_emission_grad); } if (platform::is_gpu_place(ctx.GetPlace())) { @@ -441,8 +441,8 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel { copyTensor(ctx, transition_grad_src, transition_grad_dst); } - void BackwardOneSequence(const platform::DeviceContext& ctx, const T ll_grad, - const Tensor& emission_exps, + void BackwardOneSequence(const platform::CPUDeviceContext& ctx, + const T ll_grad, const Tensor& emission_exps, const Tensor& transition_exps, const Tensor& alpha, const Tensor& label, Tensor* beta, Tensor* transition_grad, @@ -481,7 +481,7 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel { auto alpha_mat = EigenMatrix::From(alpha); auto beta_mat = EigenMatrix::From(*beta); - auto* place = ctx.GetEigenDevice(); + auto* place = ctx.eigen_device(); auto prob = alpha_mat * beta_mat; auto row_sum = prob.sum(Eigen::DSizes(1)) .reshape(Eigen::DSizes(seq_length, 1)) diff --git a/paddle/operators/load_op.cc b/paddle/operators/load_op.cc index b0838eed1611c1d51e57fc2300606f753982dc89..4e58b84430f2a8697bbbc1acf971fd063120f563 100644 --- a/paddle/operators/load_op.cc +++ b/paddle/operators/load_op.cc @@ -38,61 +38,7 @@ class LoadOp : public framework::OperatorBase { out_var_name); auto *tensor = out_var->GetMutable(); - - uint32_t version; - fin.read(reinterpret_cast(&version), sizeof(version)); - PADDLE_ENFORCE_EQ(version, 0U, "Only version 0 is supported"); - framework::TensorDesc desc; - { // int32_t size - // proto buffer - int32_t size; - fin.read(reinterpret_cast(&size), sizeof(size)); - std::unique_ptr buf(new char[size]); - fin.read(reinterpret_cast(buf.get()), size); - PADDLE_ENFORCE(desc.ParseFromArray(buf.get(), size), - "Cannot parse tensor desc"); - } - { // read tensor - std::vector dims; - dims.reserve(static_cast(desc.dims().size())); - std::copy(desc.dims().begin(), desc.dims().end(), - std::back_inserter(dims)); - tensor->Resize(framework::make_ddim(dims)); - - void *buf; - platform::Place cpu = platform::CPUPlace(); - switch (desc.data_type()) { - case framework::FP32: - buf = tensor->mutable_data(cpu); - break; - case framework::FP64: - buf = tensor->mutable_data(cpu); - break; - case framework::INT32: - buf = tensor->mutable_data(cpu); - break; - case framework::INT64: - buf = tensor->mutable_data(cpu); - break; - default: - PADDLE_THROW("DataType %d not supported", desc.data_type()); - } - fin.read(static_cast(buf), tensor->memory_size()); - } - { // read lod - uint64_t lod_level; - fin.read(reinterpret_cast(&lod_level), sizeof(lod_level)); - auto &lod = *tensor->mutable_lod(); - lod.resize(lod_level); - for (uint64_t i = 0; i < lod_level; ++i) { - uint64_t size; - fin.read(reinterpret_cast(&size), sizeof(size)); - std::vector tmp(size / sizeof(size_t)); - fin.read(reinterpret_cast(tmp.data()), - static_cast(size)); - lod[i] = tmp; - } - } + framework::DeserializeFromStream(fin, tensor); auto place = dev_ctx.GetPlace(); if (platform::is_gpu_place(place)) { diff --git 
a/paddle/operators/lod_array_length_op.cc b/paddle/operators/lod_array_length_op.cc index 80445eb575703be3354595672a4c064b30e0f18c..b2f4ec57fadd2ba3dc8708abbfebaaeb67100f1e 100644 --- a/paddle/operators/lod_array_length_op.cc +++ b/paddle/operators/lod_array_length_op.cc @@ -43,12 +43,16 @@ class LoDArrayLengthProtoMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(LoDTensorArray) The input tensor array."); AddOutput("Out", "(Tensor) 1x1 CPU Tensor of length, int64_t"); - AddComment(R"DOC(Get the length of lod tensor array + AddComment(R"DOC( +LoDArrayLength Operator. -Out = len(X) +This operator obtains the length of lod tensor array: + +$$Out = len(X)$$ NOTE: The output is a CPU Tensor since the control variable should be only in CPU and the length of LoDTensorArray should be used as control variables. + )DOC"); } }; diff --git a/paddle/operators/lod_reset_op.cu b/paddle/operators/lod_reset_op.cu index 5244a17c3aad01909e3b8cf5f4d5abf8a44edc7f..f7c235898096ffb3d6ba039cb3f01d5bc9ef5364 100644 --- a/paddle/operators/lod_reset_op.cu +++ b/paddle/operators/lod_reset_op.cu @@ -16,9 +16,10 @@ namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(lod_reset, - ops::LoDResetKernel, - ops::LoDResetKernel); -REGISTER_OP_GPU_KERNEL( - lod_reset_grad, ops::LoDResetGradKernel, - ops::LoDResetGradKernel); +REGISTER_OP_CUDA_KERNEL( + lod_reset, ops::LoDResetKernel, + ops::LoDResetKernel); +REGISTER_OP_CUDA_KERNEL( + lod_reset_grad, + ops::LoDResetGradKernel, + ops::LoDResetGradKernel); diff --git a/paddle/operators/lod_reset_op.h b/paddle/operators/lod_reset_op.h index cbcbf80adc3cf68f9eb28bbe2a69168cc8798347..b86f8b13135fa809ade3b001434eda5d88375c2c 100644 --- a/paddle/operators/lod_reset_op.h +++ b/paddle/operators/lod_reset_op.h @@ -20,7 +20,7 @@ namespace paddle { namespace operators { -template +template class LoDResetKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { @@ -65,7 +65,7 @@ class LoDResetKernel : public framework::OpKernel { } }; -template +template class LoDResetGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { diff --git a/paddle/operators/lod_tensor_to_array_op.cc b/paddle/operators/lod_tensor_to_array_op.cc index 010c79d4e153463d4b2e48e5fd798d3bc4febaf1..b970bf31773f4c6feb0010bd40ba906b388ec310 100644 --- a/paddle/operators/lod_tensor_to_array_op.cc +++ b/paddle/operators/lod_tensor_to_array_op.cc @@ -14,6 +14,7 @@ #include "paddle/framework/lod_rank_table.h" #include "paddle/framework/lod_tensor_array.h" #include "paddle/framework/op_registry.h" +#include "paddle/operators/detail/safe_ref.h" namespace paddle { namespace operators { @@ -32,15 +33,20 @@ class LoDTensorToArrayOp : public framework::OperatorBase { : OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, const platform::DeviceContext &dev_ctx) const override { - auto &x = scope.FindVar(Input("X"))->Get(); - auto &rank_table = - scope.FindVar(Input("RankTable"))->Get(); - auto &out = - *scope.FindVar(Output("Out"))->GetMutable(); - + auto &x = detail::Ref(scope.FindVar(Input("X")), "Cannot find input %s", + Input("X")) + .Get(); + auto &rank_table = detail::Ref(scope.FindVar(Input("RankTable"))) + .Get(); + auto &out = *detail::Ref(scope.FindVar(Output("Out"))) + .GetMutable(); auto &items = rank_table.items(); auto max_seq_len = items[0].length; auto rank_level = rank_table.level(); + + 
PADDLE_ENFORCE_LT(rank_level, x.lod().size(), + "Input should be a LOD tensor, and size is at least %d", + rank_level + 1); out.resize(max_seq_len); std::vector> copy_ranges(max_seq_len); @@ -55,16 +61,13 @@ class LoDTensorToArrayOp : public framework::OperatorBase { size_t start_idx = x.lod()[rank_level][item.index] + t; auto lod_and_offset = framework::GetSubLoDAndAbsoluteOffset( x.lod(), start_idx, start_idx + 1, rank_level + 1); - auto &lod_length = lod_and_offset.first; framework::AppendLoD(&lod, lod_length); - size_t start_offset = lod_and_offset.second.first; size_t end_offset = lod_and_offset.second.second; copy_ranges[t].emplace_back(CopyRange{start_offset, end_offset}); } } - for (size_t i = 0; i < max_seq_len; ++i) { auto &ranges = copy_ranges[i]; size_t height = std::accumulate( diff --git a/paddle/operators/log_loss_op.cc b/paddle/operators/log_loss_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..4524229a330a0ceddca673e2b2a6d836a15a2e3f --- /dev/null +++ b/paddle/operators/log_loss_op.cc @@ -0,0 +1,116 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/log_loss_op.h" + +namespace paddle { +namespace operators { + +class LogLossOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Predicted"), + "Input(Predicted) must be initialized."); + PADDLE_ENFORCE(ctx->HasInput("Labels"), + "Input(Labels) must be initialized."); + + auto pred_dims = ctx->GetInputDim("Predicted"); + auto label_dims = ctx->GetInputDim("Labels"); + + PADDLE_ENFORCE_EQ(pred_dims, label_dims); + PADDLE_ENFORCE_EQ(pred_dims.size(), 2, + "The rank of Input(Predicted) must be 2 and the shape is " + "[batch_size, 1]."); + PADDLE_ENFORCE_EQ(pred_dims[1], 1, + "Each row of Input(Predicted) contains a real value, " + "so the 2nd dimension of Input(X) must be 1."); + + ctx->SetOutputDim("Loss", {pred_dims[0], 1}); + ctx->ShareLoD("Predicted", "Loss"); + } +}; + +template +class LogLossOpMaker : public framework::OpProtoAndCheckerMaker { + public: + LogLossOpMaker(framework::OpProto* proto, + framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("Predicted", + "The input value (Predicted) of Log loss op." + "Predicted is a 2-D tensor with shape [batch_size, 1]."); + AddInput("Labels", + "The target value (Labels) of Log loss op." + "Labels is a 2-D tensor with shape [batch_size, 1]."); + AddOutput("Loss", + "The output tensor with shape [batch_size, 1] " + "which represents the log loss."); + AddAttr("epsilon", "Epsilon in log loss."); + AddComment(R"DOC( +LogLoss Operator. + +Log loss is a loss function used for binary classification. Log Loss quantifies +the accuracy of a classifier by penalising false classifications. Minimising the +Log Loss is equivalent to maximising the accuracy of the classifier. 
We define +Predicted as the values predicted by our model and Labels as the target ground +truth value. Log loss can evaluate how close the predicted values are to the +target. The shapes of Predicted and Labels are both [batch_size, 1]. +The equation is: + +$$ +Loss = - Labels * log(Predicted + \epsilon) - + (1 - Labels) * log(1 - Predicted + \epsilon) +$$ + +)DOC"); + } +}; + +class LogLossGradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Predicted"), + "Input(Predicted) should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Labels"), + "Input(Labels) should not be null."); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Loss")), + "Input(Loss@GRAD) should not be null."); + PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("Predicted")), + "Output(Predicted@GRAD) should not be null."); + + auto pred_dims = ctx->GetInputDim("Predicted"); + auto label_dims = ctx->GetInputDim("Labels"); + auto loss_grad_dims = ctx->GetInputDim(framework::GradVarName("Loss")); + PADDLE_ENFORCE_EQ(loss_grad_dims, pred_dims); + + auto pred_grad_name = framework::GradVarName("Predicted"); + ctx->SetOutputDim(pred_grad_name, pred_dims); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(log_loss, ops::LogLossOp, ops::LogLossOpMaker, log_loss_grad, + ops::LogLossGradOp); +REGISTER_OP_CPU_KERNEL( + log_loss, ops::LogLossKernel); +REGISTER_OP_CPU_KERNEL( + log_loss_grad, + ops::LogLossGradKernel); diff --git a/paddle/operators/log_loss_op.cu b/paddle/operators/log_loss_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..e87ac7d12a2b730085b4e9a33457612c4eba2655 --- /dev/null +++ b/paddle/operators/log_loss_op.cu @@ -0,0 +1,23 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#define EIGEN_USE_GPU +#include "paddle/operators/log_loss_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + log_loss, ops::LogLossKernel); +REGISTER_OP_CUDA_KERNEL( + log_loss_grad, + ops::LogLossGradKernel); diff --git a/paddle/operators/log_loss_op.h b/paddle/operators/log_loss_op.h new file mode 100644 index 0000000000000000000000000000000000000000..743eddb74004b5e87ed9b8a6ccb1b8496b8548dc --- /dev/null +++ b/paddle/operators/log_loss_op.h @@ -0,0 +1,75 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
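As a quick illustration of the log loss formula defined in log_loss_op.cc above, the following standalone sketch computes the element-wise loss for float inputs; the function name and signature are hypothetical and are not part of the operator's API:

#include <cmath>
#include <cstddef>
#include <vector>

// Sketch: Loss_i = -Labels_i * log(Predicted_i + eps)
//                  - (1 - Labels_i) * log(1 - Predicted_i + eps)
std::vector<float> LogLossReference(const std::vector<float>& predicted,
                                    const std::vector<float>& labels,
                                    float epsilon) {
  std::vector<float> loss(predicted.size());
  for (std::size_t i = 0; i < predicted.size(); ++i) {
    loss[i] = -labels[i] * std::log(predicted[i] + epsilon) -
              (1.0f - labels[i]) * std::log(1.0f - predicted[i] + epsilon);
  }
  return loss;
}

The gradient kernel in log_loss_op.h below follows from differentiating this expression: dLoss/dPredicted = -Labels / (Predicted + eps) + (1 - Labels) / (1 - Predicted + eps), scaled by the incoming Loss gradient.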
+See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +template +using EigenVector = framework::EigenVector; + +template +class LogLossKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* loss_out = ctx.Output("Loss"); + + loss_out->mutable_data(ctx.GetPlace()); + + auto epsilon = static_cast(ctx.Attr("epsilon")); + + auto prediction = EigenVector::Flatten(*ctx.Input("Predicted")); + auto label = EigenVector::Flatten(*ctx.Input("Labels")); + + auto loss = EigenVector::Flatten(*loss_out); + auto& place = *ctx.template device_context().eigen_device(); + + loss.device(place) = (-(label * (prediction + epsilon).log()) - + ((static_cast(1) - label) * + (static_cast(1) - prediction + epsilon).log())); + } +}; + +template +class LogLossGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto epsilon = static_cast(ctx.Attr("epsilon")); + + auto prediction = EigenVector::Flatten(*ctx.Input("Predicted")); + auto label = EigenVector::Flatten(*ctx.Input("Labels")); + + auto* dloss = ctx.Input(framework::GradVarName("Loss")); + auto* dpred = ctx.Output(framework::GradVarName("Predicted")); + + auto dl = EigenVector::Flatten(*dloss); + auto& place = *ctx.template device_context().eigen_device(); + + if (dpred) { + dpred->mutable_data(ctx.GetPlace()); + auto dx = framework::EigenVector::Flatten(*dpred); + dx.device(place) = dl * (-(label / (prediction + epsilon)) + + ((static_cast(1) - label) / + (static_cast(1) - prediction + epsilon))); + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/logical_op.cc b/paddle/operators/logical_op.cc index a37582c1d840ac11f847d8743c824ef1aef0fd66..c818d5e9c19abab15ebdc2b3485e03ab66cf649d 100644 --- a/paddle/operators/logical_op.cc +++ b/paddle/operators/logical_op.cc @@ -139,15 +139,16 @@ class LogicalOp : public framework::OperatorWithKernel { ::paddle::operators::UnaryLogicalOpInferShape<_##op_type##Comment>, \ ::paddle::framework::EmptyGradOpMaker); -REGISTER_BINARY_LOGICAL_OP(logical_and, "Out = X && Y"); +REGISTER_BINARY_LOGICAL_OP(logical_and, "$$Out = X \\&\\& Y$$"); REGISTER_BINARY_LOGICAL_KERNEL(logical_and, CPU, paddle::operators::LogicalAndFunctor); -REGISTER_BINARY_LOGICAL_OP(logical_or, "Out = X && Y"); +REGISTER_BINARY_LOGICAL_OP(logical_or, "$$Out = X || Y$$"); REGISTER_BINARY_LOGICAL_KERNEL(logical_or, CPU, paddle::operators::LogicalOrFunctor); -REGISTER_UNARY_LOGICAL_OP(logical_not, "Out = !X"); +REGISTER_UNARY_LOGICAL_OP(logical_not, "$$Out = !X$$"); REGISTER_UNARY_LOGICAL_KERNEL(logical_not, CPU, paddle::operators::LogicalNotFunctor); -REGISTER_BINARY_LOGICAL_OP(logical_xor, "Out = (X || Y) && !(X && Y)"); +REGISTER_BINARY_LOGICAL_OP(logical_xor, + "$$Out = (X || Y) \\, \\&\\& \\, !(X \\&\\& Y)$$"); REGISTER_BINARY_LOGICAL_KERNEL(logical_xor, CPU, paddle::operators::LogicalXorFunctor); diff --git a/paddle/operators/logical_op.cu b/paddle/operators/logical_op.cu index d41239b2ca43e7145ea56afcb0af69948838cc48..7fef60e0c9e957f28118e54d23c6043752d2f52f 100644 --- a/paddle/operators/logical_op.cu +++ b/paddle/operators/logical_op.cu @@ -14,11 +14,11 @@ #include "paddle/operators/logical_op.h" -REGISTER_BINARY_LOGICAL_KERNEL(logical_and, GPU, 
+REGISTER_BINARY_LOGICAL_KERNEL(logical_and, CUDA, paddle::operators::LogicalAndFunctor); -REGISTER_BINARY_LOGICAL_KERNEL(logical_or, GPU, +REGISTER_BINARY_LOGICAL_KERNEL(logical_or, CUDA, paddle::operators::LogicalOrFunctor); -REGISTER_UNARY_LOGICAL_KERNEL(logical_not, GPU, +REGISTER_UNARY_LOGICAL_KERNEL(logical_not, CUDA, paddle::operators::LogicalNotFunctor); -REGISTER_BINARY_LOGICAL_KERNEL(logical_xor, GPU, +REGISTER_BINARY_LOGICAL_KERNEL(logical_xor, CUDA, paddle::operators::LogicalXorFunctor); diff --git a/paddle/operators/logical_op.h b/paddle/operators/logical_op.h index 6e78a7d6ed87ba950886e6bc667f82118ff78904..629388cac81e60c8b84197238018384ffc59a08f 100644 --- a/paddle/operators/logical_op.h +++ b/paddle/operators/logical_op.h @@ -47,7 +47,7 @@ struct LogicalXorFunctor { } }; -template +template class BinaryLogicalOpKernel : public framework::OpKernel { public: @@ -57,14 +57,14 @@ class BinaryLogicalOpKernel auto* y = context.Input("Y"); auto* out = context.Output("Out"); Functor binary_func; - platform::Transform trans; - trans(context.device_context(), x->data(), x->data() + x->numel(), - y->data(), out->mutable_data(context.GetPlace()), - binary_func); + platform::Transform trans; + trans(context.template device_context(), x->data(), + x->data() + x->numel(), y->data(), + out->mutable_data(context.GetPlace()), binary_func); } }; -template +template class UnaryLogicalOpKernel : public framework::OpKernel { public: @@ -73,8 +73,9 @@ class UnaryLogicalOpKernel auto* x = context.Input("X"); auto* out = context.Output("Out"); Functor unary_func; - platform::Transform trans; - trans(context.device_context(), x->data(), x->data() + x->numel(), + platform::Transform trans; + trans(context.template device_context(), x->data(), + x->data() + x->numel(), out->mutable_data(context.GetPlace()), unary_func); } }; @@ -85,9 +86,9 @@ class UnaryLogicalOpKernel #define REGISTER_BINARY_LOGICAL_KERNEL(op_type, dev, functor) \ REGISTER_OP_##dev##_KERNEL( \ op_type, ::paddle::operators::BinaryLogicalOpKernel< \ - ::paddle::platform::dev##Place, functor>); + ::paddle::platform::dev##DeviceContext, functor>); #define REGISTER_UNARY_LOGICAL_KERNEL(op_type, dev, functor) \ REGISTER_OP_##dev##_KERNEL( \ op_type, ::paddle::operators::UnaryLogicalOpKernel< \ - ::paddle::platform::dev##Place, functor>); + ::paddle::platform::dev##DeviceContext, functor>); diff --git a/paddle/operators/lookup_table_op.cu b/paddle/operators/lookup_table_op.cu index 84b044184a36a0d3a72a4105d6baf401b4774cf7..9431030a53975acafe9bcb22dc9164492929b07a 100644 --- a/paddle/operators/lookup_table_op.cu +++ b/paddle/operators/lookup_table_op.cu @@ -85,6 +85,8 @@ template class LookupTableGradCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { + auto& dev_ctx = + context.template device_context(); bool is_sparse = context.Attr("is_sparse"); if (is_sparse) { auto* ids = context.Input("Ids"); @@ -95,7 +97,7 @@ class LookupTableGradCUDAKernel : public framework::OpKernel { auto* ids_data = ids->data(); auto ids_dim = ids->dims(); - auto stream = context.cuda_device_context().stream(); + auto stream = dev_ctx.stream(); // copy GPU memory to CPU pinned memory framework::Vector new_rows; new_rows.resize(ids_dim[0]); @@ -129,14 +131,11 @@ class LookupTableGradCUDAKernel : public framework::OpKernel { T* d_table = d_table_t->mutable_data(context.GetPlace()); auto t = framework::EigenVector::Flatten(*d_table_t); - t.device(context.GetEigenDevice()) = - 
t.constant(static_cast(0)); + t.device(*dev_ctx.eigen_device()) = t.constant(static_cast(0)); dim3 threads(128, 8); dim3 grids(8, 1); - LookupTableGrad< - T, 128, 8, - 8><<>>( + LookupTableGrad<<>>( d_table, d_output, ids, N, K, D); } } @@ -146,7 +145,8 @@ class LookupTableGradCUDAKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(lookup_table, ops::LookupTableCUDAKernel, - ops::LookupTableCUDAKernel); -REGISTER_OP_GPU_KERNEL(lookup_table_grad, ops::LookupTableGradCUDAKernel, - ops::LookupTableGradCUDAKernel); +REGISTER_OP_CUDA_KERNEL(lookup_table, ops::LookupTableCUDAKernel, + ops::LookupTableCUDAKernel); +REGISTER_OP_CUDA_KERNEL(lookup_table_grad, + ops::LookupTableGradCUDAKernel, + ops::LookupTableGradCUDAKernel); diff --git a/paddle/operators/lrn_op.cc b/paddle/operators/lrn_op.cc index 00392b7967d020a7951a16a7850a2f08735baeb8..b5b7bc940a85ac2bbb6c6b303284777df714b7d6 100644 --- a/paddle/operators/lrn_op.cc +++ b/paddle/operators/lrn_op.cc @@ -19,6 +19,103 @@ namespace operators { using framework::Tensor; +template +struct LRNFunctor { + void operator()(const framework::ExecutionContext& ctx, + const framework::Tensor& input, framework::Tensor* out, + framework::Tensor* mid, int N, int C, int H, int W, int n, + T k, T alpha, T beta) { + auto x_v = framework::EigenVector::Flatten(input); + + const int start = -(n - 1) / 2; + const int end = start + n; + + auto e_mid = framework::EigenTensor::From(*mid); + e_mid = e_mid.constant(k); + + auto e_x = framework::EigenTensor::From(input); + for (int m = 0; m < N; m++) { + for (int i = 0; i < C; i++) { + for (int c = start; c <= end; c++) { + int ch = i + c; + if (ch >= 0 && ch < C) { + auto s = e_mid.slice(Eigen::array({{m, i, 0, 0}}), + Eigen::array({{1, 1, H, W}})); + + auto r = e_x.slice(Eigen::array({{m, ch, 0, 0}}), + Eigen::array({{1, 1, H, W}})); + + s += alpha * r.square(); + } + } + } + } + + auto out_e = framework::EigenVector::Flatten(*out); + out_e = x_v * e_mid.reshape(Eigen::DSizes(e_mid.size())).pow(-beta); + } +}; +template struct LRNFunctor; +template struct LRNFunctor; + +template +struct LRNGradFunctor { + void operator()(const framework::ExecutionContext& ctx, + const framework::Tensor& x, const framework::Tensor& out, + const framework::Tensor& mid, framework::Tensor* x_g, + const framework::Tensor& out_g, int N, int C, int H, int W, + int n, T alpha, T beta) { + T ratio = -2 * alpha * beta; + auto x_g_e = framework::EigenVector::Flatten(*x_g); + x_g_e = x_g_e.constant(0.0); + + auto e_x = framework::EigenTensor::From(x); + auto e_x_g = framework::EigenTensor::From(*x_g); + auto e_out = framework::EigenTensor::From(out); + auto e_out_g = framework::EigenTensor::From(out_g); + auto e_mid = framework::EigenTensor::From(mid); + + const int start = -(n - 1) / 2; + const int end = start + n; + for (int m = 0; m < N; m++) { + for (int i = 0; i < C; i++) { + auto i_x = e_x.slice(Eigen::array({{m, i, 0, 0}}), + Eigen::array({{1, 1, H, W}})); + + auto i_x_g = e_x_g.slice(Eigen::array({{m, i, 0, 0}}), + Eigen::array({{1, 1, H, W}})); + + auto i_out_g = e_out_g.slice(Eigen::array({{m, i, 0, 0}}), + Eigen::array({{1, 1, H, W}})); + + auto i_mid = e_mid.slice(Eigen::array({{m, i, 0, 0}}), + Eigen::array({{1, 1, H, W}})); + + i_x_g = i_mid.pow(-beta) * i_out_g; + for (int c = start; c <= end; c++) { + int ch = i + c; + if (ch < 0 || ch >= C) { + continue; + } + + auto c_out = e_out.slice(Eigen::array({{m, ch, 0, 0}}), + Eigen::array({{1, 1, H, W}})); + + auto c_mid = 
e_mid.slice(Eigen::array({{m, ch, 0, 0}}), + Eigen::array({{1, 1, H, W}})); + + auto c_out_g = e_out_g.slice(Eigen::array({{m, ch, 0, 0}}), + Eigen::array({{1, 1, H, W}})); + + i_x_g += ratio * c_out_g * c_out * i_x / c_mid; + } + } + } + } +}; +template struct LRNGradFunctor; +template struct LRNGradFunctor; + class LRNOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; @@ -83,8 +180,8 @@ class LRNOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( Local Response Normalization Operator. -This operator comes from the paper -"ImageNet Classification with Deep Convolutional Neural Networks". +This operator comes from the paper: +<>. The original formula is: @@ -107,7 +204,7 @@ Input(i, x, y), Output(i, x, y) represents an element in an image. C is the number of feature maps of one image. n is a hyper-parameter configured when operator is initialized. The sum in the denominator is the sum of the same positions in the neighboring maps. - + )DOC"); } }; @@ -119,8 +216,7 @@ class LRNOpGrad : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("MidOut")), - "Input(MidOut@GRAD) should not be null"); + PADDLE_ENFORCE(ctx->HasInput("MidOut"), "Input(MidOut) should not be null"); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), "Input(Out@GRAD) should not be null"); @@ -134,6 +230,7 @@ class LRNOpGrad : public framework::OperatorWithKernel { namespace ops = paddle::operators; REGISTER_OP(lrn, ops::LRNOp, ops::LRNOpMaker, lrn_grad, ops::LRNOpGrad); -REGISTER_OP_CPU_KERNEL(lrn, ops::LRNKernel); -REGISTER_OP_CPU_KERNEL(lrn_grad, - ops::LRNGradKernel); +REGISTER_OP_CPU_KERNEL( + lrn, ops::LRNKernel); +REGISTER_OP_CPU_KERNEL( + lrn_grad, ops::LRNGradKernel); diff --git a/paddle/operators/lrn_op.cu b/paddle/operators/lrn_op.cu index 607dc6d86a72b0a0c953f52782955dc530b7478c..c6857c2b6d0a9011ef83d115e6edd81bf2f8a0ca 100644 --- a/paddle/operators/lrn_op.cu +++ b/paddle/operators/lrn_op.cu @@ -12,11 +12,167 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -#define EIGEN_USE_GPU #include "paddle/operators/lrn_op.h" -namespace ops = paddle::operators; +namespace paddle { +namespace operators { + +template +__global__ void KeCMRNormFillScale(int img_size, const T* in, T* mid, int C, + int H, int W, int size, T k, T alpha) { + const int idx = threadIdx.x + blockIdx.x * blockDim.x; + if (idx < img_size) { + const int w = idx % W; + const int h = (idx / W) % H; + const int n = idx / W / H; + const int offset = (n * C * H + h) * W + w; + + in += offset; + mid += offset; + const int step = H * W; + const int pre_pad = (size - 1) / 2; + const int post_pad = size - pre_pad - 1; + + T accum = 0; + int index = 0; + while (index < C + post_pad) { + if (index < C) { + T val = in[index * step]; + accum += val * val; + } + if (index >= size) { + T val = in[(index - size) * step]; + accum -= val * val; + } + if (index >= post_pad) { + mid[(index - post_pad) * step] = k + accum * alpha; + } + ++index; + } + } +} + +template +__global__ void KeCMRNormOutput(int input_size, const T* in, const T* mid, + T negative_beta, T* out) { + const int index = threadIdx.x + blockIdx.x * blockDim.x; + if (index < input_size) { + out[index] = in[index] * pow(mid[index], negative_beta); + } +} + +template +void CrossMapNormal(const framework::ExecutionContext& ctx, const T* inputs, + T* outputs, T* mid, int N, int C, int H, int W, int n, T k, + T alpha, T beta) { + int img_size = N * H * W; + const int block_size = 1024; + int grid_size = (img_size + block_size - 1) / block_size; + + auto& dev_ctx = ctx.template device_context(); + KeCMRNormFillScale<<>>( + img_size, inputs, mid, C, H, W, n, k, alpha); + + int input_size = N * H * W * C; + grid_size = (input_size + block_size - 1) / block_size; + KeCMRNormOutput<<>>( + input_size, inputs, mid, -beta, outputs); +} + +template +struct LRNFunctor { + void operator()(const framework::ExecutionContext& ctx, + const framework::Tensor& input, framework::Tensor* out, + framework::Tensor* mid, int N, int C, int H, int W, int n, + T k, T alpha, T beta) { + CrossMapNormal( + ctx, input.data(), out->mutable_data(ctx.GetPlace()), + mid->mutable_data(ctx.GetPlace()), N, C, H, W, n, k, alpha, beta); + } +}; + +template struct LRNFunctor; +template struct LRNFunctor; -REGISTER_OP_GPU_KERNEL(lrn, ops::LRNKernel); -REGISTER_OP_GPU_KERNEL(lrn_grad, - ops::LRNGradKernel); +template +__global__ void KeCMRNormDiff(int img_size, const T* x, const T* out, + const T* mid, T* x_g, const T* out_g, int C, + int H, int W, int size, T negative_beta, + T ratio) { + const int idx = threadIdx.x + blockIdx.x * blockDim.x; + if (idx < img_size) { + const int w = idx % W; + const int h = (idx / W) % H; + const int n = idx / W / H; + const int offset = (n * C * H + h) * W + w; + x += offset; + out += offset; + mid += offset; + out_g += offset; + x_g += offset; + + const int step = H * W; + const int pre_pad = size - (size + 1) / 2; + const int post_pad = size - pre_pad - 1; + + int index = 0; + T accum = 0; + // TODO(gongwb): optimize this with thread shared array. 
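+    // The loop below keeps `accum` as a sliding-window sum of
+    // out_g * out / mid over the `size` neighboring channels: each step adds
+    // the channel entering the window and, once index >= size, subtracts the
+    // channel leaving it. When index >= post_pad, the gradient of channel
+    // (index - post_pad) is written as
+    //   x_g = out_g * mid^(-beta) - (2 * alpha * beta) * x * accum,
+    // which is the InputGrad formula documented in lrn_op.h.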
+ while (index < C + post_pad) { + if (index < C) { + x_g[index * step] = 0.0; + accum += out_g[index * step] * out[index * step] / mid[index * step]; + } + if (index >= size) { + accum -= out_g[(index - size) * step] * out[(index - size) * step] / + mid[(index - size) * step]; + } + if (index >= post_pad) { + x_g[(index - post_pad) * step] += + out_g[(index - post_pad) * step] * + pow(mid[(index - post_pad) * step], negative_beta) - + ratio * x[(index - post_pad) * step] * accum; + } + ++index; + } + } +} + +template +void CrossMapNormalGrad(const framework::ExecutionContext& ctx, const T* x, + const T* out, const T* mid, T* x_g, const T* out_g, + int N, int C, int H, int W, int n, T alpha, T beta) { + int img_size = N * H * W; + + const int block_size = 1024; + int grid_size = (img_size + block_size - 1) / block_size; + + auto& dev_ctx = ctx.template device_context(); + KeCMRNormDiff<<>>( + img_size, x, out, mid, x_g, out_g, C, H, W, n, -beta, + 2.0f * alpha * beta); +} + +template +struct LRNGradFunctor { + void operator()(const framework::ExecutionContext& ctx, + const framework::Tensor& x, const framework::Tensor& out, + const framework::Tensor& mid, framework::Tensor* x_g, + const framework::Tensor& out_g, int N, int C, int H, int W, + int n, T alpha, T beta) { + CrossMapNormalGrad(ctx, x.data(), out.data(), mid.data(), + x_g->mutable_data(ctx.GetPlace()), out_g.data(), + N, C, H, W, n, alpha, beta); + } +}; + +template struct LRNGradFunctor; +template struct LRNGradFunctor; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + lrn, ops::LRNKernel); +REGISTER_OP_CUDA_KERNEL( + lrn_grad, ops::LRNGradKernel); diff --git a/paddle/operators/lrn_op.h b/paddle/operators/lrn_op.h index 606c65744303b53846c9077dfa832bdbeedb410e..44063d3e036809eb236bbe7c46aa0cce06b46df0 100644 --- a/paddle/operators/lrn_op.h +++ b/paddle/operators/lrn_op.h @@ -21,7 +21,15 @@ namespace paddle { namespace operators { -template +template +struct LRNFunctor { + void operator()(const framework::ExecutionContext& ctx, + const framework::Tensor& input, framework::Tensor* out, + framework::Tensor* mid, int N, int C, int H, int W, int n, + T k, T alpha, T beta); +}; + +template class LRNKernel : public framework::OpKernel { public: using Tensor = framework::Tensor; @@ -31,8 +39,8 @@ class LRNKernel : public framework::OpKernel { // f(x) represents outputs void Compute(const framework::ExecutionContext& ctx) const override { // input - const Tensor* x = ctx.Input("X"); - auto x_dims = x->dims(); + const Tensor& x = *ctx.Input("X"); + auto x_dims = x.dims(); // NCHW int N = x_dims[0]; @@ -57,38 +65,20 @@ class LRNKernel : public framework::OpKernel { PADDLE_ENFORCE(beta >= 0.0, "beta should >= 0.0"); PADDLE_ENFORCE(k >= 0.0, "k should >= 0.0"); - auto x_v = framework::EigenVector::Flatten(*x); - - const int start = -(n - 1) / 2; - const int end = start + n; - - auto e_mid = framework::EigenTensor::From(*mid); - e_mid.device(ctx.GetEigenDevice()) = e_mid.constant(k); - - auto e_x = framework::EigenTensor::From(*x); - for (int m = 0; m < N; m++) { - for (int i = 0; i < C; i++) { - for (int c = start; c <= end; c++) { - int ch = i + c; - if (ch >= 0 && ch < C) { - auto s = e_mid.slice(Eigen::array({{m, i, 0, 0}}), - Eigen::array({{1, 1, H, W}})); - - auto r = e_x.slice(Eigen::array({{m, ch, 0, 0}}), - Eigen::array({{1, 1, H, W}})); - - s.device(ctx.GetEigenDevice()) += alpha * r.square(); - } - } - } - } - - auto out_e = framework::EigenVector::Flatten(*out); - 
out_e.device(ctx.GetEigenDevice()) = - x_v * e_mid.reshape(Eigen::DSizes(e_mid.size())).pow(-beta); + LRNFunctor f; + f(ctx, x, out, mid, N, C, H, W, n, k, alpha, beta); } }; +template +struct LRNGradFunctor { + void operator()(const framework::ExecutionContext& ctx, + const framework::Tensor& x, const framework::Tensor& out, + const framework::Tensor& mid, framework::Tensor* x_g, + const framework::Tensor& out_g, int N, int C, int H, int W, + int n, T alpha, T beta); +}; + /** * \brief Backward calculation for normalization with across maps. * @@ -97,7 +87,7 @@ class LRNKernel : public framework::OpKernel { * The implementation of this Function is derived from the * CrossMapNormalFunc implementation. * - * InputGrad = OutputGrad * denoms ^ (-beta) + * InputGrad = OutputGrad * MidOut ^ (-beta) * -- upper * + > (OutputGrad * OutputValue * (-2 * alpha * beta) / MidOut) * InputValue * -- lower @@ -108,23 +98,20 @@ class LRNKernel : public framework::OpKernel { * The upper and lower is the same as forward. The logic of the sum * is also the same as forward. */ -template +template class LRNGradKernel : public framework::OpKernel { public: using Tensor = framework::Tensor; void Compute(const framework::ExecutionContext& ctx) const override { - const Tensor* x = ctx.Input("X"); - const Tensor* out = ctx.Input("Out"); - const Tensor* out_g = ctx.Input(framework::GradVarName("Out")); - const Tensor* mid = ctx.Input("MidOut"); + const Tensor& x = *ctx.Input("X"); + const Tensor& out = *ctx.Input("Out"); + const Tensor& out_g = *ctx.Input(framework::GradVarName("Out")); + const Tensor& mid = *ctx.Input("MidOut"); auto x_g = ctx.Output(framework::GradVarName("X")); x_g->mutable_data(ctx.GetPlace()); - auto x_g_e = framework::EigenVector::Flatten(*x_g); - x_g_e.device(ctx.GetEigenDevice()) = x_g_e.constant(0.0); - - auto x_dims = x->dims(); + auto x_dims = x.dims(); int N = x_dims[0]; int C = x_dims[1]; int H = x_dims[2]; @@ -133,51 +120,9 @@ class LRNGradKernel : public framework::OpKernel { int n = ctx.Attr("n"); T alpha = ctx.Attr("alpha"); T beta = ctx.Attr("beta"); - T ratio = -2 * alpha * beta; - - auto e_x = framework::EigenTensor::From(*x); - auto e_x_g = framework::EigenTensor::From(*x_g); - auto e_out = framework::EigenTensor::From(*out); - auto e_out_g = framework::EigenTensor::From(*out_g); - auto e_mid = framework::EigenTensor::From(*mid); - - const int start = -(n - 1) / 2; - const int end = start + n; - for (int m = 0; m < N; m++) { - for (int i = 0; i < C; i++) { - auto i_x = e_x.slice(Eigen::array({{m, i, 0, 0}}), - Eigen::array({{1, 1, H, W}})); - - auto i_x_g = e_x_g.slice(Eigen::array({{m, i, 0, 0}}), - Eigen::array({{1, 1, H, W}})); - - auto i_out_g = e_out_g.slice(Eigen::array({{m, i, 0, 0}}), - Eigen::array({{1, 1, H, W}})); - - auto i_mid = e_mid.slice(Eigen::array({{m, i, 0, 0}}), - Eigen::array({{1, 1, H, W}})); - - i_x_g.device(ctx.GetEigenDevice()) = i_mid.pow(-beta) * i_out_g; - for (int c = start; c <= end; c++) { - int ch = i + c; - if (ch < 0 || ch >= C) { - continue; - } - - auto c_out = e_out.slice(Eigen::array({{m, ch, 0, 0}}), - Eigen::array({{1, 1, H, W}})); - - auto c_mid = e_mid.slice(Eigen::array({{m, ch, 0, 0}}), - Eigen::array({{1, 1, H, W}})); - - auto c_out_g = e_out_g.slice(Eigen::array({{m, ch, 0, 0}}), - Eigen::array({{1, 1, H, W}})); - - i_x_g.device(ctx.GetEigenDevice()) += - ratio * c_out_g * c_out * i_x / c_mid; - } - } - } + + LRNGradFunctor f; + f(ctx, x, out, mid, x_g, out_g, N, C, H, W, n, alpha, beta); } }; diff --git a/paddle/operators/lstm_op.cc 
b/paddle/operators/lstm_op.cc index 4cbb60f3fdab968e8c36d4fbad55fd3efc7b1d0d..2db7da30db416e03cf473c8e65b023d9265e9193 100644 --- a/paddle/operators/lstm_op.cc +++ b/paddle/operators/lstm_op.cc @@ -181,7 +181,7 @@ class LSTMOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( Long-Short Term Memory (LSTM) Operator. -The defalut implementation is diagonal/peephole connection +The default implementation is diagonal/peephole connection (https://arxiv.org/pdf/1402.1128.pdf), the formula is as follows: $$ @@ -198,27 +198,27 @@ c_t = f_t \odot c_{t-1} + i_t \odot \tilde{c_t} \\ h_t = o_t \odot act_h(c_t) $$ -where the W terms denote weight matrices (e.g. \f$W_{xi}\f$ is the matrix -of weights from the input gate to the input), \f$W_{ic}, W_{fc}, W_{oc}\f$ +where the W terms denote weight matrices (e.g. $W_{xi}$ is the matrix +of weights from the input gate to the input), $W_{ic}, W_{fc}, W_{oc}$ are diagonal weight matrices for peephole connections. In our implementation, we use vectors to represent these diagonal weight matrices. The b terms -denote bias vectors (\f$b_i\f$ is the input gate bias vector), \f$\sigma\f$ +denote bias vectors ($b_i$ is the input gate bias vector), $\sigma$ is the non-linear activation, such as the logistic sigmoid function, and -\f$i, f, o\f$ and \f$c\f$ are the input gate, forget gate, output gate, +$i, f, o$ and $c$ are the input gate, forget gate, output gate, and cell activation vectors, respectively, all of which have the same size as -the cell output activation vector \f$h\f$. +the cell output activation vector $h$. -The \f$\odot\f$ is the element-wise product of the vectors. \f$act_g\f$ and \f$act_h\f$ +The $\odot$ is the element-wise product of the vectors. $act_g$ and $act_h$ are the cell input and cell output activation functions and `tanh` is usually -used for them. \f$\tilde{c_t}\f$ is also called candidate hidden state, +used for them. $\tilde{c_t}$ is also called candidate hidden state, which is computed based on the current input and the previous hidden state. -Set `use_peepholes` False to disable peephole connection -(http://www.bioinf.jku.at/publications/older/2604.pdf). The formula -is omitted here. +Set `use_peepholes` False to disable peephole connection. The formula +is omitted here, please refer to the paper +http://www.bioinf.jku.at/publications/older/2604.pdf for details. -Note that these \f$W_{xi}x_{t}, W_{xf}x_{t}, W_{xc}x_{t}, W_{xo}x_{t}\f$ -operations on the input \f$x_{t}\f$ are NOT included in this operator. +Note that these $W_{xi}x_{t}, W_{xf}x_{t}, W_{xc}x_{t}, W_{xo}x_{t}$ +operations on the input $x_{t}$ are NOT included in this operator. Users can choose to use a fully-connected operator before the LSTM operator.
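+For a single unit, taking for instance $f_t = 0.5$, $c_{t-1} = 2$, $i_t = 1$,
+$o_t = 1$, $\tilde{c_t} = 0.2$ and $act_h = \tanh$, the cell and hidden
+updates above give
+
+$$
+c_t = 0.5 \cdot 2 + 1 \cdot 0.2 = 1.2, \quad h_t = 1 \cdot \tanh(1.2) \approx 0.834
+$$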
)DOC"); @@ -273,8 +273,9 @@ class LSTMGradOp : public framework::OperatorWithKernel { namespace ops = paddle::operators; REGISTER_OP(lstm, ops::LSTMOp, ops::LSTMOpMaker, lstm_grad, ops::LSTMGradOp); -REGISTER_OP_CPU_KERNEL(lstm, ops::LSTMKernel, - ops::LSTMKernel); -REGISTER_OP_CPU_KERNEL(lstm_grad, - ops::LSTMGradKernel, - ops::LSTMGradKernel); +REGISTER_OP_CPU_KERNEL( + lstm, ops::LSTMKernel, + ops::LSTMKernel); +REGISTER_OP_CPU_KERNEL( + lstm_grad, ops::LSTMGradKernel, + ops::LSTMGradKernel); diff --git a/paddle/operators/lstm_op.cu.cc b/paddle/operators/lstm_op.cu.cc index 610cbb03e890203407b1489800bc17f1a196d12c..48519bed6f7d927b40d02683a7e9f2acfb8b85e5 100644 --- a/paddle/operators/lstm_op.cu.cc +++ b/paddle/operators/lstm_op.cu.cc @@ -15,8 +15,9 @@ #include "paddle/operators/lstm_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(lstm, ops::LSTMKernel, - ops::LSTMKernel); -REGISTER_OP_GPU_KERNEL(lstm_grad, - ops::LSTMGradKernel, - ops::LSTMGradKernel); +REGISTER_OP_CUDA_KERNEL( + lstm, ops::LSTMKernel, + ops::LSTMKernel); +REGISTER_OP_CUDA_KERNEL( + lstm_grad, ops::LSTMGradKernel, + ops::LSTMGradKernel); diff --git a/paddle/operators/lstm_op.h b/paddle/operators/lstm_op.h index 721aa42c92f2926aabbc13d0a9027b2b4e573225..14abd4bf0a6e73a9c0f000f53a5e1e380f01d1c0 100644 --- a/paddle/operators/lstm_op.h +++ b/paddle/operators/lstm_op.h @@ -24,16 +24,16 @@ namespace operators { using LoDTensor = framework::LoDTensor; using Tensor = framework::Tensor; -template -inline void ReorderInitState(const platform::DeviceContext& ctx, +template +inline void ReorderInitState(const DeviceContext& ctx, const framework::Tensor& src, const size_t* index, framework::Tensor* dst, bool indexed_src) { - math::CopyMatrixRowsFunctor row_shuffle; + math::CopyMatrixRowsFunctor row_shuffle; dst->mutable_data(src.dims(), ctx.GetPlace()); row_shuffle(ctx, src, index, *dst, indexed_src); } -template +template class LSTMKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -52,8 +52,8 @@ class LSTMKernel : public framework::OpKernel { cell_out->mutable_data(ctx.GetPlace()); bool is_reverse = ctx.Attr("is_reverse"); - math::LoDTensor2BatchFunctor to_batch; - auto& device_ctx = ctx.device_context(); + math::LoDTensor2BatchFunctor to_batch; + auto& device_ctx = ctx.template device_context(); to_batch(device_ctx, *input, *batch_gate, true, is_reverse); auto in_dims = input->dims(); @@ -64,7 +64,7 @@ class LSTMKernel : public framework::OpKernel { Tensor b = *bias; b.Resize({bias->numel(), 1}); Tensor gate_bias = b.Slice(0, 4 * frame_size); - math::RowwiseAdd add_bias; + math::RowwiseAdd add_bias; add_bias(device_ctx, *batch_gate, gate_bias, batch_gate); } @@ -73,24 +73,24 @@ class LSTMKernel : public framework::OpKernel { T* bias_data = const_cast(bias->data()); // the code style in LstmMetaValue will be updated later. 
- lstm_value.checkIg = bias_data + 4 * frame_size; - lstm_value.checkFg = lstm_value.checkIg + frame_size; - lstm_value.checkOg = lstm_value.checkFg + frame_size; + lstm_value.check_ig = bias_data + 4 * frame_size; + lstm_value.check_fg = lstm_value.check_ig + frame_size; + lstm_value.check_og = lstm_value.check_fg + frame_size; } else { - lstm_value.checkIg = nullptr; - lstm_value.checkFg = nullptr; - lstm_value.checkOg = nullptr; + lstm_value.check_ig = nullptr; + lstm_value.check_fg = nullptr; + lstm_value.check_og = nullptr; } - lstm_value.prevStateValue = nullptr; + lstm_value.prev_state_value = nullptr; Tensor ordered_c0; const size_t* order = batch_gate->lod()[2].data(); if (cell_t0) { // Since the batch computing for LSTM reorders the input sequence // according to their length. The initialized cell state also needs // to reorder. - ReorderInitState(device_ctx, *cell_t0, order, &ordered_c0, - true); - lstm_value.prevStateValue = ordered_c0.data(); + ReorderInitState(device_ctx, *cell_t0, order, + &ordered_c0, true); + lstm_value.prev_state_value = ordered_c0.data(); } // Use the local variable as here. @@ -121,9 +121,9 @@ class LSTMKernel : public framework::OpKernel { int pre_h_start = static_cast(batch_starts[n - 1]); int pre_h_end = pre_h_start + cur_batch_size; auto pre_hidden_t = batch_hidden.Slice(pre_h_start, pre_h_end); - math::matmul(device_ctx, pre_hidden_t, false, *weight, false, - static_cast(1.0), &gate_t, - static_cast(1.0)); + math::matmul(device_ctx, pre_hidden_t, false, *weight, + false, static_cast(1.0), &gate_t, + static_cast(1.0)); } else if (hidden_t0) { // If n == 0 and there is no initialized hidden state, that is to say // the H0 is zeros, the calculation W_h * H0 will be skiped. @@ -133,24 +133,24 @@ class LSTMKernel : public framework::OpKernel { // according to their length. The initialized hidden state also needs // to reorder. 
Tensor ordered_h0; - ReorderInitState(device_ctx, *hidden_t0, order, &ordered_h0, - true); - math::matmul(device_ctx, ordered_h0, false, *weight, false, - static_cast(1.0), &gate_t, - static_cast(1.0)); + ReorderInitState(device_ctx, *hidden_t0, order, + &ordered_h0, true); + math::matmul(device_ctx, ordered_h0, false, *weight, + false, static_cast(1.0), &gate_t, + static_cast(1.0)); } - lstm_value.gateValue = gate_t.data(); - lstm_value.outputValue = out_t.data(); - lstm_value.stateValue = cell_t.data(); - lstm_value.stateActiveValue = cell_pre_act_t.data(); - math::LstmUnitFunctor::compute(device_ctx, lstm_value, - frame_size, cur_batch_size, - gate_act, cell_act, cand_act); - lstm_value.prevStateValue = lstm_value.stateValue; + lstm_value.gate_value = gate_t.data(); + lstm_value.output_value = out_t.data(); + lstm_value.state_value = cell_t.data(); + lstm_value.state_active_value = cell_pre_act_t.data(); + math::LstmUnitFunctor::compute( + device_ctx, lstm_value, frame_size, cur_batch_size, gate_act, + cell_act, cand_act); + lstm_value.prev_state_value = lstm_value.state_value; } - math::Batch2LoDTensorFunctor to_seq; + math::Batch2LoDTensorFunctor to_seq; batch_hidden.set_lod(batch_gate->lod()); // restore the output hidden in LoDTensor from the batch hidden to_seq(device_ctx, batch_hidden, *hidden_out); @@ -161,7 +161,7 @@ class LSTMKernel : public framework::OpKernel { } }; -template +template class LSTMGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -187,8 +187,8 @@ class LSTMGradKernel : public framework::OpKernel { auto* h0_g = ctx.Output(framework::GradVarName("H0")); auto* c0_g = ctx.Output(framework::GradVarName("C0")); - auto& device_ctx = ctx.device_context(); - math::SetConstant zero; + auto& device_ctx = ctx.template device_context(); + math::SetConstant zero; if (weight_g) { weight_g->mutable_data(ctx.GetPlace()); zero(device_ctx, weight_g, static_cast(0.0)); @@ -200,7 +200,8 @@ class LSTMGradKernel : public framework::OpKernel { Tensor ordered_h0, ordered_c0, ordered_h0_g, ordered_c0_g; const size_t* order = batch_gate->lod()[2].data(); if (c0) { - ReorderInitState(device_ctx, *c0, order, &ordered_c0, true); + ReorderInitState(device_ctx, *c0, order, &ordered_c0, + true); } if (c0 && c0_g) { ordered_c0_g.mutable_data(c0_g->dims(), ctx.GetPlace()); @@ -214,13 +215,13 @@ class LSTMGradKernel : public framework::OpKernel { math::LstmMetaValue lstm_value; if (bias && ctx.Attr("use_peepholes")) { T* bias_data = const_cast(bias->data()); - lstm_value.checkIg = bias_data + 4 * frame_size; - lstm_value.checkFg = lstm_value.checkIg + frame_size; - lstm_value.checkOg = lstm_value.checkFg + frame_size; + lstm_value.check_ig = bias_data + 4 * frame_size; + lstm_value.check_fg = lstm_value.check_ig + frame_size; + lstm_value.check_og = lstm_value.check_fg + frame_size; } else { - lstm_value.checkIg = nullptr; - lstm_value.checkFg = nullptr; - lstm_value.checkOg = nullptr; + lstm_value.check_ig = nullptr; + lstm_value.check_fg = nullptr; + lstm_value.check_og = nullptr; } math::LstmMetaGrad lstm_grad; @@ -231,19 +232,19 @@ class LSTMGradKernel : public framework::OpKernel { } if (bias && bias_g && ctx.Attr("use_peepholes")) { T* bias_g_data = bias_g->data(); - lstm_grad.checkIgGrad = bias_g_data + 4 * frame_size; - lstm_grad.checkFgGrad = lstm_grad.checkIgGrad + frame_size; - lstm_grad.checkOgGrad = lstm_grad.checkFgGrad + frame_size; + lstm_grad.check_ig_grad = bias_g_data + 4 * frame_size; + 
lstm_grad.check_fg_grad = lstm_grad.check_ig_grad + frame_size; + lstm_grad.check_og_grad = lstm_grad.check_fg_grad + frame_size; } else { - lstm_grad.checkIgGrad = nullptr; - lstm_grad.checkFgGrad = nullptr; - lstm_grad.checkOgGrad = nullptr; + lstm_grad.check_ig_grad = nullptr; + lstm_grad.check_fg_grad = nullptr; + lstm_grad.check_og_grad = nullptr; } - math::LoDTensor2BatchFunctor to_batch; + math::LoDTensor2BatchFunctor to_batch; auto ToBatch = [&batch_gate, &to_batch]( - const platform::DeviceContext& ctx, const framework::LoDTensor& src, + const DeviceContext& ctx, const framework::LoDTensor& src, const framework::DDim& dims, framework::LoDTensor& dst) { dst.mutable_data(dims, ctx.GetPlace()); dst.set_lod(batch_gate->lod()); @@ -276,30 +277,30 @@ class LSTMGradKernel : public framework::OpKernel { Tensor gate = batch_gate->Slice(bstart, bend); Tensor cell = batch_cell.Slice(bstart, bend); Tensor cell_pre_act = batch_cell_pre_act->Slice(bstart, bend); - lstm_value.gateValue = gate.data(); - lstm_value.stateValue = cell.data(); - lstm_value.stateActiveValue = cell_pre_act.data(); + lstm_value.gate_value = gate.data(); + lstm_value.state_value = cell.data(); + lstm_value.state_active_value = cell_pre_act.data(); Tensor out_g = batch_hidden_g.Slice(bstart, bend); Tensor gate_g = batch_gate_g.Slice(bstart, bend); Tensor cell_g = batch_cell_g.Slice(bstart, bend); - lstm_grad.stateGrad = cell_g.data(); - lstm_grad.gateGrad = gate_g.data(); - lstm_grad.outputGrad = out_g.data(); + lstm_grad.state_grad = cell_g.data(); + lstm_grad.gate_grad = gate_g.data(); + lstm_grad.output_grad = out_g.data(); if (n > 0) { int bstart_pre = static_cast(batch_starts[n - 1]); Tensor cell_pre = batch_cell.Slice(bstart_pre, bstart); Tensor cell_pre_g = batch_cell_g.Slice(bstart_pre, bstart); - lstm_value.prevStateValue = cell_pre.data(); - lstm_grad.prevStateGrad = cell_pre_g.data(); + lstm_value.prev_state_value = cell_pre.data(); + lstm_grad.prev_state_grad = cell_pre_g.data(); } else { - lstm_value.prevStateValue = c0 ? ordered_c0.data() : nullptr; - lstm_grad.prevStateGrad = c0_g ? ordered_c0_g.data() : nullptr; + lstm_value.prev_state_value = c0 ? ordered_c0.data() : nullptr; + lstm_grad.prev_state_grad = c0_g ? 
ordered_c0_g.data() : nullptr; } int cur_batch_size = bend - bstart; - math::LstmUnitGradFunctor::compute( + math::LstmUnitGradFunctor::compute( device_ctx, lstm_value, lstm_grad, frame_size, cur_batch_size, gate_act, cell_act, cand_act); @@ -307,33 +308,34 @@ class LSTMGradKernel : public framework::OpKernel { int pre_h_start = static_cast(batch_starts[n - 1]); int pre_h_end = pre_h_start + cur_batch_size; auto pre_hidden_g = batch_hidden_g.Slice(pre_h_start, pre_h_end); - math::matmul(device_ctx, gate_g, false, *weight, true, - static_cast(1.0), &pre_hidden_g, - static_cast(1.0)); + math::matmul(device_ctx, gate_g, false, *weight, true, + static_cast(1.0), &pre_hidden_g, + static_cast(1.0)); if (weight_g) { /* backward weight */ auto pre_hidden = batch_hidden.Slice(pre_h_start, pre_h_end); - math::matmul(device_ctx, pre_hidden, true, gate_g, false, - static_cast(1.0), weight_g, - static_cast(1.0)); + math::matmul(device_ctx, pre_hidden, true, gate_g, + false, static_cast(1.0), weight_g, + static_cast(1.0)); } } else { if (h0 && weight_g) { - ReorderInitState(device_ctx, *h0, order, &ordered_h0, true); - math::matmul(device_ctx, ordered_h0, true, gate_g, false, - static_cast(1.0), weight_g, - static_cast(1.0)); + ReorderInitState(device_ctx, *h0, order, + &ordered_h0, true); + math::matmul(device_ctx, ordered_h0, true, gate_g, + false, static_cast(1.0), weight_g, + static_cast(1.0)); } if (h0 && h0_g) { ordered_h0_g.mutable_data(h0_g->dims(), ctx.GetPlace()); - math::matmul(device_ctx, gate_g, false, *weight, true, - static_cast(1.0), &ordered_h0_g, - static_cast(0.0)); + math::matmul(device_ctx, gate_g, false, *weight, + true, static_cast(1.0), + &ordered_h0_g, static_cast(0.0)); } } } - math::Batch2LoDTensorFunctor to_seq; + math::Batch2LoDTensorFunctor to_seq; if (in_g) { /* backward data */ in_g->mutable_data(ctx.GetPlace()); @@ -344,15 +346,17 @@ class LSTMGradKernel : public framework::OpKernel { Tensor b_g = *bias_g; b_g.Resize({bias_g->numel(), 1}); Tensor gate_bias_g = b_g.Slice(0, 4 * frame_size); - math::ColwiseSum col_sum; + math::ColwiseSum col_sum; col_sum(device_ctx, batch_gate_g, &gate_bias_g); } if (h0 && h0_g) { - ReorderInitState(device_ctx, ordered_h0_g, order, h0_g, false); + ReorderInitState(device_ctx, ordered_h0_g, order, h0_g, + false); } if (c0 && c0_g) { - ReorderInitState(device_ctx, ordered_c0_g, order, c0_g, false); + ReorderInitState(device_ctx, ordered_c0_g, order, c0_g, + false); } } }; diff --git a/paddle/operators/lstm_unit_op.cu b/paddle/operators/lstm_unit_op.cu index e192283aa0afac49e8e467506f3703d1ce60d2a6..291f2c295e78288c01c6575df936ceedceba7ce8 100644 --- a/paddle/operators/lstm_unit_op.cu +++ b/paddle/operators/lstm_unit_op.cu @@ -173,7 +173,7 @@ class LstmUnitGradOpCUDAKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(lstm_unit, ops::LstmUnitOpCUDAKernel, - ops::LstmUnitOpCUDAKernel); -REGISTER_OP_GPU_KERNEL(lstm_unit_grad, ops::LstmUnitGradOpCUDAKernel, - ops::LstmUnitGradOpCUDAKernel); +REGISTER_OP_CUDA_KERNEL(lstm_unit, ops::LstmUnitOpCUDAKernel, + ops::LstmUnitOpCUDAKernel); +REGISTER_OP_CUDA_KERNEL(lstm_unit_grad, ops::LstmUnitGradOpCUDAKernel, + ops::LstmUnitGradOpCUDAKernel); diff --git a/paddle/operators/lstm_unit_op.h b/paddle/operators/lstm_unit_op.h index 38cb298f92a21bb5c7508761fec701d28279a85f..61705675d930369ea8d491229caa1b4046f3e16a 100644 --- a/paddle/operators/lstm_unit_op.h +++ b/paddle/operators/lstm_unit_op.h @@ -35,7 +35,7 @@ inline T tanh(T x) { return 2. 
* sigmoid(2. * x) - 1.; } -template +template class LstmUnitKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -78,7 +78,7 @@ class LstmUnitKernel : public framework::OpKernel { } }; -template +template class LstmUnitGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { diff --git a/paddle/operators/margin_rank_loss_op.cc b/paddle/operators/margin_rank_loss_op.cc index d7e8a0ea7632650203106b01531d724cf0b8e085..42e8961c0ea57650a823ee4b58516f66a455b385 100644 --- a/paddle/operators/margin_rank_loss_op.cc +++ b/paddle/operators/margin_rank_loss_op.cc @@ -117,7 +117,7 @@ REGISTER_OP(margin_rank_loss, ops::MarginRankLossOp, ops::MarginRankLossGradOp); REGISTER_OP_CPU_KERNEL( margin_rank_loss, - ops::MarginRankLossKernel); + ops::MarginRankLossKernel); REGISTER_OP_CPU_KERNEL( margin_rank_loss_grad, - ops::MarginRankLossGradKernel); + ops::MarginRankLossGradKernel); diff --git a/paddle/operators/margin_rank_loss_op.cu b/paddle/operators/margin_rank_loss_op.cu index 3a639f25d478a712c1030d57c57d7e55de1488b5..1c2afccc5b32e22c939a275d8c69ad774d3ebdad 100644 --- a/paddle/operators/margin_rank_loss_op.cu +++ b/paddle/operators/margin_rank_loss_op.cu @@ -16,9 +16,9 @@ namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( margin_rank_loss, - ops::MarginRankLossKernel); -REGISTER_OP_GPU_KERNEL( + ops::MarginRankLossKernel); +REGISTER_OP_CUDA_KERNEL( margin_rank_loss_grad, - ops::MarginRankLossGradKernel); + ops::MarginRankLossGradKernel); diff --git a/paddle/operators/margin_rank_loss_op.h b/paddle/operators/margin_rank_loss_op.h index 8d0830147ecc465909e8988e90125929829f6f34..9c1f96cac13f1bdb8c5dfd3e771157d1d1c60e15 100644 --- a/paddle/operators/margin_rank_loss_op.h +++ b/paddle/operators/margin_rank_loss_op.h @@ -34,7 +34,7 @@ struct Heaviside { } }; -template +template class MarginRankLossKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { @@ -56,13 +56,13 @@ class MarginRankLossKernel : public framework::OpKernel { auto x1 = framework::EigenVector::Flatten(*x1_t); auto x2 = framework::EigenVector::Flatten(*x2_t); - auto& dev = ctx.GetEigenDevice(); + auto& dev = *ctx.template device_context().eigen_device(); out.device(dev) = (-label * (x1 - x2) + margin).unaryExpr(ReLU()); act.device(dev) = out.unaryExpr(Heaviside()); } }; -template +template class MarginRankLossGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { @@ -78,7 +78,7 @@ class MarginRankLossGradKernel : public framework::OpKernel { auto d_out = framework::EigenVector::Flatten(*d_out_t); auto act = framework::EigenVector::Flatten(*act_t); auto label = framework::EigenVector::Flatten(*label_t); - auto& dev = ctx.GetEigenDevice(); + auto& dev = *ctx.template device_context().eigen_device(); // compute d_x1 if (d_x1_t) { diff --git a/paddle/operators/math/CMakeLists.txt b/paddle/operators/math/CMakeLists.txt index 395512133dd23f32c1020fadb5bf88ea72b13f10..6467d8ddb33cfa5a52de7b2798eaf03e2c2f92f2 100644 --- a/paddle/operators/math/CMakeLists.txt +++ b/paddle/operators/math/CMakeLists.txt @@ -13,8 +13,9 @@ if(WITH_GPU) nv_library(context_project SRCS context_project.cc context_project.cu DEPS device_context math_function) nv_library(sequence2batch SRCS sequence2batch.cc sequence2batch.cu DEPS device_context) nv_library(lstm_compute SRCS lstm_compute.cc lstm_compute.cu 
DEPS device_context activation_functions) - nv_library(gru_compute SRCS gru_compute.cc gru_compute.cu DEPS device_context activation_functions math_function) nv_library(maxouting SRCS maxouting.cc maxouting.cu DEPS device_context) + nv_library(unpooling SRCS unpooling.cc unpooling.cu DEPS device_context) + nv_library(gru_compute SRCS gru_compute.cc gru_compute.cu DEPS device_context activation_functions math_function) else() cc_library(math_function SRCS math_function.cc im2col.cc DEPS cblas device_context framework_proto) cc_library(selected_rows_functor SRCS selected_rows_functor.cc DEPS selected_rows math_function) @@ -26,9 +27,10 @@ else() cc_library(context_project SRCS context_project.cc DEPS device_context math_function) cc_library(sequence2batch SRCS sequence2batch.cc DEPS device_context) cc_library(lstm_compute SRCS lstm_compute.cc DEPS device_context activation_functions) - cc_library(gru_compute SRCS gru_compute.cc DEPS device_context activation_functions math_function) cc_library(matrix_bit_code SRCS matrix_bit_code.cc) cc_library(maxouting SRCS maxouting.cc DEPS device_context) + cc_library(unpooling SRCS unpooling.cc DEPS device_context) + cc_library(gru_compute SRCS gru_compute.cc DEPS device_context activation_functions math_function) endif() cc_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) diff --git a/paddle/operators/math/context_project.cc b/paddle/operators/math/context_project.cc index f82ea5d7bee81fd1578c46f79477bb23939e627a..980dd90df8710cdbcb760e1ca84f1492a76fdb70 100644 --- a/paddle/operators/math/context_project.cc +++ b/paddle/operators/math/context_project.cc @@ -18,8 +18,8 @@ namespace paddle { namespace operators { namespace math { -template class ContextProjectFunctor; -template class ContextProjectFunctor; +template class ContextProjectFunctor; +template class ContextProjectFunctor; } // namespace math } // namespace operators diff --git a/paddle/operators/math/context_project.cu b/paddle/operators/math/context_project.cu index 04eeed543cb165fe449d3578a951cf74b0422252..934e3df645916013b4d1fe5eb4a19be924c914d5 100644 --- a/paddle/operators/math/context_project.cu +++ b/paddle/operators/math/context_project.cu @@ -20,8 +20,8 @@ namespace paddle { namespace operators { namespace math { -template class ContextProjectFunctor; -template class ContextProjectFunctor; +template class ContextProjectFunctor; +template class ContextProjectFunctor; } // namespace math } // namespace operators diff --git a/paddle/operators/math/context_project.h b/paddle/operators/math/context_project.h index d853507188cf8c80aede1e7646736036e30c9678..4036614086e1eb724a4a647db6ef13b6fe7aaaa0 100644 --- a/paddle/operators/math/context_project.h +++ b/paddle/operators/math/context_project.h @@ -81,17 +81,17 @@ using LoDTensor = framework::LoDTensor; * */ -template +template class ContextProjectFunctor { public: - void operator()(const platform::DeviceContext& context, const LoDTensor& in, + void operator()(const DeviceContext& context, const LoDTensor& in, const Tensor& padding_data, bool padding_trainable, const int context_start, const int context_length, const int context_stride, const int up_pad, const int down_pad, Tensor* col) { auto lod_level_0 = in.lod()[0]; - math::Im2ColFunctor im2col_ocf; + math::Im2ColFunctor im2col_ocf; std::vector dilation({1, 1}); std::vector padding({up_pad, 0, down_pad, 0}); @@ -188,17 +188,17 @@ class ContextProjectFunctor { } }; -template +template class ContextProjectGradFunctor { public: - void operator()(const 
platform::DeviceContext& context, const LoDTensor& in, + void operator()(const DeviceContext& context, const LoDTensor& in, bool padding_trainable, const int context_start, const int context_length, const int context_stride, const int up_pad, const int down_pad, bool pad_grad, bool input_grad, Tensor* padding_data, Tensor* col) { auto lod_level_0 = in.lod()[0]; - math::Col2ImFunctor col2im_ocf; + math::Col2ImFunctor col2im_ocf; std::vector dilation({1, 1}); std::vector padding({up_pad, 0, down_pad, 0}); @@ -258,8 +258,8 @@ class ContextProjectGradFunctor { Tensor out_t_sub = out_t.Slice(k * context_length, k * context_length + padding_size); Tensor w_sub = padding_data->Slice(k, k + padding_size); - axpy(context, w_sub.numel(), static_cast(1), - out_t_sub.data(), w_sub.data()); + axpy(context, w_sub.numel(), static_cast(1), + out_t_sub.data(), w_sub.data()); } } if (down_pad > 0) { @@ -290,8 +290,8 @@ class ContextProjectGradFunctor { (down_pad_begin_row + t) * context_length); Tensor w_sub = padding_data->Slice( up_pad + padding_idx, up_pad + padding_idx + padding_size); - axpy(context, w_sub.numel(), static_cast(1), - out_t_sub.data(), w_sub.data()); + axpy(context, w_sub.numel(), static_cast(1), + out_t_sub.data(), w_sub.data()); } } out_t.Resize({sequence_height, context_length * sequence_width}); diff --git a/paddle/operators/math/cross_entropy.cc b/paddle/operators/math/cross_entropy.cc index cf238a58e0a0b930077b0376a71dc02c5b31efe5..6011a196d446854877e162019f6745deb501ee9d 100644 --- a/paddle/operators/math/cross_entropy.cc +++ b/paddle/operators/math/cross_entropy.cc @@ -24,9 +24,9 @@ template ; template -class CrossEntropyFunctor { +class CrossEntropyFunctor { public: - void operator()(const platform::DeviceContext& ctx, framework::Tensor* out, + void operator()(const platform::CPUDeviceContext& ctx, framework::Tensor* out, const framework::Tensor* prob, const framework::Tensor* labels, const bool softLabel) { const int batch_size = prob->dims()[0]; @@ -35,7 +35,7 @@ class CrossEntropyFunctor { auto lbl = EigenMatrix::From(*labels); auto loss = EigenMatrix::From(*out); - loss.device(*ctx.GetEigenDevice()) = + loss.device(*ctx.eigen_device()) = -((lbl * in.log().unaryExpr(math::TolerableValue())) .sum(Eigen::DSizes(1)) .reshape(Eigen::DSizes(batch_size, 1))); @@ -53,8 +53,8 @@ class CrossEntropyFunctor { } }; -template class CrossEntropyFunctor; -template class CrossEntropyFunctor; +template class CrossEntropyFunctor; +template class CrossEntropyFunctor; } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/operators/math/cross_entropy.cu b/paddle/operators/math/cross_entropy.cu index 651c08f740c2991b11c210c9bf012e505adc1835..2132d49c937a85afeed0e0cee0a74a7e30c6a3ca 100644 --- a/paddle/operators/math/cross_entropy.cu +++ b/paddle/operators/math/cross_entropy.cu @@ -95,10 +95,10 @@ __global__ void SoftCrossEntropyKernel(T* Y, const T* X, const T* label, using Tensor = framework::Tensor; template -class CrossEntropyFunctor { +class CrossEntropyFunctor { public: - void operator()(const platform::DeviceContext& ctx, framework::Tensor* out, - const framework::Tensor* prob, + void operator()(const platform::CUDADeviceContext& ctx, + framework::Tensor* out, const framework::Tensor* prob, const framework::Tensor* labels, bool softLabel) { const T* prob_data = prob->data(); T* loss_data = out->mutable_data(ctx.GetPlace()); @@ -118,16 +118,14 @@ class CrossEntropyFunctor { const int64_t* label_data = labels->data(); int block = 512; int grid = (batch_size + 
block - 1) / block; - CrossEntropyKernel<<< - grid, block, 0, - reinterpret_cast(ctx).stream()>>>( + CrossEntropyKernel<<>>( loss_data, prob_data, label_data, batch_size, class_num); } } }; -template class CrossEntropyFunctor; -template class CrossEntropyFunctor; +template class CrossEntropyFunctor; +template class CrossEntropyFunctor; } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/operators/math/cross_entropy.h b/paddle/operators/math/cross_entropy.h index 70ed9ddd551bb8cb7989727c02fea870186c9f2e..677adb5adaf4041fe7acfd29be354073535fd5fc 100644 --- a/paddle/operators/math/cross_entropy.h +++ b/paddle/operators/math/cross_entropy.h @@ -33,11 +33,11 @@ struct TolerableValue { } }; -template +template class CrossEntropyFunctor { public: - void operator()(const platform::DeviceContext& context, - framework::Tensor* out, const framework::Tensor* prob, + void operator()(const DeviceContext& context, framework::Tensor* out, + const framework::Tensor* prob, const framework::Tensor* labels, const bool softLabel); }; } // namespace math diff --git a/paddle/operators/math/detail/gru_cpu_kernel.h b/paddle/operators/math/detail/gru_cpu_kernel.h index 51af140cf4d5e6581765bea00033fa53d383230d..4c67dec9cbeb48f400f79f5ed7ba3c939fa2540c 100644 --- a/paddle/operators/math/detail/gru_cpu_kernel.h +++ b/paddle/operators/math/detail/gru_cpu_kernel.h @@ -25,393 +25,397 @@ namespace detail { #ifndef __NVCC__ template -void hl_naive_gru_forward_reset_output(OpResetOutput opResetOutput, - T *gateValue, T *resetOutputValue, - T *prevOutputValue, int frameSize, +void hl_naive_gru_forward_reset_output(OpResetOutput op_reset_output, + T *gate_value, T *reset_output_value, + T *prev_output_value, int frame_size, activation_mode_t active_gate) { - T rValueUpdateGate; - T rValueResetGate; - T rValueResetOutput; - T rPrevOut = 0; - T *updateGate = gateValue; - T *resetGate = gateValue + frameSize; - - for (int i = 0; i < frameSize; i++) { - rValueUpdateGate = updateGate[i]; - rValueResetGate = resetGate[i]; - if (prevOutputValue) { - rPrevOut = prevOutputValue[i]; + T r_value_update_gate; + T r_value_reset_gate; + T r_value_reset_output; + T r_prev_out = 0; + T *update_gate = gate_value; + T *reset_gate = gate_value + frame_size; + + for (int i = 0; i < frame_size; i++) { + r_value_update_gate = update_gate[i]; + r_value_reset_gate = reset_gate[i]; + if (prev_output_value) { + r_prev_out = prev_output_value[i]; } - opResetOutput(rValueUpdateGate, rValueResetGate, rPrevOut, - rValueResetOutput, active_gate); + op_reset_output(r_value_update_gate, r_value_reset_gate, r_prev_out, + r_value_reset_output, active_gate); - updateGate[i] = rValueUpdateGate; - resetGate[i] = rValueResetGate; - resetOutputValue[i] = rValueResetOutput; + update_gate[i] = r_value_update_gate; + reset_gate[i] = r_value_reset_gate; + reset_output_value[i] = r_value_reset_output; } } template -void hl_naive_gru_forward_final_output(OpFinalOutput opFinalOutput, - T *gateValue, T *prevOutputValue, - T *outputValue, int frameSize, +void hl_naive_gru_forward_final_output(OpFinalOutput op_final_output, + T *gate_value, T *prev_output_value, + T *output_value, int frame_size, activation_mode_t active_node) { - T rValueUpdateGate; - T rValueFrameState; - T rPrevOut = 0; - T rOutput; - T *updateGate = gateValue; - T *frameState = gateValue + frameSize * 2; - - for (int i = 0; i < frameSize; i++) { - rValueUpdateGate = updateGate[i]; - rValueFrameState = frameState[i]; - if (prevOutputValue) { - rPrevOut = 
prevOutputValue[i]; + T r_value_update_gate; + T r_value_frame_state; + T r_prev_out = 0; + T r_output; + T *update_gate = gate_value; + T *frame_state = gate_value + frame_size * 2; + + for (int i = 0; i < frame_size; i++) { + r_value_update_gate = update_gate[i]; + r_value_frame_state = frame_state[i]; + if (prev_output_value) { + r_prev_out = prev_output_value[i]; } - opFinalOutput(rValueUpdateGate, rValueFrameState, rPrevOut, rOutput, - active_node); + op_final_output(r_value_update_gate, r_value_frame_state, r_prev_out, + r_output, active_node); - frameState[i] = rValueFrameState; - outputValue[i] = rOutput; + frame_state[i] = r_value_frame_state; + output_value[i] = r_output; } } template -void hl_avx_gru_forward_reset_output(OpResetOutput opResetOutput, T *gateValue, - T *resetOutputValue, T *prevOutputValue, - int frameSize, +void hl_avx_gru_forward_reset_output(OpResetOutput op_reset_output, + T *gate_value, T *reset_output_value, + T *prev_output_value, int frame_size, activation_mode_t active_gate) { #ifdef __AVX__ - __m256 rValueUpdateGate; - __m256 rValueResetGate; - __m256 rValueResetOutput; - __m256 rPrevOut = _mm256_set1_ps(0.0f); - __m256 *updateGate = (__m256 *)gateValue; - __m256 *resetGate = (__m256 *)(gateValue + frameSize); - - for (int i = 0; i < frameSize / 8; i++) { - rValueUpdateGate = updateGate[i]; - rValueResetGate = resetGate[i]; - if (prevOutputValue) { - rPrevOut = ((__m256 *)prevOutputValue)[i]; + __m256 r_value_update_gate; + __m256 r_value_reset_gate; + __m256 r_value_reset_output; + __m256 r_prev_out = _mm256_set1_ps(0.0f); + __m256 *update_gate = (__m256 *)gate_value; + __m256 *reset_gate = (__m256 *)(gate_value + frame_size); + + for (int i = 0; i < frame_size / 8; i++) { + r_value_update_gate = update_gate[i]; + r_value_reset_gate = reset_gate[i]; + if (prev_output_value) { + r_prev_out = ((__m256 *)prev_output_value)[i]; } - opResetOutput(rValueUpdateGate, rValueResetGate, rPrevOut, - rValueResetOutput, active_gate); + op_reset_output(r_value_update_gate, r_value_reset_gate, r_prev_out, + r_value_reset_output, active_gate); - updateGate[i] = rValueUpdateGate; - resetGate[i] = rValueResetGate; - ((__m256 *)resetOutputValue)[i] = rValueResetOutput; + update_gate[i] = r_value_update_gate; + reset_gate[i] = r_value_reset_gate; + ((__m256 *)reset_output_value)[i] = r_value_reset_output; } #endif } template -void hl_avx_gru_forward_final_output(OpFinalOutput opFinalOutput, T *gateValue, - T *prevOutputValue, T *outputValue, - int frameSize, +void hl_avx_gru_forward_final_output(OpFinalOutput op_final_output, + T *gate_value, T *prev_output_value, + T *output_value, int frame_size, activation_mode_t active_node) { #ifdef __AVX__ - __m256 rValueUpdateGate; - __m256 rValueFrameState; - __m256 rPrevOut = _mm256_set1_ps(0.0f); - __m256 rOutput; - __m256 *updateGate = (__m256 *)gateValue; - __m256 *frameState = (__m256 *)(gateValue + frameSize * 2); - - for (int i = 0; i < frameSize / 8; i++) { - rValueUpdateGate = updateGate[i]; - rValueFrameState = frameState[i]; - if (prevOutputValue) { - rPrevOut = ((__m256 *)prevOutputValue)[i]; + __m256 r_value_update_gate; + __m256 r_value_frame_state; + __m256 r_prev_out = _mm256_set1_ps(0.0f); + __m256 r_output; + __m256 *update_gate = (__m256 *)gate_value; + __m256 *frame_state = (__m256 *)(gate_value + frame_size * 2); + + for (int i = 0; i < frame_size / 8; i++) { + r_value_update_gate = update_gate[i]; + r_value_frame_state = frame_state[i]; + if (prev_output_value) { + r_prev_out = ((__m256 *)prev_output_value)[i]; 
} - opFinalOutput(rValueUpdateGate, rValueFrameState, rPrevOut, rOutput, - active_node); + op_final_output(r_value_update_gate, r_value_frame_state, r_prev_out, + r_output, active_node); - frameState[i] = rValueFrameState; - ((__m256 *)outputValue)[i] = rOutput; + frame_state[i] = r_value_frame_state; + ((__m256 *)output_value)[i] = r_output; } #endif } template -inline void forward_reset_output(OpResetOutput opResetOutput, - hl_gru_value value, int frameSize, - int batchSize, activation_mode_t active_gate) { - for (int b = 0; b < batchSize; b++) { - if (OpResetOutput::avx && !(frameSize & (8 - 1)) && (sizeof(T) == 4)) { +inline void forward_reset_output(OpResetOutput op_reset_output, + hl_gru_value value, int frame_size, + int batch_size, + activation_mode_t active_gate) { + for (int b = 0; b < batch_size; b++) { + if (OpResetOutput::avx && !(frame_size & (8 - 1)) && (sizeof(T) == 4)) { hl_avx_gru_forward_reset_output( - opResetOutput, value.gateValue, value.resetOutputValue, - value.prevOutValue, frameSize, active_gate); + op_reset_output, value.gate_value, value.reset_output_value, + value.prev_out_value, frame_size, active_gate); } else { hl_naive_gru_forward_reset_output( - opResetOutput, value.gateValue, value.resetOutputValue, - value.prevOutValue, frameSize, active_gate); + op_reset_output, value.gate_value, value.reset_output_value, + value.prev_out_value, frame_size, active_gate); } - value.gateValue += frameSize * 3; - value.resetOutputValue += frameSize; - if (value.prevOutValue) { - value.prevOutValue += frameSize; + value.gate_value += frame_size * 3; + value.reset_output_value += frame_size; + if (value.prev_out_value) { + value.prev_out_value += frame_size; } } } template -inline void forward_final_output(OpFinalOutput opFinalOutput, - hl_gru_value value, int frameSize, - int batchSize, activation_mode_t active_node) { - for (int b = 0; b < batchSize; b++) { - if (OpFinalOutput::avx && !(frameSize & (8 - 1)) && (sizeof(T) == 4)) { - hl_avx_gru_forward_final_output(opFinalOutput, value.gateValue, - value.prevOutValue, value.outputValue, - frameSize, active_node); +inline void forward_final_output(OpFinalOutput op_final_output, + hl_gru_value value, int frame_size, + int batch_size, + activation_mode_t active_node) { + for (int b = 0; b < batch_size; b++) { + if (OpFinalOutput::avx && !(frame_size & (8 - 1)) && (sizeof(T) == 4)) { + hl_avx_gru_forward_final_output(op_final_output, value.gate_value, + value.prev_out_value, value.output_value, + frame_size, active_node); } else { - hl_naive_gru_forward_final_output(opFinalOutput, value.gateValue, - value.prevOutValue, value.outputValue, - frameSize, active_node); + hl_naive_gru_forward_final_output( + op_final_output, value.gate_value, value.prev_out_value, + value.output_value, frame_size, active_node); } - value.gateValue += frameSize * 3; - value.outputValue += frameSize; - if (value.prevOutValue) { - value.prevOutValue += frameSize; + value.gate_value += frame_size * 3; + value.output_value += frame_size; + if (value.prev_out_value) { + value.prev_out_value += frame_size; } } } template -void hl_naive_gru_backward_state_grad(OpStateGrad opStateGrad, T *gateValue, - T *gateGrad, T *prevOutValue, - T *prevOutGrad, T *outputGrad, - int frameSize, +void hl_naive_gru_backward_state_grad(OpStateGrad op_state_grad, T *gate_value, + T *gate_grad, T *prev_out_value, + T *prev_out_grad, T *output_grad, + int frame_size, activation_mode_t active_node) { - T rUpdateGateValue; - T rUpdateGateGrad; - T rFrameStateValue; - T 
rFrameStateGrad; - T rOutGrad; - T rPrevOutValue = 0; - T rPrevOutGrad = 0; - T *updateGateValue = gateValue; - T *updateGateGrad = gateGrad; - T *frameStateValue = gateValue + frameSize * 2; - T *frameStateGrad = gateGrad + frameSize * 2; - - for (int i = 0; i < frameSize; i++) { - rUpdateGateValue = updateGateValue[i]; - rFrameStateValue = frameStateValue[i]; - rOutGrad = outputGrad[i]; - if (prevOutValue) { - rPrevOutValue = prevOutValue[i]; + T r_update_gate_value; + T r_update_gate_grad; + T r_frame_state_value; + T r_frame_state_grad; + T r_out_grad; + T r_prev_out_value = 0; + T r_prev_out_grad = 0; + T *update_gate_value = gate_value; + T *update_gate_grad = gate_grad; + T *frame_state_value = gate_value + frame_size * 2; + T *frame_state_grad = gate_grad + frame_size * 2; + + for (int i = 0; i < frame_size; i++) { + r_update_gate_value = update_gate_value[i]; + r_frame_state_value = frame_state_value[i]; + r_out_grad = output_grad[i]; + if (prev_out_value) { + r_prev_out_value = prev_out_value[i]; } - if (prevOutGrad) { - rPrevOutGrad = prevOutGrad[i]; + if (prev_out_grad) { + r_prev_out_grad = prev_out_grad[i]; } - opStateGrad(rUpdateGateValue, rUpdateGateGrad, rFrameStateValue, - rFrameStateGrad, rPrevOutValue, rPrevOutGrad, rOutGrad, - active_node); + op_state_grad(r_update_gate_value, r_update_gate_grad, r_frame_state_value, + r_frame_state_grad, r_prev_out_value, r_prev_out_grad, + r_out_grad, active_node); - updateGateGrad[i] = rUpdateGateGrad; - frameStateGrad[i] = rFrameStateGrad; - if (prevOutGrad) { - prevOutGrad[i] = rPrevOutGrad; + update_gate_grad[i] = r_update_gate_grad; + frame_state_grad[i] = r_frame_state_grad; + if (prev_out_grad) { + prev_out_grad[i] = r_prev_out_grad; } } } template -void hl_naive_gru_backward_reset_grad(OpResetGrad opResetGrad, T *gateValue, - T *gateGrad, T *prevOutValue, - T *prevOutGrad, T *resetOutputGrad, - int frameSize, +void hl_naive_gru_backward_reset_grad(OpResetGrad op_reset_grad, T *gate_value, + T *gate_grad, T *prev_out_value, + T *prev_out_grad, T *reset_output_grad, + int frame_size, activation_mode_t active_gate) { - T rUpdateGateValue; - T rUpdateGateGrad; - T rResetGateValue; - T rResetGateGrad; - T rResetOutputGrad = 0; - T rPrevOutValue = 0; - T rPrevOutGrad = 0; - T *updateGateValue = gateValue; - T *updateGateGrad = gateGrad; - T *resetGateValue = gateValue + frameSize; - T *resetGateGrad = gateGrad + frameSize; - - for (int i = 0; i < frameSize; i++) { - rUpdateGateValue = updateGateValue[i]; - rUpdateGateGrad = updateGateGrad[i]; - rResetGateValue = resetGateValue[i]; - - if (prevOutValue && prevOutGrad) { - rResetOutputGrad = resetOutputGrad[i]; + T r_update_gate_value; + T r_update_gate_grad; + T r_reset_gate_value; + T r_reset_gate_grad; + T r_reset_output_grad = 0; + T r_prev_out_value = 0; + T r_prev_out_grad = 0; + T *update_gate_value = gate_value; + T *update_gate_grad = gate_grad; + T *reset_gate_value = gate_value + frame_size; + T *reset_gate_grad = gate_grad + frame_size; + + for (int i = 0; i < frame_size; i++) { + r_update_gate_value = update_gate_value[i]; + r_update_gate_grad = update_gate_grad[i]; + r_reset_gate_value = reset_gate_value[i]; + + if (prev_out_value && prev_out_grad) { + r_reset_output_grad = reset_output_grad[i]; } - if (prevOutValue) { - rPrevOutValue = prevOutValue[i]; + if (prev_out_value) { + r_prev_out_value = prev_out_value[i]; } - if (prevOutGrad) { - rPrevOutGrad = prevOutGrad[i]; + if (prev_out_grad) { + r_prev_out_grad = prev_out_grad[i]; } - opResetGrad(rUpdateGateValue, 
rUpdateGateGrad, rResetGateValue, - rResetGateGrad, rPrevOutValue, rPrevOutGrad, rResetOutputGrad, - active_gate); + op_reset_grad(r_update_gate_value, r_update_gate_grad, r_reset_gate_value, + r_reset_gate_grad, r_prev_out_value, r_prev_out_grad, + r_reset_output_grad, active_gate); - updateGateGrad[i] = rUpdateGateGrad; - resetGateGrad[i] = rResetGateGrad; - if (prevOutGrad) { - prevOutGrad[i] = rPrevOutGrad; + update_gate_grad[i] = r_update_gate_grad; + reset_gate_grad[i] = r_reset_gate_grad; + if (prev_out_grad) { + prev_out_grad[i] = r_prev_out_grad; } } } template -void hl_avx_gru_backward_state_grad(OpStateGrad opStateGrad, T *gateValue, - T *gateGrad, T *prevOutValue, - T *prevOutGrad, T *outputGrad, - int frameSize, +void hl_avx_gru_backward_state_grad(OpStateGrad op_state_grad, T *gate_value, + T *gate_grad, T *prev_out_value, + T *prev_out_grad, T *output_grad, + int frame_size, activation_mode_t active_node) { #ifdef __AVX__ - __m256 rUpdateGateValue; - __m256 rUpdateGateGrad; - __m256 rFrameStateValue; - __m256 rFrameStateGrad; - __m256 rOutGrad; - __m256 rPrevOutValue = _mm256_set1_ps(0.0f); - __m256 rPrevOutGrad = _mm256_set1_ps(0.0f); - __m256 *updateGateValue = (__m256 *)gateValue; - __m256 *updateGateGrad = (__m256 *)gateGrad; - __m256 *frameStateValue = (__m256 *)(gateValue + frameSize * 2); - __m256 *frameStateGrad = (__m256 *)(gateGrad + frameSize * 2); - - for (int i = 0; i < frameSize / 8; i++) { - rUpdateGateValue = updateGateValue[i]; - rFrameStateValue = frameStateValue[i]; - rOutGrad = ((__m256 *)outputGrad)[i]; - if (prevOutValue) { - rPrevOutValue = ((__m256 *)prevOutValue)[i]; + __m256 r_update_gate_value; + __m256 r_update_gate_grad; + __m256 r_frame_state_value; + __m256 r_frame_state_grad; + __m256 r_out_grad; + __m256 r_prev_out_value = _mm256_set1_ps(0.0f); + __m256 r_prev_out_grad = _mm256_set1_ps(0.0f); + __m256 *update_gate_value = (__m256 *)gate_value; + __m256 *update_gate_grad = (__m256 *)gate_grad; + __m256 *frame_state_value = (__m256 *)(gate_value + frame_size * 2); + __m256 *frame_state_grad = (__m256 *)(gate_grad + frame_size * 2); + + for (int i = 0; i < frame_size / 8; i++) { + r_update_gate_value = update_gate_value[i]; + r_frame_state_value = frame_state_value[i]; + r_out_grad = ((__m256 *)output_grad)[i]; + if (prev_out_value) { + r_prev_out_value = ((__m256 *)prev_out_value)[i]; } - if (prevOutGrad) { - rPrevOutGrad = ((__m256 *)prevOutGrad)[i]; + if (prev_out_grad) { + r_prev_out_grad = ((__m256 *)prev_out_grad)[i]; } - opStateGrad(rUpdateGateValue, rUpdateGateGrad, rFrameStateValue, - rFrameStateGrad, rPrevOutValue, rPrevOutGrad, rOutGrad, - active_node); + op_state_grad(r_update_gate_value, r_update_gate_grad, r_frame_state_value, + r_frame_state_grad, r_prev_out_value, r_prev_out_grad, + r_out_grad, active_node); - updateGateGrad[i] = rUpdateGateGrad; - frameStateGrad[i] = rFrameStateGrad; - if (prevOutGrad) { - ((__m256 *)prevOutGrad)[i] = rPrevOutGrad; + update_gate_grad[i] = r_update_gate_grad; + frame_state_grad[i] = r_frame_state_grad; + if (prev_out_grad) { + ((__m256 *)prev_out_grad)[i] = r_prev_out_grad; } } #endif } template -void hl_avx_gru_backward_reset_grad(OpResetGrad opResetGrad, T *gateValue, - T *gateGrad, T *prevOutValue, - T *prevOutGrad, T *resetOutputGrad, - int frameSize, +void hl_avx_gru_backward_reset_grad(OpResetGrad op_reset_grad, T *gate_value, + T *gate_grad, T *prev_out_value, + T *prev_out_grad, T *reset_output_grad, + int frame_size, activation_mode_t active_gate) { #ifdef __AVX__ - __m256 
rUpdateGateValue; - __m256 rUpdateGateGrad; - __m256 rResetGateValue; - __m256 rResetGateGrad; - __m256 rResetOutputGrad = _mm256_set1_ps(0.0f); - __m256 rPrevOutValue = _mm256_set1_ps(0.0f); - __m256 rPrevOutGrad = _mm256_set1_ps(0.0f); - __m256 *updateGateValue = (__m256 *)gateValue; - __m256 *updateGateGrad = (__m256 *)gateGrad; - __m256 *resetGateValue = (__m256 *)(gateValue + frameSize); - __m256 *resetGateGrad = (__m256 *)(gateGrad + frameSize); - - for (int i = 0; i < frameSize / 8; i++) { - rUpdateGateValue = updateGateValue[i]; - rUpdateGateGrad = updateGateGrad[i]; - rResetGateValue = resetGateValue[i]; - - if (prevOutValue && prevOutGrad) { - rResetOutputGrad = ((__m256 *)resetOutputGrad)[i]; + __m256 r_update_gate_value; + __m256 r_update_gate_grad; + __m256 r_reset_gate_value; + __m256 r_reset_gate_grad; + __m256 r_reset_output_grad = _mm256_set1_ps(0.0f); + __m256 r_prev_out_value = _mm256_set1_ps(0.0f); + __m256 r_prev_out_grad = _mm256_set1_ps(0.0f); + __m256 *update_gate_value = (__m256 *)gate_value; + __m256 *update_gate_grad = (__m256 *)gate_grad; + __m256 *reset_gate_value = (__m256 *)(gate_value + frame_size); + __m256 *reset_gate_grad = (__m256 *)(gate_grad + frame_size); + + for (int i = 0; i < frame_size / 8; i++) { + r_update_gate_value = update_gate_value[i]; + r_update_gate_grad = update_gate_grad[i]; + r_reset_gate_value = reset_gate_value[i]; + + if (prev_out_value && prev_out_grad) { + r_reset_output_grad = ((__m256 *)reset_output_grad)[i]; } - if (prevOutValue) { - rPrevOutValue = ((__m256 *)prevOutValue)[i]; + if (prev_out_value) { + r_prev_out_value = ((__m256 *)prev_out_value)[i]; } - if (prevOutGrad) { - rPrevOutGrad = ((__m256 *)prevOutGrad)[i]; + if (prev_out_grad) { + r_prev_out_grad = ((__m256 *)prev_out_grad)[i]; } - opResetGrad(rUpdateGateValue, rUpdateGateGrad, rResetGateValue, - rResetGateGrad, rPrevOutValue, rPrevOutGrad, rResetOutputGrad, - active_gate); + op_reset_grad(r_update_gate_value, r_update_gate_grad, r_reset_gate_value, + r_reset_gate_grad, r_prev_out_value, r_prev_out_grad, + r_reset_output_grad, active_gate); - updateGateGrad[i] = rUpdateGateGrad; - resetGateGrad[i] = rResetGateGrad; - if (prevOutGrad) { - ((__m256 *)prevOutGrad)[i] = rPrevOutGrad; + update_gate_grad[i] = r_update_gate_grad; + reset_gate_grad[i] = r_reset_gate_grad; + if (prev_out_grad) { + ((__m256 *)prev_out_grad)[i] = r_prev_out_grad; } } #endif } template -inline void backward_state_grad(OpStateGrad opStateGrad, hl_gru_value value, - hl_gru_grad grad, int frameSize, - int batchSize, activation_mode_t active_node) { - for (int b = 0; b < batchSize; b++) { - if (OpStateGrad::avx && !(frameSize & (8 - 1)) && (sizeof(T) == 4)) { +inline void backward_state_grad(OpStateGrad op_state_grad, + hl_gru_value value, hl_gru_grad grad, + int frame_size, int batch_size, + activation_mode_t active_node) { + for (int b = 0; b < batch_size; b++) { + if (OpStateGrad::avx && !(frame_size & (8 - 1)) && (sizeof(T) == 4)) { hl_avx_gru_backward_state_grad( - opStateGrad, value.gateValue, grad.gateGrad, value.prevOutValue, - grad.prevOutGrad, grad.outputGrad, frameSize, active_node); + op_state_grad, value.gate_value, grad.gate_grad, value.prev_out_value, + grad.prev_out_grad, grad.output_grad, frame_size, active_node); } else { hl_naive_gru_backward_state_grad( - opStateGrad, value.gateValue, grad.gateGrad, value.prevOutValue, - grad.prevOutGrad, grad.outputGrad, frameSize, active_node); + op_state_grad, value.gate_value, grad.gate_grad, value.prev_out_value, + grad.prev_out_grad, 
grad.output_grad, frame_size, active_node); } - value.gateValue += frameSize * 3; - if (value.prevOutValue) { - value.prevOutValue += frameSize; + value.gate_value += frame_size * 3; + if (value.prev_out_value) { + value.prev_out_value += frame_size; } - grad.gateGrad += frameSize * 3; - grad.outputGrad += frameSize; - if (grad.prevOutGrad) { - grad.prevOutGrad += frameSize; + grad.gate_grad += frame_size * 3; + grad.output_grad += frame_size; + if (grad.prev_out_grad) { + grad.prev_out_grad += frame_size; } } } template -inline void backward_reset_grad(OpResetGrad opResetGrad, hl_gru_value value, - hl_gru_grad grad, int frameSize, - int batchSize, activation_mode_t active_gate) { - for (int b = 0; b < batchSize; b++) { - if (OpResetGrad::avx && !(frameSize & (8 - 1)) && (sizeof(T) == 4)) { +inline void backward_reset_grad(OpResetGrad op_reset_grad, + hl_gru_value value, hl_gru_grad grad, + int frame_size, int batch_size, + activation_mode_t active_gate) { + for (int b = 0; b < batch_size; b++) { + if (OpResetGrad::avx && !(frame_size & (8 - 1)) && (sizeof(T) == 4)) { hl_avx_gru_backward_reset_grad( - opResetGrad, value.gateValue, grad.gateGrad, value.prevOutValue, - grad.prevOutGrad, grad.resetOutputGrad, frameSize, active_gate); + op_reset_grad, value.gate_value, grad.gate_grad, value.prev_out_value, + grad.prev_out_grad, grad.reset_output_grad, frame_size, active_gate); } else { hl_naive_gru_backward_reset_grad( - opResetGrad, value.gateValue, grad.gateGrad, value.prevOutValue, - grad.prevOutGrad, grad.resetOutputGrad, frameSize, active_gate); + op_reset_grad, value.gate_value, grad.gate_grad, value.prev_out_value, + grad.prev_out_grad, grad.reset_output_grad, frame_size, active_gate); } - value.gateValue += frameSize * 3; - if (value.prevOutValue) { - value.prevOutValue += frameSize; + value.gate_value += frame_size * 3; + if (value.prev_out_value) { + value.prev_out_value += frame_size; } - grad.gateGrad += frameSize * 3; - grad.resetOutputGrad += frameSize; - if (grad.prevOutGrad) { - grad.prevOutGrad += frameSize; + grad.gate_grad += frame_size * 3; + grad.reset_output_grad += frame_size; + if (grad.prev_out_grad) { + grad.prev_out_grad += frame_size; } } } diff --git a/paddle/operators/math/detail/gru_gpu_kernel.h b/paddle/operators/math/detail/gru_gpu_kernel.h index 6441c648b048422c110872a85aa8cb719f11a8d7..d2edcb7f258b387530799b967fc0fff61acc5b83 100644 --- a/paddle/operators/math/detail/gru_gpu_kernel.h +++ b/paddle/operators/math/detail/gru_gpu_kernel.h @@ -27,174 +27,174 @@ namespace math { namespace detail { /* - * threads(framePerBlock, batchPerBlock) - * grid(frameBlocks, batchBlocks) + * threads(frame_per_block, batch_per_block) + * grid(frame_blocks, batch_blocks) */ -template -__global__ void KeGruForwardResetOutput(OpResetOutput opResetOutput, - T *gateValue, T *resetOutputValue, - T *prevOutputValue, int frameSize, - int batchSize, +template +__global__ void KeGruForwardResetOutput(OpResetOutput op_reset_output, + T *gate_value, T *reset_output_value, + T *prev_output_value, int frame_size, + int batch_size, activation_mode_t active_gate) { - const int frameIdx = blockIdx.x * blockDim.x + threadIdx.x; - if (frameIdx >= frameSize) return; - - int batchIdx = 0; - if (isBatch) { - batchIdx = blockIdx.y * blockDim.y + threadIdx.y; - if (batchIdx >= batchSize) return; - gateValue += batchIdx * 3 * frameSize; - resetOutputValue += batchIdx * frameSize; + const int frame_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (frame_idx >= frame_size) return; + + int batch_idx = 0; 
+ if (is_batch) { + batch_idx = blockIdx.y * blockDim.y + threadIdx.y; + if (batch_idx >= batch_size) return; + gate_value += batch_idx * 3 * frame_size; + reset_output_value += batch_idx * frame_size; } - T rPrevOut = 0; - T rValueResetOutput; - T rValueUpdateGate = gateValue[frameIdx + frameSize * 0]; - T rValueResetGate = gateValue[frameIdx + frameSize * 1]; + T r_prev_out = 0; + T r_value_reset_output; + T r_value_update_gate = gate_value[frame_idx + frame_size * 0]; + T r_value_reset_gate = gate_value[frame_idx + frame_size * 1]; - if (prevOutputValue) { - if (isBatch) prevOutputValue += batchIdx * frameSize; - rPrevOut = prevOutputValue[frameIdx]; + if (prev_output_value) { + if (is_batch) prev_output_value += batch_idx * frame_size; + r_prev_out = prev_output_value[frame_idx]; } - opResetOutput(rValueUpdateGate, rValueResetGate, rPrevOut, rValueResetOutput, - active_gate); + op_reset_output(r_value_update_gate, r_value_reset_gate, r_prev_out, + r_value_reset_output, active_gate); - gateValue[frameIdx + frameSize * 0] = rValueUpdateGate; - gateValue[frameIdx + frameSize * 1] = rValueResetGate; - resetOutputValue[frameIdx] = rValueResetOutput; + gate_value[frame_idx + frame_size * 0] = r_value_update_gate; + gate_value[frame_idx + frame_size * 1] = r_value_reset_gate; + reset_output_value[frame_idx] = r_value_reset_output; } /* - * threads(framePerBlock, batchPerBlock) - * grid(frameBlocks, batchBlocks) + * threads(frame_per_block, batch_per_block) + * grid(frame_blocks, batch_blocks) */ -template -__global__ void KeGruForwardFinalOutput(OpFinalOutput opFinalOutput, - T *gateValue, T *prevOutputValue, - T *outputValue, int frameSize, - int batchSize, +template +__global__ void KeGruForwardFinalOutput(OpFinalOutput op_final_output, + T *gate_value, T *prev_output_value, + T *output_value, int frame_size, + int batch_size, activation_mode_t active_node) { - const int frameIdx = blockIdx.x * blockDim.x + threadIdx.x; - if (frameIdx >= frameSize) return; - int batchIdx = 0; - if (isBatch) { - batchIdx = blockIdx.y * blockDim.y + threadIdx.y; - if (batchIdx >= batchSize) return; - gateValue += batchIdx * 3 * frameSize; - outputValue += batchIdx * frameSize; + const int frame_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (frame_idx >= frame_size) return; + int batch_idx = 0; + if (is_batch) { + batch_idx = blockIdx.y * blockDim.y + threadIdx.y; + if (batch_idx >= batch_size) return; + gate_value += batch_idx * 3 * frame_size; + output_value += batch_idx * frame_size; } - T rOutput; - T rPrevOut = 0; - T rValueUpdateGate = gateValue[frameIdx + frameSize * 0]; - T rValueFrameState = gateValue[frameIdx + frameSize * 2]; + T r_output; + T r_prev_out = 0; + T r_value_update_gate = gate_value[frame_idx + frame_size * 0]; + T r_value_frame_state = gate_value[frame_idx + frame_size * 2]; - if (prevOutputValue) { - if (isBatch) prevOutputValue += batchIdx * frameSize; - rPrevOut = prevOutputValue[frameIdx]; + if (prev_output_value) { + if (is_batch) prev_output_value += batch_idx * frame_size; + r_prev_out = prev_output_value[frame_idx]; } - opFinalOutput(rValueUpdateGate, rValueFrameState, rPrevOut, rOutput, - active_node); + op_final_output(r_value_update_gate, r_value_frame_state, r_prev_out, + r_output, active_node); - gateValue[frameIdx + frameSize * 2] = rValueFrameState; - outputValue[frameIdx] = rOutput; + gate_value[frame_idx + frame_size * 2] = r_value_frame_state; + output_value[frame_idx] = r_output; } /* - * threads(framePerBlock, batchPerBlock) - * grid(frameBlocks, batchBlocks) + 
* threads(frame_per_block, batch_per_block) + * grid(frame_blocks, batch_blocks) */ -template -__global__ void KeGruBackwardStateGrad(OpStateGrad opStateGrad, T *gateValue, - T *gateGrad, T *prevOutValue, - T *prevOutGrad, T *outputGrad, - int frameSize, int batchSize, +template +__global__ void KeGruBackwardStateGrad(OpStateGrad op_state_grad, T *gate_value, + T *gate_grad, T *prev_out_value, + T *prev_out_grad, T *output_grad, + int frame_size, int batch_size, activation_mode_t active_node) { - const int frameIdx = blockIdx.x * blockDim.x + threadIdx.x; - if (frameIdx >= frameSize) return; - int batchIdx = 0; - if (isBatch) { - batchIdx = blockIdx.y * blockDim.y + threadIdx.y; - if (batchIdx >= batchSize) return; - gateValue += batchIdx * 3 * frameSize; - gateGrad += batchIdx * 3 * frameSize; - outputGrad += batchIdx * frameSize; + const int frame_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (frame_idx >= frame_size) return; + int batch_idx = 0; + if (is_batch) { + batch_idx = blockIdx.y * blockDim.y + threadIdx.y; + if (batch_idx >= batch_size) return; + gate_value += batch_idx * 3 * frame_size; + gate_grad += batch_idx * 3 * frame_size; + output_grad += batch_idx * frame_size; } - T rUpdateGateGrad; - T rFrameStateGrad; - T rPrevOutValue = 0; - T rPrevOutGrad = 0; - T rUpdateGateValue = gateValue[frameIdx + frameSize * 0]; - T rFrameStateValue = gateValue[frameIdx + frameSize * 2]; - T rOutGrad = outputGrad[frameIdx]; + T r_update_gate_grad; + T r_frame_state_grad; + T r_prev_out_value = 0; + T r_prev_out_grad = 0; + T r_update_gate_value = gate_value[frame_idx + frame_size * 0]; + T r_frame_state_value = gate_value[frame_idx + frame_size * 2]; + T r_out_grad = output_grad[frame_idx]; - if (prevOutValue && prevOutGrad) { - if (isBatch) prevOutValue += batchIdx * frameSize; - rPrevOutValue = prevOutValue[frameIdx]; + if (prev_out_value && prev_out_grad) { + if (is_batch) prev_out_value += batch_idx * frame_size; + r_prev_out_value = prev_out_value[frame_idx]; - if (isBatch) prevOutGrad += batchIdx * frameSize; - rPrevOutGrad = prevOutGrad[frameIdx]; + if (is_batch) prev_out_grad += batch_idx * frame_size; + r_prev_out_grad = prev_out_grad[frame_idx]; } - opStateGrad(rUpdateGateValue, rUpdateGateGrad, rFrameStateValue, - rFrameStateGrad, rPrevOutValue, rPrevOutGrad, rOutGrad, - active_node); + op_state_grad(r_update_gate_value, r_update_gate_grad, r_frame_state_value, + r_frame_state_grad, r_prev_out_value, r_prev_out_grad, + r_out_grad, active_node); - gateGrad[frameIdx + frameSize * 0] = rUpdateGateGrad; - gateGrad[frameIdx + frameSize * 2] = rFrameStateGrad; - if (prevOutGrad) { - prevOutGrad[frameIdx] = rPrevOutGrad; + gate_grad[frame_idx + frame_size * 0] = r_update_gate_grad; + gate_grad[frame_idx + frame_size * 2] = r_frame_state_grad; + if (prev_out_grad) { + prev_out_grad[frame_idx] = r_prev_out_grad; } } /* - * threads(framePerBlock, batchPerBlock) - * grid(frameBlocks, batchBlocks) + * threads(frame_per_block, batch_per_block) + * grid(frame_blocks, batch_blocks) */ -template -__global__ void KeGruBackwardResetGrad(OpResetGrad opResetGrad, T *gateValue, - T *gateGrad, T *prevOutValue, - T *prevOutGrad, T *resetOutputGrad, - int frameSize, int batchSize, +template +__global__ void KeGruBackwardResetGrad(OpResetGrad op_reset_grad, T *gate_value, + T *gate_grad, T *prev_out_value, + T *prev_out_grad, T *reset_output_grad, + int frame_size, int batch_size, activation_mode_t active_gate) { - const int frameIdx = blockIdx.x * blockDim.x + threadIdx.x; - if (frameIdx >= 
frameSize) return; - int batchIdx = 0; - if (isBatch) { - batchIdx = blockIdx.y * blockDim.y + threadIdx.y; - if (batchIdx >= batchSize) return; - gateValue += batchIdx * 3 * frameSize; - gateGrad += batchIdx * 3 * frameSize; - resetOutputGrad += batchIdx * frameSize; + const int frame_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (frame_idx >= frame_size) return; + int batch_idx = 0; + if (is_batch) { + batch_idx = blockIdx.y * blockDim.y + threadIdx.y; + if (batch_idx >= batch_size) return; + gate_value += batch_idx * 3 * frame_size; + gate_grad += batch_idx * 3 * frame_size; + reset_output_grad += batch_idx * frame_size; } - T rResetGateGrad; - T rPrevOutValue = 0; - T rPrevOutGrad = 0; - T rResetOutputGrad = 0; - T rUpdateGateValue = gateValue[frameIdx + frameSize * 0]; - T rUpdateGateGrad = gateGrad[frameIdx + frameSize * 0]; - T rResetGateValue = gateValue[frameIdx + frameSize * 1]; - - if (prevOutValue && prevOutGrad) { - if (isBatch) prevOutValue += batchIdx * frameSize; - if (isBatch) prevOutGrad += batchIdx * frameSize; - rPrevOutValue = prevOutValue[frameIdx]; - rPrevOutGrad = prevOutGrad[frameIdx]; - rResetOutputGrad = resetOutputGrad[frameIdx]; + T r_reset_gate_grad; + T r_prev_out_value = 0; + T r_prev_out_grad = 0; + T r_reset_output_grad = 0; + T r_update_gate_value = gate_value[frame_idx + frame_size * 0]; + T r_update_gate_grad = gate_grad[frame_idx + frame_size * 0]; + T r_reset_gate_value = gate_value[frame_idx + frame_size * 1]; + + if (prev_out_value && prev_out_grad) { + if (is_batch) prev_out_value += batch_idx * frame_size; + if (is_batch) prev_out_grad += batch_idx * frame_size; + r_prev_out_value = prev_out_value[frame_idx]; + r_prev_out_grad = prev_out_grad[frame_idx]; + r_reset_output_grad = reset_output_grad[frame_idx]; } - opResetGrad(rUpdateGateValue, rUpdateGateGrad, rResetGateValue, - rResetGateGrad, rPrevOutValue, rPrevOutGrad, rResetOutputGrad, - active_gate); + op_reset_grad(r_update_gate_value, r_update_gate_grad, r_reset_gate_value, + r_reset_gate_grad, r_prev_out_value, r_prev_out_grad, + r_reset_output_grad, active_gate); - gateGrad[frameIdx + frameSize * 0] = rUpdateGateGrad; - gateGrad[frameIdx + frameSize * 1] = rResetGateGrad; - if (prevOutGrad) { - prevOutGrad[frameIdx] = rPrevOutGrad; + gate_grad[frame_idx + frame_size * 0] = r_update_gate_grad; + gate_grad[frame_idx + frame_size * 1] = r_reset_gate_grad; + if (prev_out_grad) { + prev_out_grad[frame_idx] = r_prev_out_grad; } } } // namespace detail diff --git a/paddle/operators/math/detail/gru_kernel.h b/paddle/operators/math/detail/gru_kernel.h index 8a681d8d8bced72e1296f863489f6ccbc7913167..acd84be01db9ddaf06d165d8be353b253f324dd2 100644 --- a/paddle/operators/math/detail/gru_kernel.h +++ b/paddle/operators/math/detail/gru_kernel.h @@ -28,23 +28,25 @@ namespace forward { template class gru_resetOutput { public: - HOSTDEVICE void operator()(T &valueUpdateGate, T &valueResetGate, T &prevOut, - T &valueResetOutput, activation_mode_t actGate) { - valueUpdateGate = activation(valueUpdateGate, actGate); - valueResetGate = activation(valueResetGate, actGate); - valueResetOutput = prevOut * valueResetGate; + HOSTDEVICE void operator()(T &value_update_gate, T &value_reset_gate, + T &prev_out, T &value_reset_output, + activation_mode_t act_gate) { + value_update_gate = activation(value_update_gate, act_gate); + value_reset_gate = activation(value_reset_gate, act_gate); + value_reset_output = prev_out * value_reset_gate; } #ifndef __NVCC__ #ifndef __AVX__ static const bool avx = false; #else static 
const bool avx = true; - HOSTDEVICE void operator()(__m256 &valueUpdateGate, __m256 &valueResetGate, - __m256 &prevOut, __m256 &valueResetOutput, - activation_mode_t actGate) { - valueUpdateGate = activation(valueUpdateGate, actGate); - valueResetGate = activation(valueResetGate, actGate); - valueResetOutput = _mm256_mul_ps(prevOut, valueResetGate); + HOSTDEVICE void operator()(__m256 &value_update_gate, + __m256 &value_reset_gate, __m256 &prev_out, + __m256 &value_reset_output, + activation_mode_t act_gate) { + value_update_gate = activation(value_update_gate, act_gate); + value_reset_gate = activation(value_reset_gate, act_gate); + value_reset_output = _mm256_mul_ps(prev_out, value_reset_gate); } #endif #endif @@ -53,24 +55,26 @@ class gru_resetOutput { template class gru_finalOutput { public: - HOSTDEVICE void operator()(T &valueUpdateGate, T &valueFrameState, T &prevOut, - T &valueOutput, activation_mode_t actInput) { - valueFrameState = activation(valueFrameState, actInput); - valueOutput = prevOut - (valueUpdateGate * prevOut) + - (valueUpdateGate * valueFrameState); + HOSTDEVICE void operator()(T &value_update_gate, T &value_frame_state, + T &prev_out, T &value_output, + activation_mode_t act_input) { + value_frame_state = activation(value_frame_state, act_input); + value_output = prev_out - (value_update_gate * prev_out) + + (value_update_gate * value_frame_state); } #ifndef __NVCC__ #ifndef __AVX__ static const bool avx = false; #else static const bool avx = true; - HOSTDEVICE void operator()(__m256 &valueUpdateGate, __m256 &valueFrameState, - __m256 &prevOut, __m256 &valueOutput, - activation_mode_t actInput) { - valueFrameState = activation(valueFrameState, actInput); - valueOutput = _mm256_add_ps( - _mm256_sub_ps(prevOut, _mm256_mul_ps(valueUpdateGate, prevOut)), - _mm256_mul_ps(valueUpdateGate, valueFrameState)); + HOSTDEVICE void operator()(__m256 &value_update_gate, + __m256 &value_frame_state, __m256 &prev_out, + __m256 &value_output, + activation_mode_t act_input) { + value_frame_state = activation(value_frame_state, act_input); + value_output = _mm256_add_ps( + _mm256_sub_ps(prev_out, _mm256_mul_ps(value_update_gate, prev_out)), + _mm256_mul_ps(value_update_gate, value_frame_state)); } #endif #endif @@ -82,34 +86,37 @@ namespace backward { template class gru_stateGrad { public: - HOSTDEVICE void operator()(T &valueUpdateGate, T &gradUpdateGate, - T &valueFrameState, T &gradFrameState, - T &valuePrevOut, T &gradPrevOut, T &gradOutput, - activation_mode_t actInput) { - gradUpdateGate = (gradOutput * valueFrameState); - gradUpdateGate -= (gradOutput * valuePrevOut); - gradPrevOut -= (gradOutput * valueUpdateGate); - gradPrevOut += gradOutput; - gradFrameState = - activation(gradOutput * valueUpdateGate, valueFrameState, actInput); + HOSTDEVICE void operator()(T &value_update_gate, T &grad_update_gate, + T &value_frame_state, T &grad_frame_state, + T &value_prev_out, T &grad_prev_out, + T &grad_output, activation_mode_t act_input) { + grad_update_gate = (grad_output * value_frame_state); + grad_update_gate -= (grad_output * value_prev_out); + grad_prev_out -= (grad_output * value_update_gate); + grad_prev_out += grad_output; + grad_frame_state = activation(grad_output * value_update_gate, + value_frame_state, act_input); } #ifndef __NVCC__ #ifndef __AVX__ static const bool avx = false; #else static const bool avx = true; - HOSTDEVICE void operator()(__m256 &valueUpdateGate, __m256 &gradUpdateGate, - __m256 &valueFrameState, __m256 &gradFrameState, - __m256 &valuePrevOut, 
__m256 &gradPrevOut, - __m256 &gradOutput, activation_mode_t actInput) { - gradUpdateGate = _mm256_mul_ps(gradOutput, valueFrameState); - gradUpdateGate = - _mm256_sub_ps(gradUpdateGate, _mm256_mul_ps(gradOutput, valuePrevOut)); - gradPrevOut = _mm256_add_ps( - _mm256_sub_ps(gradPrevOut, _mm256_mul_ps(gradOutput, valueUpdateGate)), - gradOutput); - gradFrameState = activation(_mm256_mul_ps(gradOutput, valueUpdateGate), - valueFrameState, actInput); + HOSTDEVICE void operator()(__m256 &value_update_gate, + __m256 &grad_update_gate, + __m256 &value_frame_state, + __m256 &grad_frame_state, __m256 &value_prev_out, + __m256 &grad_prev_out, __m256 &grad_output, + activation_mode_t act_input) { + grad_update_gate = _mm256_mul_ps(grad_output, value_frame_state); + grad_update_gate = _mm256_sub_ps( + grad_update_gate, _mm256_mul_ps(grad_output, value_prev_out)); + grad_prev_out = _mm256_add_ps( + _mm256_sub_ps(grad_prev_out, + _mm256_mul_ps(grad_output, value_update_gate)), + grad_output); + grad_frame_state = activation(_mm256_mul_ps(grad_output, value_update_gate), + value_frame_state, act_input); } #endif #endif @@ -118,30 +125,32 @@ class gru_stateGrad { template class gru_resetGrad { public: - HOSTDEVICE void operator()(T &valueUpdateGate, T &gradUpdateGate, - T &valueResetGate, T &gradResetGate, - T &valuePrevOut, T &gradPrevOut, - T &gradResetOutput, activation_mode_t actGate) { - gradResetGate = (gradResetOutput * valuePrevOut); - gradPrevOut += (gradResetOutput * valueResetGate); - gradUpdateGate = activation(gradUpdateGate, valueUpdateGate, actGate); - gradResetGate = activation(gradResetGate, valueResetGate, actGate); + HOSTDEVICE void operator()(T &value_update_gate, T &grad_update_gate, + T &value_reset_gate, T &grad_reset_gate, + T &value_prev_out, T &grad_prev_out, + T &grad_reset_output, activation_mode_t act_gate) { + grad_reset_gate = (grad_reset_output * value_prev_out); + grad_prev_out += (grad_reset_output * value_reset_gate); + grad_update_gate = + activation(grad_update_gate, value_update_gate, act_gate); + grad_reset_gate = activation(grad_reset_gate, value_reset_gate, act_gate); } #ifndef __NVCC__ #ifndef __AVX__ static const bool avx = false; #else static const bool avx = true; - HOSTDEVICE void operator()(__m256 &valueUpdateGate, __m256 &gradUpdateGate, - __m256 &valueResetGate, __m256 &gradResetGate, - __m256 &valuePrevOut, __m256 &gradPrevOut, - __m256 &gradResetOutput, - activation_mode_t actGate) { - gradResetGate = _mm256_mul_ps(gradResetOutput, valuePrevOut); - gradPrevOut = _mm256_add_ps(gradPrevOut, - _mm256_mul_ps(gradResetOutput, valueResetGate)); - gradUpdateGate = activation(gradUpdateGate, valueUpdateGate, actGate); - gradResetGate = activation(gradResetGate, valueResetGate, actGate); + HOSTDEVICE void operator()(__m256 &value_update_gate, + __m256 &grad_update_gate, __m256 &value_reset_gate, + __m256 &grad_reset_gate, __m256 &value_prev_out, + __m256 &grad_prev_out, __m256 &grad_reset_output, + activation_mode_t act_gate) { + grad_reset_gate = _mm256_mul_ps(grad_reset_output, value_prev_out); + grad_prev_out = _mm256_add_ps( + grad_prev_out, _mm256_mul_ps(grad_reset_output, value_reset_gate)); + grad_update_gate = + activation(grad_update_gate, value_update_gate, act_gate); + grad_reset_gate = activation(grad_reset_gate, value_reset_gate, act_gate); } #endif #endif diff --git a/paddle/operators/math/detail/lstm_cpu_kernel.h b/paddle/operators/math/detail/lstm_cpu_kernel.h index 
fc3ad0ce58aa1552ef7e717fb529c2d454b4895a..a734ad31eea4816e952641bad73776d93d8c8d34 100644 --- a/paddle/operators/math/detail/lstm_cpu_kernel.h +++ b/paddle/operators/math/detail/lstm_cpu_kernel.h @@ -26,278 +26,284 @@ namespace detail { template void naive_lstm_forward_one_sequence(Op op, LstmMetaValue value, - int frameSize, + int frame_size, activation_mode_t active_node, activation_mode_t active_gate, activation_mode_t active_state) { - T rValueIn; - T rValueIg; - T rValueFg; - T rValueOg; - T rCheckI; - T rCheckF; - T rCheckO; - T rState; - T rPrevState = 0; - T rStateAtv; - T rOut; - - T *valueIn = value.gateValue; - T *valueIg = value.gateValue + frameSize; - T *valueFg = value.gateValue + frameSize * 2; - T *valueOg = value.gateValue + frameSize * 3; - - for (int i = 0; i < frameSize; i++) { - rValueIn = valueIn[i]; - rValueIg = valueIg[i]; - rValueFg = valueFg[i]; - rValueOg = valueOg[i]; - rCheckI = value.checkIg ? value.checkIg[i] : 0; - rCheckF = value.checkFg ? value.checkFg[i] : 0; - rCheckO = value.checkOg ? value.checkOg[i] : 0; - - if (value.prevStateValue) { - rPrevState = value.prevStateValue[i]; + T r_value_in; + T r_value_ig; + T r_value_fg; + T r_value_og; + T r_checkI; + T r_checkF; + T r_checkO; + T r_state; + T r_prev_state = 0; + T r_state_atv; + T r_out; + + T *value_in = value.gate_value; + T *value_ig = value.gate_value + frame_size; + T *value_fg = value.gate_value + frame_size * 2; + T *value_og = value.gate_value + frame_size * 3; + + for (int i = 0; i < frame_size; i++) { + r_value_in = value_in[i]; + r_value_ig = value_ig[i]; + r_value_fg = value_fg[i]; + r_value_og = value_og[i]; + r_checkI = value.check_ig ? value.check_ig[i] : 0; + r_checkF = value.check_fg ? value.check_fg[i] : 0; + r_checkO = value.check_og ? value.check_og[i] : 0; + + if (value.prev_state_value) { + r_prev_state = value.prev_state_value[i]; } - op(rValueIn, rValueIg, rValueFg, rValueOg, rPrevState, rState, rStateAtv, - rOut, rCheckI, rCheckF, rCheckO, active_node, active_gate, active_state); - - valueIn[i] = rValueIn; - valueIg[i] = rValueIg; - valueFg[i] = rValueFg; - valueOg[i] = rValueOg; - value.stateValue[i] = rState; - value.stateActiveValue[i] = rStateAtv; - value.outputValue[i] = rOut; + op(r_value_in, r_value_ig, r_value_fg, r_value_og, r_prev_state, r_state, + r_state_atv, r_out, r_checkI, r_checkF, r_checkO, active_node, + active_gate, active_state); + + value_in[i] = r_value_in; + value_ig[i] = r_value_ig; + value_fg[i] = r_value_fg; + value_og[i] = r_value_og; + value.state_value[i] = r_state; + value.state_active_value[i] = r_state_atv; + value.output_value[i] = r_out; } } template void naive_lstm_backward_one_sequence(Op op, LstmMetaValue value, - LstmMetaGrad grad, int frameSize, + LstmMetaGrad grad, int frame_size, activation_mode_t active_node, activation_mode_t active_gate, activation_mode_t active_state) { - T rValueIn; - T rValueIg; - T rValueFg; - T rValueOg; - T rGradIn; - T rGradIg; - T rGradFg; - T rGradOg; - T rPrevState = 0; - T rPrevStateGrad; - T rState; - T rStateGrad; - T rStateAtv; - T rOutputGrad; - T rCheckI; - T rCheckF; - T rCheckO; - T rCheckIGrad; - T rCheckFGrad; - T rCheckOGrad; - - T *valueIn = value.gateValue; - T *valueIg = value.gateValue + frameSize; - T *valueFg = value.gateValue + frameSize * 2; - T *valueOg = value.gateValue + frameSize * 3; - T *gradIn = grad.gateGrad; - T *gradIg = grad.gateGrad + frameSize; - T *gradFg = grad.gateGrad + frameSize * 2; - T *gradOg = grad.gateGrad + frameSize * 3; - - for (int i = 0; i < frameSize; i++) 
{ - rValueIn = valueIn[i]; - rValueIg = valueIg[i]; - rValueFg = valueFg[i]; - rValueOg = valueOg[i]; - rCheckI = value.checkIg ? value.checkIg[i] : 0; - rCheckF = value.checkFg ? value.checkFg[i] : 0; - rCheckO = value.checkOg ? value.checkOg[i] : 0; - rState = value.stateValue[i]; - rStateAtv = value.stateActiveValue[i]; - rOutputGrad = grad.outputGrad[i]; - rStateGrad = grad.stateGrad[i]; - if (value.prevStateValue) { - rPrevState = value.prevStateValue[i]; + T r_value_in; + T r_value_ig; + T r_value_fg; + T r_value_og; + T r_grad_in; + T r_grad_ig; + T r_grad_fg; + T r_grad_og; + T r_prev_state = 0; + T r_prev_state_grad; + T r_state; + T r_state_grad; + T r_state_atv; + T r_output_grad; + T r_checkI; + T r_checkF; + T r_checkO; + T r_checkIGrad; + T r_checkFGrad; + T r_checkOGrad; + + T *value_in = value.gate_value; + T *value_ig = value.gate_value + frame_size; + T *value_fg = value.gate_value + frame_size * 2; + T *value_og = value.gate_value + frame_size * 3; + T *grad_in = grad.gate_grad; + T *grad_ig = grad.gate_grad + frame_size; + T *grad_fg = grad.gate_grad + frame_size * 2; + T *grad_og = grad.gate_grad + frame_size * 3; + + for (int i = 0; i < frame_size; i++) { + r_value_in = value_in[i]; + r_value_ig = value_ig[i]; + r_value_fg = value_fg[i]; + r_value_og = value_og[i]; + r_checkI = value.check_ig ? value.check_ig[i] : 0; + r_checkF = value.check_fg ? value.check_fg[i] : 0; + r_checkO = value.check_og ? value.check_og[i] : 0; + r_state = value.state_value[i]; + r_state_atv = value.state_active_value[i]; + r_output_grad = grad.output_grad[i]; + r_state_grad = grad.state_grad[i]; + if (value.prev_state_value) { + r_prev_state = value.prev_state_value[i]; } - op(rValueIn, rValueIg, rValueFg, rValueOg, rGradIn, rGradIg, rGradFg, - rGradOg, rPrevState, rPrevStateGrad, rState, rStateGrad, rStateAtv, - rOutputGrad, rCheckI, rCheckF, rCheckO, rCheckIGrad, rCheckFGrad, - rCheckOGrad, active_node, active_gate, active_state); - - gradIn[i] = rGradIn; - gradIg[i] = rGradIg; - gradFg[i] = rGradFg; - gradOg[i] = rGradOg; - grad.stateGrad[i] = rStateGrad; - - if (grad.prevStateGrad) grad.prevStateGrad[i] = rPrevStateGrad; - if (value.prevStateValue) { - if (grad.checkIgGrad) grad.checkIgGrad[i] += rCheckIGrad; - if (grad.checkFgGrad) grad.checkFgGrad[i] += rCheckFGrad; + op(r_value_in, r_value_ig, r_value_fg, r_value_og, r_grad_in, r_grad_ig, + r_grad_fg, r_grad_og, r_prev_state, r_prev_state_grad, r_state, + r_state_grad, r_state_atv, r_output_grad, r_checkI, r_checkF, r_checkO, + r_checkIGrad, r_checkFGrad, r_checkOGrad, active_node, active_gate, + active_state); + + grad_in[i] = r_grad_in; + grad_ig[i] = r_grad_ig; + grad_fg[i] = r_grad_fg; + grad_og[i] = r_grad_og; + grad.state_grad[i] = r_state_grad; + + if (grad.prev_state_grad) grad.prev_state_grad[i] = r_prev_state_grad; + if (value.prev_state_value) { + if (grad.check_ig_grad) grad.check_ig_grad[i] += r_checkIGrad; + if (grad.check_fg_grad) grad.check_fg_grad[i] += r_checkFGrad; } - if (grad.checkOgGrad) grad.checkOgGrad[i] += rCheckOGrad; + if (grad.check_og_grad) grad.check_og_grad[i] += r_checkOGrad; } } template -void avx_lstm_forward_one_sequence(Op op, LstmMetaValue value, int frameSize, +void avx_lstm_forward_one_sequence(Op op, LstmMetaValue value, + int frame_size, activation_mode_t active_node, activation_mode_t active_gate, activation_mode_t active_state) { #ifdef __AVX__ - __m256 rValueIn; - __m256 rValueIg; - __m256 rValueFg; - __m256 rValueOg; - __m256 rCheckI = _mm256_set1_ps(0.0f); - __m256 rCheckF = 
_mm256_set1_ps(0.0f); - __m256 rCheckO = _mm256_set1_ps(0.0f); - __m256 rState; - __m256 rPrevState = _mm256_set1_ps(0.0f); - __m256 rStateAtv; - __m256 rOut; - - __m256 *valueIn = (__m256 *)value.gateValue; - __m256 *valueIg = (__m256 *)(value.gateValue + frameSize); - __m256 *valueFg = (__m256 *)(value.gateValue + frameSize * 2); - __m256 *valueOg = (__m256 *)(value.gateValue + frameSize * 3); - - for (int i = 0; i < frameSize / 8; i++) { - rValueIn = valueIn[i]; - rValueIg = valueIg[i]; - rValueFg = valueFg[i]; - rValueOg = valueOg[i]; - if (value.checkIg) { - rCheckI = ((__m256 *)value.checkIg)[i]; - rCheckF = ((__m256 *)value.checkFg)[i]; - rCheckO = ((__m256 *)value.checkOg)[i]; + __m256 r_value_in; + __m256 r_value_ig; + __m256 r_value_fg; + __m256 r_value_og; + __m256 r_checkI = _mm256_set1_ps(0.0f); + __m256 r_checkF = _mm256_set1_ps(0.0f); + __m256 r_checkO = _mm256_set1_ps(0.0f); + __m256 r_state; + __m256 r_prev_state = _mm256_set1_ps(0.0f); + __m256 r_state_atv; + __m256 r_out; + + __m256 *value_in = (__m256 *)value.gate_value; + __m256 *value_ig = (__m256 *)(value.gate_value + frame_size); + __m256 *value_fg = (__m256 *)(value.gate_value + frame_size * 2); + __m256 *value_og = (__m256 *)(value.gate_value + frame_size * 3); + + for (int i = 0; i < frame_size / 8; i++) { + r_value_in = value_in[i]; + r_value_ig = value_ig[i]; + r_value_fg = value_fg[i]; + r_value_og = value_og[i]; + if (value.check_ig) { + r_checkI = ((__m256 *)value.check_ig)[i]; + r_checkF = ((__m256 *)value.check_fg)[i]; + r_checkO = ((__m256 *)value.check_og)[i]; } - if (value.prevStateValue) { - rPrevState = ((__m256 *)value.prevStateValue)[i]; + if (value.prev_state_value) { + r_prev_state = ((__m256 *)value.prev_state_value)[i]; } - op(rValueIn, rValueIg, rValueFg, rValueOg, rPrevState, rState, rStateAtv, - rOut, rCheckI, rCheckF, rCheckO, active_node, active_gate, active_state); - - valueIn[i] = rValueIn; - valueIg[i] = rValueIg; - valueFg[i] = rValueFg; - valueOg[i] = rValueOg; - ((__m256 *)value.stateValue)[i] = rState; - ((__m256 *)value.stateActiveValue)[i] = rStateAtv; - ((__m256 *)value.outputValue)[i] = rOut; + op(r_value_in, r_value_ig, r_value_fg, r_value_og, r_prev_state, r_state, + r_state_atv, r_out, r_checkI, r_checkF, r_checkO, active_node, + active_gate, active_state); + + value_in[i] = r_value_in; + value_ig[i] = r_value_ig; + value_fg[i] = r_value_fg; + value_og[i] = r_value_og; + ((__m256 *)value.state_value)[i] = r_state; + ((__m256 *)value.state_active_value)[i] = r_state_atv; + ((__m256 *)value.output_value)[i] = r_out; } #endif } template void avx_lstm_backward_one_sequence(Op op, LstmMetaValue value, - LstmMetaGrad grad, int frameSize, + LstmMetaGrad grad, int frame_size, activation_mode_t active_node, activation_mode_t active_gate, activation_mode_t active_state) { #ifdef __AVX__ - __m256 rValueIn; - __m256 rValueIg; - __m256 rValueFg; - __m256 rValueOg; - __m256 rGradIn; - __m256 rGradIg; - __m256 rGradFg; - __m256 rGradOg; - __m256 rPrevState = _mm256_set1_ps(0.0f); - __m256 rPrevStateGrad; - __m256 rStateGrad; - __m256 rState; - __m256 rStateAtv; - __m256 rOutputGrad; - __m256 rCheckI = _mm256_set1_ps(0.0f); - __m256 rCheckF = _mm256_set1_ps(0.0f); - __m256 rCheckO = _mm256_set1_ps(0.0f); - __m256 rCheckIGrad; - __m256 rCheckFGrad; - __m256 rCheckOGrad; - - __m256 *valueIn = (__m256 *)value.gateValue; - __m256 *valueIg = (__m256 *)(value.gateValue + frameSize); - __m256 *valueFg = (__m256 *)(value.gateValue + frameSize * 2); - __m256 *valueOg = (__m256 *)(value.gateValue + 
frameSize * 3); - __m256 *gradIn = (__m256 *)grad.gateGrad; - __m256 *gradIg = (__m256 *)(grad.gateGrad + frameSize); - __m256 *gradFg = (__m256 *)(grad.gateGrad + frameSize * 2); - __m256 *gradOg = (__m256 *)(grad.gateGrad + frameSize * 3); - - for (int i = 0; i < frameSize / 8; i++) { - rValueIn = valueIn[i]; - rValueIg = valueIg[i]; - rValueFg = valueFg[i]; - rValueOg = valueOg[i]; - if (value.checkIg) { - rCheckI = ((__m256 *)value.checkIg)[i]; - rCheckF = ((__m256 *)value.checkFg)[i]; - rCheckO = ((__m256 *)value.checkOg)[i]; + __m256 r_value_in; + __m256 r_value_ig; + __m256 r_value_fg; + __m256 r_value_og; + __m256 r_grad_in; + __m256 r_grad_ig; + __m256 r_grad_fg; + __m256 r_grad_og; + __m256 r_prev_state = _mm256_set1_ps(0.0f); + __m256 r_prev_state_grad; + __m256 r_state_grad; + __m256 r_state; + __m256 r_state_atv; + __m256 r_output_grad; + __m256 r_checkI = _mm256_set1_ps(0.0f); + __m256 r_checkF = _mm256_set1_ps(0.0f); + __m256 r_checkO = _mm256_set1_ps(0.0f); + __m256 r_checkIGrad; + __m256 r_checkFGrad; + __m256 r_checkOGrad; + + __m256 *value_in = (__m256 *)value.gate_value; + __m256 *value_ig = (__m256 *)(value.gate_value + frame_size); + __m256 *value_fg = (__m256 *)(value.gate_value + frame_size * 2); + __m256 *value_og = (__m256 *)(value.gate_value + frame_size * 3); + __m256 *grad_in = (__m256 *)grad.gate_grad; + __m256 *grad_ig = (__m256 *)(grad.gate_grad + frame_size); + __m256 *grad_fg = (__m256 *)(grad.gate_grad + frame_size * 2); + __m256 *grad_og = (__m256 *)(grad.gate_grad + frame_size * 3); + + for (int i = 0; i < frame_size / 8; i++) { + r_value_in = value_in[i]; + r_value_ig = value_ig[i]; + r_value_fg = value_fg[i]; + r_value_og = value_og[i]; + if (value.check_ig) { + r_checkI = ((__m256 *)value.check_ig)[i]; + r_checkF = ((__m256 *)value.check_fg)[i]; + r_checkO = ((__m256 *)value.check_og)[i]; } - rState = ((__m256 *)value.stateValue)[i]; - rStateAtv = ((__m256 *)value.stateActiveValue)[i]; - rOutputGrad = ((__m256 *)grad.outputGrad)[i]; - rStateGrad = ((__m256 *)grad.stateGrad)[i]; - if (value.prevStateValue) { - rPrevState = ((__m256 *)value.prevStateValue)[i]; + r_state = ((__m256 *)value.state_value)[i]; + r_state_atv = ((__m256 *)value.state_active_value)[i]; + r_output_grad = ((__m256 *)grad.output_grad)[i]; + r_state_grad = ((__m256 *)grad.state_grad)[i]; + if (value.prev_state_value) { + r_prev_state = ((__m256 *)value.prev_state_value)[i]; } - op(rValueIn, rValueIg, rValueFg, rValueOg, rGradIn, rGradIg, rGradFg, - rGradOg, rPrevState, rPrevStateGrad, rState, rStateGrad, rStateAtv, - rOutputGrad, rCheckI, rCheckF, rCheckO, rCheckIGrad, rCheckFGrad, - rCheckOGrad, active_node, active_gate, active_state); - - gradIn[i] = rGradIn; - gradIg[i] = rGradIg; - gradFg[i] = rGradFg; - gradOg[i] = rGradOg; - ((__m256 *)grad.stateGrad)[i] = rStateGrad; - - if (grad.prevStateGrad) ((__m256 *)grad.prevStateGrad)[i] = rPrevStateGrad; - if (value.prevStateValue) { - if (grad.checkIgGrad) ((__m256 *)grad.checkIgGrad)[i] += rCheckIGrad; - if (grad.checkFgGrad) ((__m256 *)grad.checkFgGrad)[i] += rCheckFGrad; + op(r_value_in, r_value_ig, r_value_fg, r_value_og, r_grad_in, r_grad_ig, + r_grad_fg, r_grad_og, r_prev_state, r_prev_state_grad, r_state, + r_state_grad, r_state_atv, r_output_grad, r_checkI, r_checkF, r_checkO, + r_checkIGrad, r_checkFGrad, r_checkOGrad, active_node, active_gate, + active_state); + + grad_in[i] = r_grad_in; + grad_ig[i] = r_grad_ig; + grad_fg[i] = r_grad_fg; + grad_og[i] = r_grad_og; + ((__m256 *)grad.state_grad)[i] = r_state_grad; + + if 
(grad.prev_state_grad) + ((__m256 *)grad.prev_state_grad)[i] = r_prev_state_grad; + if (value.prev_state_value) { + if (grad.check_ig_grad) ((__m256 *)grad.check_ig_grad)[i] += r_checkIGrad; + if (grad.check_fg_grad) ((__m256 *)grad.check_fg_grad)[i] += r_checkFGrad; } - if (grad.checkOgGrad) ((__m256 *)grad.checkOgGrad)[i] += rCheckOGrad; + if (grad.check_og_grad) ((__m256 *)grad.check_og_grad)[i] += r_checkOGrad; } #endif } template -void cpu_lstm_forward(Op op, LstmMetaValue value, int frameSize, +void cpu_lstm_forward(Op op, LstmMetaValue value, int frame_size, activation_mode_t active_node, activation_mode_t active_gate, activation_mode_t active_state) { - if (Op::avx && !(frameSize & (8 - 1)) && (std::is_same::value)) { - avx_lstm_forward_one_sequence(op, value, frameSize, active_node, + if (Op::avx && !(frame_size & (8 - 1)) && (std::is_same::value)) { + avx_lstm_forward_one_sequence(op, value, frame_size, active_node, active_gate, active_state); } else { - naive_lstm_forward_one_sequence(op, value, frameSize, active_node, + naive_lstm_forward_one_sequence(op, value, frame_size, active_node, active_gate, active_state); } } template void cpu_lstm_backward(Op op, LstmMetaValue value, LstmMetaGrad grad, - int frameSize, activation_mode_t active_node, + int frame_size, activation_mode_t active_node, activation_mode_t active_gate, activation_mode_t active_state) { - if (Op::avx && !(frameSize & (8 - 1)) && (std::is_same::value)) { - avx_lstm_backward_one_sequence(op, value, grad, frameSize, active_node, + if (Op::avx && !(frame_size & (8 - 1)) && (std::is_same::value)) { + avx_lstm_backward_one_sequence(op, value, grad, frame_size, active_node, active_gate, active_state); } else { - naive_lstm_backward_one_sequence(op, value, grad, frameSize, active_node, - active_gate, active_state); + naive_lstm_backward_one_sequence(op, value, grad, frame_size, + active_node, active_gate, active_state); } } diff --git a/paddle/operators/math/detail/lstm_gpu_kernel.h b/paddle/operators/math/detail/lstm_gpu_kernel.h index d138bbe411f69929a14ad19af3e84824ac7a5d58..91bfedea53a2600156c9025f6ff3615d695a712b 100644 --- a/paddle/operators/math/detail/lstm_gpu_kernel.h +++ b/paddle/operators/math/detail/lstm_gpu_kernel.h @@ -26,189 +26,192 @@ namespace math { namespace detail { /* - * threads(framePerBlock, batchPerBlock) - * grid(frameBlocks, batchBlocks) + * threads(frame_per_block, batch_per_block) + * grid(frame_blocks, batch_blocks) */ -template -__global__ void KeLstmForward(Op op, LstmMetaValue value, int frameSize, - int batchSize, activation_mode_t active_node, +template +__global__ void KeLstmForward(Op op, LstmMetaValue value, int frame_size, + int batch_size, activation_mode_t active_node, activation_mode_t active_gate, activation_mode_t active_state) { - const int frameIdx = blockIdx.x * blockDim.x + threadIdx.x; - if (frameIdx >= frameSize) return; - - int batchIdx = 0; - if (isBatch) { - batchIdx = blockIdx.y * blockDim.y + threadIdx.y; - if (batchIdx >= batchSize) return; - value.gateValue += batchIdx * frameSize * 4; - value.outputValue += batchIdx * frameSize; - value.stateValue += batchIdx * frameSize; - value.stateActiveValue += batchIdx * frameSize; + const int frame_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (frame_idx >= frame_size) return; + + int batch_idx = 0; + if (is_batch) { + batch_idx = blockIdx.y * blockDim.y + threadIdx.y; + if (batch_idx >= batch_size) return; + value.gate_value += batch_idx * frame_size * 4; + value.output_value += batch_idx * frame_size; + 
value.state_value += batch_idx * frame_size; + value.state_active_value += batch_idx * frame_size; } - T rState; - T rPrevState = 0; - T rStateAtv; - T rOut; - T rValueIn; - T rValueIg; - T rValueFg; - T rValueOg; - - T rCheckI = value.checkIg ? value.checkIg[frameIdx] : 0; - T rCheckF = value.checkFg ? value.checkFg[frameIdx] : 0; - T rCheckO = value.checkOg ? value.checkOg[frameIdx] : 0; - - rValueIn = value.gateValue[frameIdx]; - rValueIg = value.gateValue[frameIdx + frameSize]; - rValueFg = value.gateValue[frameIdx + frameSize * 2]; - rValueOg = value.gateValue[frameIdx + frameSize * 3]; - - if (value.prevStateValue) { - if (isBatch) value.prevStateValue += batchIdx * frameSize; - rPrevState = value.prevStateValue[frameIdx]; + T r_state; + T r_prev_state = 0; + T r_state_atv; + T r_out; + T r_value_in; + T r_value_ig; + T r_value_fg; + T r_value_og; + + T r_checkI = value.check_ig ? value.check_ig[frame_idx] : 0; + T r_checkF = value.check_fg ? value.check_fg[frame_idx] : 0; + T r_checkO = value.check_og ? value.check_og[frame_idx] : 0; + + r_value_in = value.gate_value[frame_idx]; + r_value_ig = value.gate_value[frame_idx + frame_size]; + r_value_fg = value.gate_value[frame_idx + frame_size * 2]; + r_value_og = value.gate_value[frame_idx + frame_size * 3]; + + if (value.prev_state_value) { + if (is_batch) value.prev_state_value += batch_idx * frame_size; + r_prev_state = value.prev_state_value[frame_idx]; } - op(rValueIn, rValueIg, rValueFg, rValueOg, rPrevState, rState, rStateAtv, - rOut, rCheckI, rCheckF, rCheckO, active_node, active_gate, active_state); + op(r_value_in, r_value_ig, r_value_fg, r_value_og, r_prev_state, r_state, + r_state_atv, r_out, r_checkI, r_checkF, r_checkO, active_node, active_gate, + active_state); - value.gateValue[frameIdx] = rValueIn; - value.gateValue[frameIdx + frameSize] = rValueIg; - value.gateValue[frameIdx + frameSize * 2] = rValueFg; - value.gateValue[frameIdx + frameSize * 3] = rValueOg; + value.gate_value[frame_idx] = r_value_in; + value.gate_value[frame_idx + frame_size] = r_value_ig; + value.gate_value[frame_idx + frame_size * 2] = r_value_fg; + value.gate_value[frame_idx + frame_size * 3] = r_value_og; - value.stateValue[frameIdx] = rState; - value.stateActiveValue[frameIdx] = rStateAtv; - value.outputValue[frameIdx] = rOut; + value.state_value[frame_idx] = r_state; + value.state_active_value[frame_idx] = r_state_atv; + value.output_value[frame_idx] = r_out; } /* - * threads(framePerBlock, batchPerBlock) - * grid(frameBlocks, batchBlocks) + * threads(frame_per_block, batch_per_block) + * grid(frame_blocks, batch_blocks) */ -template +template __global__ void KeLstmBackward(Op op, LstmMetaValue value, - LstmMetaGrad grad, int frameSize, - int batchSize, activation_mode_t active_node, + LstmMetaGrad grad, int frame_size, + int batch_size, activation_mode_t active_node, activation_mode_t active_gate, activation_mode_t active_state) { - const int frameIdx = blockIdx.x * blockDim.x + threadIdx.x; - if (frameIdx >= frameSize) return; - - int batchIdx = 0; - if (isBatch) { - batchIdx = blockIdx.y * blockDim.y + threadIdx.y; - if (batchIdx >= batchSize) return; - value.gateValue += batchIdx * frameSize * 4; - value.stateValue += batchIdx * frameSize; - value.stateActiveValue += batchIdx * frameSize; - grad.gateGrad += batchIdx * frameSize * 4; - grad.stateGrad += batchIdx * frameSize; - grad.outputGrad += batchIdx * frameSize; + const int frame_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (frame_idx >= frame_size) return; + + int batch_idx = 0; + 
if (is_batch) { + batch_idx = blockIdx.y * blockDim.y + threadIdx.y; + if (batch_idx >= batch_size) return; + value.gate_value += batch_idx * frame_size * 4; + value.state_value += batch_idx * frame_size; + value.state_active_value += batch_idx * frame_size; + grad.gate_grad += batch_idx * frame_size * 4; + grad.state_grad += batch_idx * frame_size; + grad.output_grad += batch_idx * frame_size; } - T rValueIn; - T rValueIg; - T rValueFg; - T rValueOg; - T rGradIn; - T rGradIg; - T rGradFg; - T rGradOg; - T rPrevState = 0; - T rPrevStateGrad; - T rState; - T rStateGrad; - T rStateAtv; - T rOutputGrad; - T rCheckI = value.checkIg ? value.checkIg[frameIdx] : 0; - T rCheckF = value.checkFg ? value.checkFg[frameIdx] : 0; - T rCheckO = value.checkOg ? value.checkOg[frameIdx] : 0; - - T rCheckIGrad; - T rCheckFGrad; - T rCheckOGrad; - - rValueIn = value.gateValue[frameIdx]; - rValueIg = value.gateValue[frameIdx + frameSize]; - rValueFg = value.gateValue[frameIdx + frameSize * 2]; - rValueOg = value.gateValue[frameIdx + frameSize * 3]; - rState = value.stateValue[frameIdx]; - rStateAtv = value.stateActiveValue[frameIdx]; - rOutputGrad = grad.outputGrad[frameIdx]; - rStateGrad = grad.stateGrad[frameIdx]; - - if (value.prevStateValue) { - if (isBatch) value.prevStateValue += batchIdx * frameSize; - rPrevState = value.prevStateValue[frameIdx]; + T r_value_in; + T r_value_ig; + T r_value_fg; + T r_value_og; + T r_grad_in; + T r_grad_ig; + T r_grad_fg; + T r_grad_og; + T r_prev_state = 0; + T r_prev_state_grad; + T r_state; + T r_state_grad; + T r_state_atv; + T r_output_grad; + T r_checkI = value.check_ig ? value.check_ig[frame_idx] : 0; + T r_checkF = value.check_fg ? value.check_fg[frame_idx] : 0; + T r_checkO = value.check_og ? value.check_og[frame_idx] : 0; + + T r_checkIGrad; + T r_checkFGrad; + T r_checkOGrad; + + r_value_in = value.gate_value[frame_idx]; + r_value_ig = value.gate_value[frame_idx + frame_size]; + r_value_fg = value.gate_value[frame_idx + frame_size * 2]; + r_value_og = value.gate_value[frame_idx + frame_size * 3]; + r_state = value.state_value[frame_idx]; + r_state_atv = value.state_active_value[frame_idx]; + r_output_grad = grad.output_grad[frame_idx]; + r_state_grad = grad.state_grad[frame_idx]; + + if (value.prev_state_value) { + if (is_batch) value.prev_state_value += batch_idx * frame_size; + r_prev_state = value.prev_state_value[frame_idx]; } - op(rValueIn, rValueIg, rValueFg, rValueOg, rGradIn, rGradIg, rGradFg, rGradOg, - rPrevState, rPrevStateGrad, rState, rStateGrad, rStateAtv, rOutputGrad, - rCheckI, rCheckF, rCheckO, rCheckIGrad, rCheckFGrad, rCheckOGrad, - active_node, active_gate, active_state); - - grad.gateGrad[frameIdx] = rGradIn; - grad.gateGrad[frameIdx + frameSize] = rGradIg; - grad.gateGrad[frameIdx + frameSize * 2] = rGradFg; - grad.gateGrad[frameIdx + frameSize * 3] = rGradOg; - grad.stateGrad[frameIdx] = rStateGrad; - if (grad.prevStateGrad) { - if (isBatch) grad.prevStateGrad += batchIdx * frameSize; - grad.prevStateGrad[frameIdx] = rPrevStateGrad; + op(r_value_in, r_value_ig, r_value_fg, r_value_og, r_grad_in, r_grad_ig, + r_grad_fg, r_grad_og, r_prev_state, r_prev_state_grad, r_state, + r_state_grad, r_state_atv, r_output_grad, r_checkI, r_checkF, r_checkO, + r_checkIGrad, r_checkFGrad, r_checkOGrad, active_node, active_gate, + active_state); + + grad.gate_grad[frame_idx] = r_grad_in; + grad.gate_grad[frame_idx + frame_size] = r_grad_ig; + grad.gate_grad[frame_idx + frame_size * 2] = r_grad_fg; + grad.gate_grad[frame_idx + frame_size * 3] = r_grad_og; 
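For reference, a minimal standalone CUDA sketch (not the PaddlePaddle kernel itself; the function and buffer names are hypothetical) of the two conventions the forward/backward kernels in this hunk rely on: the x dimension of the launch covers frame indices, the y dimension covers batch rows, and each batch row stores its four LSTM gates (input candidate, input gate, forget gate, output gate) contiguously, each frame_size wide.

__global__ void ke_gate_layout_demo(float* gate_value, int frame_size,
                                    int batch_size) {
  // One thread per (frame, batch) cell, as in KeLstmForward/KeLstmBackward above.
  const int frame_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (frame_idx >= frame_size) return;
  const int batch_idx = blockIdx.y * blockDim.y + threadIdx.y;
  if (batch_idx >= batch_size) return;

  // Each batch row holds 4 * frame_size gate values; offset to this row first.
  float* row = gate_value + batch_idx * frame_size * 4;

  // The four gates then sit at fixed frame_size strides within the row.
  float in_cand  = row[frame_idx];                   // candidate input
  float in_gate  = row[frame_idx + frame_size];      // i
  float fg_gate  = row[frame_idx + frame_size * 2];  // f
  float og_gate  = row[frame_idx + frame_size * 3];  // o
  row[frame_idx] = in_cand + in_gate + fg_gate + og_gate;  // dummy use of the values
}

// A matching launch, mirroring the batched branch of gpu_lstm_forward:
//   dim3 threads(32, 32);
//   dim3 grid((frame_size + 32 - 1) / 32, (batch_size + 32 - 1) / 32);
//   ke_gate_layout_demo<<<grid, threads>>>(dev_gate_value, frame_size, batch_size);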
+ grad.state_grad[frame_idx] = r_state_grad; + if (grad.prev_state_grad) { + if (is_batch) grad.prev_state_grad += batch_idx * frame_size; + grad.prev_state_grad[frame_idx] = r_prev_state_grad; } - if (isBatch) { - if (value.prevStateValue) { - if (grad.checkIgGrad) - paddle::platform::CudaAtomicAdd(grad.checkIgGrad + frameIdx, - rCheckIGrad); - if (grad.checkFgGrad) - paddle::platform::CudaAtomicAdd(grad.checkFgGrad + frameIdx, - rCheckFGrad); + if (is_batch) { + if (value.prev_state_value) { + if (grad.check_ig_grad) + paddle::platform::CudaAtomicAdd(grad.check_ig_grad + frame_idx, + r_checkIGrad); + if (grad.check_fg_grad) + paddle::platform::CudaAtomicAdd(grad.check_fg_grad + frame_idx, + r_checkFGrad); } - if (grad.checkOgGrad) - paddle::platform::CudaAtomicAdd(grad.checkOgGrad + frameIdx, rCheckOGrad); + if (grad.check_og_grad) + paddle::platform::CudaAtomicAdd(grad.check_og_grad + frame_idx, + r_checkOGrad); } else { - if (value.prevStateValue) { - if (grad.checkIgGrad) grad.checkIgGrad[frameIdx] += rCheckIGrad; - if (grad.checkFgGrad) grad.checkFgGrad[frameIdx] += rCheckFGrad; + if (value.prev_state_value) { + if (grad.check_ig_grad) grad.check_ig_grad[frame_idx] += r_checkIGrad; + if (grad.check_fg_grad) grad.check_fg_grad[frame_idx] += r_checkFGrad; } - if (grad.checkOgGrad) grad.checkOgGrad[frameIdx] += rCheckOGrad; + if (grad.check_og_grad) grad.check_og_grad[frame_idx] += r_checkOGrad; } } template void gpu_lstm_forward(const platform::DeviceContext& context, Op op, - LstmMetaValue value, int frameSize, int batchSize, + LstmMetaValue value, int frame_size, int batch_size, activation_mode_t active_node, activation_mode_t active_gate, activation_mode_t active_state) { dim3 threads; dim3 grid; - if (batchSize == 1) { - int framePerBlock = frameSize <= 1024 ? frameSize : 1024; - int frameBlocks = (frameSize + 1024 - 1) / 1024; - threads = dim3(framePerBlock, 1); - grid = dim3(frameBlocks, 1); + if (batch_size == 1) { + int frame_per_block = frame_size <= 1024 ? frame_size : 1024; + int frame_blocks = (frame_size + 1024 - 1) / 1024; + threads = dim3(frame_per_block, 1); + grid = dim3(frame_blocks, 1); } else { - /* framePerBlock = 32 batchPerBlock = 32 */ + /* frame_per_block = 32 batch_per_block = 32 */ threads = dim3(32, 32); - grid = dim3((frameSize + 32 - 1) / 32, (batchSize + 32 - 1) / 32); + grid = dim3((frame_size + 32 - 1) / 32, (batch_size + 32 - 1) / 32); } auto stream = reinterpret_cast(context).stream(); - if (batchSize == 1) { + if (batch_size == 1) { KeLstmForward<<>>( - op, value, frameSize, batchSize, active_node, active_gate, + /* is_batch= */ false><<>>( + op, value, frame_size, batch_size, active_node, active_gate, active_state); } else { KeLstmForward<<>>( - op, value, frameSize, batchSize, active_node, active_gate, + /* is_batch= */ true><<>>( + op, value, frame_size, batch_size, active_node, active_gate, active_state); } } @@ -216,34 +219,34 @@ void gpu_lstm_forward(const platform::DeviceContext& context, Op op, template void gpu_lstm_backward(const platform::DeviceContext& context, Op op, LstmMetaValue value, LstmMetaGrad grad, - int frameSize, int batchSize, + int frame_size, int batch_size, activation_mode_t active_node, activation_mode_t active_gate, activation_mode_t active_state) { dim3 threads; dim3 grid; - if (batchSize == 1) { - int framePerBlock = frameSize <= 1024 ? 
frameSize : 1024; - int frameBlocks = (frameSize + 1024 - 1) / 1024; - threads = dim3(framePerBlock, 1); - grid = dim3(frameBlocks, 1); + if (batch_size == 1) { + int frame_per_block = frame_size <= 1024 ? frame_size : 1024; + int frame_blocks = (frame_size + 1024 - 1) / 1024; + threads = dim3(frame_per_block, 1); + grid = dim3(frame_blocks, 1); } else { - /* framePerBlock = 32 batchPerBlock = 16 */ + /* frame_per_block = 32 batch_per_block = 16 */ threads = dim3(32, 16); - grid = dim3((frameSize + 32 - 1) / 32, (batchSize + 16 - 1) / 16); + grid = dim3((frame_size + 32 - 1) / 32, (batch_size + 16 - 1) / 16); } auto stream = reinterpret_cast(context).stream(); - if (batchSize == 1) { + if (batch_size == 1) { KeLstmBackward<<>>( - op, value, grad, frameSize, batchSize, active_node, active_gate, + /* is_batch= */ false><<>>( + op, value, grad, frame_size, batch_size, active_node, active_gate, active_state); } else { KeLstmBackward<<>>( - op, value, grad, frameSize, batchSize, active_node, active_gate, + /* is_batch= */ true><<>>( + op, value, grad, frame_size, batch_size, active_node, active_gate, active_state); } } diff --git a/paddle/operators/math/detail/lstm_kernel.h b/paddle/operators/math/detail/lstm_kernel.h index 9daaf91981a8e0252374f528f0e063111bd32675..78f9a249a3d5d413452952edf990975c02f1a369 100644 --- a/paddle/operators/math/detail/lstm_kernel.h +++ b/paddle/operators/math/detail/lstm_kernel.h @@ -27,19 +27,19 @@ namespace forward { template class lstm { public: - HOSTDEVICE void operator()(T &valueIn, T &valueIg, T &valueFg, T &valueOg, - T &prevState, T &state, T &stateAtv, T &output, + HOSTDEVICE void operator()(T &value_in, T &value_ig, T &value_fg, T &value_og, + T &prev_state, T &state, T &state_atv, T &output, T &checkI, T &checkF, T &checkO, activation_mode_t active_node, activation_mode_t active_gate, activation_mode_t active_state) { - valueIn = activation(valueIn, active_node); - valueIg = activation(valueIg + prevState * checkI, active_gate); - valueFg = activation(valueFg + prevState * checkF, active_gate); - state = valueIn * valueIg + prevState * valueFg; - valueOg = activation(valueOg + state * checkO, active_gate); - stateAtv = activation(state, active_state); - output = valueOg * stateAtv; + value_in = activation(value_in, active_node); + value_ig = activation(value_ig + prev_state * checkI, active_gate); + value_fg = activation(value_fg + prev_state * checkF, active_gate); + state = value_in * value_ig + prev_state * value_fg; + value_og = activation(value_og + state * checkO, active_gate); + state_atv = activation(state, active_state); + output = value_og * state_atv; } #ifndef __NVCC__ #ifndef __AVX__ // If not compiled with AVX instructs. 
Disable AVX by default @@ -48,24 +48,27 @@ class lstm { // Only float support AVX optimization static const bool avx = std::is_same::value; - HOSTDEVICE void operator()(__m256 &valueIn, __m256 &valueIg, __m256 &valueFg, - __m256 &valueOg, __m256 &prevState, __m256 &state, - __m256 &stateAtv, __m256 &output, __m256 &checkI, + HOSTDEVICE void operator()(__m256 &value_in, __m256 &value_ig, + __m256 &value_fg, __m256 &value_og, + __m256 &prev_state, __m256 &state, + __m256 &state_atv, __m256 &output, __m256 &checkI, __m256 &checkF, __m256 &checkO, activation_mode_t active_node, activation_mode_t active_gate, activation_mode_t active_state) { - valueIn = activation(valueIn, active_node); - valueIg = activation( - _mm256_add_ps(valueIg, _mm256_mul_ps(prevState, checkI)), active_gate); - valueFg = activation( - _mm256_add_ps(valueFg, _mm256_mul_ps(prevState, checkF)), active_gate); - state = _mm256_add_ps(_mm256_mul_ps(valueIn, valueIg), - _mm256_mul_ps(prevState, valueFg)); - valueOg = activation(_mm256_add_ps(valueOg, _mm256_mul_ps(state, checkO)), - active_gate); - stateAtv = activation(state, active_state); - output = _mm256_mul_ps(valueOg, stateAtv); + value_in = activation(value_in, active_node); + value_ig = + activation(_mm256_add_ps(value_ig, _mm256_mul_ps(prev_state, checkI)), + active_gate); + value_fg = + activation(_mm256_add_ps(value_fg, _mm256_mul_ps(prev_state, checkF)), + active_gate); + state = _mm256_add_ps(_mm256_mul_ps(value_in, value_ig), + _mm256_mul_ps(prev_state, value_fg)); + value_og = activation(_mm256_add_ps(value_og, _mm256_mul_ps(state, checkO)), + active_gate); + state_atv = activation(state, active_state); + output = _mm256_mul_ps(value_og, state_atv); } #endif #endif @@ -78,25 +81,26 @@ namespace backward { template class lstm { public: - HOSTDEVICE void operator()(T &valueIn, T &valueIg, T &valueFg, T &valueOg, - T &gradIn, T &gradIg, T &gradFg, T &gradOg, - T &prevState, T &prevStateGrad, T &state, - T &stateGrad, T &stateAtv, T &outputGrad, + HOSTDEVICE void operator()(T &value_in, T &value_ig, T &value_fg, T &value_og, + T &grad_in, T &grad_ig, T &grad_fg, T &grad_og, + T &prev_state, T &prev_state_grad, T &state, + T &state_grad, T &state_atv, T &output_grad, T &checkI, T &checkF, T &checkO, T &checkIGrad, T &checkFGrad, T &checkOGrad, activation_mode_t active_node, activation_mode_t active_gate, activation_mode_t active_state) { - gradOg = activation(outputGrad * stateAtv, valueOg, active_gate); - stateGrad += activation(outputGrad * valueOg, stateAtv, active_state) + - gradOg * checkO; - gradIn = activation(stateGrad * valueIg, valueIn, active_node); - gradIg = activation(stateGrad * valueIn, valueIg, active_gate); - gradFg = activation(stateGrad * prevState, valueFg, active_gate); - prevStateGrad = gradIg * checkI + gradFg * checkF + stateGrad * valueFg; - checkIGrad = gradIg * prevState; - checkFGrad = gradFg * prevState; - checkOGrad = gradOg * state; + grad_og = activation(output_grad * state_atv, value_og, active_gate); + state_grad += activation(output_grad * value_og, state_atv, active_state) + + grad_og * checkO; + grad_in = activation(state_grad * value_ig, value_in, active_node); + grad_ig = activation(state_grad * value_in, value_ig, active_gate); + grad_fg = activation(state_grad * prev_state, value_fg, active_gate); + prev_state_grad = + grad_ig * checkI + grad_fg * checkF + state_grad * value_fg; + checkIGrad = grad_ig * prev_state; + checkFGrad = grad_fg * prev_state; + checkOGrad = grad_og * state; } #ifndef __NVCC__ #ifndef __AVX__ // If 
not compiled with AVX instructs. Disable AVX by default @@ -105,32 +109,32 @@ class lstm { // Only float support AVX optimization static const bool avx = std::is_same::value; HOSTDEVICE void operator()( - __m256 &valueIn, __m256 &valueIg, __m256 &valueFg, __m256 &valueOg, - __m256 &gradIn, __m256 &gradIg, __m256 &gradFg, __m256 &gradOg, - __m256 &prevState, __m256 &prevStateGrad, __m256 &state, - __m256 &stateGrad, __m256 &stateAtv, __m256 &outputGrad, __m256 &checkI, - __m256 &checkF, __m256 &checkO, __m256 &checkIGrad, __m256 &checkFGrad, - __m256 &checkOGrad, activation_mode_t active_node, + __m256 &value_in, __m256 &value_ig, __m256 &value_fg, __m256 &value_og, + __m256 &grad_in, __m256 &grad_ig, __m256 &grad_fg, __m256 &grad_og, + __m256 &prev_state, __m256 &prev_state_grad, __m256 &state, + __m256 &state_grad, __m256 &state_atv, __m256 &output_grad, + __m256 &checkI, __m256 &checkF, __m256 &checkO, __m256 &checkIGrad, + __m256 &checkFGrad, __m256 &checkOGrad, activation_mode_t active_node, activation_mode_t active_gate, activation_mode_t active_state) { - gradOg = - activation(_mm256_mul_ps(outputGrad, stateAtv), valueOg, active_gate); - stateGrad = _mm256_add_ps( - activation(_mm256_mul_ps(outputGrad, valueOg), stateAtv, active_state), - stateGrad); - stateGrad = _mm256_add_ps(_mm256_mul_ps(gradOg, checkO), stateGrad); - gradIn = - activation(_mm256_mul_ps(stateGrad, valueIg), valueIn, active_node); - gradIg = - activation(_mm256_mul_ps(stateGrad, valueIn), valueIg, active_gate); - gradFg = - activation(_mm256_mul_ps(stateGrad, prevState), valueFg, active_gate); - prevStateGrad = _mm256_add_ps(_mm256_mul_ps(gradIg, checkI), - _mm256_mul_ps(gradFg, checkF)); - prevStateGrad = - _mm256_add_ps(_mm256_mul_ps(stateGrad, valueFg), prevStateGrad); - checkIGrad = _mm256_mul_ps(gradIg, prevState); - checkFGrad = _mm256_mul_ps(gradFg, prevState); - checkOGrad = _mm256_mul_ps(gradOg, state); + grad_og = activation(_mm256_mul_ps(output_grad, state_atv), value_og, + active_gate); + state_grad = _mm256_add_ps(activation(_mm256_mul_ps(output_grad, value_og), + state_atv, active_state), + state_grad); + state_grad = _mm256_add_ps(_mm256_mul_ps(grad_og, checkO), state_grad); + grad_in = + activation(_mm256_mul_ps(state_grad, value_ig), value_in, active_node); + grad_ig = + activation(_mm256_mul_ps(state_grad, value_in), value_ig, active_gate); + grad_fg = activation(_mm256_mul_ps(state_grad, prev_state), value_fg, + active_gate); + prev_state_grad = _mm256_add_ps(_mm256_mul_ps(grad_ig, checkI), + _mm256_mul_ps(grad_fg, checkF)); + prev_state_grad = + _mm256_add_ps(_mm256_mul_ps(state_grad, value_fg), prev_state_grad); + checkIGrad = _mm256_mul_ps(grad_ig, prev_state); + checkFGrad = _mm256_mul_ps(grad_fg, prev_state); + checkOGrad = _mm256_mul_ps(grad_og, state); } #endif #endif diff --git a/paddle/operators/math/gru_compute.cc b/paddle/operators/math/gru_compute.cc index 125af449d3f700e24be5e4b7615c3b0e03fd4e5b..d570c68cd458914c8951c4ce50a02e3c5b1acab0 100644 --- a/paddle/operators/math/gru_compute.cc +++ b/paddle/operators/math/gru_compute.cc @@ -19,83 +19,85 @@ namespace operators { namespace math { template -struct GRUUnitFunctor { - static void compute(const platform::DeviceContext &context, - hl_gru_value value, int frameSize, int batchSize, +struct GRUUnitFunctor { + static void compute(const platform::CPUDeviceContext &context, + hl_gru_value value, int frame_size, int batch_size, activation_mode_t active_node, activation_mode_t active_gate) { #ifndef __NVCC__ - if (value.prevOutValue) { - 
math::gemm( - context, false, false, batchSize, frameSize * 2, frameSize, 1, - value.prevOutValue, frameSize, value.gateWeight, frameSize * 2, 1, - value.gateValue, frameSize * 3); + if (value.prev_out_value) { + math::gemm( + context, false, false, batch_size, frame_size * 2, frame_size, 1, + value.prev_out_value, frame_size, value.gate_weight, frame_size * 2, + 1, value.gate_value, frame_size * 3); } detail::forward_reset_output(detail::forward::gru_resetOutput(), value, - frameSize, batchSize, active_gate); + frame_size, batch_size, active_gate); - if (value.prevOutValue) { - math::gemm( - context, false, false, batchSize, frameSize, frameSize, 1, - value.resetOutputValue, frameSize, value.stateWeight, frameSize, 1, - value.gateValue + frameSize * 2, frameSize * 3); + if (value.prev_out_value) { + math::gemm( + context, false, false, batch_size, frame_size, frame_size, 1, + value.reset_output_value, frame_size, value.state_weight, frame_size, + 1, value.gate_value + frame_size * 2, frame_size * 3); } detail::forward_final_output(detail::forward::gru_finalOutput(), value, - frameSize, batchSize, active_node); + frame_size, batch_size, active_node); #endif } }; template -struct GRUUnitGradFunctor { - static void compute(const platform::DeviceContext &context, - hl_gru_value value, hl_gru_grad grad, int frameSize, - int batchSize, activation_mode_t active_node, +struct GRUUnitGradFunctor { + static void compute(const platform::CPUDeviceContext &context, + hl_gru_value value, hl_gru_grad grad, + int frame_size, int batch_size, + activation_mode_t active_node, activation_mode_t active_gate) { #ifndef __NVCC__ detail::backward_state_grad(detail::backward::gru_stateGrad(), value, - grad, frameSize, batchSize, active_node); + grad, frame_size, batch_size, active_node); - if (value.prevOutValue && grad.prevOutGrad) { - math::gemm( - context, false, true, batchSize, frameSize, frameSize, 1, - grad.gateGrad + frameSize * 2, frameSize * 3, value.stateWeight, - frameSize, 0, grad.resetOutputGrad, frameSize); + if (value.prev_out_value && grad.prev_out_grad) { + math::gemm( + context, false, true, batch_size, frame_size, frame_size, 1, + grad.gate_grad + frame_size * 2, frame_size * 3, value.state_weight, + frame_size, 0, grad.reset_output_grad, frame_size); - if (grad.stateWeightGrad) { - math::gemm( - context, true, false, frameSize, frameSize, batchSize, 1, - value.resetOutputValue, frameSize, grad.gateGrad + frameSize * 2, - frameSize * 3, 1, grad.stateWeightGrad, frameSize); + if (grad.state_weight_grad) { + math::gemm( + context, true, false, frame_size, frame_size, batch_size, 1, + value.reset_output_value, frame_size, + grad.gate_grad + frame_size * 2, frame_size * 3, 1, + grad.state_weight_grad, frame_size); } } detail::backward_reset_grad(detail::backward::gru_resetGrad(), value, - grad, frameSize, batchSize, active_gate); + grad, frame_size, batch_size, active_gate); - if (grad.prevOutGrad && value.prevOutValue) { - math::gemm( - context, false, true, batchSize, frameSize, frameSize * 2, 1, - grad.gateGrad, frameSize * 3, value.gateWeight, frameSize * 2, 1, - grad.prevOutGrad, frameSize); + if (grad.prev_out_grad && value.prev_out_value) { + math::gemm( + context, false, true, batch_size, frame_size, frame_size * 2, 1, + grad.gate_grad, frame_size * 3, value.gate_weight, frame_size * 2, 1, + grad.prev_out_grad, frame_size); - if (grad.gateWeightGrad) { - math::gemm( - context, true, false, frameSize, frameSize * 2, batchSize, 1, - value.prevOutValue, frameSize, grad.gateGrad, frameSize * 
3, 1, - grad.gateWeightGrad, frameSize * 2); + if (grad.gate_weight_grad) { + math::gemm( + context, true, false, frame_size, frame_size * 2, batch_size, 1, + value.prev_out_value, frame_size, grad.gate_grad, frame_size * 3, 1, + grad.gate_weight_grad, frame_size * 2); } } #endif } }; -template struct GRUUnitFunctor; -template struct GRUUnitFunctor; -template struct GRUUnitGradFunctor; -template struct GRUUnitGradFunctor; +template struct GRUUnitFunctor; +template struct GRUUnitFunctor; +template struct GRUUnitGradFunctor; +template struct GRUUnitGradFunctor; } // namespace math } // namespace operators diff --git a/paddle/operators/math/gru_compute.cu b/paddle/operators/math/gru_compute.cu index 7b9e54ac029f6aa00553338435684097d6d02b25..dd518cd1e4bea52f0d463150114feed3ceea0ccb 100644 --- a/paddle/operators/math/gru_compute.cu +++ b/paddle/operators/math/gru_compute.cu @@ -19,159 +19,159 @@ namespace operators { namespace math { template -struct GRUUnitFunctor { - static void compute(const platform::DeviceContext &context, - hl_gru_value value, int frameSize, int batchSize, +struct GRUUnitFunctor { + static void compute(const platform::CUDADeviceContext &context, + hl_gru_value value, int frame_size, int batch_size, activation_mode_t active_node, activation_mode_t active_gate) { - auto stream = - reinterpret_cast(context).stream(); + auto stream = context.stream(); dim3 threads; dim3 grid; - if (batchSize == 1) { - int framePerBlock = frameSize <= 1024 ? frameSize : 1024; - int frameBlocks = (frameSize + 1024 - 1) / 1024; - threads = dim3(framePerBlock, 1); - grid = dim3(frameBlocks, 1); + if (batch_size == 1) { + int frame_per_block = frame_size <= 1024 ? frame_size : 1024; + int frame_blocks = (frame_size + 1024 - 1) / 1024; + threads = dim3(frame_per_block, 1); + grid = dim3(frame_blocks, 1); } else { threads = dim3(32, 32); - grid = dim3((frameSize + 32 - 1) / 32, (batchSize + 32 - 1) / 32); + grid = dim3((frame_size + 32 - 1) / 32, (batch_size + 32 - 1) / 32); } - if (value.prevOutValue) { - math::gemm( - context, false, false, batchSize, frameSize * 2, frameSize, 1, - value.prevOutValue, frameSize, value.gateWeight, frameSize * 2, 1, - value.gateValue, frameSize * 3); + if (value.prev_out_value) { + math::gemm( + context, false, false, batch_size, frame_size * 2, frame_size, 1, + value.prev_out_value, frame_size, value.gate_weight, frame_size * 2, + 1, value.gate_value, frame_size * 3); } - if (batchSize == 1) { + if (batch_size == 1) { detail::KeGruForwardResetOutput, - /* isBatch= */ false, + /* is_batch= */ false, T><<>>( - detail::forward::gru_resetOutput(), value.gateValue, - value.resetOutputValue, value.prevOutValue, frameSize, batchSize, - active_gate); + detail::forward::gru_resetOutput(), value.gate_value, + value.reset_output_value, value.prev_out_value, frame_size, + batch_size, active_gate); } else { detail::KeGruForwardResetOutput, - /* isBatch= */ true, + /* is_batch= */ true, T><<>>( - detail::forward::gru_resetOutput(), value.gateValue, - value.resetOutputValue, value.prevOutValue, frameSize, batchSize, - active_gate); + detail::forward::gru_resetOutput(), value.gate_value, + value.reset_output_value, value.prev_out_value, frame_size, + batch_size, active_gate); } - if (value.prevOutValue) { - math::gemm( - context, false, false, batchSize, frameSize, frameSize, 1, - value.resetOutputValue, frameSize, value.stateWeight, frameSize, 1, - value.gateValue + frameSize * 2, frameSize * 3); + if (value.prev_out_value) { + math::gemm( + context, false, false, batch_size, 
frame_size, frame_size, 1, + value.reset_output_value, frame_size, value.state_weight, frame_size, + 1, value.gate_value + frame_size * 2, frame_size * 3); } - if (batchSize == 1) { + if (batch_size == 1) { detail::KeGruForwardFinalOutput, - /* isBatch= */ false, + /* is_batch= */ false, T><<>>( - detail::forward::gru_finalOutput(), value.gateValue, - value.prevOutValue, value.outputValue, frameSize, batchSize, + detail::forward::gru_finalOutput(), value.gate_value, + value.prev_out_value, value.output_value, frame_size, batch_size, active_node); } else { detail::KeGruForwardFinalOutput, - /* isBatch= */ true, + /* is_batch= */ true, T><<>>( - detail::forward::gru_finalOutput(), value.gateValue, - value.prevOutValue, value.outputValue, frameSize, batchSize, + detail::forward::gru_finalOutput(), value.gate_value, + value.prev_out_value, value.output_value, frame_size, batch_size, active_node); } } }; template -struct GRUUnitGradFunctor { - static void compute(const platform::DeviceContext &context, - hl_gru_value value, hl_gru_grad grad, int frameSize, - int batchSize, activation_mode_t active_node, +struct GRUUnitGradFunctor { + static void compute(const platform::CUDADeviceContext &context, + hl_gru_value value, hl_gru_grad grad, + int frame_size, int batch_size, + activation_mode_t active_node, activation_mode_t active_gate) { - auto stream = - reinterpret_cast(context).stream(); + auto stream = context.stream(); dim3 threads; dim3 grid; - if (batchSize == 1) { - int framePerBlock = frameSize <= 1024 ? frameSize : 1024; - int frameBlocks = (frameSize + 1024 - 1) / 1024; - threads = dim3(framePerBlock, 1); - grid = dim3(frameBlocks, 1); + if (batch_size == 1) { + int frame_per_block = frame_size <= 1024 ? frame_size : 1024; + int frame_blocks = (frame_size + 1024 - 1) / 1024; + threads = dim3(frame_per_block, 1); + grid = dim3(frame_blocks, 1); } else { threads = dim3(32, 32); - grid = dim3((frameSize + 32 - 1) / 32, (batchSize + 32 - 1) / 32); + grid = dim3((frame_size + 32 - 1) / 32, (batch_size + 32 - 1) / 32); } - if (batchSize == 1) { + if (batch_size == 1) { detail::KeGruBackwardStateGrad< detail::backward::gru_stateGrad, - /* isBatch= */ false><<>>( - detail::backward::gru_stateGrad(), value.gateValue, grad.gateGrad, - value.prevOutValue, grad.prevOutGrad, grad.outputGrad, frameSize, - batchSize, active_node); + /* is_batch= */ false><<>>( + detail::backward::gru_stateGrad(), value.gate_value, + grad.gate_grad, value.prev_out_value, grad.prev_out_grad, + grad.output_grad, frame_size, batch_size, active_node); } else { detail::KeGruBackwardStateGrad< detail::backward::gru_stateGrad, - /* isBatch= */ true><<>>( - detail::backward::gru_stateGrad(), value.gateValue, grad.gateGrad, - value.prevOutValue, grad.prevOutGrad, grad.outputGrad, frameSize, - batchSize, active_node); + /* is_batch= */ true><<>>( + detail::backward::gru_stateGrad(), value.gate_value, + grad.gate_grad, value.prev_out_value, grad.prev_out_grad, + grad.output_grad, frame_size, batch_size, active_node); } - if (value.prevOutValue && grad.prevOutGrad) { - math::gemm( - context, false, true, batchSize, frameSize, frameSize, 1, - grad.gateGrad + frameSize * 2, frameSize * 3, value.stateWeight, - frameSize, 0, grad.resetOutputGrad, frameSize); - - if (grad.stateWeightGrad) { - math::gemm( - context, true, false, frameSize, frameSize, batchSize, 1, - value.resetOutputValue, frameSize, grad.gateGrad + frameSize * 2, - frameSize * 3, 1, grad.stateWeightGrad, frameSize); + if (value.prev_out_value && grad.prev_out_grad) { + 
math::gemm( + context, false, true, batch_size, frame_size, frame_size, 1, + grad.gate_grad + frame_size * 2, frame_size * 3, value.state_weight, + frame_size, 0, grad.reset_output_grad, frame_size); + + if (grad.state_weight_grad) { + math::gemm( + context, true, false, frame_size, frame_size, batch_size, 1, + value.reset_output_value, frame_size, + grad.gate_grad + frame_size * 2, frame_size * 3, 1, + grad.state_weight_grad, frame_size); } } - if (batchSize == 1) { + if (batch_size == 1) { detail::KeGruBackwardResetGrad< detail::backward::gru_resetGrad, - /* isBatch= */ false><<>>( - detail::backward::gru_resetGrad(), value.gateValue, grad.gateGrad, - value.prevOutValue, grad.prevOutGrad, grad.resetOutputGrad, frameSize, - batchSize, active_gate); + /* is_batch= */ false><<>>( + detail::backward::gru_resetGrad(), value.gate_value, + grad.gate_grad, value.prev_out_value, grad.prev_out_grad, + grad.reset_output_grad, frame_size, batch_size, active_gate); } else { detail::KeGruBackwardResetGrad< detail::backward::gru_resetGrad, - /* isBatch= */ true><<>>( - detail::backward::gru_resetGrad(), value.gateValue, grad.gateGrad, - value.prevOutValue, grad.prevOutGrad, grad.resetOutputGrad, frameSize, - batchSize, active_gate); + /* is_batch= */ true><<>>( + detail::backward::gru_resetGrad(), value.gate_value, + grad.gate_grad, value.prev_out_value, grad.prev_out_grad, + grad.reset_output_grad, frame_size, batch_size, active_gate); } - if (grad.prevOutGrad && value.prevOutValue) { - math::gemm( - context, false, true, batchSize, frameSize, frameSize * 2, 1, - grad.gateGrad, frameSize * 3, value.gateWeight, frameSize * 2, 1, - grad.prevOutGrad, frameSize); - - if (grad.gateWeightGrad) { - math::gemm( - context, true, false, frameSize, frameSize * 2, batchSize, 1, - value.prevOutValue, frameSize, grad.gateGrad, frameSize * 3, 1, - grad.gateWeightGrad, frameSize * 2); + if (grad.prev_out_grad && value.prev_out_value) { + math::gemm( + context, false, true, batch_size, frame_size, frame_size * 2, 1, + grad.gate_grad, frame_size * 3, value.gate_weight, frame_size * 2, 1, + grad.prev_out_grad, frame_size); + + if (grad.gate_weight_grad) { + math::gemm( + context, true, false, frame_size, frame_size * 2, batch_size, 1, + value.prev_out_value, frame_size, grad.gate_grad, frame_size * 3, 1, + grad.gate_weight_grad, frame_size * 2); } } } }; -template struct GRUUnitFunctor; -template struct GRUUnitFunctor; -template struct GRUUnitGradFunctor; -template struct GRUUnitGradFunctor; +template struct GRUUnitFunctor; +template struct GRUUnitFunctor; +template struct GRUUnitGradFunctor; +template struct GRUUnitGradFunctor; } // namespace math } // namespace operators diff --git a/paddle/operators/math/gru_compute.h b/paddle/operators/math/gru_compute.h index 1475fb38104f353857dfd968e46af98a6d52c52a..ca1343cb2c5c1eb8da92c2f06b25902c1c2fe8b3 100644 --- a/paddle/operators/math/gru_compute.h +++ b/paddle/operators/math/gru_compute.h @@ -22,37 +22,37 @@ namespace math { // TODO(guosheng): refine code style in gru_compute template struct hl_gru_value { - T *gateWeight; - T *stateWeight; - T *gateValue; - T *resetOutputValue; - T *outputValue; - T *prevOutValue; + T *gate_weight; + T *state_weight; + T *gate_value; + T *reset_output_value; + T *output_value; + T *prev_out_value; }; template struct hl_gru_grad { - T *gateWeightGrad; - T *stateWeightGrad; - T *gateGrad; - T *resetOutputGrad; - T *outputGrad; - T *prevOutGrad; + T *gate_weight_grad; + T *state_weight_grad; + T *gate_grad; + T *reset_output_grad; + T 
*output_grad; + T *prev_out_grad; }; -template +template struct GRUUnitFunctor { - static void compute(const platform::DeviceContext &context, - hl_gru_value value, int frameSize, int batchSize, + static void compute(const DeviceContext &context, hl_gru_value value, + int frame_size, int batch_size, activation_mode_t active_node, activation_mode_t active_gate); }; -template +template struct GRUUnitGradFunctor { - static void compute(const platform::DeviceContext &context, - hl_gru_value value, hl_gru_grad grad, int frameSize, - int batchSize, activation_mode_t active_node, + static void compute(const DeviceContext &context, hl_gru_value value, + hl_gru_grad grad, int frame_size, int batch_size, + activation_mode_t active_node, activation_mode_t active_gate); }; diff --git a/paddle/operators/math/im2col.cc b/paddle/operators/math/im2col.cc index c10c44c52076c8ee56eee3a0d82c31df70a1c9c7..707ebf05962fb65892c2adbbf41a0a3449763d31 100644 --- a/paddle/operators/math/im2col.cc +++ b/paddle/operators/math/im2col.cc @@ -25,9 +25,9 @@ namespace math { */ template class Im2ColFunctor { + platform::CPUDeviceContext, T> { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& im, const std::vector& dilation, const std::vector& stride, const std::vector& padding, framework::Tensor* col) { @@ -90,9 +90,9 @@ class Im2ColFunctor class Col2ImFunctor { + platform::CPUDeviceContext, T> { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& col, const std::vector& dilation, const std::vector& stride, @@ -149,13 +149,13 @@ class Col2ImFunctor; + platform::CPUDeviceContext, float>; template class Im2ColFunctor; + platform::CPUDeviceContext, double>; template class Col2ImFunctor; + platform::CPUDeviceContext, float>; template class Col2ImFunctor; + platform::CPUDeviceContext, double>; /* * im = [input_channels, input_height, input_width] @@ -164,9 +164,9 @@ template class Col2ImFunctor class Im2ColFunctor { + platform::CPUDeviceContext, T> { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& im, const std::vector& dilation, const std::vector& stride, const std::vector& padding, framework::Tensor* col) { @@ -235,9 +235,9 @@ class Im2ColFunctor class Col2ImFunctor { + platform::CPUDeviceContext, T> { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& col, const std::vector& dilation, const std::vector& stride, @@ -300,13 +300,13 @@ class Col2ImFunctor; + platform::CPUDeviceContext, float>; template class Im2ColFunctor; + platform::CPUDeviceContext, double>; template class Col2ImFunctor; + platform::CPUDeviceContext, float>; template class Col2ImFunctor; + platform::CPUDeviceContext, double>; } // namespace math } // namespace operators diff --git a/paddle/operators/math/im2col.cu b/paddle/operators/math/im2col.cu index bf7894243919571c2ab15d53690b1ef05bfcc6ee..a88e837b030f286cce272f99ad7991c70336e4a9 100644 --- a/paddle/operators/math/im2col.cu +++ b/paddle/operators/math/im2col.cu @@ -58,9 +58,9 @@ __global__ void im2col(const T* data_im, int num_outs, int im_height, */ template class Im2ColFunctor { + platform::CUDADeviceContext, T> { public: - void operator()(const platform::DeviceContext& context, + void operator()(const 
platform::CUDADeviceContext& context, const framework::Tensor& im, const std::vector& dilation, const std::vector& stride, const std::vector& padding, framework::Tensor* col) { @@ -96,9 +96,7 @@ class Im2ColFunctor<<(context) - .stream()>>>( + im2col<<>>( im.data(), num_outputs, im_height, im_width, dilation[0], dilation[1], filter_height, filter_width, stride[0], stride[1], padding[0], padding[1], col_height, col_width, col->data()); @@ -160,9 +158,9 @@ __global__ void col2im(int n, const T* data_col, int im_height, int im_width, */ template class Col2ImFunctor { + platform::CUDADeviceContext, T> { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& col, const std::vector& dilation, const std::vector& stride, @@ -203,9 +201,7 @@ class Col2ImFunctor<<(context) - .stream()>>>( + col2im<<>>( num_kernels, col.data(), im_height, im_width, dilation[0], dilation[1], filter_height, filter_width, stride[0], stride[1], padding[0], padding[2], col_height, col_width, im->data()); @@ -213,13 +209,13 @@ class Col2ImFunctor; + platform::CUDADeviceContext, float>; template class Im2ColFunctor; + platform::CUDADeviceContext, double>; template class Col2ImFunctor; + platform::CUDADeviceContext, float>; template class Col2ImFunctor; + platform::CUDADeviceContext, double>; template __global__ void im2colOCF(const T* im_data, int im_channels, int im_height, @@ -260,9 +256,9 @@ __global__ void im2colOCF(const T* im_data, int im_channels, int im_height, */ template class Im2ColFunctor { + platform::CUDADeviceContext, T> { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& im, const std::vector& dilation, const std::vector& stride, const std::vector& padding, framework::Tensor* col) { @@ -310,9 +306,7 @@ class Im2ColFunctor<<(context) - .stream()>>>( + im2colOCF<<>>( im.data(), im_channels, im_height, im_width, filter_height, filter_width, stride[0], stride[1], padding[0], padding[1], col_height, col_width, col->data()); @@ -358,9 +352,9 @@ __global__ void col2imOCF(const T* col_data, int im_channels, int im_height, */ template class Col2ImFunctor { + platform::CUDADeviceContext, T> { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& col, const std::vector& dilation, const std::vector& stride, @@ -409,9 +403,7 @@ class Col2ImFunctor<<(context) - .stream()>>>( + col2imOCF<<>>( col.data(), im_channels, im_height, im_width, filter_height, filter_width, stride[0], stride[1], padding[0], padding[1], col_height, col_width, im->data()); @@ -419,13 +411,13 @@ class Col2ImFunctor; + platform::CUDADeviceContext, float>; template class Im2ColFunctor; + platform::CUDADeviceContext, double>; template class Col2ImFunctor; + platform::CUDADeviceContext, float>; template class Col2ImFunctor; + platform::CUDADeviceContext, double>; } // namespace math } // namespace operators diff --git a/paddle/operators/math/im2col.h b/paddle/operators/math/im2col.h index 24fd9a06e9f5fbd50483429379cf3f46ff88bcaa..38f2c9fe0adf80a2a4355a45bebb9ba0f341d1ab 100644 --- a/paddle/operators/math/im2col.h +++ b/paddle/operators/math/im2col.h @@ -79,20 +79,19 @@ enum class ColFormat { kCFO = 0, kOCF = 1 }; * \note The caller needs to ensure that imShape.inputChannels is equal to * colShape.inputChannels. 
*/ -template +template class Im2ColFunctor { public: - void operator()(const platform::DeviceContext& context, - const framework::Tensor& im, const std::vector& dilation, + void operator()(const DeviceContext& context, const framework::Tensor& im, + const std::vector& dilation, const std::vector& stride, const std::vector& padding, framework::Tensor* col); }; -template +template class Col2ImFunctor { public: - void operator()(const platform::DeviceContext& context, - const framework::Tensor& col, + void operator()(const DeviceContext& context, const framework::Tensor& col, const std::vector& dilation, const std::vector& stride, const std::vector& padding, framework::Tensor* im); diff --git a/paddle/operators/math/im2col_test.cc b/paddle/operators/math/im2col_test.cc index ae197a97ed8aa089b51be77a59a8ba6a98ac70ec..256f3bc9bd487d11b0f139ef057f5a98556b4db1 100644 --- a/paddle/operators/math/im2col_test.cc +++ b/paddle/operators/math/im2col_test.cc @@ -16,7 +16,7 @@ limitations under the License. */ #include #include -template +template void testIm2col() { paddle::framework::Tensor input_tmp; paddle::framework::Tensor input; @@ -59,18 +59,7 @@ void testIm2col() { memcpy(input_ptr, arr, 6 * sizeof(float)); auto* place = new Place(); - paddle::platform::DeviceContext* context; - if (paddle::platform::is_cpu_place(*place)) { - context = - new paddle::platform::CPUDeviceContext(paddle::platform::CPUPlace()); - } else { -#ifdef PADDLE_WITH_CUDA - context = - new paddle::platform::CUDADeviceContext(paddle::platform::GPUPlace()); -#else - PADDLE_THROW("no GPU support"); -#endif // PADDLE_WITH_CUDA - } + DeviceContext* context = new DeviceContext(*place); if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { @@ -83,10 +72,10 @@ void testIm2col() { // Im2Col paddle::operators::math::Im2ColFunctor< - paddle::operators::math::ColFormat::kCFO, Place, float> + paddle::operators::math::ColFormat::kCFO, DeviceContext, float> im2col; paddle::operators::math::Im2ColFunctor< - paddle::operators::math::ColFormat::kOCF, Place, float> + paddle::operators::math::ColFormat::kOCF, DeviceContext, float> im2col_ocf; im2col(*context, input, dilation, stride, padding, &output_cfo); @@ -119,10 +108,10 @@ void testIm2col() { // Col2Im: kCFO paddle::operators::math::Col2ImFunctor< - paddle::operators::math::ColFormat::kCFO, Place, float> + paddle::operators::math::ColFormat::kCFO, DeviceContext, float> col2im; paddle::operators::math::Col2ImFunctor< - paddle::operators::math::ColFormat::kOCF, Place, float> + paddle::operators::math::ColFormat::kOCF, DeviceContext, float> col2im_ocf; float col2im_data[] = {0, 2, 2, 3, 8, 5}; @@ -168,8 +157,8 @@ void testIm2col() { } TEST(math, im2col) { - testIm2col(); + testIm2col(); #ifdef PADDLE_WITH_CUDA - testIm2col(); + testIm2col(); #endif } diff --git a/paddle/operators/math/lstm_compute.cc b/paddle/operators/math/lstm_compute.cc index 0febf8e3b70111d12f858cf6259a2801a42d9a90..2c2e8bb82e6f51e21a00de53bbfce5f0b4868e27 100644 --- a/paddle/operators/math/lstm_compute.cc +++ b/paddle/operators/math/lstm_compute.cc @@ -21,8 +21,8 @@ namespace operators { namespace math { template -struct LstmUnitFunctor { - static void compute(const platform::DeviceContext& context, +struct LstmUnitFunctor { + static void compute(const platform::CPUDeviceContext& context, LstmMetaValue value, int frame_size, int batch_size, const std::string& gate_act, const std::string& cell_act, const std::string& cand_act) { @@ -30,20 +30,20 @@ struct LstmUnitFunctor { 
detail::cpu_lstm_forward(detail::forward::lstm(), value, frame_size, ActiveType(cand_act), ActiveType(gate_act), ActiveType(cell_act)); - value.gateValue += frame_size * 4; - value.stateValue += frame_size; - value.stateActiveValue += frame_size; - value.outputValue += frame_size; - if (value.prevStateValue) { - value.prevStateValue += frame_size; + value.gate_value += frame_size * 4; + value.state_value += frame_size; + value.state_active_value += frame_size; + value.output_value += frame_size; + if (value.prev_state_value) { + value.prev_state_value += frame_size; } } } }; template -struct LstmUnitGradFunctor { - static void compute(const platform::DeviceContext& context, +struct LstmUnitGradFunctor { + static void compute(const platform::CPUDeviceContext& context, LstmMetaValue value, LstmMetaGrad grad, int frame_size, int batch_size, const std::string& gate_act, const std::string& cell_act, @@ -53,29 +53,29 @@ struct LstmUnitGradFunctor { frame_size, ActiveType(cand_act), ActiveType(gate_act), ActiveType(cell_act)); - value.gateValue += frame_size * 4; - value.stateValue += frame_size; - value.stateActiveValue += frame_size; - value.outputValue += frame_size; - if (value.prevStateValue) { - value.prevStateValue += frame_size; + value.gate_value += frame_size * 4; + value.state_value += frame_size; + value.state_active_value += frame_size; + value.output_value += frame_size; + if (value.prev_state_value) { + value.prev_state_value += frame_size; } - grad.gateGrad += frame_size * 4; - grad.stateGrad += frame_size; - grad.stateActiveGrad += frame_size; - grad.outputGrad += frame_size; - if (grad.prevStateGrad) { - grad.prevStateGrad += frame_size; + grad.gate_grad += frame_size * 4; + grad.state_grad += frame_size; + grad.state_active_grad += frame_size; + grad.output_grad += frame_size; + if (grad.prev_state_grad) { + grad.prev_state_grad += frame_size; } } } }; -template class LstmUnitFunctor; -template class LstmUnitFunctor; -template class LstmUnitGradFunctor; -template class LstmUnitGradFunctor; +template class LstmUnitFunctor; +template class LstmUnitFunctor; +template class LstmUnitGradFunctor; +template class LstmUnitGradFunctor; } // namespace math } // namespace operators diff --git a/paddle/operators/math/lstm_compute.cu b/paddle/operators/math/lstm_compute.cu index b2122f2a5c08a6d9d53293833177f0ba2c3ab860..92b1f4228b49709d2903fab518e7649133932fad 100644 --- a/paddle/operators/math/lstm_compute.cu +++ b/paddle/operators/math/lstm_compute.cu @@ -21,8 +21,8 @@ namespace operators { namespace math { template -struct LstmUnitFunctor { - static void compute(const platform::DeviceContext& context, +struct LstmUnitFunctor { + static void compute(const platform::CUDADeviceContext& context, LstmMetaValue value, int frame_size, int batch_size, const std::string& gate_act, const std::string& cell_act, const std::string& cand_act) { @@ -33,8 +33,8 @@ struct LstmUnitFunctor { }; template -struct LstmUnitGradFunctor { - static void compute(const platform::DeviceContext& context, +struct LstmUnitGradFunctor { + static void compute(const platform::CUDADeviceContext& context, LstmMetaValue value, LstmMetaGrad grad, int frame_size, int batch_size, const std::string& gate_act, const std::string& cell_act, @@ -45,10 +45,10 @@ struct LstmUnitGradFunctor { } }; -template class LstmUnitFunctor; -template class LstmUnitFunctor; -template class LstmUnitGradFunctor; -template class LstmUnitGradFunctor; +template class LstmUnitFunctor; +template class LstmUnitFunctor; +template class LstmUnitGradFunctor; 
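The CPU LstmUnitFunctor / LstmUnitGradFunctor bodies above walk the batch by sliding every pointer in the meta-value and meta-grad structs past the row that was just processed. Below is a minimal standalone sketch of that pointer-advancing pattern; the struct and function names are hypothetical stand-ins, not the real LstmMetaValue.

struct StepPointers {
  float* gate_value;        // 4 * frame_size entries per batch row (in, ig, fg, og)
  float* state_value;       // frame_size entries per batch row
  float* output_value;      // frame_size entries per batch row
  float* prev_state_value;  // may be null (e.g. at the first time step)
};

void process_batch(StepPointers v, int frame_size, int batch_size) {
  for (int b = 0; b < batch_size; ++b) {
    // ... per-row LSTM work on v.gate_value[0 .. 4 * frame_size) etc. goes here ...

    // Advance every pointer past the row just consumed, as the functor above does.
    v.gate_value += frame_size * 4;
    v.state_value += frame_size;
    v.output_value += frame_size;
    if (v.prev_state_value) {
      v.prev_state_value += frame_size;
    }
  }
}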
+template class LstmUnitGradFunctor; } // namespace math } // namespace operators diff --git a/paddle/operators/math/lstm_compute.h b/paddle/operators/math/lstm_compute.h index 28d2c6fd3b0d8143da90c37f241072e37397f98b..5f74e273585aea5184281bf294df694235150e30 100644 --- a/paddle/operators/math/lstm_compute.h +++ b/paddle/operators/math/lstm_compute.h @@ -31,26 +31,26 @@ typedef enum { template struct LstmMetaValue { - T *gateValue; - T *prevStateValue; - T *stateValue; - T *stateActiveValue; - T *outputValue; - T *checkIg; - T *checkFg; - T *checkOg; + T *gate_value; + T *prev_state_value; + T *state_value; + T *state_active_value; + T *output_value; + T *check_ig; + T *check_fg; + T *check_og; }; template struct LstmMetaGrad { - T *gateGrad; - T *prevStateGrad; - T *stateGrad; - T *stateActiveGrad; - T *outputGrad; - T *checkIgGrad; - T *checkFgGrad; - T *checkOgGrad; + T *gate_grad; + T *prev_state_grad; + T *state_grad; + T *state_active_grad; + T *output_grad; + T *check_ig_grad; + T *check_fg_grad; + T *check_og_grad; }; inline activation_mode_t ActiveType(const std::string &type) { @@ -67,21 +67,20 @@ inline activation_mode_t ActiveType(const std::string &type) { } } -template +template class LstmUnitFunctor { public: - static void compute(const platform::DeviceContext &context, - LstmMetaValue value, int frame_size, int batch_size, + static void compute(const DeviceContext &context, LstmMetaValue value, + int frame_size, int batch_size, const std::string &gate_act, const std::string &cell_act, const std::string &cand_act); }; -template +template class LstmUnitGradFunctor { public: - static void compute(const platform::DeviceContext &context, - LstmMetaValue value, LstmMetaGrad grad, - int frame_size, int batch_size, + static void compute(const DeviceContext &context, LstmMetaValue value, + LstmMetaGrad grad, int frame_size, int batch_size, const std::string &gate_act, const std::string &cell_act, const std::string &cand_act); }; diff --git a/paddle/operators/math/math_function.cc b/paddle/operators/math/math_function.cc index 3bc0945fe3a7aee50f0d4501926af11fa469a55e..ead0fe1971387c43875327b4194a3fadfa6a2736 100644 --- a/paddle/operators/math/math_function.cc +++ b/paddle/operators/math/math_function.cc @@ -21,13 +21,11 @@ namespace operators { namespace math { template <> -void gemm(const platform::DeviceContext& context, - const CBLAS_TRANSPOSE transA, - const CBLAS_TRANSPOSE transB, const int M, - const int N, const int K, - const float alpha, const float* A, - const float* B, const float beta, - float* C) { +void gemm( + const platform::CPUDeviceContext& context, const CBLAS_TRANSPOSE transA, + const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, + const float alpha, const float* A, const float* B, const float beta, + float* C) { int lda = (transA == CblasNoTrans) ? K : M; int ldb = (transB == CblasNoTrans) ? N : K; int ldc = N; @@ -36,13 +34,11 @@ void gemm(const platform::DeviceContext& context, } template <> -void gemm(const platform::DeviceContext& context, - const CBLAS_TRANSPOSE transA, - const CBLAS_TRANSPOSE transB, const int M, - const int N, const int K, - const double alpha, const double* A, - const double* B, const double beta, - double* C) { +void gemm( + const platform::CPUDeviceContext& context, const CBLAS_TRANSPOSE transA, + const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, + const double alpha, const double* A, const double* B, const double beta, + double* C) { int lda = (transA == CblasNoTrans) ? 
K : M; int ldb = (transB == CblasNoTrans) ? N : K; int ldc = N; @@ -51,35 +47,32 @@ void gemm(const platform::DeviceContext& context, } template <> -void gemm(const platform::DeviceContext& context, - const bool transA, const bool transB, - const int M, const int N, const int K, - const float alpha, const float* A, - const int lda, const float* B, - const int ldb, const float beta, float* C, - const int ldc) { +void gemm( + const platform::CPUDeviceContext& context, const bool transA, + const bool transB, const int M, const int N, const int K, const float alpha, + const float* A, const int lda, const float* B, const int ldb, + const float beta, float* C, const int ldc) { cblas_sgemm(CblasRowMajor, transA == false ? CblasNoTrans : CblasTrans, transB == false ? CblasNoTrans : CblasTrans, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc); } template <> -void gemm(const platform::DeviceContext& context, - const bool transA, const bool transB, - const int M, const int N, const int K, - const double alpha, const double* A, - const int lda, const double* B, - const int ldb, const double beta, - double* C, const int ldc) { +void gemm( + const platform::CPUDeviceContext& context, const bool transA, + const bool transB, const int M, const int N, const int K, + const double alpha, const double* A, const int lda, const double* B, + const int ldb, const double beta, double* C, const int ldc) { cblas_dgemm(CblasRowMajor, transA == false ? CblasNoTrans : CblasTrans, transB == false ? CblasNoTrans : CblasTrans, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc); } template <> -void matmul( - const platform::DeviceContext& context, const framework::Tensor& matrix_a, - bool trans_a, const framework::Tensor& matrix_b, bool trans_b, float alpha, +void matmul( + const platform::CPUDeviceContext& context, + const framework::Tensor& matrix_a, bool trans_a, + const framework::Tensor& matrix_b, bool trans_b, float alpha, framework::Tensor* matrix_out, float beta) { auto dim_a = matrix_a.dims(); auto dim_b = matrix_b.dims(); @@ -99,15 +92,16 @@ void matmul( CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans; CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans; - gemm( + gemm( context, transA, transB, M, N, K, alpha, matrix_a.data(), matrix_b.data(), beta, matrix_out->data()); } template <> -void matmul( - const platform::DeviceContext& context, const framework::Tensor& matrix_a, - bool trans_a, const framework::Tensor& matrix_b, bool trans_b, double alpha, +void matmul( + const platform::CPUDeviceContext& context, + const framework::Tensor& matrix_a, bool trans_a, + const framework::Tensor& matrix_b, bool trans_b, double alpha, framework::Tensor* matrix_out, double beta) { auto dim_a = matrix_a.dims(); auto dim_b = matrix_b.dims(); @@ -127,16 +121,16 @@ void matmul( CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans; CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans; - gemm( + gemm( context, transA, transB, M, N, K, alpha, matrix_a.data(), matrix_b.data(), beta, matrix_out->data()); } -#ifdef PADDLE_USE_MKLML +#ifdef PADDLE_WITH_MKLML // Use cblas_{s,d}gemm_batched if available: Run with 1 group of size batchSize. 
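The CPU specializations in this hunk are thin wrappers over CBLAS; the batched variants either use MKL's batched GEMM entry point (per the comment above) or fall back to a plain loop of single GEMMs with fixed strides. The loop below is a minimal standalone sketch of that fallback for float (hypothetical function name; assumes a CBLAS implementation such as OpenBLAS or MKL is linked).

#include <cblas.h>

// C_k = alpha * A_k * B_k + beta * C_k for each of batch_count problems laid out
// back to back with the given strides, matching the non-MKL batched_gemm path.
void naive_batched_sgemm(int M, int N, int K, float alpha, const float* A,
                         const float* B, float beta, float* C, int batch_count,
                         int strideA, int strideB) {
  for (int k = 0; k < batch_count; ++k) {
    // Row-major, no transpose: lda = K, ldb = N, ldc = N, as in the wrappers above.
    cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, M, N, K, alpha,
                A + k * strideA, K, B + k * strideB, N, beta, C + k * M * N, N);
  }
}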
template <> -void batched_gemm( - const platform::DeviceContext& context, const CBLAS_TRANSPOSE transA, +void batched_gemm( + const platform::CPUDeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, const int batchCount, const int strideA, const int strideB) { @@ -157,8 +151,8 @@ void batched_gemm( } template <> -void batched_gemm( - const platform::DeviceContext& context, const CBLAS_TRANSPOSE transA, +void batched_gemm( + const platform::CPUDeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C, const int batchCount, const int strideA, const int strideB) { @@ -183,8 +177,8 @@ void batched_gemm( // functions of Intel MKL are not available. In the future, this computation // should be parallelized. template <> -void batched_gemm( - const platform::DeviceContext& context, const CBLAS_TRANSPOSE transA, +void batched_gemm( + const platform::CPUDeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, const int batchCount, const int strideA, const int strideB) { @@ -192,14 +186,14 @@ void batched_gemm( const float* Ak = &A[k * strideA]; const float* Bk = &B[k * strideB]; float* Ck = &C[k * M * N]; - gemm(context, transA, transB, M, N, K, alpha, Ak, - Bk, beta, Ck); + gemm(context, transA, transB, M, N, K, + alpha, Ak, Bk, beta, Ck); } } template <> -void batched_gemm( - const platform::DeviceContext& context, const CBLAS_TRANSPOSE transA, +void batched_gemm( + const platform::CPUDeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C, const int batchCount, const int strideA, const int strideB) { @@ -207,55 +201,53 @@ void batched_gemm( const double* Ak = &A[k * strideA]; const double* Bk = &B[k * strideB]; double* Ck = &C[k * M * N]; - gemm(context, transA, transB, M, N, K, alpha, - Ak, Bk, beta, Ck); + gemm(context, transA, transB, M, N, K, + alpha, Ak, Bk, beta, Ck); } } #endif template <> -void gemv(const platform::DeviceContext& context, - const bool trans_a, const int M, - const int N, const float alpha, - const float* A, const float* B, - const float beta, float* C) { +void gemv( + const platform::CPUDeviceContext& context, const bool trans_a, const int M, + const int N, const float alpha, const float* A, const float* B, + const float beta, float* C) { CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans; cblas_sgemv(CblasRowMajor, transA, M, N, alpha, A, N, B, 1, beta, C, 1); } template <> -void gemv(const platform::DeviceContext& context, - const bool trans_a, const int M, - const int N, const double alpha, - const double* A, const double* B, - const double beta, double* C) { +void gemv( + const platform::CPUDeviceContext& context, const bool trans_a, const int M, + const int N, const double alpha, const double* A, const double* B, + const double beta, double* C) { CBLAS_TRANSPOSE transA = (trans_a == false) ? 
CblasNoTrans : CblasTrans; cblas_dgemv(CblasRowMajor, transA, M, N, alpha, A, N, B, 1, beta, C, 1); } template <> -void axpy(const platform::DeviceContext& context, - const int n, const float alpha, - const float* x, float* y) { +void axpy( + const platform::CPUDeviceContext& context, const int n, const float alpha, + const float* x, float* y) { cblas_saxpy(n, alpha, x, 1, y, 1); } template <> -void axpy(const platform::DeviceContext& context, - const int n, const double alpha, - const double* x, double* y) { +void axpy( + const platform::CPUDeviceContext& context, const int n, const double alpha, + const double* x, double* y) { cblas_daxpy(n, alpha, x, 1, y, 1); } -template struct SetConstant; -template struct SetConstant; -template struct SetConstant; -template struct SetConstant; -template struct SetConstant; +template struct SetConstant; +template struct SetConstant; +template struct SetConstant; +template struct SetConstant; +template struct SetConstant; -#define DEFINE_CPU_TRANS(RANK) \ - template struct Transpose; \ - template struct Transpose; +#define DEFINE_CPU_TRANS(RANK) \ + template struct Transpose; \ + template struct Transpose; DEFINE_CPU_TRANS(1); DEFINE_CPU_TRANS(2); diff --git a/paddle/operators/math/math_function.cu b/paddle/operators/math/math_function.cu index 6b5f1ed9aed204a26ecb8a2b7868edd5a08b7d7e..4ed82b458857c51611e552509a0cf5f5f6617b49 100644 --- a/paddle/operators/math/math_function.cu +++ b/paddle/operators/math/math_function.cu @@ -22,13 +22,11 @@ namespace operators { namespace math { template <> -void gemm(const platform::DeviceContext& context, - const CBLAS_TRANSPOSE transA, - const CBLAS_TRANSPOSE transB, const int M, - const int N, const int K, - const float alpha, const float* A, - const float* B, const float beta, - float* C) { +void gemm( + const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA, + const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, + const float alpha, const float* A, const float* B, const float beta, + float* C) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. int lda = (transA == CblasNoTrans) ? K : M; @@ -39,19 +37,16 @@ void gemm(const platform::DeviceContext& context, (transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; PADDLE_ENFORCE(platform::dynload::cublasSgemm( - reinterpret_cast(context) - .cublas_handle(), - cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); + context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, + lda, &beta, C, N)); } template <> -void gemm(const platform::DeviceContext& context, - const CBLAS_TRANSPOSE transA, - const CBLAS_TRANSPOSE transB, const int M, - const int N, const int K, - const double alpha, const double* A, - const double* B, const double beta, - double* C) { +void gemm( + const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA, + const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, + const double alpha, const double* A, const double* B, const double beta, + double* C) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. int lda = (transA == CblasNoTrans) ? K : M; @@ -61,51 +56,45 @@ void gemm(const platform::DeviceContext& context, cublasOperation_t cuTransB = (transB == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; PADDLE_ENFORCE(platform::dynload::cublasDgemm( - reinterpret_cast(context) - .cublas_handle(), - cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); + context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, + lda, &beta, C, N)); } template <> -void gemm(const platform::DeviceContext& context, - const bool transA, const bool transB, - const int M, const int N, const int K, - const float alpha, const float* A, - const int lda, const float* B, - const int ldb, const float beta, float* C, - const int ldc) { +void gemm( + const platform::CUDADeviceContext& context, const bool transA, + const bool transB, const int M, const int N, const int K, const float alpha, + const float* A, const int lda, const float* B, const int ldb, + const float beta, float* C, const int ldc) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. cublasOperation_t cuTransA = transA == false ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = transB == false ? CUBLAS_OP_N : CUBLAS_OP_T; PADDLE_ENFORCE(platform::dynload::cublasSgemm( - reinterpret_cast(context) - .cublas_handle(), - cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, ldc)); + context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, + lda, &beta, C, ldc)); } template <> -void gemm(const platform::DeviceContext& context, - const bool transA, const bool transB, - const int M, const int N, const int K, - const double alpha, const double* A, - const int lda, const double* B, - const int ldb, const double beta, - double* C, const int ldc) { +void gemm( + const platform::CUDADeviceContext& context, const bool transA, + const bool transB, const int M, const int N, const int K, + const double alpha, const double* A, const int lda, const double* B, + const int ldb, const double beta, double* C, const int ldc) { // Note that cublas follows fortran order, so the order is different from // the cblas convention. cublasOperation_t cuTransA = transA == false ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = transB == false ? CUBLAS_OP_N : CUBLAS_OP_T; PADDLE_ENFORCE(platform::dynload::cublasDgemm( - reinterpret_cast(context) - .cublas_handle(), - cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, ldc)); + context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, + lda, &beta, C, ldc)); } template <> -void matmul( - const platform::DeviceContext& context, const framework::Tensor& matrix_a, - bool trans_a, const framework::Tensor& matrix_b, bool trans_b, float alpha, +void matmul( + const platform::CUDADeviceContext& context, + const framework::Tensor& matrix_a, bool trans_a, + const framework::Tensor& matrix_b, bool trans_b, float alpha, framework::Tensor* matrix_out, float beta) { auto dim_a = matrix_a.dims(); auto dim_b = matrix_b.dims(); @@ -125,15 +114,16 @@ void matmul( CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans; CBLAS_TRANSPOSE transB = (trans_b == false) ? 
CblasNoTrans : CblasTrans; - gemm( + gemm( context, transA, transB, M, N, K, alpha, matrix_a.data(), matrix_b.data(), beta, matrix_out->data()); } template <> -void matmul( - const platform::DeviceContext& context, const framework::Tensor& matrix_a, - bool trans_a, const framework::Tensor& matrix_b, bool trans_b, double alpha, +void matmul( + const platform::CUDADeviceContext& context, + const framework::Tensor& matrix_a, bool trans_a, + const framework::Tensor& matrix_b, bool trans_b, double alpha, framework::Tensor* matrix_out, double beta) { auto dim_a = matrix_a.dims(); auto dim_b = matrix_b.dims(); @@ -153,14 +143,14 @@ void matmul( CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans; CBLAS_TRANSPOSE transB = (trans_b == false) ? CblasNoTrans : CblasTrans; - gemm( + gemm( context, transA, transB, M, N, K, alpha, matrix_a.data(), matrix_b.data(), beta, matrix_out->data()); } template <> -void batched_gemm( - const platform::DeviceContext& context, const CBLAS_TRANSPOSE transA, +void batched_gemm( + const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, const int batchCount, const int strideA, const int strideB) { @@ -176,15 +166,13 @@ void batched_gemm( const int strideC = M * N; PADDLE_ENFORCE(platform::dynload::cublasSgemmStridedBatched( - reinterpret_cast(context) - .cublas_handle(), - cuTransB, cuTransA, N, M, K, &alpha, B, ldb, strideB, A, lda, strideA, - &beta, C, ldc, strideC, batchCount)); + context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, + strideB, A, lda, strideA, &beta, C, ldc, strideC, batchCount)); } template <> -void batched_gemm( - const platform::DeviceContext& context, const CBLAS_TRANSPOSE transA, +void batched_gemm( + const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C, const int batchCount, const int strideA, const int strideB) { @@ -200,68 +188,58 @@ void batched_gemm( const int strideC = M * N; PADDLE_ENFORCE(platform::dynload::cublasDgemmStridedBatched( - reinterpret_cast(context) - .cublas_handle(), - cuTransB, cuTransA, N, M, K, &alpha, B, ldb, strideB, A, lda, strideA, - &beta, C, ldc, strideC, batchCount)); + context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, + strideB, A, lda, strideA, &beta, C, ldc, strideC, batchCount)); } template <> -void gemv(const platform::DeviceContext& context, - const bool trans_a, const int M, - const int N, const float alpha, - const float* A, const float* B, - const float beta, float* C) { +void gemv( + const platform::CUDADeviceContext& context, const bool trans_a, const int M, + const int N, const float alpha, const float* A, const float* B, + const float beta, float* C) { cublasOperation_t cuTransA = (trans_a == false) ? 
CUBLAS_OP_T : CUBLAS_OP_N; - PADDLE_ENFORCE(platform::dynload::cublasSgemv( - reinterpret_cast(context) - .cublas_handle(), - cuTransA, N, M, &alpha, A, N, B, 1, &beta, C, 1)); + PADDLE_ENFORCE(platform::dynload::cublasSgemv(context.cublas_handle(), + cuTransA, N, M, &alpha, A, N, B, + 1, &beta, C, 1)); } template <> -void gemv(const platform::DeviceContext& context, - const bool trans_a, const int M, - const int N, const double alpha, - const double* A, const double* B, - const double beta, double* C) { +void gemv( + const platform::CUDADeviceContext& context, const bool trans_a, const int M, + const int N, const double alpha, const double* A, const double* B, + const double beta, double* C) { cublasOperation_t cuTransA = (trans_a == false) ? CUBLAS_OP_T : CUBLAS_OP_N; - PADDLE_ENFORCE(platform::dynload::cublasDgemv( - reinterpret_cast(context) - .cublas_handle(), - cuTransA, N, M, &alpha, A, N, B, 1, &beta, C, 1)); + PADDLE_ENFORCE(platform::dynload::cublasDgemv(context.cublas_handle(), + cuTransA, N, M, &alpha, A, N, B, + 1, &beta, C, 1)); } template <> -void axpy(const platform::DeviceContext& context, - const int n, const float alpha, - const float* x, float* y) { - PADDLE_ENFORCE(platform::dynload::cublasSaxpy( - reinterpret_cast(context) - .cublas_handle(), - n, &alpha, x, 1, y, 1)); +void axpy( + const platform::CUDADeviceContext& context, const int n, const float alpha, + const float* x, float* y) { + PADDLE_ENFORCE(platform::dynload::cublasSaxpy(context.cublas_handle(), n, + &alpha, x, 1, y, 1)); } template <> -void axpy(const platform::DeviceContext& context, - const int n, const double alpha, - const double* x, double* y) { - PADDLE_ENFORCE(platform::dynload::cublasDaxpy( - reinterpret_cast(context) - .cublas_handle(), - n, &alpha, x, 1, y, 1)); +void axpy( + const platform::CUDADeviceContext& context, const int n, const double alpha, + const double* x, double* y) { + PADDLE_ENFORCE(platform::dynload::cublasDaxpy(context.cublas_handle(), n, + &alpha, x, 1, y, 1)); } -template struct SetConstant; -template struct SetConstant; -template struct SetConstant; -template struct SetConstant; -template struct SetConstant; +template struct SetConstant; +template struct SetConstant; +template struct SetConstant; +template struct SetConstant; +template struct SetConstant; -#define DEFINE_GPU_TRANS(RANK) \ - template struct Transpose; \ - template struct Transpose; +#define DEFINE_GPU_TRANS(RANK) \ + template struct Transpose; \ + template struct Transpose; DEFINE_GPU_TRANS(1); DEFINE_GPU_TRANS(2); @@ -277,8 +255,9 @@ struct TensorSetConstantGPU { template void operator()() const { - SetConstant functor; - functor(context_, tensor_, static_cast(value_)); + SetConstant functor; + functor(reinterpret_cast(context_), + tensor_, static_cast(value_)); } const platform::DeviceContext& context_; @@ -303,20 +282,20 @@ template struct RowwiseSum; // The ColwiseSum failed in debug mode, // and only failed for this case. So reimplemented it. 
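// Illustrative note (added for clarity, not part of the original patch): the
// specialization below computes a column-wise sum as a GEMV against a vector
// of ones. For an input of shape [M, N], `one` is filled with 1.0 via
// SetConstant and the call to gemv with trans_a == true evaluates, in effect,
//     vector = input^T * ones(M)
// which is exactly the sum of each column. The identifiers in this note mirror
// the code that follows; nothing new is introduced here.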
template <> -void ColwiseSum::operator()( - const platform::DeviceContext& context, const framework::Tensor& input, +void ColwiseSum::operator()( + const platform::CUDADeviceContext& context, const framework::Tensor& input, framework::Tensor* vector) { auto in_dims = input.dims(); auto size = input.numel() / in_dims[0]; PADDLE_ENFORCE_EQ(vector->numel(), size); framework::Tensor one; one.mutable_data({in_dims[0]}, context.GetPlace()); - SetConstant set; + SetConstant set; set(context, &one, static_cast(1.0)); - gemv(context, true, static_cast(in_dims[0]), - static_cast(in_dims[1]), 1.0, - input.data(), one.data(), - 0.0, vector->data()); + gemv( + context, true, static_cast(in_dims[0]), static_cast(in_dims[1]), + 1.0, input.data(), one.data(), 0.0, + vector->data()); } } // namespace math diff --git a/paddle/operators/math/math_function.h b/paddle/operators/math/math_function.h index b4040b41e093be2841e395edacd1e02c2173a776..51e0fd9ad7777ab8f7d9cff789fdb4eacc44f3c7 100644 --- a/paddle/operators/math/math_function.h +++ b/paddle/operators/math/math_function.h @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#ifdef PADDLE_USE_MKLML +#ifdef PADDLE_WITH_MKLML #include #include #include @@ -62,53 +62,51 @@ namespace math { // Then matrixA: M * K, matrixB: K * N, matrixC : M * N // For more detailed info, please refer to // http://www.netlib.org/lapack/explore-html/d4/de2/sgemm_8f.html -template -void gemm(const platform::DeviceContext& context, const CBLAS_TRANSPOSE transA, +template +void gemm(const DeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const T alpha, const T* A, const T* B, const T beta, T* C); // gemm wrapper with stride args for matrix uncontinuous in memory -template -void gemm(const platform::DeviceContext& context, const bool transA, - const bool transB, const int M, const int N, const int K, - const T alpha, const T* A, const int lda, const T* B, const int ldb, - const T beta, T* C, const int ldc); +template +void gemm(const DeviceContext& context, const bool transA, const bool transB, + const int M, const int N, const int K, const T alpha, const T* A, + const int lda, const T* B, const int ldb, const T beta, T* C, + const int ldc); // matrix multiply with continuous memory -template -void matmul(const platform::DeviceContext& context, - const framework::Tensor& matrix_a, bool trans_a, - const framework::Tensor& matrix_b, bool trans_b, T alpha, - framework::Tensor* matrix_out, T beta); +template +void matmul(const DeviceContext& context, const framework::Tensor& matrix_a, + bool trans_a, const framework::Tensor& matrix_b, bool trans_b, + T alpha, framework::Tensor* matrix_out, T beta); // Batched gemm -template -void batched_gemm(const platform::DeviceContext& context, - const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, - const int M, const int N, const int K, const T alpha, - const T* A, const T* B, const T beta, T* C, - const int batchCount, const int strideA, const int strideB); - -template -void gemv(const platform::DeviceContext& context, const bool trans_a, - const int M, const int N, const T alpha, const T* A, const T* B, - const T beta, T* C); - -template -void axpy(const platform::DeviceContext& context, const int n, const T alpha, - const T* x, T* y); - -template +template +void batched_gemm(const DeviceContext& context, const CBLAS_TRANSPOSE transA, + const CBLAS_TRANSPOSE transB, const int M, const 
int N, + const int K, const T alpha, const T* A, const T* B, + const T beta, T* C, const int batchCount, const int strideA, + const int strideB); + +template +void gemv(const DeviceContext& context, const bool trans_a, const int M, + const int N, const T alpha, const T* A, const T* B, const T beta, + T* C); + +template +void axpy(const DeviceContext& context, const int n, const T alpha, const T* x, + T* y); + +template struct Transpose { - void operator()(const platform::DeviceContext& context, - const framework::Tensor& in, framework::Tensor* out, - const std::vector& axis); + void operator()(const DeviceContext& context, const framework::Tensor& in, + framework::Tensor* out, const std::vector& axis); }; -template +template struct SetConstant { - void operator()(const platform::DeviceContext& context, - framework::Tensor* tensor, T num); + void operator()(const DeviceContext& context, framework::Tensor* tensor, + T num); }; template @@ -118,17 +116,16 @@ void set_constant_with_place(const platform::DeviceContext& context, void set_constant(const platform::DeviceContext& context, framework::Tensor* tensor, float value); -template +template struct RowwiseAdd { - void operator()(const platform::DeviceContext& context, - const framework::Tensor& input, const framework::Tensor& vec, - framework::Tensor* output); + void operator()(const DeviceContext& context, const framework::Tensor& input, + const framework::Tensor& vec, framework::Tensor* output); }; -template +template struct ColwiseSum { - void operator()(const platform::DeviceContext& context, - const framework::Tensor& input, framework::Tensor* vec); + void operator()(const DeviceContext& context, const framework::Tensor& input, + framework::Tensor* vec); }; template diff --git a/paddle/operators/math/math_function_impl.h b/paddle/operators/math/math_function_impl.h index 8c1971fc611cf0678d1185d14fbebbeed7d8a594..4d5e8481018916317aa5c50d8130aed04e2ed245 100644 --- a/paddle/operators/math/math_function_impl.h +++ b/paddle/operators/math/math_function_impl.h @@ -20,16 +20,17 @@ namespace paddle { namespace operators { namespace math { -template -void SetConstant::operator()(const platform::DeviceContext& context, - framework::Tensor* tensor, T num) { +template +void SetConstant::operator()(const DeviceContext& context, + framework::Tensor* tensor, + T num) { auto t = framework::EigenVector::Flatten(*tensor); - t.device(*context.GetEigenDevice()) = t.constant(static_cast(num)); + t.device(*context.eigen_device()) = t.constant(static_cast(num)); } -template -void Transpose::operator()( - const platform::DeviceContext& context, const framework::Tensor& in, +template +void Transpose::operator()( + const DeviceContext& context, const framework::Tensor& in, framework::Tensor* out, const std::vector& axis) { Eigen::array permute; for (int i = 0; i < Rank; i++) { @@ -40,15 +41,15 @@ void Transpose::operator()( auto eigen_in = framework::EigenTensor::From(in); auto eigen_out = framework::EigenTensor::From(*out); - auto* dev = context.GetEigenDevice(); + auto* dev = context.eigen_device(); eigen_out.device(*dev) = eigen_in.shuffle(permute); } -template -void RowwiseAdd::operator()(const platform::DeviceContext& context, - const framework::Tensor& input, - const framework::Tensor& vector, - framework::Tensor* output) { +template +void RowwiseAdd::operator()(const DeviceContext& context, + const framework::Tensor& input, + const framework::Tensor& vector, + framework::Tensor* output) { auto in_dims = input.dims(); auto size = input.numel() / 
in_dims[0]; PADDLE_ENFORCE_EQ(vector.numel(), size); @@ -59,14 +60,14 @@ void RowwiseAdd::operator()(const platform::DeviceContext& context, auto out = framework::EigenMatrix::From(*output); Eigen::array shape({{1, static_cast(size)}}); Eigen::array bcast({{static_cast(in_dims[0]), 1}}); - out.device(*context.GetEigenDevice()) = + out.device(*context.eigen_device()) = in + vec.reshape(shape).broadcast(bcast); } -template -void ColwiseSum::operator()(const platform::DeviceContext& context, - const framework::Tensor& input, - framework::Tensor* vector) { +template +void ColwiseSum::operator()(const DeviceContext& context, + const framework::Tensor& input, + framework::Tensor* vector) { auto in_dims = input.dims(); auto size = input.numel() / in_dims[0]; PADDLE_ENFORCE_EQ(vector->numel(), size); @@ -74,7 +75,7 @@ void ColwiseSum::operator()(const platform::DeviceContext& context, auto vec = framework::EigenMatrix::From(*vector); auto in = framework::EigenMatrix::From(input); Eigen::array shape({{1, static_cast(size)}}); - vec.reshape(shape).device(*context.GetEigenDevice()) = + vec.reshape(shape).device(*context.eigen_device()) = in.sum(Eigen::array({{0}})).reshape(shape); } diff --git a/paddle/operators/math/math_function_test.cc b/paddle/operators/math/math_function_test.cc index 983c9fdcffb0a67da1bc0b5b4af9420a68bd2ac1..7c6f098ca9065ded1644420a3ab47911bf7bc3b3 100644 --- a/paddle/operators/math/math_function_test.cc +++ b/paddle/operators/math/math_function_test.cc @@ -21,7 +21,7 @@ TEST(math_function, gemm_notrans_cblas) { memcpy(input3_ptr, arr3, 8 * sizeof(float)); paddle::platform::CPUDeviceContext context(*cpu_place); - paddle::operators::math::gemm( + paddle::operators::math::gemm( context, false, false, m, n, k, 1, input1_ptr, 3, input2_ptr + 1, 4, 1, input3_ptr + 1, 4); @@ -55,7 +55,7 @@ TEST(math_function, gemm_trans_clbas) { memcpy(input3_ptr, arr3, 8 * sizeof(float)); paddle::platform::CPUDeviceContext context(*cpu_place); - paddle::operators::math::gemm( + paddle::operators::math::gemm( context, false, true, m, n, k, 1, input1_ptr, 3, input2_ptr + 3, 3, 1, input3_ptr + 1, 4); @@ -74,7 +74,8 @@ TEST(math_function, zero) { auto* cpu_place = new paddle::platform::CPUPlace(); float* t = tensor.mutable_data({2, 2}, *cpu_place); paddle::platform::CPUDeviceContext context(*cpu_place); - paddle::operators::math::SetConstant + paddle::operators::math::SetConstant functor; functor(context, &tensor, 0); EXPECT_EQ(t[0], 0); @@ -110,7 +111,7 @@ void GemvTest(int m, int n, bool trans) { } paddle::platform::CPUDeviceContext context(*cpu_place); - paddle::operators::math::gemv( + paddle::operators::math::gemv( context, trans, static_cast(m), static_cast(n), 1., data_a, data_b, 0., data_c); diff --git a/paddle/operators/math/math_function_test.cu b/paddle/operators/math/math_function_test.cu index d5d6f0c73bc6bce7a74db2c98fa9f884a0bcd9a2..32e96d948714a8fd1fa2c089057603fdaed85c16 100644 --- a/paddle/operators/math/math_function_test.cu +++ b/paddle/operators/math/math_function_test.cu @@ -21,7 +21,7 @@ TEST(math_function, notrans_mul_trans) { out_gpu.mutable_data({2, 2}, *gpu_place); - paddle::operators::math::matmul( + paddle::operators::math::matmul( context, input1_gpu, false, input2_gpu, true, 1, &out_gpu, 0); paddle::framework::CopyFrom(out_gpu, *cpu_place, context, &out); @@ -55,7 +55,7 @@ TEST(math_function, trans_mul_notrans) { out_gpu.mutable_data({3, 3}, *gpu_place); - paddle::operators::math::matmul( + paddle::operators::math::matmul( context, input1_gpu, true, input2_gpu, false, 1, 
&out_gpu, 0); paddle::framework::CopyFrom(out_gpu, *cpu_place, context, &out); @@ -106,7 +106,7 @@ TEST(math_function, gemm_notrans_cublas) { float* b = input2_gpu.data(); float* c = input3_gpu.mutable_data(*gpu_place); - paddle::operators::math::gemm( + paddle::operators::math::gemm( context, false, false, m, n, k, 1, a, 3, b + 1, 4, 1, c + 1, 4); paddle::framework::CopyFrom(input3_gpu, *cpu_place, context, &input3); @@ -161,7 +161,7 @@ TEST(math_function, gemm_trans_cublas) { float* b = input2_gpu.data(); float* c = input3_gpu.mutable_data(*gpu_place); - paddle::operators::math::gemm( + paddle::operators::math::gemm( context, false, true, m, n, k, 1, a, 3, b + 3, 3, 1, c + 1, 4); paddle::framework::CopyFrom(input3_gpu, *cpu_place, context, &input3); @@ -208,7 +208,7 @@ void GemvTest(int m, int n, bool trans) { paddle::framework::CopyFrom(mat_a, *gpu_place, context, &g_mat_a); paddle::framework::CopyFrom(vec_b, *gpu_place, context, &g_vec_b); - paddle::operators::math::gemv( + paddle::operators::math::gemv( context, trans, static_cast(m), static_cast(n), 1., g_data_a, g_data_b, 0., g_data_c); diff --git a/paddle/operators/math/matmul.h b/paddle/operators/math/matmul.h index 6ba9a0ba9a70bd938f9362179990ab68fa3186ba..7048e11e6f27a075892c28681a3c4913a27b3f3e 100644 --- a/paddle/operators/math/matmul.h +++ b/paddle/operators/math/matmul.h @@ -26,13 +26,12 @@ namespace math { // // Both a & b can be 1- to 3-dimensional. Higher rank tensors are not supported // yet. -template +template class MatMulFunctor { public: - void operator()(const platform::DeviceContext& context, - const framework::Tensor& a, bool trans_a, - const framework::Tensor& b, bool trans_b, T alpha, - framework::Tensor* out, T beta) { + void operator()(const DeviceContext& context, const framework::Tensor& a, + bool trans_a, const framework::Tensor& b, bool trans_b, + T alpha, framework::Tensor* out, T beta) { auto dim_a = a.dims(); auto dim_b = b.dims(); @@ -108,13 +107,13 @@ class MatMulFunctor { if (!batchCount) { // regular matrix multiplication - gemm(context, transA, transB, M, N, kA, alpha, a.data(), - b.data(), beta, out->data()); + gemm(context, transA, transB, M, N, kA, alpha, + a.data(), b.data(), beta, out->data()); } else { // batched matrix multiplication - batched_gemm(context, transA, transB, M, N, kA, alpha, - a.data(), b.data(), beta, out->data(), - batchCount, strideA, strideB); + batched_gemm( + context, transA, transB, M, N, kA, alpha, a.data(), b.data(), + beta, out->data(), batchCount, strideA, strideB); } } }; diff --git a/paddle/operators/math/maxouting.cc b/paddle/operators/math/maxouting.cc index c9003962d33b70b8e21a0d6b78bf5a77981df409..fea86675f75dad99a336d795d4561ae32d58c30a 100644 --- a/paddle/operators/math/maxouting.cc +++ b/paddle/operators/math/maxouting.cc @@ -20,9 +20,9 @@ namespace math { // All tensors are in NCHW format, and the groups must be greater than 1 template -class MaxOutFunctor { +class MaxOutFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& input, framework::Tensor* output, int groups) { const int batch_size = input.dims()[0]; @@ -54,9 +54,9 @@ class MaxOutFunctor { }; template -class MaxOutGradFunctor { +class MaxOutGradFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& input, framework::Tensor* input_grad, const framework::Tensor& output, const 
framework::Tensor& output_grad, int groups) { @@ -91,10 +91,10 @@ class MaxOutGradFunctor { } }; -template class MaxOutGradFunctor; -template class MaxOutGradFunctor; -template class MaxOutFunctor; -template class MaxOutFunctor; +template class MaxOutGradFunctor; +template class MaxOutGradFunctor; +template class MaxOutFunctor; +template class MaxOutFunctor; } // namespace math } // namespace operators diff --git a/paddle/operators/math/maxouting.cu b/paddle/operators/math/maxouting.cu index c3fabcae081e24d92d50d0e2a2cad4a2e9872125..6056ad251c12976fe9032f03aaaeb52727da1f42 100644 --- a/paddle/operators/math/maxouting.cu +++ b/paddle/operators/math/maxouting.cu @@ -78,9 +78,9 @@ __global__ void KernelMaxoutGrad(const int nthreads, const T* input_data, * All tensors are in NCHW format. */ template -class MaxOutFunctor { +class MaxOutFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, framework::Tensor* output, int groups) { const int batch_size = input.dims()[0]; @@ -98,20 +98,18 @@ class MaxOutFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelMaxOut< - T><<(context) - .stream()>>>(nthreads, input_data, input_channels, - input_height, input_width, groups, output_data); + KernelMaxOut<<>>( + nthreads, input_data, input_channels, input_height, input_width, groups, + output_data); } }; /* * All tensors are in NCHW format. */ template -class MaxOutGradFunctor { +class MaxOutGradFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, framework::Tensor* input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, int groups) { @@ -132,20 +130,17 @@ class MaxOutGradFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelMaxoutGrad< - T><<(context) - .stream()>>>(nthreads, input_data, output_data, - output_grad_data, input_grad_data, input_channels, - input_height, input_width, groups); + KernelMaxoutGrad<<>>( + nthreads, input_data, output_data, output_grad_data, input_grad_data, + input_channels, input_height, input_width, groups); } }; -template class MaxOutGradFunctor; -template class MaxOutGradFunctor; +template class MaxOutGradFunctor; +template class MaxOutGradFunctor; -template class MaxOutFunctor; -template class MaxOutFunctor; +template class MaxOutFunctor; +template class MaxOutFunctor; } // namespace math } // namespace operators diff --git a/paddle/operators/math/maxouting.h b/paddle/operators/math/maxouting.h index 2d9069b0b3ca3e7bad3b21a46985c52ef00f50e6..68f4743db07af0f369eb18f1a7cb6e326d469e85 100644 --- a/paddle/operators/math/maxouting.h +++ b/paddle/operators/math/maxouting.h @@ -23,20 +23,18 @@ namespace math { #define FLT_MAX __FLT_MAX__ -template - +template class MaxOutFunctor { public: - void operator()(const platform::DeviceContext& context, - const framework::Tensor& input, framework::Tensor* output, - int groups); + void operator()(const DeviceContext& context, const framework::Tensor& input, + framework::Tensor* output, int groups); }; -template +template class MaxOutGradFunctor { public: - void operator()(const platform::DeviceContext& context, - const framework::Tensor& input, framework::Tensor* input_grad, + void operator()(const DeviceContext& context, const framework::Tensor& input, + framework::Tensor* input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, int groups); 
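// Usage sketch (assumed for illustration; not taken from this patch): after
// this refactor the functor is parameterized on a device context type rather
// than a Place, so a caller would instantiate and invoke it as, e.g.,
//     math::MaxOutGradFunctor<platform::CPUDeviceContext, float> maxout_grad;
//     maxout_grad(cpu_ctx, input, &input_grad, output, output_grad, groups);
// where cpu_ctx is a platform::CPUDeviceContext; the variable names here are
// hypothetical.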
}; diff --git a/paddle/operators/math/pooling.cc b/paddle/operators/math/pooling.cc index 135984586a67f666425f81456148c3623ed7ef25..150de6fd59ef3ac0c4cb9160bf5afb1ce1064577 100644 --- a/paddle/operators/math/pooling.cc +++ b/paddle/operators/math/pooling.cc @@ -24,9 +24,9 @@ namespace math { * height and width, respectively. */ template -class Pool2dFunctor { +class Pool2dFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& input, std::vector& ksize, std::vector& strides, std::vector& paddings, PoolProcess pool_process, framework::Tensor* output) { @@ -84,9 +84,9 @@ class Pool2dFunctor { * and width, respectively. */ template -class Pool2dGradFunctor { +class Pool2dGradFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, @@ -152,9 +152,9 @@ class Pool2dGradFunctor { * height and width, respectively. */ template -class MaxPool2dGradFunctor { +class MaxPool2dGradFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, @@ -213,25 +213,29 @@ class MaxPool2dGradFunctor { } }; -template class MaxPool2dGradFunctor; -template class MaxPool2dGradFunctor; +template class MaxPool2dGradFunctor; +template class MaxPool2dGradFunctor; -template class Pool2dFunctor, float>; -template class Pool2dFunctor, float>; -template class Pool2dGradFunctor< - platform::CPUPlace, paddle::operators::math::MaxPoolGrad, float>; -template class Pool2dGradFunctor< - platform::CPUPlace, paddle::operators::math::AvgPoolGrad, float>; -template class Pool2dFunctor, + float>; +template class Pool2dGradFunctor, + float>; +template class Pool2dFunctor, double>; -template class Pool2dFunctor, double>; -template class Pool2dGradFunctor< - platform::CPUPlace, paddle::operators::math::MaxPoolGrad, double>; -template class Pool2dGradFunctor< - platform::CPUPlace, paddle::operators::math::AvgPoolGrad, double>; +template class Pool2dGradFunctor, + double>; +template class Pool2dGradFunctor, + double>; /* * All tensors are in NCDHW format. @@ -239,9 +243,9 @@ template class Pool2dGradFunctor< * depth, height and width, respectively. */ template -class Pool3dFunctor { +class Pool3dFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& input, std::vector& ksize, std::vector& strides, std::vector& paddings, PoolProcess pool_process, framework::Tensor* output) { @@ -314,9 +318,9 @@ class Pool3dFunctor { * depth, height and width, respectively. */ template -class Pool3dGradFunctor { +class Pool3dGradFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, @@ -398,9 +402,9 @@ class Pool3dGradFunctor { * depth, height and width, respectively. 
*/ template -class MaxPool3dGradFunctor { +class MaxPool3dGradFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, @@ -473,25 +477,29 @@ class MaxPool3dGradFunctor { } }; -template class MaxPool3dGradFunctor; -template class MaxPool3dGradFunctor; +template class MaxPool3dGradFunctor; +template class MaxPool3dGradFunctor; -template class Pool3dFunctor, float>; -template class Pool3dFunctor, float>; -template class Pool3dGradFunctor< - platform::CPUPlace, paddle::operators::math::MaxPoolGrad, float>; -template class Pool3dGradFunctor< - platform::CPUPlace, paddle::operators::math::AvgPoolGrad, float>; -template class Pool3dFunctor, + float>; +template class Pool3dGradFunctor, + float>; +template class Pool3dFunctor, double>; -template class Pool3dFunctor, double>; -template class Pool3dGradFunctor< - platform::CPUPlace, paddle::operators::math::MaxPoolGrad, double>; -template class Pool3dGradFunctor< - platform::CPUPlace, paddle::operators::math::AvgPoolGrad, double>; +template class Pool3dGradFunctor, + double>; +template class Pool3dGradFunctor, + double>; /* * All tensors are in NCHW format. @@ -499,9 +507,9 @@ template class Pool3dGradFunctor< * height and width, respectively. */ template -class MaxPool2dWithIndexFunctor { +class MaxPool2dWithIndexFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& input, std::vector& ksize, std::vector& strides, std::vector& paddings, framework::Tensor* output, framework::Tensor* mask) { @@ -564,9 +572,9 @@ class MaxPool2dWithIndexFunctor { * height and width, respectively. */ template -class MaxPool2dWithIndexGradFunctor { +class MaxPool2dWithIndexGradFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, std::vector& ksize, std::vector& strides, std::vector& paddings, @@ -602,10 +610,14 @@ class MaxPool2dWithIndexGradFunctor { } }; -template class MaxPool2dWithIndexFunctor; -template class MaxPool2dWithIndexGradFunctor; -template class MaxPool2dWithIndexFunctor; -template class MaxPool2dWithIndexGradFunctor; +template class MaxPool2dWithIndexFunctor; +template class MaxPool2dWithIndexGradFunctor; +template class MaxPool2dWithIndexFunctor; +template class MaxPool2dWithIndexGradFunctor; /* * All tensors are in NCDHW format. @@ -613,9 +625,9 @@ template class MaxPool2dWithIndexGradFunctor; * depth, height and width, respectively. */ template -class MaxPool3dWithIndexFunctor { +class MaxPool3dWithIndexFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& input, std::vector& ksize, std::vector& strides, std::vector& paddings, framework::Tensor* output, framework::Tensor* mask) { @@ -692,9 +704,9 @@ class MaxPool3dWithIndexFunctor { * depth, height and width, respectively. 
*/ template -class MaxPool3dWithIndexGradFunctor { +class MaxPool3dWithIndexGradFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, std::vector& ksize, std::vector& strides, std::vector& paddings, @@ -735,10 +747,14 @@ class MaxPool3dWithIndexGradFunctor { } }; -template class MaxPool3dWithIndexFunctor; -template class MaxPool3dWithIndexGradFunctor; -template class MaxPool3dWithIndexFunctor; -template class MaxPool3dWithIndexGradFunctor; +template class MaxPool3dWithIndexFunctor; +template class MaxPool3dWithIndexGradFunctor; +template class MaxPool3dWithIndexFunctor; +template class MaxPool3dWithIndexGradFunctor; } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/operators/math/pooling.cu b/paddle/operators/math/pooling.cu index ca3560f264b59057fd655084f3d43adc617c6606..0243cf8316a2a83bfc4c091f64419574c1be2f5c 100644 --- a/paddle/operators/math/pooling.cu +++ b/paddle/operators/math/pooling.cu @@ -155,9 +155,9 @@ __global__ void KernelMaxPool2DGrad( * height and width, respectively. */ template -class Pool2dFunctor { +class Pool2dFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, std::vector& ksize, std::vector& strides, std::vector& paddings, PoolProcess pool_process, framework::Tensor* output) { @@ -183,11 +183,7 @@ class Pool2dFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelPool2D< - PoolProcess, - T><<(context) - .stream()>>>( + KernelPool2D<<>>( nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process, output_data); @@ -200,9 +196,9 @@ class Pool2dFunctor { * height and width, respectively. */ template -class Pool2dGradFunctor { +class Pool2dGradFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, @@ -231,11 +227,7 @@ class Pool2dGradFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelPool2DGrad< - PoolProcess, - T><<(context) - .stream()>>>( + KernelPool2DGrad<<>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, @@ -249,9 +241,9 @@ class Pool2dGradFunctor { * height and width, respectively. 
*/ template -class MaxPool2dGradFunctor { +class MaxPool2dGradFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, @@ -281,10 +273,7 @@ class MaxPool2dGradFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelMaxPool2DGrad< - T><<(context) - .stream()>>>( + KernelMaxPool2DGrad<<>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, @@ -292,25 +281,29 @@ class MaxPool2dGradFunctor { } }; -template class MaxPool2dGradFunctor; -template class MaxPool2dGradFunctor; +template class MaxPool2dGradFunctor; +template class MaxPool2dGradFunctor; -template class Pool2dFunctor, float>; -template class Pool2dFunctor, float>; -template class Pool2dGradFunctor< - platform::GPUPlace, paddle::operators::math::MaxPoolGrad, float>; -template class Pool2dGradFunctor< - platform::GPUPlace, paddle::operators::math::AvgPoolGrad, float>; -template class Pool2dFunctor, + float>; +template class Pool2dGradFunctor, + float>; +template class Pool2dFunctor, double>; -template class Pool2dFunctor, double>; -template class Pool2dGradFunctor< - platform::GPUPlace, paddle::operators::math::MaxPoolGrad, double>; -template class Pool2dGradFunctor< - platform::GPUPlace, paddle::operators::math::AvgPoolGrad, double>; +template class Pool2dGradFunctor, + double>; +template class Pool2dGradFunctor, + double>; template __global__ void KernelPool3D(const int nthreads, const T* input_data, @@ -478,9 +471,9 @@ __global__ void KernelMaxPool3DGrad( * depth, height and width, respectively. */ template -class Pool3dFunctor { +class Pool3dFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, std::vector& ksize, std::vector& strides, std::vector& paddings, PoolProcess pool_process, framework::Tensor* output) { @@ -512,11 +505,7 @@ class Pool3dFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelPool3D< - PoolProcess, - T><<(context) - .stream()>>>( + KernelPool3D<<>>( nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, @@ -531,9 +520,9 @@ class Pool3dFunctor { * depth, height and width, respectively. */ template -class Pool3dGradFunctor { +class Pool3dGradFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, @@ -569,11 +558,7 @@ class Pool3dGradFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelPool3DGrad< - PoolProcess, - T><<(context) - .stream()>>>( + KernelPool3DGrad<<>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, @@ -588,9 +573,9 @@ class Pool3dGradFunctor { * depth, height and width, respectively. 
*/ template -class MaxPool3dGradFunctor { +class MaxPool3dGradFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, @@ -626,10 +611,7 @@ class MaxPool3dGradFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelMaxPool3DGrad< - T><<(context) - .stream()>>>( + KernelMaxPool3DGrad<<>>( nthreads, input_data, output_data, output_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, @@ -638,25 +620,29 @@ class MaxPool3dGradFunctor { } }; -template class MaxPool3dGradFunctor; -template class MaxPool3dGradFunctor; +template class MaxPool3dGradFunctor; +template class MaxPool3dGradFunctor; -template class Pool3dFunctor, float>; -template class Pool3dFunctor, float>; -template class Pool3dGradFunctor< - platform::GPUPlace, paddle::operators::math::MaxPoolGrad, float>; -template class Pool3dGradFunctor< - platform::GPUPlace, paddle::operators::math::AvgPoolGrad, float>; -template class Pool3dFunctor, + float>; +template class Pool3dGradFunctor, + float>; +template class Pool3dFunctor, double>; -template class Pool3dFunctor, double>; -template class Pool3dGradFunctor< - platform::GPUPlace, paddle::operators::math::MaxPoolGrad, double>; -template class Pool3dGradFunctor< - platform::GPUPlace, paddle::operators::math::AvgPoolGrad, double>; +template class Pool3dGradFunctor, + double>; +template class Pool3dGradFunctor, + double>; template __global__ void KernelMaxPool2dWithIdx( @@ -747,9 +733,9 @@ __global__ void KernelMaxPool2DWithIdxGrad( * height and width, respectively. */ template -class MaxPool2dWithIndexFunctor { +class MaxPool2dWithIndexFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, std::vector& ksize, std::vector& strides, std::vector& paddings, framework::Tensor* output, framework::Tensor* mask) { @@ -776,10 +762,7 @@ class MaxPool2dWithIndexFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelMaxPool2dWithIdx< - T1, T2><<(context) - .stream()>>>( + KernelMaxPool2dWithIdx<<>>( nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, output_data, mask_data); @@ -792,9 +775,9 @@ class MaxPool2dWithIndexFunctor { * height and width, respectively. 
*/ template -class MaxPool2dWithIndexGradFunctor { +class MaxPool2dWithIndexGradFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, std::vector& ksize, std::vector& strides, std::vector& paddings, @@ -821,10 +804,7 @@ class MaxPool2dWithIndexGradFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelMaxPool2DWithIdxGrad< - T1, T2><<(context) - .stream()>>>( + KernelMaxPool2DWithIdxGrad<<>>( nthreads, output_grad_data, mask_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, @@ -832,10 +812,14 @@ class MaxPool2dWithIndexGradFunctor { } }; -template class MaxPool2dWithIndexFunctor; -template class MaxPool2dWithIndexGradFunctor; -template class MaxPool2dWithIndexFunctor; -template class MaxPool2dWithIndexGradFunctor; +template class MaxPool2dWithIndexFunctor; +template class MaxPool2dWithIndexGradFunctor; +template class MaxPool2dWithIndexFunctor; +template class MaxPool2dWithIndexGradFunctor; template __global__ void KernelMaxPool3DWithIdx( @@ -950,9 +934,9 @@ __global__ void KernelMaxPool3DWithIdxGrad( * depth, height and width, respectively. */ template -class MaxPool3dWithIndexFunctor { +class MaxPool3dWithIndexFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, std::vector& ksize, std::vector& strides, std::vector& paddings, framework::Tensor* output, framework::Tensor* mask) { @@ -985,10 +969,7 @@ class MaxPool3dWithIndexFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelMaxPool3DWithIdx< - T1, T2><<(context) - .stream()>>>( + KernelMaxPool3DWithIdx<<>>( nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, @@ -1002,9 +983,9 @@ class MaxPool3dWithIndexFunctor { * depth, height and width, respectively. 
*/ template -class MaxPool3dWithIndexGradFunctor { +class MaxPool3dWithIndexGradFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, std::vector& ksize, std::vector& strides, std::vector& paddings, @@ -1037,10 +1018,7 @@ class MaxPool3dWithIndexGradFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelMaxPool3DWithIdxGrad< - T1, T2><<(context) - .stream()>>>( + KernelMaxPool3DWithIdxGrad<<>>( nthreads, output_grad_data, mask_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, @@ -1049,10 +1027,14 @@ class MaxPool3dWithIndexGradFunctor { } }; -template class MaxPool3dWithIndexFunctor; -template class MaxPool3dWithIndexGradFunctor; -template class MaxPool3dWithIndexFunctor; -template class MaxPool3dWithIndexGradFunctor; +template class MaxPool3dWithIndexFunctor; +template class MaxPool3dWithIndexGradFunctor; +template class MaxPool3dWithIndexFunctor; +template class MaxPool3dWithIndexGradFunctor; } // namespace math } // namespace operators diff --git a/paddle/operators/math/pooling.h b/paddle/operators/math/pooling.h index 19fbd8b4bb2469d3ce8a139ce30a48641dbd6e0f..2759f06cb6a51f7ceb6b8010d792030eb6ad5d3e 100644 --- a/paddle/operators/math/pooling.h +++ b/paddle/operators/math/pooling.h @@ -84,62 +84,58 @@ class AvgPoolGrad { * This is different from average pooling. So we rewrite the max_pool_grad: * MaxPool2dGradFunctor, MaxPool3dGradFunctor. */ -template +template class Pool2dFunctor { public: - void operator()(const platform::DeviceContext& context, - const framework::Tensor& input, std::vector& ksize, - std::vector& strides, std::vector& paddings, - PoolProcess pool_compute, framework::Tensor* output); + void operator()(const DeviceContext& context, const framework::Tensor& input, + std::vector& ksize, std::vector& strides, + std::vector& paddings, PoolProcess pool_compute, + framework::Tensor* output); }; -template +template class Pool2dGradFunctor { public: - void operator()(const platform::DeviceContext& context, - const framework::Tensor& input, + void operator()(const DeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings, PoolProcess pool_compute, framework::Tensor* input_grad); }; -template +template class MaxPool2dGradFunctor { public: - void operator()(const platform::DeviceContext& context, - const framework::Tensor& input, + void operator()(const DeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings, framework::Tensor* input_grad); }; -template +template class Pool3dFunctor { public: - void operator()(const platform::DeviceContext& context, - const framework::Tensor& input, std::vector& ksize, - std::vector& strides, std::vector& paddings, - PoolProcess pool_compute, framework::Tensor* output); + void operator()(const DeviceContext& context, const framework::Tensor& input, + std::vector& ksize, std::vector& strides, + std::vector& paddings, PoolProcess pool_compute, + framework::Tensor* output); }; -template +template class Pool3dGradFunctor { public: - void operator()(const platform::DeviceContext& context, - const 
framework::Tensor& input, + void operator()(const DeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings, PoolProcess pool_compute, framework::Tensor* input_grad); }; -template +template class MaxPool3dGradFunctor { public: - void operator()(const platform::DeviceContext& context, - const framework::Tensor& input, + void operator()(const DeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings, @@ -153,38 +149,38 @@ class MaxPool3dGradFunctor { * In pool2d, all tensors are in NCHW format. In pool3d, all tensors are in * NCDHW format. */ -template +template class MaxPool2dWithIndexFunctor { public: - void operator()(const platform::DeviceContext& context, - const framework::Tensor& input, std::vector& ksize, - std::vector& strides, std::vector& paddings, - framework::Tensor* output, framework::Tensor* mask); + void operator()(const DeviceContext& context, const framework::Tensor& input, + std::vector& ksize, std::vector& strides, + std::vector& paddings, framework::Tensor* output, + framework::Tensor* mask); }; -template +template class MaxPool2dWithIndexGradFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const DeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, std::vector& ksize, std::vector& strides, std::vector& paddings, framework::Tensor* input_grad); }; -template +template class MaxPool3dWithIndexFunctor { public: - void operator()(const platform::DeviceContext& context, - const framework::Tensor& input, std::vector& ksize, - std::vector& strides, std::vector& paddings, - framework::Tensor* output, framework::Tensor* mask); + void operator()(const DeviceContext& context, const framework::Tensor& input, + std::vector& ksize, std::vector& strides, + std::vector& paddings, framework::Tensor* output, + framework::Tensor* mask); }; -template +template class MaxPool3dWithIndexGradFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const DeviceContext& context, const framework::Tensor& output_grad, const framework::Tensor& mask, std::vector& ksize, std::vector& strides, std::vector& paddings, diff --git a/paddle/operators/math/selected_rows_functor.cc b/paddle/operators/math/selected_rows_functor.cc index 514f2adef284c8877e2e74b943b4e6419c6ae721..ab758d1e7fd8ab361948b28e8cb735b9a742a339 100644 --- a/paddle/operators/math/selected_rows_functor.cc +++ b/paddle/operators/math/selected_rows_functor.cc @@ -19,8 +19,8 @@ namespace paddle { namespace operators { namespace math { template -struct SelectedRowsAdd { - void operator()(const platform::DeviceContext& context, +struct SelectedRowsAdd { + void operator()(const platform::CPUDeviceContext& context, const framework::SelectedRows& input1, const framework::SelectedRows& input2, framework::SelectedRows* output) { @@ -67,12 +67,12 @@ struct SelectedRowsAdd { } }; -template struct SelectedRowsAdd; -template struct SelectedRowsAdd; +template struct SelectedRowsAdd; +template struct SelectedRowsAdd; template -struct SelectedRowsAddTensor { - void operator()(const platform::DeviceContext& context, +struct SelectedRowsAddTensor { + void operator()(const platform::CPUDeviceContext& context, const framework::SelectedRows& input1, const framework::Tensor& 
input2, framework::Tensor* output) { auto in1_height = input1.height(); @@ -88,7 +88,7 @@ struct SelectedRowsAddTensor { PADDLE_ENFORCE_EQ(in1_row_numel, input2.numel() / in1_height); PADDLE_ENFORCE_EQ(in1_row_numel, output->numel() / in1_height); - SetConstant functor; + SetConstant functor; functor(context, output, 0.0); auto* in1_data = in1_value.data(); @@ -103,17 +103,16 @@ struct SelectedRowsAddTensor { auto out_eigen = framework::EigenVector::Flatten(*output); auto in2_eigen = framework::EigenVector::Flatten(input2); - out_eigen.device(*context.GetEigenDevice()) = - out_eigen + in2_eigen; + out_eigen.device(*context.eigen_device()) = out_eigen + in2_eigen; } }; -template struct SelectedRowsAddTensor; -template struct SelectedRowsAddTensor; +template struct SelectedRowsAddTensor; +template struct SelectedRowsAddTensor; template -struct SelectedRowsAddTo { - void operator()(const platform::DeviceContext& context, +struct SelectedRowsAddTo { + void operator()(const platform::CPUDeviceContext& context, const framework::SelectedRows& input1, const int64_t input2_offset, framework::SelectedRows* input2) { @@ -143,14 +142,14 @@ struct SelectedRowsAddTo { } }; -template struct SelectedRowsAddTo; -template struct SelectedRowsAddTo; -template struct SelectedRowsAddTo; -template struct SelectedRowsAddTo; +template struct SelectedRowsAddTo; +template struct SelectedRowsAddTo; +template struct SelectedRowsAddTo; +template struct SelectedRowsAddTo; template -struct SelectedRowsAddToTensor { - void operator()(const platform::DeviceContext& context, +struct SelectedRowsAddToTensor { + void operator()(const platform::CPUDeviceContext& context, const framework::SelectedRows& input1, framework::Tensor* input2) { auto in1_height = input1.height(); @@ -175,10 +174,10 @@ struct SelectedRowsAddToTensor { } }; -template struct SelectedRowsAddToTensor; -template struct SelectedRowsAddToTensor; -template struct SelectedRowsAddToTensor; -template struct SelectedRowsAddToTensor; +template struct SelectedRowsAddToTensor; +template struct SelectedRowsAddToTensor; +template struct SelectedRowsAddToTensor; +template struct SelectedRowsAddToTensor; } // namespace math } // namespace operators diff --git a/paddle/operators/math/selected_rows_functor.cu b/paddle/operators/math/selected_rows_functor.cu index c1dd323ba29e03e3ab4a3e4d7248388b408fb9d6..c44577e00af5f362ae7e168495e496d60d05de95 100644 --- a/paddle/operators/math/selected_rows_functor.cu +++ b/paddle/operators/math/selected_rows_functor.cu @@ -20,8 +20,8 @@ namespace paddle { namespace operators { namespace math { template -struct SelectedRowsAdd { - void operator()(const platform::DeviceContext& context, +struct SelectedRowsAdd { + void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& input1, const framework::SelectedRows& input2, framework::SelectedRows* output) { @@ -64,16 +64,15 @@ struct SelectedRowsAdd { reinterpret_cast(context).stream()); auto* in2_data = in2_value.data(); - memory::Copy( - boost::get(out_place), out_data + in1_value.numel(), - boost::get(in2_place), in2_data, - in2_value.numel() * sizeof(T), - reinterpret_cast(context).stream()); + memory::Copy(boost::get(out_place), + out_data + in1_value.numel(), + boost::get(in2_place), in2_data, + in2_value.numel() * sizeof(T), context.stream()); } }; -template struct SelectedRowsAdd; -template struct SelectedRowsAdd; +template struct SelectedRowsAdd; +template struct SelectedRowsAdd; namespace { template @@ -96,8 +95,8 @@ __global__ void 
SelectedRowsAddTensorKernel(const T* selected_rows, } // namespace template -struct SelectedRowsAddTensor { - void operator()(const platform::DeviceContext& context, +struct SelectedRowsAddTensor { + void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& input1, const framework::Tensor& input2, framework::Tensor* output) { auto in1_height = input1.height(); @@ -117,30 +116,28 @@ struct SelectedRowsAddTensor { auto* in2_data = input2.data(); auto* out_data = output->data(); - SetConstant functor; + SetConstant functor; functor(context, output, 0.0); const int block_size = 256; dim3 threads(block_size, 1); dim3 grid(1, in1_rows.size()); - SelectedRowsAddTensorKernel<<< - grid, threads, 0, - reinterpret_cast(context) - .stream()>>>(in1_data, in1_rows.data(), out_data, in1_row_numel); + SelectedRowsAddTensorKernel< + T, block_size><<>>( + in1_data, in1_rows.data(), out_data, in1_row_numel); auto out_eigen = framework::EigenVector::Flatten(*output); auto in2_eigen = framework::EigenVector::Flatten(input2); - out_eigen.device(*context.GetEigenDevice()) = - out_eigen + in2_eigen; + out_eigen.device(*context.eigen_device()) = out_eigen + in2_eigen; } }; -template struct SelectedRowsAddTensor; -template struct SelectedRowsAddTensor; +template struct SelectedRowsAddTensor; +template struct SelectedRowsAddTensor; template -struct SelectedRowsAddTo { - void operator()(const platform::DeviceContext& context, +struct SelectedRowsAddTo { + void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& input1, const int64_t input2_offset, framework::SelectedRows* input2) { @@ -163,18 +160,17 @@ struct SelectedRowsAddTo { auto* in1_data = in1_value.data(); auto* in2_data = in2_value->data(); - memory::Copy( - boost::get(in2_place), in2_data + input2_offset, - boost::get(in1_place), in1_data, - in1_value.numel() * sizeof(T), - reinterpret_cast(context).stream()); + memory::Copy(boost::get(in2_place), + in2_data + input2_offset, + boost::get(in1_place), in1_data, + in1_value.numel() * sizeof(T), context.stream()); } }; -template struct SelectedRowsAddTo; -template struct SelectedRowsAddTo; -template struct SelectedRowsAddTo; -template struct SelectedRowsAddTo; +template struct SelectedRowsAddTo; +template struct SelectedRowsAddTo; +template struct SelectedRowsAddTo; +template struct SelectedRowsAddTo; namespace { template @@ -197,8 +193,8 @@ __global__ void SelectedRowsAddToTensorKernel(const T* selected_rows, } // namespace template -struct SelectedRowsAddToTensor { - void operator()(const platform::DeviceContext& context, +struct SelectedRowsAddToTensor { + void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& input1, framework::Tensor* input2) { auto in1_height = input1.height(); @@ -216,17 +212,16 @@ struct SelectedRowsAddToTensor { const int block_size = 256; dim3 threads(block_size, 1); dim3 grid(1, in1_rows.size()); - SelectedRowsAddToTensorKernel<<< - grid, threads, 0, - reinterpret_cast(context) - .stream()>>>(in1_data, in1_rows.data(), in2_data, in1_row_numel); + SelectedRowsAddToTensorKernel< + T, block_size><<>>( + in1_data, in1_rows.data(), in2_data, in1_row_numel); } }; -template struct SelectedRowsAddToTensor; -template struct SelectedRowsAddToTensor; -template struct SelectedRowsAddToTensor; -template struct SelectedRowsAddToTensor; +template struct SelectedRowsAddToTensor; +template struct SelectedRowsAddToTensor; +template struct SelectedRowsAddToTensor; +template struct 
SelectedRowsAddToTensor; } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/operators/math/selected_rows_functor.h b/paddle/operators/math/selected_rows_functor.h index d6dc6c03c941f965394d952574d309c51eb82a62..1149075abf16547a120ac8928c45b4972409fc72 100644 --- a/paddle/operators/math/selected_rows_functor.h +++ b/paddle/operators/math/selected_rows_functor.h @@ -21,33 +21,33 @@ namespace math { // SelectedRows + SelectedRows will simplely concat value and rows. // The real computation happens in dealing with LoDTensor. -template +template struct SelectedRowsAdd { - void operator()(const platform::DeviceContext& context, + void operator()(const DeviceContext& context, const framework::SelectedRows& input1, const framework::SelectedRows& input2, framework::SelectedRows* output); }; -template +template struct SelectedRowsAddTensor { - void operator()(const platform::DeviceContext& context, + void operator()(const DeviceContext& context, const framework::SelectedRows& input1, const framework::Tensor& input2, framework::Tensor* output); }; // input2 = input1 + input2 -template +template struct SelectedRowsAddTo { - void operator()(const platform::DeviceContext& context, + void operator()(const DeviceContext& context, const framework::SelectedRows& input1, const int64_t input2_offset, framework::SelectedRows* input2); }; // input2 = input1 + input2 -template +template struct SelectedRowsAddToTensor { - void operator()(const platform::DeviceContext& context, + void operator()(const DeviceContext& context, const framework::SelectedRows& input1, framework::Tensor* input2); }; diff --git a/paddle/operators/math/selected_rows_functor_test.cc b/paddle/operators/math/selected_rows_functor_test.cc index a3649b6875aca61ee3ceb1ca83c7f9b38dc06c42..8c74cab0a1e817f9e98fa682fe4122db7837aec9 100644 --- a/paddle/operators/math/selected_rows_functor_test.cc +++ b/paddle/operators/math/selected_rows_functor_test.cc @@ -23,7 +23,7 @@ TEST(selected_rows_functor, cpu_add) { CPUPlace cpu_place; CPUDeviceContext ctx(cpu_place); - SetConstant functor; + SetConstant functor; int64_t height = 10; int64_t row_numel = 10; @@ -47,7 +47,7 @@ TEST(selected_rows_functor, cpu_add) { // simplely concat two SelectedRows out_value->mutable_data(make_ddim({7, 10}), cpu_place); - SelectedRowsAdd add_functor; + SelectedRowsAdd add_functor; add_functor(ctx, *selected_rows1, *selected_rows2, output.get()); auto out_height = output->height(); @@ -85,7 +85,7 @@ TEST(selected_rows_functor, cpu_add) { std::unique_ptr tensor2{new Tensor()}; tensor2->mutable_data(make_ddim({height, row_numel}), cpu_place); - SelectedRowsAddTensor add_tensor_functor; + SelectedRowsAddTensor add_tensor_functor; add_tensor_functor(ctx, *output, *tensor1, tensor2.get()); auto* tensor2_data = tensor2->data(); @@ -112,7 +112,7 @@ TEST(selected_rows_functor, cpu_add_to) { CPUPlace cpu_place; CPUDeviceContext ctx(cpu_place); - SetConstant functor; + SetConstant functor; int64_t height = 10; int64_t row_numel = 10; @@ -137,7 +137,7 @@ TEST(selected_rows_functor, cpu_add_to) { // simplely concat two SelectedRows out_value->mutable_data(make_ddim({7, 10}), cpu_place); - SelectedRowsAddTo add_to_functor; + SelectedRowsAddTo add_to_functor; add_to_functor(ctx, *selected_rows1, 0, output.get()); add_to_functor(ctx, *selected_rows2, in1_value->numel(), output.get()); @@ -173,7 +173,7 @@ TEST(selected_rows_functor, cpu_add_to) { tensor1->mutable_data(make_ddim({height, row_numel}), cpu_place); functor(ctx, tensor1.get(), 3.0); - 
SelectedRowsAddToTensor add_to_tensor_functor; + SelectedRowsAddToTensor add_to_tensor_functor; add_to_tensor_functor(ctx, *output, tensor1.get()); auto* tensor1_data = tensor1->data(); diff --git a/paddle/operators/math/selected_rows_functor_test.cu b/paddle/operators/math/selected_rows_functor_test.cu index 7de9291c17d3f09a3c6076f00f2457f240e6f0af..777caf5635647d11e8fde05a68fdf7e2c32f48df 100644 --- a/paddle/operators/math/selected_rows_functor_test.cu +++ b/paddle/operators/math/selected_rows_functor_test.cu @@ -24,7 +24,7 @@ TEST(selected_rows_functor, gpu_add) { GPUPlace gpu_place(0); CPUPlace cpu_place; CUDADeviceContext ctx(gpu_place); - SetConstant functor; + SetConstant functor; int64_t height = 10; int64_t row_numel = 10; @@ -48,7 +48,7 @@ TEST(selected_rows_functor, gpu_add) { // simplely concat two SelectedRows out_value->mutable_data(make_ddim({7, 10}), gpu_place); - SelectedRowsAdd add_functor; + SelectedRowsAdd add_functor; add_functor(ctx, *selected_rows1, *selected_rows2, output.get()); auto out_height = output->height(); @@ -90,7 +90,7 @@ TEST(selected_rows_functor, gpu_add) { std::unique_ptr tensor2{new Tensor()}; tensor2->mutable_data(make_ddim({height, row_numel}), gpu_place); - SelectedRowsAddTensor add_tensor_functor; + SelectedRowsAddTensor add_tensor_functor; add_tensor_functor(ctx, *output, *tensor1, tensor2.get()); Tensor tensor2_cpu; @@ -122,7 +122,7 @@ TEST(selected_rows_functor, gpu_add_to) { GPUPlace gpu_place(0); CPUPlace cpu_place; CUDADeviceContext ctx(gpu_place); - SetConstant functor; + SetConstant functor; int64_t height = 10; int64_t row_numel = 10; @@ -147,7 +147,7 @@ TEST(selected_rows_functor, gpu_add_to) { // simplely concat two SelectedRows out_value->mutable_data(make_ddim({7, 10}), gpu_place); - SelectedRowsAddTo add_to_functor; + SelectedRowsAddTo add_to_functor; add_to_functor(ctx, *selected_rows1, 0, output.get()); add_to_functor(ctx, *selected_rows2, in1_value->numel(), output.get()); @@ -187,7 +187,7 @@ TEST(selected_rows_functor, gpu_add_to) { tensor1->mutable_data(make_ddim({height, row_numel}), gpu_place); functor(ctx, tensor1.get(), 3.0); - SelectedRowsAddToTensor add_to_tensor_functor; + SelectedRowsAddToTensor add_to_tensor_functor; add_to_tensor_functor(ctx, *output, tensor1.get()); Tensor tensor1_cpu; diff --git a/paddle/operators/math/sequence2batch.cc b/paddle/operators/math/sequence2batch.cc index 5b3bde02fbf981772759caa3d0054fac4a8520f9..88977be1f8c030741c3a3a8f07a4feeb1d8bb4d9 100644 --- a/paddle/operators/math/sequence2batch.cc +++ b/paddle/operators/math/sequence2batch.cc @@ -19,9 +19,9 @@ namespace operators { namespace math { template -class CopyMatrixRowsFunctor { +class CopyMatrixRowsFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& src, const size_t* index, framework::Tensor& dst, bool is_src_index) { auto src_dims = src.dims(); @@ -48,13 +48,13 @@ class CopyMatrixRowsFunctor { } }; -template class CopyMatrixRowsFunctor; -template class CopyMatrixRowsFunctor; +template class CopyMatrixRowsFunctor; +template class CopyMatrixRowsFunctor; -template class LoDTensor2BatchFunctor; -template class LoDTensor2BatchFunctor; -template class Batch2LoDTensorFunctor; -template class Batch2LoDTensorFunctor; +template class LoDTensor2BatchFunctor; +template class LoDTensor2BatchFunctor; +template class Batch2LoDTensorFunctor; +template class Batch2LoDTensorFunctor; } // namespace math } // namespace operators diff --git 
a/paddle/operators/math/sequence2batch.cu b/paddle/operators/math/sequence2batch.cu index c5d968aeb216bbb3e0e17f138b9e891494d99f75..452ae8951000872b706f7e4227a62dbf98109e7e 100644 --- a/paddle/operators/math/sequence2batch.cu +++ b/paddle/operators/math/sequence2batch.cu @@ -39,9 +39,9 @@ __global__ void CopyMatrixRowsKernel(const T* src, T* dst, const size_t* index, } template -class CopyMatrixRowsFunctor { +class CopyMatrixRowsFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& src, const size_t* index, framework::Tensor& dst, bool is_src_index) { auto src_dims = src.dims(); @@ -59,20 +59,19 @@ class CopyMatrixRowsFunctor { dim3 threads(128, 8); dim3 grid(8, 1); - auto stream = - reinterpret_cast(context).stream(); + auto stream = context.stream(); CopyMatrixRowsKernel<<>>( src_data, dst_data, index, height, width, is_src_index); } }; -template class CopyMatrixRowsFunctor; -template class CopyMatrixRowsFunctor; +template class CopyMatrixRowsFunctor; +template class CopyMatrixRowsFunctor; -template class LoDTensor2BatchFunctor; -template class LoDTensor2BatchFunctor; -template class Batch2LoDTensorFunctor; -template class Batch2LoDTensorFunctor; +template class LoDTensor2BatchFunctor; +template class LoDTensor2BatchFunctor; +template class Batch2LoDTensorFunctor; +template class Batch2LoDTensorFunctor; } // namespace math } // namespace operators diff --git a/paddle/operators/math/sequence2batch.h b/paddle/operators/math/sequence2batch.h index 73295ddbcb73fe80be08e732790f0ec75e94b415..a5c43a2c7d4d729c35a20a27de2a23141e6019bc 100644 --- a/paddle/operators/math/sequence2batch.h +++ b/paddle/operators/math/sequence2batch.h @@ -26,7 +26,7 @@ template using EigenMatrix = framework::EigenMatrix; -template +template class CopyMatrixRowsFunctor { public: // If is_src_index is true, @@ -34,12 +34,12 @@ class CopyMatrixRowsFunctor { // If is_src_index is false, // copy the input src to the indexed rows of output dst. // The indexed rows are based on the input index. - void operator()(const platform::DeviceContext& context, - const framework::Tensor& src, const size_t* index, - framework::Tensor& dst, bool is_src_index); + void operator()(const DeviceContext& context, const framework::Tensor& src, + const size_t* index, framework::Tensor& dst, + bool is_src_index); }; -template +template class LoDTensor2BatchFunctor { // Calculate the length of each sequence and // sort sequence index by the length. 
@@ -56,7 +56,7 @@ class LoDTensor2BatchFunctor { }; public: - void operator()(const platform::DeviceContext& context, + void operator()(const DeviceContext& context, const framework::LoDTensor& lod_tensor, framework::LoDTensor& batch, bool is_cal_batch_lod, bool is_reverse = false) const { @@ -65,7 +65,7 @@ class LoDTensor2BatchFunctor { PADDLE_ENFORCE_GT(lods.size(), 2UL); PADDLE_ENFORCE_EQ(lods[1].size(), static_cast(lod_tensor.dims()[0])); - CopyMatrixRowsFunctor to_batch; + CopyMatrixRowsFunctor to_batch; to_batch(context, lod_tensor, lods[1].data(), batch, true); return; } @@ -143,22 +143,22 @@ class LoDTensor2BatchFunctor { } batch.set_lod(batch_lods); - CopyMatrixRowsFunctor to_batch; + CopyMatrixRowsFunctor to_batch; to_batch(context, lod_tensor, seq2batch_idx, batch, true); } }; -template +template class Batch2LoDTensorFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const DeviceContext& context, const framework::LoDTensor& batch, framework::LoDTensor& lod_tensor) const { auto in_lod = batch.lod(); PADDLE_ENFORCE_GT(in_lod.size(), 2UL); PADDLE_ENFORCE_EQ(in_lod[1].size(), static_cast(lod_tensor.dims()[0])); - CopyMatrixRowsFunctor to_seq; + CopyMatrixRowsFunctor to_seq; size_t* index = in_lod[1].data(); to_seq(context, batch, index, lod_tensor, false); } diff --git a/paddle/operators/math/sequence_pooling.cc b/paddle/operators/math/sequence_pooling.cc index 5913c99fdb01100d0de44ab317124550fa626528..8fb92b1a130b8f25163d856f3f596136072180cf 100644 --- a/paddle/operators/math/sequence_pooling.cc +++ b/paddle/operators/math/sequence_pooling.cc @@ -20,9 +20,9 @@ namespace operators { namespace math { template -class MaxSeqPoolFunctor { +class MaxSeqPoolFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CPUDeviceContext& context, const framework::LoDTensor& input, framework::Tensor* output, framework::Tensor* index) { auto in_dims = input.dims(); @@ -60,9 +60,9 @@ class MaxSeqPoolFunctor { }; template -class MaxSeqPoolGradFunctor { +class MaxSeqPoolGradFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& out_grad, const framework::Tensor& index, framework::LoDTensor* in_grad) { @@ -80,7 +80,7 @@ class MaxSeqPoolGradFunctor { const int* max_index = index.data(); T* ig_data = in_grad->data(); - SetConstant set_zero; + SetConstant set_zero; set_zero(context, in_grad, static_cast(0.0)); int64_t num_seq = og_dims[0]; int64_t dim = out_grad.numel() / num_seq; @@ -93,10 +93,10 @@ class MaxSeqPoolGradFunctor { } }; -template class MaxSeqPoolFunctor; -template class MaxSeqPoolFunctor; -template class MaxSeqPoolGradFunctor; -template class MaxSeqPoolGradFunctor; +template class MaxSeqPoolFunctor; +template class MaxSeqPoolFunctor; +template class MaxSeqPoolGradFunctor; +template class MaxSeqPoolGradFunctor; } // namespace math } // namespace operators diff --git a/paddle/operators/math/sequence_pooling.cu b/paddle/operators/math/sequence_pooling.cu index 5ed951402fecba66a8960f4d024bf3785dac51c7..4c9e6b375ce7251747b9cd443d86cca0858c84ef 100644 --- a/paddle/operators/math/sequence_pooling.cu +++ b/paddle/operators/math/sequence_pooling.cu @@ -46,9 +46,9 @@ __global__ void KeMaxSequencePool(const T* input, const size_t* starts, } template -class MaxSeqPoolFunctor { +class MaxSeqPoolFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const 
platform::CUDADeviceContext& context, const framework::LoDTensor& input, framework::Tensor* output, framework::Tensor* index) { auto in_dims = input.dims(); @@ -71,8 +71,7 @@ class MaxSeqPoolFunctor { dim3 threads(256, 1); dim3 grid(num_seq, 1); - auto stream = - reinterpret_cast(context).stream(); + auto stream = context.stream(); KeMaxSequencePool<<>>( in_data, starts.data(), out_data, max_index, num_seq, dim); } @@ -91,9 +90,9 @@ __global__ void KeMaxSequencePoolGrad(const T* out_grad, const int* max_index, } template -class MaxSeqPoolGradFunctor { +class MaxSeqPoolGradFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& out_grad, const framework::Tensor& index, framework::LoDTensor* in_grad) { @@ -111,7 +110,7 @@ class MaxSeqPoolGradFunctor { const int* max_index = index.data(); T* ig_data = in_grad->data(); - SetConstant set_zero; + SetConstant set_zero; set_zero(context, in_grad, static_cast(0.0)); int64_t num_seq = og_dims[0]; int64_t dim = out_grad.numel() / num_seq; @@ -119,17 +118,16 @@ class MaxSeqPoolGradFunctor { unsigned int blocks = (num_seq * dim + 128 - 1) / 128; dim3 threads(128, 1); dim3 grid(blocks, 1); - auto stream = - reinterpret_cast(context).stream(); + auto stream = context.stream(); KeMaxSequencePoolGrad<<>>( og_data, max_index, ig_data, num_seq, dim); } }; -template class MaxSeqPoolFunctor; -template class MaxSeqPoolFunctor; -template class MaxSeqPoolGradFunctor; -template class MaxSeqPoolGradFunctor; +template class MaxSeqPoolFunctor; +template class MaxSeqPoolFunctor; +template class MaxSeqPoolGradFunctor; +template class MaxSeqPoolGradFunctor; } // namespace math } // namespace operators diff --git a/paddle/operators/math/sequence_pooling.h b/paddle/operators/math/sequence_pooling.h index 35dfe26de1a87a064410401244914d4e2a94176e..13ffb2ebef3a683b5e5fe64433a90237b944002e 100644 --- a/paddle/operators/math/sequence_pooling.h +++ b/paddle/operators/math/sequence_pooling.h @@ -23,18 +23,18 @@ namespace math { #define FLT_MAX __FLT_MAX__ -template +template class MaxSeqPoolFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const DeviceContext& context, const framework::LoDTensor& input, framework::Tensor* output, framework::Tensor* index); }; -template +template class MaxSeqPoolGradFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const DeviceContext& context, const framework::Tensor& out_grad, const framework::Tensor& index, framework::LoDTensor* in_grad); diff --git a/paddle/operators/math/softmax.cc b/paddle/operators/math/softmax.cc index 3e2f15d6c27f58818128f32fab0bd4c5f36b0050..72f10f35f4ef39b41fbc5e900313eafd7ba669e9 100644 --- a/paddle/operators/math/softmax.cc +++ b/paddle/operators/math/softmax.cc @@ -19,10 +19,10 @@ namespace paddle { namespace operators { namespace math { -template class SoftmaxFunctor; -template class SoftmaxFunctor; -template class SoftmaxGradFunctor; -template class SoftmaxGradFunctor; +template class SoftmaxFunctor; +template class SoftmaxFunctor; +template class SoftmaxGradFunctor; +template class SoftmaxGradFunctor; } // namespace math } // namespace operators diff --git a/paddle/operators/math/softmax.cu b/paddle/operators/math/softmax.cu index 4dbab51d46bdaaa506a6c242d0958c73687f4eb9..9e73f6a371c950ed6f81ee90216f7fd3899f73ce 100644 --- a/paddle/operators/math/softmax.cu +++ b/paddle/operators/math/softmax.cu @@ -21,10 +21,10 @@ 
namespace paddle { namespace operators { namespace math { -template class SoftmaxFunctor; -template class SoftmaxFunctor; -template class SoftmaxGradFunctor; -template class SoftmaxGradFunctor; +template class SoftmaxFunctor; +template class SoftmaxFunctor; +template class SoftmaxGradFunctor; +template class SoftmaxGradFunctor; } // namespace math } // namespace operators diff --git a/paddle/operators/math/softmax.h b/paddle/operators/math/softmax.h index fe1074650234c5beb5889e7efd713164769ad740..471f44d340cfd0d6305a9127c34289ef1663accb 100644 --- a/paddle/operators/math/softmax.h +++ b/paddle/operators/math/softmax.h @@ -19,19 +19,18 @@ namespace paddle { namespace operators { namespace math { -template +template class SoftmaxFunctor { public: - void operator()(const platform::DeviceContext& context, - const framework::Tensor* X, framework::Tensor* Y); + void operator()(const DeviceContext& context, const framework::Tensor* X, + framework::Tensor* Y); }; -template +template class SoftmaxGradFunctor { public: - void operator()(const platform::DeviceContext& context, - const framework::Tensor* y, const framework::Tensor* y_grad, - framework::Tensor* x_grad); + void operator()(const DeviceContext& context, const framework::Tensor* y, + const framework::Tensor* y_grad, framework::Tensor* x_grad); }; } // namespace math diff --git a/paddle/operators/math/softmax_impl.h b/paddle/operators/math/softmax_impl.h index 05793eeb3eeafaf36c301236197555b7b35e5803..82f597ff792decb1760f59e693026cd453432d05 100644 --- a/paddle/operators/math/softmax_impl.h +++ b/paddle/operators/math/softmax_impl.h @@ -32,10 +32,10 @@ struct ValueClip { } }; -template -void SoftmaxFunctor::operator()( - const platform::DeviceContext& context, const framework::Tensor* X, - framework::Tensor* Y) { +template +void SoftmaxFunctor::operator()(const DeviceContext& context, + const framework::Tensor* X, + framework::Tensor* Y) { auto logits = EigenMatrix::From(*X); auto softmax = EigenMatrix::From(*Y); @@ -56,19 +56,18 @@ void SoftmaxFunctor::operator()( .broadcast(one_by_class)) .unaryExpr(ValueClip()); - softmax.device(*context.GetEigenDevice()) = shifted_logits.exp(); - softmax.device(*context.GetEigenDevice()) = - (softmax * - softmax.sum(along_class) - .inverse() - .eval() - .reshape(batch_by_one) - .broadcast(one_by_class)); + softmax.device(*context.eigen_device()) = shifted_logits.exp(); + softmax.device(*context.eigen_device()) = (softmax * + softmax.sum(along_class) + .inverse() + .eval() + .reshape(batch_by_one) + .broadcast(one_by_class)); } -template -void SoftmaxGradFunctor::operator()( - const platform::DeviceContext& context, const framework::Tensor* y, +template +void SoftmaxGradFunctor::operator()( + const DeviceContext& context, const framework::Tensor* y, const framework::Tensor* y_grad, framework::Tensor* x_grad) { auto softmax = EigenMatrix::From(*y); auto softmax_grad = EigenMatrix::From(*y_grad); @@ -89,8 +88,7 @@ void SoftmaxGradFunctor::operator()( .eval() .reshape(batch_by_one) .broadcast(one_by_class); - logits_grad.device(*context.GetEigenDevice()) = - (softmax_grad - dot) * softmax; + logits_grad.device(*context.eigen_device()) = (softmax_grad - dot) * softmax; } } // namespace math diff --git a/paddle/operators/math/unpooling.cc b/paddle/operators/math/unpooling.cc new file mode 100644 index 0000000000000000000000000000000000000000..ecd3a647e00655a57d11c2f082bd1f81822cf92b --- /dev/null +++ b/paddle/operators/math/unpooling.cc @@ -0,0 +1,91 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. 
All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/unpooling.h" +namespace paddle { +namespace operators { +namespace math { +template +class Unpool2dMaxFunctor { + public: + void operator()(const platform::CPUDeviceContext& context, + const framework::Tensor& input, + const framework::Tensor& indices, framework::Tensor* output) { + const int batch_size = input.dims()[0]; + const int input_height = input.dims()[2]; + const int input_width = input.dims()[3]; + const int output_channels = output->dims()[1]; + const int output_height = output->dims()[2]; + const int output_width = output->dims()[3]; + int input_feasize = input_height * input_width; + int output_feasize = output_height * output_width; + const T* input_data = input.data(); + const int* indices_data = indices.data(); + T* output_data = output->mutable_data(context.GetPlace()); + for (int b = 0; b < batch_size; ++b) { + for (int c = 0; c < output_channels; ++c) { + for (int i = 0; i < input_feasize; ++i) { + int index = indices_data[i]; + PADDLE_ENFORCE(index < output_feasize, "err index in unpooling!"); + output_data[index] = input_data[i]; + } + input_data += input_feasize; + indices_data += input_feasize; + output_data += output_feasize; + } + } + } +}; +template +class Unpool2dMaxGradFunctor { + public: + void operator()(const platform::CPUDeviceContext& context, + const framework::Tensor& input, + const framework::Tensor& indices, + const framework::Tensor& output, + const framework::Tensor& output_grad, + framework::Tensor* input_grad) { + const int batch_size = input.dims()[0]; + const int input_height = input.dims()[2]; + const int input_width = input.dims()[3]; + const int output_channels = output.dims()[1]; + const int output_height = output.dims()[2]; + const int output_width = output.dims()[3]; + int input_feasize = input_height * input_width; + int output_feasize = output_height * output_width; + const int* indices_data = indices.data(); + const T* output_grad_data = output_grad.data(); + T* input_grad_data = input_grad->mutable_data(context.GetPlace()); + + for (int b = 0; b < batch_size; ++b) { + for (int c = 0; c < output_channels; ++c) { + for (int i = 0; i < input_feasize; ++i) { + int index = indices_data[i]; + PADDLE_ENFORCE(index < output_feasize, "err index in unpooling!"); + input_grad_data[i] = output_grad_data[index]; + } + input_grad_data += input_feasize; + indices_data += input_feasize; + output_grad_data += output_feasize; + } + } + } +}; +template class Unpool2dMaxGradFunctor; +template class Unpool2dMaxGradFunctor; +template class Unpool2dMaxFunctor; +template class Unpool2dMaxFunctor; +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/unpooling.cu b/paddle/operators/math/unpooling.cu new file mode 100644 index 0000000000000000000000000000000000000000..ecbde0f6a798ba817c28714b37af8187d2e9555e --- /dev/null +++ b/paddle/operators/math/unpooling.cu @@ -0,0 +1,128 @@ +/* Copyright (c) 2016 paddlepaddle Authors. 
All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/unpooling.h" +#include "paddle/platform/cuda_helper.h" + +namespace paddle { +namespace operators { +namespace math { +template +__global__ void KernelUnpool2dMax(const int nthreads, const T* input_data, + const int* indices_data, + const int input_height, const int input_width, + const int channels, T* output_data, + const int output_height, + const int output_width) { + int in_n_stride = input_height * input_width * channels; + int in_c_stride = input_height * input_width; + int out_n_stride = output_height * output_width * channels; + int out_c_stride = output_height * output_width; + int index = blockIdx.x * blockDim.x + threadIdx.x; + int offset = blockDim.x * gridDim.x; + for (int i = index; i < nthreads; i += offset) { + int bidx = i / in_n_stride; + int boffset = i % in_n_stride; + int cidx = boffset / in_c_stride; + int out_offset = bidx * out_n_stride + cidx * out_c_stride; + int out_index = indices_data[i]; + PADDLE_ASSERT(out_index < out_c_stride); + output_data[out_offset + out_index] = input_data[i]; + } +} +template +__global__ void KernelUnpool2dMaxGrad( + const int nthreads, const T* input_data, const int* indices_data, + const int input_height, const int input_width, const int channels, + const T* output_data, const T* output_grad, const int output_height, + const int output_width, T* input_grad) { + int in_n_stride = input_height * input_width * channels; + int in_c_stride = input_height * input_width; + int out_n_stride = output_height * output_width * channels; + int out_c_stride = output_height * output_width; + int index = blockIdx.x * blockDim.x + threadIdx.x; + int offset = blockDim.x * gridDim.x; + for (int i = index; i < nthreads; i += offset) { + int bidx = i / in_n_stride; + int boffset = i % in_n_stride; + int cidx = boffset / in_c_stride; + int out_offset = bidx * out_n_stride + cidx * out_c_stride; + int out_index = indices_data[i]; + PADDLE_ASSERT(out_index < out_c_stride); + input_grad[i] = output_grad[out_offset + out_index]; + } +} +/* + * All tensors are in NCHW format. + */ +template +class Unpool2dMaxFunctor { + public: + void operator()(const platform::CUDADeviceContext& context, + const framework::Tensor& input, + const framework::Tensor& indices, framework::Tensor* output) { + const int batch_size = input.dims()[0]; + const int input_height = input.dims()[2]; + const int input_width = input.dims()[3]; + const int output_channels = output->dims()[1]; + const int output_height = output->dims()[2]; + const int output_width = output->dims()[3]; + const T* input_data = input.data(); + const int* indices_data = indices.data(); + T* output_data = output->mutable_data(context.GetPlace()); + int threads = 1024; + int grid = (input.numel() + threads - 1) / threads; + KernelUnpool2dMax<<>>( + input.numel(), input_data, indices_data, input_height, input_width, + output_channels, output_data, output_height, output_width); + } +}; +/* + * All tensors are in NCHW format. 
+ */ +template +class Unpool2dMaxGradFunctor { + public: + void operator()(const platform::CUDADeviceContext& context, + const framework::Tensor& input, + const framework::Tensor& indices, + const framework::Tensor& output, + const framework::Tensor& output_grad, + framework::Tensor* input_grad) { + const int batch_size = input.dims()[0]; + const int input_height = input.dims()[2]; + const int input_width = input.dims()[3]; + const int output_channels = output.dims()[1]; + const int output_height = output.dims()[2]; + const int output_width = output.dims()[3]; + const T* input_data = input.data(); + const int* indices_data = indices.data(); + const T* output_data = output.data(); + const T* output_grad_data = output_grad.data(); + T* input_grad_data = input_grad->mutable_data(context.GetPlace()); + int threads = 1024; + int grid = (input.numel() + threads - 1) / threads; + KernelUnpool2dMaxGrad<<>>( + input.numel(), input_data, indices_data, input_height, input_width, + output_channels, output_data, output_grad_data, output_height, + output_width, input_grad_data); + } +}; +template class Unpool2dMaxGradFunctor; +template class Unpool2dMaxGradFunctor; +template class Unpool2dMaxFunctor; +template class Unpool2dMaxFunctor; +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/unpooling.h b/paddle/operators/math/unpooling.h new file mode 100644 index 0000000000000000000000000000000000000000..0f0ff1371ebea8c7501aee1c7c45bc6a79de397e --- /dev/null +++ b/paddle/operators/math/unpooling.h @@ -0,0 +1,38 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once +#include "paddle/framework/tensor.h" + +namespace paddle { +namespace operators { +namespace math { +template +class Unpool2dMaxFunctor { + public: + void operator()(const DeviceContext& context, const framework::Tensor& input, + const framework::Tensor& indices, framework::Tensor* output); +}; +template +class Unpool2dMaxGradFunctor { + public: + void operator()(const DeviceContext& context, const framework::Tensor& input, + const framework::Tensor& indices, + const framework::Tensor& output, + const framework::Tensor& output_grad, + framework::Tensor* input_grad); +}; +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/vol2col.cc b/paddle/operators/math/vol2col.cc index 99eb7fd46de42400a915d86706580d15b08a74a2..d574ed9234304d992a6e4a10fce0816aee7fa40a 100644 --- a/paddle/operators/math/vol2col.cc +++ b/paddle/operators/math/vol2col.cc @@ -25,9 +25,9 @@ namespace math { * output_depth, output_height, output_width] */ template -class Vol2ColFunctor { +class Vol2ColFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& vol, const std::vector& dilations, const std::vector& strides, @@ -111,9 +111,9 @@ class Vol2ColFunctor { * output_depth, output_height, output_width] */ template -class Col2VolFunctor { +class Col2VolFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& col, const std::vector& dilations, const std::vector& strides, @@ -190,10 +190,10 @@ class Col2VolFunctor { } }; -template class Vol2ColFunctor; -template class Vol2ColFunctor; -template class Col2VolFunctor; -template class Col2VolFunctor; +template class Vol2ColFunctor; +template class Vol2ColFunctor; +template class Col2VolFunctor; +template class Col2VolFunctor; } // namespace math } // namespace operators diff --git a/paddle/operators/math/vol2col.cu b/paddle/operators/math/vol2col.cu index dae3be858e9f47d0133aa37e8a5f90a0addf1dfd..b029442fe48dd27232d322aadec5864760e1b9ff 100644 --- a/paddle/operators/math/vol2col.cu +++ b/paddle/operators/math/vol2col.cu @@ -68,9 +68,9 @@ __global__ void vol2col(int num_kernels, const T* data_vol, int depth, * output_depth, output_height, output_width] */ template -class Vol2ColFunctor { +class Vol2ColFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& vol, const std::vector& dilations, const std::vector& strides, @@ -117,9 +117,7 @@ class Vol2ColFunctor { const int threads = 1024; const int blocks = (num_outputs + 1024 - 1) / 1024; - vol2col<<(context) - .stream()>>>( + vol2col<<>>( num_outputs, vol.data(), input_depth, input_height, input_width, dilations[0], dilations[1], dilations[2], filter_depth, filter_height, filter_width, strides[0], strides[1], strides[2], paddings[0], @@ -196,9 +194,9 @@ __global__ void col2vol(int num_kernels, const T* data_col, int depth, * output_depth, output_height, output_width] */ template -class Col2VolFunctor { +class Col2VolFunctor { public: - void operator()(const platform::DeviceContext& context, + void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& col, const std::vector& dilations, const std::vector& strides, @@ -245,9 +243,7 @@ class Col2VolFunctor { const int threads = 1024; const int blocks = (num_kernels + 1024 - 1) / 
1024; - col2vol<<(context) - .stream()>>>( + col2vol<<>>( num_kernels, col.data(), input_depth, input_height, input_width, dilations[0], dilations[1], dilations[2], filter_depth, filter_height, filter_width, strides[0], strides[1], strides[2], paddings[0], @@ -256,10 +252,10 @@ class Col2VolFunctor { } }; -template class Vol2ColFunctor; -template class Vol2ColFunctor; -template class Col2VolFunctor; -template class Col2VolFunctor; +template class Vol2ColFunctor; +template class Vol2ColFunctor; +template class Col2VolFunctor; +template class Col2VolFunctor; } // namespace math } // namespace operators diff --git a/paddle/operators/math/vol2col.h b/paddle/operators/math/vol2col.h index dc64d1d9776261541a380ed15207904d6b4e641c..dcd80370e8516d34b764b1ab3b0b98516e738bf6 100644 --- a/paddle/operators/math/vol2col.h +++ b/paddle/operators/math/vol2col.h @@ -63,22 +63,20 @@ namespace math { * \note The caller needs to ensure that volShape.inputChannels is equal to * colShape.inputChannels. */ -template +template class Vol2ColFunctor { public: - void operator()(const platform::DeviceContext& context, - const framework::Tensor& vol, + void operator()(const DeviceContext& context, const framework::Tensor& vol, const std::vector& dilations, const std::vector& strides, const std::vector& paddings, framework::Tensor* col) const; }; -template +template class Col2VolFunctor { public: - void operator()(const platform::DeviceContext& context, - const framework::Tensor& col, + void operator()(const DeviceContext& context, const framework::Tensor& col, const std::vector& dilations, const std::vector& strides, const std::vector& paddings, diff --git a/paddle/operators/math/vol2col_test.cc b/paddle/operators/math/vol2col_test.cc index 62c3152304ad7fe946c996be413e102f3dd92bb2..f46db3c56713399798a45854bf1613d07aee26e6 100644 --- a/paddle/operators/math/vol2col_test.cc +++ b/paddle/operators/math/vol2col_test.cc @@ -16,7 +16,7 @@ limitations under the License. 
*/ #include #include -template +template void testVol2col() { paddle::framework::Tensor input; paddle::framework::Tensor input_tmp; @@ -24,18 +24,7 @@ void testVol2col() { paddle::framework::Tensor output_tmp; auto* place = new Place(); - paddle::platform::DeviceContext* context; - if (paddle::platform::is_cpu_place(*place)) { - context = - new paddle::platform::CPUDeviceContext(paddle::platform::CPUPlace()); - } else { -#ifdef PADDLE_WITH_CUDA - context = - new paddle::platform::CUDADeviceContext(paddle::platform::GPUPlace()); -#else - PADDLE_THROW("no GPU support"); -#endif // PADDLE_WITH_CUDA - } + DeviceContext* context = new DeviceContext(*place); /** * input = [[0, 1, 2, @@ -88,7 +77,7 @@ void testVol2col() { output_depth, output_height, output_width}, *place); - paddle::operators::math::Vol2ColFunctor vol2col; + paddle::operators::math::Vol2ColFunctor vol2col; vol2col(*context, input, dilations, strides, paddings, &output); float vol_2_col[] = {0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11}; @@ -113,7 +102,7 @@ void testVol2col() { CopyFrom(input_tmp, *place, *context, &input); } - paddle::operators::math::Col2VolFunctor col2vol; + paddle::operators::math::Col2VolFunctor col2vol; col2vol(*context, output, dilations, strides, paddings, &input); float* in_ptr; @@ -130,8 +119,9 @@ void testVol2col() { } TEST(math, vol2col) { - testVol2col(); + testVol2col(); #ifdef PADDLE_WITH_CUDA - testVol2col(); + testVol2col(); #endif // PADDLE_WITH_CUDA } diff --git a/paddle/operators/matmul_op.cc b/paddle/operators/matmul_op.cc index 5a1a6154203d40186f1e41491194b19612931b1f..ee0bc0c3708ac20ad00e3222060244d42dbd6f2f 100644 --- a/paddle/operators/matmul_op.cc +++ b/paddle/operators/matmul_op.cc @@ -206,7 +206,8 @@ class MatMulOpGrad : public framework::OperatorWithKernel { namespace ops = paddle::operators; REGISTER_OP(matmul, ops::MatMulOp, ops::MatMulOpMaker, matmul_grad, ops::MatMulOpGrad); -REGISTER_OP_CPU_KERNEL(matmul, - ops::MatMulKernel); REGISTER_OP_CPU_KERNEL( - matmul_grad, ops::MatMulGradKernel); + matmul, ops::MatMulKernel); +REGISTER_OP_CPU_KERNEL( + matmul_grad, + ops::MatMulGradKernel); diff --git a/paddle/operators/matmul_op.cu.cc b/paddle/operators/matmul_op.cu.cc index b7e66382f00445b087e14103e7a148d450b37405..6a3772c00457993dcc7b55a0f15493974633026c 100644 --- a/paddle/operators/matmul_op.cu.cc +++ b/paddle/operators/matmul_op.cu.cc @@ -15,7 +15,8 @@ #include "paddle/operators/matmul_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(matmul, - ops::MatMulKernel); -REGISTER_OP_GPU_KERNEL( - matmul_grad, ops::MatMulGradKernel); +REGISTER_OP_CUDA_KERNEL( + matmul, ops::MatMulKernel); +REGISTER_OP_CUDA_KERNEL( + matmul_grad, + ops::MatMulGradKernel); diff --git a/paddle/operators/matmul_op.h b/paddle/operators/matmul_op.h index 1e4aa48b7018d8e3d6f02591fbca2877ddbd3c5d..de9da487b3d627cc79962db3770632813e9cd9f5 100644 --- a/paddle/operators/matmul_op.h +++ b/paddle/operators/matmul_op.h @@ -27,7 +27,7 @@ using DDim = framework::DDim; using framework::make_ddim; using framework::vectorize; -template +template class MatMulKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -38,8 +38,9 @@ class MatMulKernel : public framework::OpKernel { bool transpose_x = context.Attr("transpose_X"); bool transpose_y = context.Attr("transpose_Y"); - math::MatMulFunctor()(context.device_context(), x, transpose_x, y, - transpose_y, T(1), out, T(0)); + math::MatMulFunctor()( + context.template device_context(), x, 
transpose_x, y, + transpose_y, T(1), out, T(0)); } }; @@ -68,17 +69,16 @@ Tensor CombineBatchAndM(const Tensor& input) { // Reshape a rank-3 tensor from P x M x N to M x (P * N). // (Warning: This requires transposing data and writes into new memory.) // Identity op if the tensor is not of rank 3. -template -Tensor CombineBatchAndN(const framework::ExecutionContext& context, - const Tensor& input) { +template +Tensor CombineBatchAndN(const DeviceContext& context, const Tensor& input) { Tensor output; auto in_dims = input.dims(); if (in_dims.size() == 3) { output.Resize({in_dims[1], in_dims[0], in_dims[2]}); output.mutable_data(context.GetPlace()); std::vector axis = {1, 0, 2}; - math::Transpose trans; - trans(context.device_context(), input, &output, axis); + math::Transpose trans; + trans(context, input, &output, axis); std::vector out_dims = {in_dims[1], in_dims[0] * in_dims[2]}; output.Resize({in_dims[1], in_dims[0] * in_dims[2]}); } else { @@ -112,7 +112,7 @@ Tensor CombineBatchAndN(const framework::ExecutionContext& context, // // To handle this sort of scenario, we reshape X : P x M x K, dOut: P x M x N // to X: (P * M) x K, dOut: (P * M) x N. -template +template class MatMulGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -178,24 +178,23 @@ class MatMulGradKernel : public framework::OpKernel { Tensor Y = Reshape(y, make_ddim(y_dims)); Tensor dOut = Reshape(dout, make_ddim(dout_dims)); + auto& dev_ctx = context.template device_context(); if (dx) { dx->mutable_data(context.GetPlace()); const Tensor& dOut_for_dX = (x_dims.size() == 2 && y_dims.size() == 3) - ? CombineBatchAndN(context, dOut) + ? CombineBatchAndN(dev_ctx, dOut) : dOut; if (x_dims.size() == 2 && y_dims.size() == 3) { Y = transpose_y ? CombineBatchAndM(Y) - : CombineBatchAndN(context, Y); + : CombineBatchAndN(dev_ctx, Y); } if (transpose_x) { - math::MatMulFunctor()(context.device_context(), Y, - transpose_y, dOut_for_dX, transpose_x, - T(1), dx, T(0)); + math::MatMulFunctor()( + dev_ctx, Y, transpose_y, dOut_for_dX, transpose_x, T(1), dx, T(0)); } else { - math::MatMulFunctor()(context.device_context(), dOut_for_dX, - transpose_x, Y, !transpose_y, T(1), dx, - T(0)); + math::MatMulFunctor()( + dev_ctx, dOut_for_dX, transpose_x, Y, !transpose_y, T(1), dx, T(0)); } } @@ -205,18 +204,16 @@ class MatMulGradKernel : public framework::OpKernel { ? CombineBatchAndM(dOut) : dOut; if (y_dims.size() == 2 && x_dims.size() == 3) { - X = transpose_x ? CombineBatchAndN(context, X) + X = transpose_x ? CombineBatchAndN(dev_ctx, X) : CombineBatchAndM(X); dOut = CombineBatchAndM(dOut); } if (transpose_y) { - math::MatMulFunctor()(context.device_context(), dOut_for_dY, - transpose_y, X, transpose_x, T(1), dy, - T(0)); + math::MatMulFunctor()( + dev_ctx, dOut_for_dY, transpose_y, X, transpose_x, T(1), dy, T(0)); } else { - math::MatMulFunctor()(context.device_context(), X, - !transpose_x, dOut_for_dY, transpose_y, - T(1), dy, T(0)); + math::MatMulFunctor()( + dev_ctx, X, !transpose_x, dOut_for_dY, transpose_y, T(1), dy, T(0)); } } } diff --git a/paddle/operators/maxout_op.cc b/paddle/operators/maxout_op.cc index e203a25d544372220e8246e5e17ffbc6408d2998..011616e615a36efa0efe9ff15e678f1486c5177a 100644 --- a/paddle/operators/maxout_op.cc +++ b/paddle/operators/maxout_op.cc @@ -40,23 +40,28 @@ class MaxOutOpMaker : public framework::OpProtoAndCheckerMaker { "the number of channels divided by groups.." 
)DOC"); AddComment(R"DOC( - Assumed the input shape is (N, Ci, H, W). - The output shape is (N, Co, H, W). Then `Co = Ci / groups`. +MaxOut Operator. - math: - y_{si+j} = \max_k x_{gsi + sk + j} - g = groups - s = input.size / num_channels - 0 \le i < num_channels / groups - 0 \le j < s - 0 \le k < groups +Assumed the input shape is (N, Ci, H, W). +The output shape is (N, Co, H, W). +Then $Co = Ci / groups$ and the operator formula is as follows: - Please refer to Paper: - - Maxout Networks: http://www.jmlr.org/proceedings/papers/v28/goodfellow13.pdf - - Multi-digit Number Recognition from Street View \ - Imagery using Deep Convolutional Neural Networks: \ - https://arxiv.org/pdf/1312.6082v4.pdf - )DOC"); +$$ +y_{si+j} = \max_k x_{gsi + sk + j} \\ +g = groups \\ +s = \frac{input.size}{num\_channels} \\ +0 \le i < \frac{num\_channels}{groups} \\ +0 \le j < s \\ +0 \le k < groups +$$ + +Please refer to Paper: + - Maxout Networks: http://www.jmlr.org/proceedings/papers/v28/goodfellow13.pdf + - Multi-digit Number Recognition from Street View \ + Imagery using Deep Convolutional Neural Networks: \ + https://arxiv.org/pdf/1312.6082v4.pdf + +)DOC"); } }; @@ -96,7 +101,8 @@ class MaxOutOpGrad : public framework::OperatorWithKernel { namespace ops = paddle::operators; REGISTER_OP(maxout, ops::MaxOutOp, ops::MaxOutOpMaker, maxout_grad, ops::MaxOutOpGrad); -REGISTER_OP_CPU_KERNEL(maxout, - ops::MaxOutKernel); REGISTER_OP_CPU_KERNEL( - maxout_grad, ops::MaxOutGradKernel); + maxout, ops::MaxOutKernel); +REGISTER_OP_CPU_KERNEL( + maxout_grad, + ops::MaxOutGradKernel); diff --git a/paddle/operators/maxout_op.cu.cc b/paddle/operators/maxout_op.cu.cc index decd43913d69d122330886e07178778d03f7fef5..2904f0ff96f06cefad29a65898cd82107d9bd600 100644 --- a/paddle/operators/maxout_op.cu.cc +++ b/paddle/operators/maxout_op.cu.cc @@ -15,9 +15,10 @@ #include "paddle/operators/maxout_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(maxout, - ops::MaxOutKernel, - ops::MaxOutKernel); -REGISTER_OP_GPU_KERNEL( - maxout_grad, ops::MaxOutGradKernel, - ops::MaxOutGradKernel); +REGISTER_OP_CUDA_KERNEL( + maxout, ops::MaxOutKernel, + ops::MaxOutKernel); +REGISTER_OP_CUDA_KERNEL( + maxout_grad, + ops::MaxOutGradKernel, + ops::MaxOutGradKernel); diff --git a/paddle/operators/maxout_op.h b/paddle/operators/maxout_op.h index 44a0d073dda642f6e261ce5760013f3e1055f43d..e8b12552b9ff39e23702de17abc9825a527f02aa 100644 --- a/paddle/operators/maxout_op.h +++ b/paddle/operators/maxout_op.h @@ -23,7 +23,7 @@ namespace operators { using Tensor = framework::Tensor; -template +template class MaxOutKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -31,12 +31,13 @@ class MaxOutKernel : public framework::OpKernel { Tensor* out = context.Output("Out"); int groups = context.template Attr("groups"); - math::MaxOutFunctor maxout_forward; - maxout_forward(context.device_context(), *in_x, out, groups); + math::MaxOutFunctor maxout_forward; + maxout_forward(context.template device_context(), *in_x, out, + groups); } }; -template +template class MaxOutGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -46,14 +47,13 @@ class MaxOutGradKernel : public framework::OpKernel { context.Input(framework::GradVarName("Out")); Tensor* in_x_grad = context.Output(framework::GradVarName("X")); int groups = context.template Attr("groups"); - auto& device_ctx = context.device_context(); - math::SetConstant 
zero; + auto& device_ctx = context.template device_context(); + math::SetConstant zero; if (in_x_grad) { in_x_grad->mutable_data(context.GetPlace()); zero(device_ctx, in_x_grad, static_cast(0.0)); - math::MaxOutGradFunctor maxout_backward; - maxout_backward(context.device_context(), *in_x, in_x_grad, *out, - *out_grad, groups); + math::MaxOutGradFunctor maxout_backward; + maxout_backward(device_ctx, *in_x, in_x_grad, *out, *out_grad, groups); } } }; diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc index dcc5b4286f4ac833268a779a9a7edd2ed119ffff..8932d700c2ae17eefe919eefae2282ae4a5a80a8 100644 --- a/paddle/operators/mean_op.cc +++ b/paddle/operators/mean_op.cc @@ -76,8 +76,9 @@ class MeanGradMaker : public framework::SingleGradOpDescMaker { namespace ops = paddle::operators; REGISTER_OPERATOR(mean, ops::MeanOp, ops::MeanOpMaker, ops::MeanGradMaker); REGISTER_OPERATOR(mean_grad, ops::MeanGradOp); -REGISTER_OP_CPU_KERNEL(mean, ops::MeanKernel, - ops::MeanKernel); -REGISTER_OP_CPU_KERNEL(mean_grad, - ops::MeanGradKernel, - ops::MeanGradKernel); +REGISTER_OP_CPU_KERNEL( + mean, ops::MeanKernel, + ops::MeanKernel); +REGISTER_OP_CPU_KERNEL( + mean_grad, ops::MeanGradKernel, + ops::MeanGradKernel); diff --git a/paddle/operators/mean_op.cu b/paddle/operators/mean_op.cu index ca089938c048f7aa5bd561f57c093aa74cce4e11..93062bf540ad64350f7ee9a554c3c469aba46677 100644 --- a/paddle/operators/mean_op.cu +++ b/paddle/operators/mean_op.cu @@ -17,8 +17,9 @@ #include "paddle/operators/mean_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(mean, ops::MeanKernel, - ops::MeanKernel); -REGISTER_OP_GPU_KERNEL(mean_grad, - ops::MeanGradKernel, - ops::MeanGradKernel); +REGISTER_OP_CUDA_KERNEL( + mean, ops::MeanKernel, + ops::MeanKernel); +REGISTER_OP_CUDA_KERNEL( + mean_grad, ops::MeanGradKernel, + ops::MeanGradKernel); diff --git a/paddle/operators/mean_op.h b/paddle/operators/mean_op.h index c99286a5b928f1edcd845b01b21b95654c25db07..351b34595974b1771d9f4ae5232e0b3a33491104 100644 --- a/paddle/operators/mean_op.h +++ b/paddle/operators/mean_op.h @@ -27,7 +27,7 @@ template using EigenVector = framework::EigenVector; -template +template class MeanKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -38,13 +38,14 @@ class MeanKernel : public framework::OpKernel { auto X = EigenVector::Flatten(*input); auto y = EigenScalar::From(*output); - auto& place = context.GetEigenDevice(); + auto& place = + *context.template device_context().eigen_device(); y.device(place) = X.mean(); } }; -template +template class MeanGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -56,7 +57,8 @@ class MeanGradKernel : public framework::OpKernel { T ig_size = static_cast(IG->numel()); Eigen::DSizes bcast(ig_size); - EigenVector::Flatten(*IG).device(context.GetEigenDevice()) = + EigenVector::Flatten(*IG).device( + *context.template device_context().eigen_device()) = (EigenVector::From(*OG) / ig_size).broadcast(bcast); } }; diff --git a/paddle/operators/minus_op.cc b/paddle/operators/minus_op.cc index 4684c20208501a3239fd57b35428946bb52af4a0..27f0c8de2053064e65d9984ec9bd4242fee48e5f 100644 --- a/paddle/operators/minus_op.cc +++ b/paddle/operators/minus_op.cc @@ -102,5 +102,5 @@ class MinusGradMaker : public framework::GradOpDescMakerBase { namespace ops = paddle::operators; REGISTER_OPERATOR(minus, ops::MinusOp, ops::MinusOpMaker, ops::MinusGradMaker); 
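A minimal standalone sketch of the DeviceContext-templated functor pattern this patch applies across the math functors and kernel registrations; CPUDeviceContext below is a stand-in struct and ScaleFunctor is a hypothetical example, not code from the patch:

#include <cstddef>
#include <iostream>
#include <vector>

struct CPUDeviceContext {};  // stand-in for platform::CPUDeviceContext

template <typename DeviceContext, typename T>
struct ScaleFunctor {
  // One signature for every backend; the context type selects the device.
  void operator()(const DeviceContext& ctx, const std::vector<T>& in, T factor,
                  std::vector<T>* out) const;
};

// CPU specialization: a plain loop. A CUDA specialization would take a
// CUDADeviceContext and launch its kernel on ctx.stream(), which is the
// shape of the .cu changes above.
template <typename T>
struct ScaleFunctor<CPUDeviceContext, T> {
  void operator()(const CPUDeviceContext&, const std::vector<T>& in, T factor,
                  std::vector<T>* out) const {
    out->resize(in.size());
    for (std::size_t i = 0; i < in.size(); ++i) (*out)[i] = in[i] * factor;
  }
};

// Explicit instantiations, mirroring the per-context, per-dtype
// "template struct ...;" lines emitted by each .cc/.cu file in this patch.
template struct ScaleFunctor<CPUDeviceContext, float>;
template struct ScaleFunctor<CPUDeviceContext, double>;

int main() {
  CPUDeviceContext ctx;
  std::vector<float> x{1.f, 2.f, 3.f}, y;
  ScaleFunctor<CPUDeviceContext, float>()(ctx, x, 2.f, &y);
  for (float v : y) std::cout << v << ' ';  // prints: 2 4 6
  std::cout << '\n';
  return 0;
}

The same functor name is then usable from an op kernel registered with REGISTER_OP_CPU_KERNEL or REGISTER_OP_CUDA_KERNEL, with only the context template argument differing per device.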
-REGISTER_OP_CPU_KERNEL(minus, - ops::MinusKernel); +REGISTER_OP_CPU_KERNEL( + minus, ops::MinusKernel); diff --git a/paddle/operators/minus_op.cu b/paddle/operators/minus_op.cu index a8375cc6301b2c1a917299c3933b03226bb72907..3b202ea92ee8692f2441909083f559adff5fea8c 100644 --- a/paddle/operators/minus_op.cu +++ b/paddle/operators/minus_op.cu @@ -14,5 +14,6 @@ #include "paddle/operators/minus_op.h" -REGISTER_OP_GPU_KERNEL( - minus, paddle::operators::MinusKernel); +REGISTER_OP_CUDA_KERNEL( + minus, + paddle::operators::MinusKernel); diff --git a/paddle/operators/minus_op.h b/paddle/operators/minus_op.h index bd9a2790aa2b208c2d3dfc792031283eb6c42397..78e1e1be6d622d504db9e664dcb5f35ca0c22b95 100644 --- a/paddle/operators/minus_op.h +++ b/paddle/operators/minus_op.h @@ -19,7 +19,7 @@ namespace paddle { namespace operators { -template +template class MinusKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -28,7 +28,8 @@ class MinusKernel : public framework::OpKernel { auto* out_tensor = context.Output("Out"); out_tensor->mutable_data(context.GetPlace()); - auto& dev = context.GetEigenDevice(); + auto& dev = + *context.template device_context().eigen_device(); framework::EigenVector::Flatten(*out_tensor).device(dev) = framework::EigenVector::Flatten(*left_tensor) - framework::EigenVector::Flatten(*right_tensor); diff --git a/paddle/operators/modified_huber_loss_op.cc b/paddle/operators/modified_huber_loss_op.cc index 28528848af1f467bf38be53f9d05fee6ca3f93cc..f0a42491bf04a5bbe2de10de2f702877c9a2f839 100644 --- a/paddle/operators/modified_huber_loss_op.cc +++ b/paddle/operators/modified_huber_loss_op.cc @@ -115,6 +115,6 @@ REGISTER_OP(modified_huber_loss, ops::ModifiedHuberLossOp, REGISTER_OP_CPU_KERNEL( modified_huber_loss, - ops::ModifiedHuberLossKernel); + ops::ModifiedHuberLossKernel); REGISTER_OP_CPU_KERNEL(modified_huber_loss_grad, ops::ModifiedHuberLossGradCPUKernel); diff --git a/paddle/operators/modified_huber_loss_op.cu b/paddle/operators/modified_huber_loss_op.cu index 8854e166cd99ce914d7f9f9bcead3234b0649506..40a8447da4d9d4874af232f3408557c950b58482 100644 --- a/paddle/operators/modified_huber_loss_op.cu +++ b/paddle/operators/modified_huber_loss_op.cu @@ -71,8 +71,8 @@ class ModifiedHuberLossGradGPUKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( modified_huber_loss, - ops::ModifiedHuberLossKernel); -REGISTER_OP_GPU_KERNEL(modified_huber_loss_grad, - ops::ModifiedHuberLossGradGPUKernel); + ops::ModifiedHuberLossKernel); +REGISTER_OP_CUDA_KERNEL(modified_huber_loss_grad, + ops::ModifiedHuberLossGradGPUKernel); diff --git a/paddle/operators/modified_huber_loss_op.h b/paddle/operators/modified_huber_loss_op.h index aba75efad9c19e3e113b4f09bc1fbd4732f4e187..157ae0682e0cf4392dab003153d44f48209d00a1 100644 --- a/paddle/operators/modified_huber_loss_op.h +++ b/paddle/operators/modified_huber_loss_op.h @@ -46,7 +46,7 @@ struct ModifiedHuberLossForward { } }; -template +template class ModifiedHuberLossKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -57,7 +57,8 @@ class ModifiedHuberLossKernel : public framework::OpKernel { out0->mutable_data(context.GetPlace()); out1->mutable_data(context.GetPlace()); - auto place = context.GetEigenDevice(); + auto& place = + *context.template device_context().eigen_device(); auto x = EigenVector::Flatten(*in0); auto y = 
EigenVector::Flatten(*in1); diff --git a/paddle/operators/momentum_op.cc b/paddle/operators/momentum_op.cc index 19954006195c1e9fd34328b52ed2a9eade526235..2ab48fedecf0cce95dcf4d0593dcd4b30bc1f505 100644 --- a/paddle/operators/momentum_op.cc +++ b/paddle/operators/momentum_op.cc @@ -71,8 +71,12 @@ class MomentumOpMaker : public framework::OpProtoAndCheckerMaker { "(Tensor, default Tensor) " "Input learning rate"); - AddOutput("ParamOut", "(Tensor) Output updated parameter"); - AddOutput("VelocityOut", "(Tensor) Output updated velocity"); + AddOutput("ParamOut", + "(Tensor) This output is updated parameter. " + "It shared memory with Input(Param)."); + AddOutput("VelocityOut", + "(Tensor) This output is updated velocity. " + "It shared memory with Input(Velocity)."); AddAttr("mu", "(float) Momentum coefficient"); AddAttr("use_nesterov", @@ -101,5 +105,5 @@ $$ namespace ops = paddle::operators; REGISTER_OP_WITHOUT_GRADIENT(momentum, ops::MomentumOp, ops::MomentumOpMaker); -REGISTER_OP_CPU_KERNEL( - momentum, ops::MomentumOpKernel); +REGISTER_OP_CPU_KERNEL(momentum, ops::MomentumOpKernel, + ops::MomentumOpKernel); diff --git a/paddle/operators/momentum_op.cu b/paddle/operators/momentum_op.cu index efc24e795e05951024009f0b3258769c352df344..00f1253465d336e0fad580d0c6b898369e4783ca 100644 --- a/paddle/operators/momentum_op.cu +++ b/paddle/operators/momentum_op.cu @@ -12,9 +12,67 @@ See the License for the specific language governing permissions and limitations under the License. */ -#define EIGEN_USE_GPU -#include "paddle/operators/momentum_op.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +template +__global__ void MomentumKernel(const T* p, const T* g, const T* v, + const T* learning_rate, const T mu, + const int64_t num, bool use_nesterov, T* p_out, + T* v_out) { + T lr = learning_rate[0]; + if (use_nesterov) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num; + i += blockDim.x * gridDim.x) { + T g_val = g[i]; + T v_new = v[i] * mu + g_val; + v_out[i] = v_new; + p_out[i] = p[i] - (g_val - v_new * mu) * lr; + } + } else { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num; + i += blockDim.x * gridDim.x) { + T v_new = v[i] * mu + g[i]; + v_out[i] = v_new; + p_out[i] = p[i] - lr * v_new; + } + } +} + +template +class MomentumOpCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto param_out = ctx.Output("ParamOut"); + auto velocity_out = ctx.Output("VelocityOut"); + auto param = ctx.Input("Param"); + auto velocity = ctx.Input("Velocity"); + auto grad = ctx.Input("Grad"); + auto learning_rate = ctx.Input("LearningRate"); + + T* p_out = param_out->mutable_data(ctx.GetPlace()); + T* v_out = velocity_out->mutable_data(ctx.GetPlace()); + + T mu = static_cast(ctx.Attr("mu")); + bool use_nesterov = ctx.Attr("use_nesterov"); + + auto* p = param->data(); + auto* v = velocity->data(); + auto* g = grad->data(); + auto* lr = learning_rate->data(); + + int block = 512; + int grid = (param->numel() + block - 1) / block; + MomentumKernel<<>>( + p, g, v, lr, mu, param->numel(), use_nesterov, p_out, v_out); + } +}; + +} // namespace operators +} // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( - momentum, ops::MomentumOpKernel); +REGISTER_OP_CUDA_KERNEL(momentum, ops::MomentumOpCUDAKernel, + ops::MomentumOpCUDAKernel); diff --git a/paddle/operators/momentum_op.h b/paddle/operators/momentum_op.h index 
8f7f5eb5c21c0342f57a47b85d28f4454f4566c2..da69532ea58bad8d3908770d82dbcc6e6b108fce 100644 --- a/paddle/operators/momentum_op.h +++ b/paddle/operators/momentum_op.h @@ -19,7 +19,7 @@ limitations under the License. */ namespace paddle { namespace operators { -template +template class MomentumOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -33,7 +33,7 @@ class MomentumOpKernel : public framework::OpKernel { param_out->mutable_data(ctx.GetPlace()); velocity_out->mutable_data(ctx.GetPlace()); - float mu = ctx.Attr("mu"); + T mu = static_cast(ctx.Attr("mu")); bool use_nesterov = ctx.Attr("use_nesterov"); auto p_out = framework::EigenVector::Flatten(*param_out); @@ -42,18 +42,13 @@ class MomentumOpKernel : public framework::OpKernel { auto p = framework::EigenVector::Flatten(*param); auto v = framework::EigenVector::Flatten(*velocity); auto g = framework::EigenVector::Flatten(*grad); - auto lr = framework::EigenVector::Flatten(*learning_rate); + auto* lr = learning_rate->data(); - auto place = ctx.GetEigenDevice(); - - Eigen::DSizes grad_dsize(grad->numel()); - - v_out.device(place) = v * mu + g; + v_out = v * mu + g; if (use_nesterov) { - p_out.device(place) = p - g * lr.broadcast(grad_dsize) + - v_out * mu * lr.broadcast(grad_dsize); + p_out = p - (g - v_out * mu) * lr[0]; } else { - p_out.device(place) = p - lr.broadcast(grad_dsize) * v_out; + p_out = p - lr[0] * v_out; } } }; diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc index 3c39ae10dc50084cff284c307167c33c9208a3ce..bc4a5fdf0b37ce07b4c07bba9e1af5611d2be7e3 100644 --- a/paddle/operators/mul_op.cc +++ b/paddle/operators/mul_op.cc @@ -149,6 +149,7 @@ REGISTER_OPERATOR(mul, paddle::framework::OperatorWithKernel, ops::MulOpMaker, ops::MulOpShapeInference, paddle::framework::DefaultGradOpDescMaker); REGISTER_OPERATOR(mul_grad, ops::MulOpGrad); -REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel); -REGISTER_OP_CPU_KERNEL(mul_grad, - ops::MulGradKernel); +REGISTER_OP_CPU_KERNEL( + mul, ops::MulKernel); +REGISTER_OP_CPU_KERNEL( + mul_grad, ops::MulGradKernel); diff --git a/paddle/operators/mul_op.cu.cc b/paddle/operators/mul_op.cu.cc index 66dc3d6d106a18640adad413d4e967fa101abcfc..6095de58d0c58be6b647771e9784348cbf8c4ad4 100644 --- a/paddle/operators/mul_op.cu.cc +++ b/paddle/operators/mul_op.cu.cc @@ -15,6 +15,7 @@ #include "paddle/operators/mul_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(mul, ops::MulKernel); -REGISTER_OP_GPU_KERNEL(mul_grad, - ops::MulGradKernel); +REGISTER_OP_CUDA_KERNEL( + mul, ops::MulKernel); +REGISTER_OP_CUDA_KERNEL( + mul_grad, ops::MulGradKernel); diff --git a/paddle/operators/mul_op.h b/paddle/operators/mul_op.h index 0eb9df41e9415845f88af283de63856158b447f9..1b467dca8302c10fe08a157aac4586230e096dd0 100644 --- a/paddle/operators/mul_op.h +++ b/paddle/operators/mul_op.h @@ -23,7 +23,7 @@ namespace operators { using Tensor = framework::Tensor; -template +template class MulKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -46,15 +46,16 @@ class MulKernel : public framework::OpKernel { if (z_dim.size() != 2) { z->Resize({x_matrix.dims()[0], y_matrix.dims()[1]}); } - math::matmul(context.device_context(), x_matrix, false, y_matrix, - false, 1, z, 0); + math::matmul( + context.template device_context(), x_matrix, false, + y_matrix, false, 1, z, 0); if (z_dim.size() != 2) { z->Resize(z_dim); } } }; -template +template class MulGradKernel : public 
framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -77,6 +78,7 @@ class MulGradKernel : public framework::OpKernel { Tensor* dx = ctx.Output(framework::GradVarName("X")); Tensor* dy = ctx.Output(framework::GradVarName("Y")); + auto& dev_ctx = ctx.template device_context(); if (dx) { dx->mutable_data(ctx.GetPlace()); Tensor dx_matrix = dx->dims().size() > 2 @@ -84,8 +86,8 @@ class MulGradKernel : public framework::OpKernel { : *dx; // dx = dout * y'. dx: M x K, dout : M x N, y : K x N - math::matmul(ctx.device_context(), dout_mat, false, y_matrix, - true, 1, &dx_matrix, 0); + math::matmul(dev_ctx, dout_mat, false, y_matrix, true, + 1, &dx_matrix, 0); } if (dy) { dy->mutable_data(ctx.GetPlace()); @@ -93,8 +95,8 @@ class MulGradKernel : public framework::OpKernel { ? framework::ReshapeToMatrix(*dy, y_num_col_dims) : *dy; // dy = x' * dout. dy K x N, dout : M x N, x : M x K - math::matmul(ctx.device_context(), x_matrix, true, dout_mat, - false, 1, &dy_matrix, 0); + math::matmul(dev_ctx, x_matrix, true, dout_mat, false, + 1, &dy_matrix, 0); } } }; diff --git a/paddle/operators/multiplex_op.cc b/paddle/operators/multiplex_op.cc index f8527dfab3f3c42f430c433a11351f12b8dfae8b..b1ee8051c4c48f575690b38142ae082930fe2070 100644 --- a/paddle/operators/multiplex_op.cc +++ b/paddle/operators/multiplex_op.cc @@ -99,13 +99,7 @@ class MultiplexGradOp : public framework::OperatorWithKernel { "Output(X@Grad) should not be null."); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), "Input(Out@GRAD) should not be null."); - std::vector d_ins; - auto ins = ctx->GetInputsDim("X"); - // No need to compute gradient for Input(Ids) - for (size_t i = 0; i < ins.size(); i++) { - d_ins.push_back(ins[i]); - } - ctx->SetOutputsDim(framework::GradVarName("X"), d_ins); + ctx->SetOutputsDim(framework::GradVarName("X"), ctx->GetInputsDim("X")); } protected: @@ -125,7 +119,8 @@ REGISTER_OPERATOR(multiplex, ops::MultiplexOp, ops::MultiplexOpMaker, paddle::framework::DefaultGradOpDescMaker); REGISTER_OPERATOR(multiplex_grad, ops::MultiplexGradOp); REGISTER_OP_CPU_KERNEL( - multiplex, ops::MultiplexCPUKernel); + multiplex, + ops::MultiplexCPUKernel); REGISTER_OP_CPU_KERNEL( multiplex_grad, - ops::MultiplexGradCPUKernel); + ops::MultiplexGradCPUKernel); diff --git a/paddle/operators/multiplex_op.cu b/paddle/operators/multiplex_op.cu index 10dff8d021d0394702cc8b92e779c012a4cf3eb2..47986e9ff86f2e08b0861cde35ac3a44b10caed1 100644 --- a/paddle/operators/multiplex_op.cu +++ b/paddle/operators/multiplex_op.cu @@ -36,7 +36,7 @@ class MultiplexGPUKernel : public framework::OpKernel { CopyFrom(*ids, platform::CPUPlace(), ctx.device_context(), &index_t_cpu); auto* index = index_t_cpu.data(); auto stream = ctx.cuda_device_context().stream(); - Place place = boost::get(ctx.GetPlace()); + platform::GPUPlace place = boost::get(ctx.GetPlace()); for (auto i = 0; i < rows; i++) { int32_t k = index[i]; PADDLE_ENFORCE_GE(k, 0, "index must be nonnegative."); @@ -60,7 +60,8 @@ class MultiplexGradGPUKernel : public framework::OpKernel { if (d_ins[i]) { d_ins[i]->mutable_data(ctx.GetPlace()); auto t = framework::EigenVector::Flatten(*d_ins[i]); - t.device(ctx.GetEigenDevice()) = t.constant(static_cast(0)); + t.device(*ctx.template device_context().eigen_device()) = + t.constant(static_cast(0)); } } @@ -72,7 +73,7 @@ class MultiplexGradGPUKernel : public framework::OpKernel { auto* index = index_t_cpu.data(); auto stream = ctx.cuda_device_context().stream(); - Place place = 
boost::get(ctx.GetPlace()); + platform::GPUPlace place = boost::get(ctx.GetPlace()); for (auto i = 0; i < rows; i++) { size_t k = static_cast(index[i]); if (d_ins[k]) { @@ -87,8 +88,9 @@ class MultiplexGradGPUKernel : public framework::OpKernel { namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( - multiplex, ops::MultiplexGPUKernel); -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( + multiplex, + ops::MultiplexGPUKernel); +REGISTER_OP_CUDA_KERNEL( multiplex_grad, - ops::MultiplexGradGPUKernel); + ops::MultiplexGradGPUKernel); diff --git a/paddle/operators/multiplex_op.h b/paddle/operators/multiplex_op.h index ab3cafaa324a29d6f249cf1f73db92e1364eebc8..344315116122f7ad843af740be8a31313c8a0342 100644 --- a/paddle/operators/multiplex_op.h +++ b/paddle/operators/multiplex_op.h @@ -22,7 +22,7 @@ namespace paddle { namespace operators { -template +template class MultiplexCPUKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { @@ -35,7 +35,7 @@ class MultiplexCPUKernel : public framework::OpKernel { auto rows = ins[0]->dims()[0]; auto cols = ins[0]->numel() / rows; auto index = ids->data(); - Place place = boost::get(ctx.GetPlace()); + platform::CPUPlace place = boost::get(ctx.GetPlace()); for (auto i = 0; i < rows; i++) { int32_t k = index[i]; PADDLE_ENFORCE_GE(k, 0, "index must be nonnegative."); @@ -47,7 +47,7 @@ class MultiplexCPUKernel : public framework::OpKernel { } }; -template +template class MultiplexGradCPUKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { @@ -60,14 +60,15 @@ class MultiplexGradCPUKernel : public framework::OpKernel { if (d_ins[i]) { d_ins[i]->mutable_data(ctx.GetPlace()); auto t = framework::EigenVector::Flatten(*d_ins[i]); - t.device(ctx.GetEigenDevice()) = t.constant(static_cast(0)); + t.device(*ctx.template device_context().eigen_device()) = + t.constant(static_cast(0)); } } auto rows = ins[0]->dims()[0]; auto cols = ins[0]->numel() / rows; auto* index = ids->data(); - Place place = boost::get(ctx.GetPlace()); + platform::CPUPlace place = boost::get(ctx.GetPlace()); for (auto i = 0; i < rows; i++) { size_t k = static_cast(index[i]); if (d_ins[k]) { diff --git a/paddle/operators/nccl_op.cu.cc b/paddle/operators/nccl_op.cu.cc index 4f0a2a79edb9f24c7758fc91483d374425b36853..6ca6db7253da0e59c742f115cd25a1b8203a3044 100644 --- a/paddle/operators/nccl_op.cu.cc +++ b/paddle/operators/nccl_op.cu.cc @@ -204,6 +204,6 @@ class NCCLBcastKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(ncclAllReduce, ops::NCCLAllReduceKernel); -REGISTER_OP_GPU_KERNEL(ncclBcast, ops::NCCLBcastKernel); -REGISTER_OP_GPU_KERNEL(ncclReduce, ops::NCCLReduceKernel); +REGISTER_OP_CUDA_KERNEL(ncclAllReduce, ops::NCCLAllReduceKernel); +REGISTER_OP_CUDA_KERNEL(ncclBcast, ops::NCCLBcastKernel); +REGISTER_OP_CUDA_KERNEL(ncclReduce, ops::NCCLReduceKernel); diff --git a/paddle/operators/nccl_op_test.cu.cc b/paddle/operators/nccl_op_test.cu.cc index bb7ae20286dd8e52f72b79cbf353bd812a2cc092..d747cc0cf5f74b886bbd40549673e7d64de952e9 100644 --- a/paddle/operators/nccl_op_test.cu.cc +++ b/paddle/operators/nccl_op_test.cu.cc @@ -33,9 +33,9 @@ #include "paddle/platform/place.h" USE_NO_KERNEL_OP(ncclInit); -USE_GPU_ONLY_OP(ncclAllReduce); -USE_GPU_ONLY_OP(ncclReduce); -USE_GPU_ONLY_OP(ncclBcast); +USE_CUDA_ONLY_OP(ncclAllReduce); +USE_CUDA_ONLY_OP(ncclReduce); +USE_CUDA_ONLY_OP(ncclBcast); namespace f = paddle::framework; 
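As an aside on what the multiplex kernels above compute: for every row i, the output copies row i of the candidate input selected by Ids[i]. A minimal standalone sketch with plain std::vector follows (illustrative only; the function name and the flat row-major layout are assumptions, not PaddlePaddle API):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// Each candidate is a rows x cols matrix stored row-major; ids[i] picks which
// candidate supplies row i of the output.
std::vector<float> MultiplexRows(
    const std::vector<std::vector<float>>& candidates,
    const std::vector<int>& ids, std::size_t rows, std::size_t cols) {
  std::vector<float> out(rows * cols);
  for (std::size_t i = 0; i < rows; ++i) {
    const int k = ids[i];
    assert(k >= 0 && static_cast<std::size_t>(k) < candidates.size());
    // Copy row i of candidate k into row i of the output.
    std::copy(candidates[k].begin() + i * cols,
              candidates[k].begin() + (i + 1) * cols,
              out.begin() + i * cols);
  }
  return out;
}

The gradient kernels invert this mapping: the incoming gradient row i is scattered back into the gradient of the selected input, while all other input gradients remain at the zero they were filled with (the Eigen constant() calls above).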
namespace p = paddle::platform; diff --git a/paddle/operators/nce_op.cc b/paddle/operators/nce_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..5ad1610fde041ee934486ef98ba41dca42559100 --- /dev/null +++ b/paddle/operators/nce_op.cc @@ -0,0 +1,186 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/nce_op.h" + +namespace paddle { +namespace operators { + +using framework::Tensor; + +class NCEOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Input")); + PADDLE_ENFORCE(ctx->HasInput("Label")); + PADDLE_ENFORCE(ctx->HasInput("Weight")); + PADDLE_ENFORCE(ctx->HasOutput("Cost")); + PADDLE_ENFORCE(ctx->HasOutput("SampleLogits")); + PADDLE_ENFORCE(ctx->HasOutput("SampleLabels")); + + auto x_dims = ctx->GetInputDim("Input"); + auto label_dims = ctx->GetInputDim("Label"); + PADDLE_ENFORCE_EQ(x_dims[0], label_dims[0]); + int num_true_classes = label_dims.size() == 2 ? label_dims[1] : 1; + if (ctx->HasInput("Bias")) { + PADDLE_ENFORCE_EQ(ctx->GetInputDim("Weight")[0], + ctx->GetInputDim("Bias")[0]); + } + auto num_neg_samples = ctx->Attrs().Get("num_neg_samples"); + auto num_total_classes = ctx->Attrs().Get("num_total_classes"); + std::vector custom_neg_classes = + ctx->Attrs().Get>("custom_neg_classes"); + PADDLE_ENFORCE_EQ(num_total_classes, ctx->GetInputDim("Weight")[0]); + if (custom_neg_classes.size() > 0) { + PADDLE_ENFORCE_EQ(custom_neg_classes.size(), + static_cast(num_neg_samples)); + } + // set dims of output(Out) + std::vector out_dims; + out_dims.push_back(x_dims[0]); + out_dims.push_back(1); + ctx->SetOutputDim("Cost", framework::make_ddim(out_dims)); + + // set dims of output(SampleOut) + std::vector sample_out_dims; + sample_out_dims.push_back(x_dims[0]); + sample_out_dims.push_back(num_neg_samples + num_true_classes); + ctx->SetOutputDim("SampleLogits", framework::make_ddim(sample_out_dims)); + ctx->SetOutputDim("SampleLabels", framework::make_ddim(sample_out_dims)); + } + + protected: + framework::OpKernelType GetKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input("Input")->type()), + ctx.GetPlace()); + } +}; + +class NCEOpMaker : public framework::OpProtoAndCheckerMaker { + public: + NCEOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("Input", "(Tensor) A tensor of shape [batch_size, dim]."); + AddInput( + "Label", + "(Tensor) A tensor of shape [batch_size, num_true_class]. " + "'num_true_class' is the number of target classes in each sample." + "The number of target classes per sample should be same. 
" + "If you have a variable number of target classes, " + "you can pad them out to a constant number by either repeating them" + " or by padding with an otherwise unused class.)"); + AddInput("Weight", + "(Tensor) A tensor of shape [num_class, dim]. 'num_class' is the " + "total number of class."); + AddInput( + "Bias", + "(Tensor) A tensor of shape [num_class, 1]. 'num_class' is the total " + "number of class. It is a dispensable input.") + .AsDispensable(); + AddInput("SampleWeight", + "(Tensor) A tensor of shape [batch_size, 1] storing a weight for " + "each sample. And it is a dispensable input. The default value of " + "sample is 1.") + .AsDispensable(); + AddOutput("Cost", + "(Tensor) A tensor of shape [batch_size, 1]. Cost of samples."); + AddOutput("SampleLogits", + "An intermediate tensor of shape[batch_size, num_neg_samples + " + "num_pos_samples]." + "This tensor is output of forward kernel and used in backward " + "kernel to compute grads." + "Given X is the dot product of input tensor and sampled labels' " + "weights." + "Then 'SampleLogits' is sigmoid(X).") + .AsIntermediate(); + AddOutput("SampleLabels", + "An intermediate tensor of shape[batch_size, num_neg_samples + " + "num_pos_samples]." + "This tensor is output of forward kernel and used in backward " + "kernel to compute grads." + "") + .AsIntermediate(); + AddAttr("num_total_classes", + "Total number of classes in all samples."); + AddAttr("num_neg_samples", + "The number of negative classes. The default value is 10.") + .SetDefault(10); + AddAttr>("custom_neg_classes", + "This attribute only be used in unitest. Classes " + "in this list wiil be used as negative classes " + "for every samples. Under normal conditions, " + "user should avoid setting this attribute."); + AddComment(R"DOC( +Compute and return the noise-contrastive estimation training loss. +See [Noise-contrastive estimation: A new estimation principle for unnormalized statistical models](http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf). +By default this operator uses a uniform distribution for sampling. 
+)DOC"); + } +}; + +class NCEOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Input")); + PADDLE_ENFORCE(ctx->HasInput("Weight")); + PADDLE_ENFORCE(ctx->HasInput("Cost")); + PADDLE_ENFORCE(ctx->HasInput("SampleLogits")); + PADDLE_ENFORCE(ctx->HasInput("SampleLabels")); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Cost")), + "The input(Out@GRAD) should not be null."); + + auto x_dims = ctx->GetInputDim("Input"); + auto x_grad_name = framework::GradVarName("Input"); + if (ctx->HasOutput(x_grad_name)) { + ctx->SetOutputDim(x_grad_name, x_dims); + } + + auto w_dims = ctx->GetInputDim("Weight"); + auto w_grad_name = framework::GradVarName("Weight"); + if (ctx->HasOutput(w_grad_name)) { + ctx->SetOutputDim(w_grad_name, w_dims); + } + + auto bias_grad_name = framework::GradVarName("Bias"); + if (ctx->HasOutput(bias_grad_name)) { + auto bias_dims = ctx->GetInputDim("Bias"); + ctx->SetOutputDim(bias_grad_name, bias_dims); + } + } + + protected: + framework::OpKernelType GetKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input("Input")->type()), + ctx.GetPlace()); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(nce, ops::NCEOp, ops::NCEOpMaker, nce_grad, ops::NCEOpGrad); +REGISTER_OP_CPU_KERNEL(nce, ops::NCEKernel, + ops::NCEKernel); +REGISTER_OP_CPU_KERNEL(nce_grad, + ops::NCEGradKernel, + ops::NCEGradKernel); diff --git a/paddle/operators/nce_op.h b/paddle/operators/nce_op.h new file mode 100644 index 0000000000000000000000000000000000000000..6636dad06037f163252dc342200a99c756ed2a2e --- /dev/null +++ b/paddle/operators/nce_op.h @@ -0,0 +1,211 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include +#include +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" +#include "unsupported/Eigen/CXX11/Tensor" +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +template +using EigenMatrix = framework::EigenMatrix; + +template +void PrepareSamples(const framework::ExecutionContext& context) { + auto label = context.Input("Label"); + const int64_t* label_data = label->data(); + auto label_dims = label->dims(); + int num_total_classes = context.Attr("num_total_classes"); + // for unitest + std::vector custom_neg_classes = + context.Attr>("custom_neg_classes"); + // random machine + std::random_device rd; + std::mt19937 rng(rd()); + std::uniform_int_distribution rand(0, num_total_classes - 1); + + auto sample_labels = context.Output("SampleLabels"); + auto sample_labels_dims = sample_labels->dims(); + int64_t* sample_labels_data = + sample_labels->mutable_data(context.GetPlace()); + + int num_label = label_dims.size() == 2 ? 
label_dims[1] : 1; + int index = 0; + for (int64_t i = 0; i < label_dims[0]; ++i) { + int j = 0; + for (; j < num_label; ++j) { + sample_labels_data[index++] = label_data[i * num_label + j]; + } + if (custom_neg_classes.size() > 0) { + for (auto label : custom_neg_classes) { + sample_labels_data[index++] = label; + } + } else { + for (; j < sample_labels_dims[1]; ++j) { + // TODO(wanghaoshuang): support more distribution sampling + sample_labels_data[index++] = rand(rng); + } + } + } +} + +template +class NCEKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + PrepareSamples(context); + auto sample_labels = context.Output("SampleLabels"); + const int64_t* sample_labels_data = sample_labels->data(); + auto sample_out = context.Output("SampleLogits"); + T* sample_out_data = sample_out->mutable_data(context.GetPlace()); + auto label = context.Input("Label"); + auto sample_weight = context.Input("SampleWeight"); + const T* sample_weight_data = nullptr; + if (sample_weight != nullptr) { + sample_weight_data = sample_weight->data(); + } + auto out = context.Output("Cost"); + T* out_data = out->mutable_data(context.GetPlace()); + int num_neg_samples = context.Attr("num_neg_samples"); + int num_total_classes = context.Attr("num_total_classes"); + int64_t num_true_class = 1; + if (label != nullptr) { + num_true_class = label->dims()[1]; + } + T b = 1. / num_total_classes * num_neg_samples; + // forward bias + auto bias = context.Input("Bias"); + if (bias != nullptr) { + const T* bias_data = bias->data(); + for (int64_t i = 0; i < sample_labels->numel(); ++i) { + sample_out_data[i] = bias_data[sample_labels_data[i]]; + } + } else { + for (int64_t i = 0; i < sample_labels->numel(); ++i) { + sample_out_data[i] = 0; + } + } + // forward mul + auto input_mat = EigenMatrix::From(*(context.Input("Input"))); + auto weight_mat = EigenMatrix::From(*(context.Input("Weight"))); + for (int64_t i = 0; i < sample_labels->numel(); ++i) { + Eigen::Tensor result = + (input_mat.chip((int)(i / sample_labels->dims()[1]), 0) * + weight_mat.chip(sample_labels_data[i], 0)) + .sum(); + sample_out_data[i] += result(0); + sample_out_data[i] = (1. / (1. + exp(-sample_out_data[i]))); + } + // forward cost + for (int64_t i = 0; i < sample_labels->dims()[0]; ++i) { + int64_t j = 0; + out_data[i] = 0; + T w = sample_weight == nullptr ? 1. 
: sample_weight_data[i]; + // for true classes + for (; j < num_true_class; ++j) { + T o = sample_out_data[i * sample_out->dims()[1] + j]; + T cost = -log(o / (o + b)); + out_data[i] += w * cost; + } + // for sampled neg classes + for (; j < sample_labels->dims()[1]; ++j) { + T o = sample_out_data[i * sample_out->dims()[1] + j]; + T cost = -log(b / (o + b)); + out_data[i] += w * cost; + } + } + } +}; + +template +class NCEGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto d_out = context.Input(framework::GradVarName("Cost")); + const T* d_out_data = d_out->data(); + auto label = context.Input("Label"); + auto sample_out = context.Input("SampleLogits"); + const T* sample_out_data = sample_out->data(); + auto sample_labels = context.Input("SampleLabels"); + const int64_t* sample_labels_data = sample_labels->data(); + auto sample_weight = context.Input("SampleWeight"); + const T* sample_weight_data = nullptr; + if (sample_weight != nullptr) { + sample_weight_data = sample_weight->data(); + } + int num_neg_samples = context.Attr("num_neg_samples"); + int num_total_classes = context.Attr("num_total_classes"); + int num_true_class = 1; + if (label != nullptr) { + num_true_class = label->dims()[1]; + } + T b = 1. / num_total_classes * num_neg_samples; + Tensor sample_grad; // tmp tensor + T* sample_grad_data = + sample_grad.mutable_data(sample_labels->dims(), context.GetPlace()); + // backward cost + for (int64_t i = 0; i < sample_labels->numel(); ++i) { + T o = sample_out_data[i]; + T w = sample_weight == nullptr + ? 1 + : sample_weight_data[i / sample_labels->dims()[1]]; + sample_grad_data[i] = (i % sample_labels->dims()[1]) < num_true_class + ? w * (b / (o + b)) * (o - 1) + : w * (o * (1 - o) / (o + b)); + sample_grad_data[i] *= d_out_data[i / sample_labels->dims()[1]]; + } + // get d_bias + auto d_bias = context.Output(framework::GradVarName("Bias")); + if (d_bias != nullptr) { + T* d_bias_data = d_bias->mutable_data(context.GetPlace()); + std::fill(d_bias_data, d_bias_data + d_bias->numel(), 0.0); + for (int64_t i = 0; i < sample_labels->numel(); ++i) { + d_bias_data[sample_labels_data[i]] += sample_grad_data[i]; + } + } + // get d_w + auto d_w = context.Output(framework::GradVarName("Weight")); + if (d_w != nullptr) { + auto d_w_data = d_w->mutable_data(context.GetPlace()); + std::fill(d_w_data, d_w_data + d_w->numel(), 0.0); + auto d_w_matrix = EigenMatrix::From(*d_w); + auto x_matrix = EigenMatrix::From(*(context.Input("Input"))); + for (int64_t i = 0; i < sample_labels->numel(); ++i) { + d_w_matrix.chip(sample_labels_data[i], 0) += + x_matrix.chip((int)(i / sample_labels->dims()[1]), 0) * + sample_grad_data[i]; + } + } + // get d_x + auto d_x = context.Output(framework::GradVarName("Input")); + if (d_x != nullptr) { + d_x->mutable_data(context.GetPlace()); + auto d_x_matrix = EigenMatrix::From(*d_x); + auto w_matrix = EigenMatrix::From(*(context.Input("Weight"))); + for (int64_t i = 0; i < sample_labels->numel(); ++i) { + d_x_matrix.chip((int)(i / sample_labels->dims()[1]), 0) += + w_matrix.chip(sample_labels_data[i], 0) * sample_grad_data[i]; + } + } + } +}; +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/net_op.h b/paddle/operators/net_op.h index ebeb262d9621fa35c870b6407992f6b6d2bf7c70..8935751f15ccc4861c9e06d8d9031c8dff1a4af3 100644 --- a/paddle/operators/net_op.h +++ b/paddle/operators/net_op.h @@ -38,7 +38,10 @@ namespace operators { class NetOp : public 
framework::OperatorBase { public: static const char kAll[]; - NetOp() : framework::OperatorBase("plain_net", {}, {}, {}) {} + NetOp() + : framework::OperatorBase("plain_net", framework::VariableNameMap{}, + framework::VariableNameMap{}, + framework::AttributeMap{}) {} NetOp(const std::string& type, const framework::VariableNameMap& inputs, const framework::VariableNameMap& outputs, diff --git a/paddle/operators/net_op_test.cc b/paddle/operators/net_op_test.cc index 63bebd5b44719868a38ddf2b023955d1ab05245c..22fba9568d018586b4622884b7d6145fd646adb0 100644 --- a/paddle/operators/net_op_test.cc +++ b/paddle/operators/net_op_test.cc @@ -38,10 +38,10 @@ TEST(OpKernel, all) { net->AppendOp(std::unique_ptr( new TestOp("test", {{"X", {"x"}}, {"W", {"w1"}}, {"b", {"b1"}}}, - {{"Out", {"y"}}}, {}))); + {{"Out", {"y"}}}, framework::AttributeMap{}))); net->AppendOp(std::unique_ptr( new TestOp("test", {{"X", {"y"}}, {"W", {"w2"}}, {"b", {"b2"}}}, - {{"Out", {"z"}}}, {}))); + {{"Out", {"z"}}}, framework::AttributeMap{}))); net->CompleteAddOp(); AssertSameVectorWithoutOrder({"x", "w1", "b1", "w2", "b2"}, @@ -58,7 +58,7 @@ TEST(NetOp, insert_op) { NetOp net; auto op1 = std::unique_ptr( new framework::NOP("empty", {{"X", {"x"}}, {"W", {"w1"}}, {"b", {"b1"}}}, - {{"Out", {"y"}}}, {})); + {{"Out", {"y"}}}, framework::AttributeMap{})); net.AppendOp(*op1); net.InsertOp(0, *op1); ASSERT_EQ(2UL, net.ops_.size()); @@ -68,10 +68,12 @@ TEST(NetOp, insert_op) { TEST(NetOp, Clone) { NetOp net; - net.AppendOp( - std::unique_ptr(new framework::NOP{"empty", {}, {}, {}})); - net.AppendOp(std::unique_ptr( - new framework::NOP{"empty2", {}, {}, {}})); + net.AppendOp(std::unique_ptr(new framework::NOP{ + "empty", framework::VariableNameMap{}, framework::VariableNameMap{}, + framework::AttributeMap{}})); + net.AppendOp(std::unique_ptr(new framework::NOP{ + "empty2", framework::VariableNameMap{}, framework::VariableNameMap{}, + framework::AttributeMap{}})); net.CompleteAddOp(true); auto new_net_op = net.Clone(); ASSERT_NE(new_net_op, nullptr); diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index adb75df6ef10c59fc6f3db4d36e1ffb1ae0b4b1e..936dde22c34a30c5a50e2ac8a76f0f91dfb328ab 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -134,6 +134,7 @@ namespace ops = paddle::operators; REGISTER_OPERATOR(pad, ops::PadOp, ops::PadOpMaker, ops::PadOpGradMaker); REGISTER_OPERATOR(pad_grad, ops::PadOpGrad); -REGISTER_OP_CPU_KERNEL(pad, ops::PadKernel); -REGISTER_OP_CPU_KERNEL(pad_grad, - ops::PadGradKernel); +REGISTER_OP_CPU_KERNEL( + pad, ops::PadKernel); +REGISTER_OP_CPU_KERNEL( + pad_grad, ops::PadGradKernel); diff --git a/paddle/operators/pad_op.cu b/paddle/operators/pad_op.cu index 555a7dba23c6fa2659cabf4858b42ff70d74bf18..c309fb625cca203418db2599a59ea0144782efc2 100644 --- a/paddle/operators/pad_op.cu +++ b/paddle/operators/pad_op.cu @@ -16,6 +16,7 @@ #include "paddle/operators/pad_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(pad, ops::PadKernel); -REGISTER_OP_GPU_KERNEL(pad_grad, - ops::PadGradKernel); +REGISTER_OP_CUDA_KERNEL( + pad, ops::PadKernel); +REGISTER_OP_CUDA_KERNEL( + pad_grad, ops::PadGradKernel); diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h index 9534dbf54529e3b9ae2b6640d51fe291e9521927..1b95942af3b3711fcad965cdc3f2d2f99b2f32e8 100644 --- a/paddle/operators/pad_op.h +++ b/paddle/operators/pad_op.h @@ -26,7 +26,7 @@ template using EigenTensor = framework::EigenTensor; -template +template void PadFunction(const 
framework::ExecutionContext& context) { auto pads = context.Attr>("paddings"); Eigen::array, D> paddings; @@ -42,33 +42,34 @@ void PadFunction(const framework::ExecutionContext& context) { auto x_tensor = EigenTensor::From(*x); auto out_tensor = EigenTensor::From(*out); - auto place = context.GetEigenDevice(); + auto& place = + *context.template device_context().eigen_device(); out_tensor.device(place) = x_tensor.pad(paddings, pad_value); } -template +template class PadKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { int rank = context.Input("X")->dims().size(); switch (rank) { case 1: - PadFunction(context); + PadFunction(context); break; case 2: - PadFunction(context); + PadFunction(context); break; case 3: - PadFunction(context); + PadFunction(context); break; case 4: - PadFunction(context); + PadFunction(context); break; case 5: - PadFunction(context); + PadFunction(context); break; case 6: - PadFunction(context); + PadFunction(context); break; default: PADDLE_THROW( @@ -77,7 +78,7 @@ class PadKernel : public framework::OpKernel { } }; -template +template void PadGradFunction(const framework::ExecutionContext& context) { auto pads = context.Attr>("paddings"); Eigen::array, D> paddings; @@ -91,12 +92,13 @@ void PadGradFunction(const framework::ExecutionContext& context) { d_x->mutable_data(context.GetPlace()); auto d_x_tensor = EigenTensor::From(*d_x); auto d_out_tensor = EigenTensor::From(*d_out); - auto place = context.GetEigenDevice(); + auto& place = + *context.template device_context().eigen_device(); d_x_tensor.device(place) = d_out_tensor.pad(paddings, 0); } } -template +template class PadGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -104,22 +106,22 @@ class PadGradKernel : public framework::OpKernel { context.Input(framework::GradVarName("Out"))->dims().size(); switch (rank) { case 1: - PadGradFunction(context); + PadGradFunction(context); break; case 2: - PadGradFunction(context); + PadGradFunction(context); break; case 3: - PadGradFunction(context); + PadGradFunction(context); break; case 4: - PadGradFunction(context); + PadGradFunction(context); break; case 5: - PadGradFunction(context); + PadGradFunction(context); break; case 6: - PadGradFunction(context); + PadGradFunction(context); break; default: PADDLE_THROW( diff --git a/paddle/operators/pool_cudnn_op.cc b/paddle/operators/pool_cudnn_op.cc index be9fcc5661f420aadf908cf80cce6c963008b0e4..77407f5cdf7e4ef7b76c38ef8992517b4bd1c5fe 100644 --- a/paddle/operators/pool_cudnn_op.cc +++ b/paddle/operators/pool_cudnn_op.cc @@ -19,19 +19,21 @@ namespace ops = paddle::operators; REGISTER_OP(pool2d_cudnn, ops::PoolOp, ops::Pool2dOpMaker, pool2d_cudnn_grad, ops::PoolOpGrad); -REGISTER_OP_CPU_KERNEL(pool2d_cudnn, - ops::PoolKernel, - ops::PoolKernel); -REGISTER_OP_CPU_KERNEL(pool2d_cudnn_grad, - ops::PoolGradKernel, - ops::PoolGradKernel) +REGISTER_OP_CPU_KERNEL( + pool2d_cudnn, ops::PoolKernel, + ops::PoolKernel); +REGISTER_OP_CPU_KERNEL( + pool2d_cudnn_grad, + ops::PoolGradKernel, + ops::PoolGradKernel) REGISTER_OP(pool3d_cudnn, ops::PoolOp, ops::Pool3dOpMaker, pool3d_cudnn_grad, ops::PoolOpGrad); -REGISTER_OP_CPU_KERNEL(pool3d_cudnn, - ops::PoolKernel, - ops::PoolKernel); -REGISTER_OP_CPU_KERNEL(pool3d_cudnn_grad, - ops::PoolGradKernel, - ops::PoolGradKernel) +REGISTER_OP_CPU_KERNEL( + pool3d_cudnn, ops::PoolKernel, + ops::PoolKernel); +REGISTER_OP_CPU_KERNEL( + pool3d_cudnn_grad, + 
ops::PoolGradKernel, + ops::PoolGradKernel) diff --git a/paddle/operators/pool_cudnn_op.cu.cc b/paddle/operators/pool_cudnn_op.cu.cc index 66dd194ccd5ed629c5861552a7c124dc911362d7..fc2b37bd0fbac82005e709779b2939843b839596 100644 --- a/paddle/operators/pool_cudnn_op.cu.cc +++ b/paddle/operators/pool_cudnn_op.cu.cc @@ -162,12 +162,12 @@ class PoolCudnnGradOpKernel : public framework::OpKernel { namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(pool2d_cudnn, ops::PoolCudnnOpKernel, - ops::PoolCudnnOpKernel); -REGISTER_OP_GPU_KERNEL(pool2d_cudnn_grad, ops::PoolCudnnGradOpKernel, - ops::PoolCudnnGradOpKernel); - -REGISTER_OP_GPU_KERNEL(pool3d_cudnn, ops::PoolCudnnOpKernel, - ops::PoolCudnnOpKernel); -REGISTER_OP_GPU_KERNEL(pool3d_cudnn_grad, ops::PoolCudnnGradOpKernel, - ops::PoolCudnnGradOpKernel); +REGISTER_OP_CUDA_KERNEL(pool2d_cudnn, ops::PoolCudnnOpKernel, + ops::PoolCudnnOpKernel); +REGISTER_OP_CUDA_KERNEL(pool2d_cudnn_grad, ops::PoolCudnnGradOpKernel, + ops::PoolCudnnGradOpKernel); + +REGISTER_OP_CUDA_KERNEL(pool3d_cudnn, ops::PoolCudnnOpKernel, + ops::PoolCudnnOpKernel); +REGISTER_OP_CUDA_KERNEL(pool3d_cudnn_grad, ops::PoolCudnnGradOpKernel, + ops::PoolCudnnGradOpKernel); diff --git a/paddle/operators/pool_op.cc b/paddle/operators/pool_op.cc index d8c58618cf703d086d3cabc927ebc5eb038b1aec..45fa20280c1ad20f63d6542d5199e002ff60495f 100644 --- a/paddle/operators/pool_op.cc +++ b/paddle/operators/pool_op.cc @@ -105,7 +105,7 @@ Pool2dOpMaker::Pool2dOpMaker(framework::OpProto *proto, // TypedAttrChecker don't support vector type.) AddAttr>( "paddings", - "(vector, defalut {0,0}), paddings(height, width) of pooling " + "(vector, default {0,0}), paddings(height, width) of pooling " "operator." "If global_pooling = true, paddings and ksize will be ignored.") .SetDefault({0, 0}); // TODO(Chengduo): Add checker. (Currently, @@ -122,15 +122,15 @@ Parameters(ksize, strides, paddings) are two elements. These two elements represent height and width, respectively. The input(X) size and output(Out) size may be different. -Example: +Example: Input: X shape: $(N, C, H_{in}, W_{in})$ Output: Out shape: $(N, C, H_{out}, W_{out})$ - where + Where $$ - H_{out} = (H_{in} - ksize[0] + 2 * paddings[0]) / strides[0] + 1 \\ - W_{out} = (W_{in} - ksize[1] + 2 * paddings[1]) / strides[1] + 1 + H_{out} = \frac{(H_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1 \\ + W_{out} = \frac{(W_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1 $$ )DOC"); @@ -177,7 +177,7 @@ Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto, // TypedAttrChecker don't support vector type.) AddAttr>( "paddings", - "(vector, defalut {0,0,0}), paddings(depth, height, " + "(vector, default {0,0,0}), paddings(depth, height, " "width) of pooling operator. " "If global_pooling = true, ksize and paddings will be ignored.") .SetDefault({0, 0, 0}); // TODO(Chengduo): Add checker. 
(Currently, @@ -199,12 +199,12 @@ Example: X shape: $(N, C, D_{in}, H_{in}, W_{in})$ Output: Out shape: $(N, C, D_{out}, H_{out}, W_{out})$ - where - $$ - D_{out} = (D_{in} - ksize[0] + 2 * paddings[0]) / strides[0] + 1 \\ - H_{out} = (H_{in} - ksize[1] + 2 * paddings[1]) / strides[1] + 1 \\ - W_{out} = (W_{in} - ksize[2] + 2 * paddings[2]) / strides[2] + 1 - $$ + Where + $$ + D_{out} = \frac{(D_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1 \\ + H_{out} = \frac{(H_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1 \\ + W_{out} = \frac{(W_{in} - ksize[2] + 2 * paddings[2])}{strides[2]} + 1 + $$ )DOC"); } @@ -216,19 +216,19 @@ namespace ops = paddle::operators; REGISTER_OP(pool2d, ops::PoolOp, ops::Pool2dOpMaker, pool2d_grad, ops::PoolOpGrad); -REGISTER_OP_CPU_KERNEL(pool2d, - ops::PoolKernel, - ops::PoolKernel); -REGISTER_OP_CPU_KERNEL(pool2d_grad, - ops::PoolGradKernel, - ops::PoolGradKernel) +REGISTER_OP_CPU_KERNEL( + pool2d, ops::PoolKernel, + ops::PoolKernel); +REGISTER_OP_CPU_KERNEL( + pool2d_grad, ops::PoolGradKernel, + ops::PoolGradKernel) REGISTER_OP(pool3d, ops::PoolOp, ops::Pool3dOpMaker, pool3d_grad, ops::PoolOpGrad); -REGISTER_OP_CPU_KERNEL(pool3d, - ops::PoolKernel, - ops::PoolKernel); -REGISTER_OP_CPU_KERNEL(pool3d_grad, - ops::PoolGradKernel, - ops::PoolGradKernel); +REGISTER_OP_CPU_KERNEL( + pool3d, ops::PoolKernel, + ops::PoolKernel); +REGISTER_OP_CPU_KERNEL( + pool3d_grad, ops::PoolGradKernel, + ops::PoolGradKernel); diff --git a/paddle/operators/pool_op.cu.cc b/paddle/operators/pool_op.cu.cc index 1010cb762289dd39cd632c699f7528f4ba638278..39a9dfbf794b3dbaf81e2435f8609014dc27f3af 100644 --- a/paddle/operators/pool_op.cu.cc +++ b/paddle/operators/pool_op.cu.cc @@ -16,16 +16,18 @@ limitations under the License. */ namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(pool2d, - ops::PoolKernel, - ops::PoolKernel); -REGISTER_OP_GPU_KERNEL(pool2d_grad, - ops::PoolGradKernel, - ops::PoolGradKernel); +REGISTER_OP_CUDA_KERNEL( + pool2d, ops::PoolKernel, + ops::PoolKernel); +REGISTER_OP_CUDA_KERNEL( + pool2d_grad, + ops::PoolGradKernel, + ops::PoolGradKernel); -REGISTER_OP_GPU_KERNEL(pool3d, - ops::PoolKernel, - ops::PoolKernel); -REGISTER_OP_GPU_KERNEL(pool3d_grad, - ops::PoolGradKernel, - ops::PoolGradKernel); +REGISTER_OP_CUDA_KERNEL( + pool3d, ops::PoolKernel, + ops::PoolKernel); +REGISTER_OP_CUDA_KERNEL( + pool3d_grad, + ops::PoolGradKernel, + ops::PoolGradKernel); diff --git a/paddle/operators/pool_op.h b/paddle/operators/pool_op.h index 63492a89e8d4e44a036bc3c2b16cc54c7e77b534..ab85d587a3131237d7a9ec774a11193c70220c7c 100644 --- a/paddle/operators/pool_op.h +++ b/paddle/operators/pool_op.h @@ -50,7 +50,7 @@ class Pool3dOpMaker : public framework::OpProtoAndCheckerMaker { framework::OpAttrChecker* op_checker); }; -template +template class PoolKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -67,41 +67,41 @@ class PoolKernel : public framework::OpKernel { ksize[i] = static_cast(in_x->dims()[i + 2]); } } - + auto& dev_ctx = context.template device_context(); switch (ksize.size()) { case 2: { if (pooling_type == "max") { paddle::operators::math::Pool2dFunctor< - Place, paddle::operators::math::MaxPool, T> + DeviceContext, paddle::operators::math::MaxPool, T> pool2d_forward; paddle::operators::math::MaxPool pool_process; - pool2d_forward(context.device_context(), *in_x, ksize, strides, - paddings, pool_process, out); + pool2d_forward(dev_ctx, *in_x, ksize, strides, paddings, pool_process, + 
out); } else if (pooling_type == "avg") { paddle::operators::math::Pool2dFunctor< - Place, paddle::operators::math::AvgPool, T> + DeviceContext, paddle::operators::math::AvgPool, T> pool2d_forward; paddle::operators::math::AvgPool pool_process; - pool2d_forward(context.device_context(), *in_x, ksize, strides, - paddings, pool_process, out); + pool2d_forward(dev_ctx, *in_x, ksize, strides, paddings, pool_process, + out); } } break; case 3: { if (pooling_type == "max") { paddle::operators::math::Pool3dFunctor< - Place, paddle::operators::math::MaxPool, T> + DeviceContext, paddle::operators::math::MaxPool, T> pool3d_forward; paddle::operators::math::MaxPool pool_process; - pool3d_forward(context.device_context(), *in_x, ksize, strides, - paddings, pool_process, out); + pool3d_forward(dev_ctx, *in_x, ksize, strides, paddings, pool_process, + out); } else if (pooling_type == "avg") { paddle::operators::math::Pool3dFunctor< - Place, paddle::operators::math::AvgPool, T> + DeviceContext, paddle::operators::math::AvgPool, T> pool3d_forward; paddle::operators::math::AvgPool pool_process; - pool3d_forward(context.device_context(), *in_x, ksize, strides, - paddings, pool_process, out); + pool3d_forward(dev_ctx, *in_x, ksize, strides, paddings, pool_process, + out); } } break; default: { PADDLE_THROW("Pool op only supports 2D and 3D input."); } @@ -109,7 +109,7 @@ class PoolKernel : public framework::OpKernel { } }; -template +template class PoolGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -130,42 +130,43 @@ class PoolGradKernel : public framework::OpKernel { ksize[i] = static_cast(in_x->dims()[i + 2]); } } - + auto& dev_ctx = context.template device_context(); if (in_x_grad) { in_x_grad->mutable_data(context.GetPlace()); auto temp = framework::EigenVector::Flatten(*in_x_grad); - temp.device(context.GetEigenDevice()) = + temp.device( + *context.template device_context().eigen_device()) = temp.constant(static_cast(0)); switch (ksize.size()) { case 2: { if (pooling_type == "max") { - paddle::operators::math::MaxPool2dGradFunctor + paddle::operators::math::MaxPool2dGradFunctor pool2d_backward; - pool2d_backward(context.device_context(), *in_x, *out, *out_grad, - ksize, strides, paddings, in_x_grad); + pool2d_backward(dev_ctx, *in_x, *out, *out_grad, ksize, strides, + paddings, in_x_grad); } else if (pooling_type == "avg") { paddle::operators::math::Pool2dGradFunctor< - Place, paddle::operators::math::AvgPoolGrad, T> + DeviceContext, paddle::operators::math::AvgPoolGrad, T> pool2d_backward; paddle::operators::math::AvgPoolGrad pool_process; - pool2d_backward(context.device_context(), *in_x, *out, *out_grad, - ksize, strides, paddings, pool_process, in_x_grad); + pool2d_backward(dev_ctx, *in_x, *out, *out_grad, ksize, strides, + paddings, pool_process, in_x_grad); } } break; case 3: { if (pooling_type == "max") { - paddle::operators::math::MaxPool3dGradFunctor + paddle::operators::math::MaxPool3dGradFunctor pool3d_backward; - pool3d_backward(context.device_context(), *in_x, *out, *out_grad, - ksize, strides, paddings, in_x_grad); + pool3d_backward(dev_ctx, *in_x, *out, *out_grad, ksize, strides, + paddings, in_x_grad); } else if (pooling_type == "avg") { paddle::operators::math::Pool3dGradFunctor< - Place, paddle::operators::math::AvgPoolGrad, T> + DeviceContext, paddle::operators::math::AvgPoolGrad, T> pool3d_backward; paddle::operators::math::AvgPoolGrad pool_process; - pool3d_backward(context.device_context(), *in_x, *out, 
*out_grad, - ksize, strides, paddings, pool_process, in_x_grad); + pool3d_backward(dev_ctx, *in_x, *out, *out_grad, ksize, strides, + paddings, pool_process, in_x_grad); } } break; default: { PADDLE_THROW("Pool op only supports 2D and 3D input."); } diff --git a/paddle/operators/pool_with_index_op.cc b/paddle/operators/pool_with_index_op.cc index 4958fa645405db0798f37165030eae95da371477..1a2383f8b80357d2927c3b6a8c57c787ba7e366d 100644 --- a/paddle/operators/pool_with_index_op.cc +++ b/paddle/operators/pool_with_index_op.cc @@ -142,7 +142,7 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { // TypedAttrChecker don't support vector type.) AddAttr>( "paddings", - "(vector, defalut:{0, 0}), paddings(height, width) of pooling " + "(vector, default:{0, 0}), paddings(height, width) of pooling " "operator. " "If global_pooling = true, paddings and will be ignored.") .SetDefault({0, 0}); // TODO(Chengduo): Add checker. (Currently, @@ -166,10 +166,10 @@ Example: Output: Out shape: $(N, C, H_{out}, W_{out})$ Mask shape: $(N, C, H_{out}, W_{out})$ - where + Where $$ - H_{out} = (H_{in} - ksize[0] + 2 * paddings[0]) / strides[0] + 1 \\ - W_{out} = (W_{in} - ksize[1] + 2 * paddings[1]) / strides[1] + 1 + H_{out} = \frac{(H_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1 \\ + W_{out} = \frac{(W_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1 $$ )DOC"); @@ -220,7 +220,7 @@ class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { // TypedAttrChecker don't support vector type.) AddAttr>( "paddings", - "(vector, defalut {0,0,0}), paddings(depth, " + "(vector, default {0,0,0}), paddings(depth, " "height, width) of pooling operator. " "If global_pooling = true, paddings and ksize will be ignored.") .SetDefault({0, 0, 0}); // TODO(Chengduo): Add checker. 
(Currently, @@ -244,11 +244,11 @@ Example: Output: Out shape: $(N, C, D_{out}, H_{out}, W_{out})$ Mask shape: $(N, C, D_{out}, H_{out}, W_{out})$ - where + Where $$ - D_{out} = (D_{in} - ksize[0] + 2 * paddings[0]) / strides[0] + 1 \\ - H_{out} = (H_{in} - ksize[1] + 2 * paddings[1]) / strides[1] + 1 \\ - W_{out} = (W_{in} - ksize[2] + 2 * paddings[2]) / strides[2] + 1 + D_{out} = \frac{(D_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1 \\ + H_{out} = \frac{(H_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1 \\ + W_{out} = \frac{(W_{in} - ksize[2] + 2 * paddings[2])}{strides[2]} + 1 $$ )DOC"); @@ -266,12 +266,15 @@ REGISTER_OP(max_pool2d_with_index, ops::MaxPoolWithIndexOp, REGISTER_OP_CPU_KERNEL( max_pool2d_with_index, - ops::MaxPoolWithIndexKernel, - ops::MaxPoolWithIndexKernel); + ops::MaxPoolWithIndexKernel, + ops::MaxPoolWithIndexKernel); REGISTER_OP_CPU_KERNEL( max_pool2d_with_index_grad, - ops::MaxPoolWithIndexGradKernel, - ops::MaxPoolWithIndexGradKernel) + ops::MaxPoolWithIndexGradKernel, + ops::MaxPoolWithIndexGradKernel) REGISTER_OP(max_pool3d_with_index, ops::MaxPoolWithIndexOp, ops::MaxPool3dWithIndexOpMaker, max_pool3d_with_index_grad, @@ -279,9 +282,12 @@ REGISTER_OP(max_pool3d_with_index, ops::MaxPoolWithIndexOp, REGISTER_OP_CPU_KERNEL( max_pool3d_with_index, - ops::MaxPoolWithIndexKernel, - ops::MaxPoolWithIndexKernel); + ops::MaxPoolWithIndexKernel, + ops::MaxPoolWithIndexKernel); REGISTER_OP_CPU_KERNEL( max_pool3d_with_index_grad, - ops::MaxPoolWithIndexGradKernel, - ops::MaxPoolWithIndexGradKernel) + ops::MaxPoolWithIndexGradKernel, + ops::MaxPoolWithIndexGradKernel) diff --git a/paddle/operators/pool_with_index_op.cu.cc b/paddle/operators/pool_with_index_op.cu.cc index 335064a7eea4ec15c529db5254cbb026ba575f3d..4c9804da639e3ad44f90963b53948cd8b755a6ac 100644 --- a/paddle/operators/pool_with_index_op.cu.cc +++ b/paddle/operators/pool_with_index_op.cu.cc @@ -16,20 +16,28 @@ limitations under the License. 
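A quick numeric check of the output-size formulas above (the same expression is used by pool2d, pool3d and the max-pool-with-index variants): with $H_{in} = 7$, ksize $= 3$, padding $= 0$ and stride $= 2$,

$$
H_{out} = \frac{7 - 3 + 2 \cdot 0}{2} + 1 = 3
$$

so a 7-wide input produces a 3-wide pooled output.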
*/ namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( max_pool2d_with_index, - ops::MaxPoolWithIndexKernel, - ops::MaxPoolWithIndexKernel); -REGISTER_OP_GPU_KERNEL( + ops::MaxPoolWithIndexKernel, + ops::MaxPoolWithIndexKernel); +REGISTER_OP_CUDA_KERNEL( max_pool2d_with_index_grad, - ops::MaxPoolWithIndexGradKernel, - ops::MaxPoolWithIndexGradKernel) + ops::MaxPoolWithIndexGradKernel, + ops::MaxPoolWithIndexGradKernel) -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( max_pool3d_with_index, - ops::MaxPoolWithIndexKernel, - ops::MaxPoolWithIndexKernel); -REGISTER_OP_GPU_KERNEL( + ops::MaxPoolWithIndexKernel, + ops::MaxPoolWithIndexKernel); +REGISTER_OP_CUDA_KERNEL( max_pool3d_with_index_grad, - ops::MaxPoolWithIndexGradKernel, - ops::MaxPoolWithIndexGradKernel) + ops::MaxPoolWithIndexGradKernel, + ops::MaxPoolWithIndexGradKernel) diff --git a/paddle/operators/pool_with_index_op.h b/paddle/operators/pool_with_index_op.h index 40766c7e821e8b85aeda9473798a1f696d0ad719..4f4087d1dd36d6e91cdd9a9253dd72a71735e136 100644 --- a/paddle/operators/pool_with_index_op.h +++ b/paddle/operators/pool_with_index_op.h @@ -24,7 +24,7 @@ namespace operators { using Tensor = framework::Tensor; -template +template class MaxPoolWithIndexKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -35,6 +35,8 @@ class MaxPoolWithIndexKernel : public framework::OpKernel { std::vector ksize = context.Attr>("ksize"); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); + + auto& dev_ctx = context.template device_context(); if (context.Attr("global_pooling")) { for (size_t i = 0; i < ksize.size(); ++i) { paddings[i] = 0; @@ -44,23 +46,23 @@ class MaxPoolWithIndexKernel : public framework::OpKernel { switch (ksize.size()) { case 2: { - paddle::operators::math::MaxPool2dWithIndexFunctor + paddle::operators::math::MaxPool2dWithIndexFunctor pool2d_forward; - pool2d_forward(context.device_context(), *in_x, ksize, strides, - paddings, out, mask); + pool2d_forward(dev_ctx, *in_x, ksize, strides, paddings, out, mask); } break; case 3: { - paddle::operators::math::MaxPool3dWithIndexFunctor + paddle::operators::math::MaxPool3dWithIndexFunctor pool3d_forward; - pool3d_forward(context.device_context(), *in_x, ksize, strides, - paddings, out, mask); + pool3d_forward(dev_ctx, *in_x, ksize, strides, paddings, out, mask); } break; default: { PADDLE_THROW("Pool op only supports 2D and 3D input."); } } } }; -template +template class MaxPoolWithIndexGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -81,18 +83,20 @@ class MaxPoolWithIndexGradKernel : public framework::OpKernel { if (in_x_grad) { in_x_grad->mutable_data(context.GetPlace()); - auto& device_ctx = context.device_context(); + auto& device_ctx = context.template device_context(); math::set_constant(device_ctx, in_x_grad, 0); switch (ksize.size()) { case 2: { - paddle::operators::math::MaxPool2dWithIndexGradFunctor + paddle::operators::math::MaxPool2dWithIndexGradFunctor pool2d_backward; pool2d_backward(device_ctx, *out_grad, *mask, ksize, strides, paddings, in_x_grad); } break; case 3: { - paddle::operators::math::MaxPool3dWithIndexGradFunctor + paddle::operators::math::MaxPool3dWithIndexGradFunctor pool3d_backward; pool3d_backward(device_ctx, *out_grad, *mask, ksize, strides, paddings, in_x_grad); diff --git a/paddle/operators/positive_negative_pair_op.h 
b/paddle/operators/positive_negative_pair_op.h index 2efd3777e04c17b27c07bccde524de5785af35fe..977e59b7d2f771fc4c3412f0092f1eba92ef22da 100644 --- a/paddle/operators/positive_negative_pair_op.h +++ b/paddle/operators/positive_negative_pair_op.h @@ -22,7 +22,7 @@ namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; -template +template class PositiveNegativePairKernel : public framework::OpKernel { public: struct PredictionResult { diff --git a/paddle/operators/precision_recall_op.h b/paddle/operators/precision_recall_op.h index 4a871ce6741469cf9af409ec90215f721d52f36c..c0d55405a362809f414b8dc3b12ed692f96c24e9 100644 --- a/paddle/operators/precision_recall_op.h +++ b/paddle/operators/precision_recall_op.h @@ -26,7 +26,7 @@ using EigenMatrix = framework::EigenMatrix; enum StateVariable { TP = 0, FP, TN, FN }; -template +template class PrecisionRecallKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { diff --git a/paddle/operators/prelu_op.cc b/paddle/operators/prelu_op.cc index 055c471b4561e5fd3c7a65c6f81d66cdce1a5578..317a2a40154f92f2e13a3012d2f7a63df9a69afb 100644 --- a/paddle/operators/prelu_op.cc +++ b/paddle/operators/prelu_op.cc @@ -85,7 +85,8 @@ namespace ops = paddle::operators; REGISTER_OP(prelu, ops::PReluOp, ops::PReluOpMaker, prelu_grad, ops::PReluGradOp); -REGISTER_OP_CPU_KERNEL(prelu, - ops::PReluKernel); -REGISTER_OP_CPU_KERNEL(prelu_grad, - ops::PReluGradKernel); +REGISTER_OP_CPU_KERNEL( + prelu, ops::PReluKernel); +REGISTER_OP_CPU_KERNEL( + prelu_grad, + ops::PReluGradKernel); diff --git a/paddle/operators/prelu_op.cu b/paddle/operators/prelu_op.cu index 9e391dabae735cc8a740b46b50d31d271f99b65d..12033dee0e1c190b08080023d6746fcad48db2fd 100644 --- a/paddle/operators/prelu_op.cu +++ b/paddle/operators/prelu_op.cu @@ -14,8 +14,9 @@ #include "paddle/operators/prelu_op.h" -REGISTER_OP_GPU_KERNEL( - prelu, paddle::operators::PReluKernel); -REGISTER_OP_GPU_KERNEL( - prelu_grad, - paddle::operators::PReluGradKernel); +REGISTER_OP_CUDA_KERNEL( + prelu, + paddle::operators::PReluKernel); +REGISTER_OP_CUDA_KERNEL(prelu_grad, + paddle::operators::PReluGradKernel< + paddle::platform::CUDADeviceContext, float>); diff --git a/paddle/operators/prelu_op.h b/paddle/operators/prelu_op.h index 5ad31c2203ae6c9bf6f48bb9ecf9a714597e7da8..56f9a553ec12d5bfa745af63ec0570ad30910628 100644 --- a/paddle/operators/prelu_op.h +++ b/paddle/operators/prelu_op.h @@ -39,7 +39,7 @@ class PReluFunctor { const T* alpha_; }; -template +template class PReluKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -54,9 +54,9 @@ class PReluKernel : public framework::OpKernel { int numel = x->numel(); - Transform trans; - trans(context.device_context(), x_ptr, x_ptr + numel, o_ptr, - PReluFunctor(alpha_ptr)); + Transform trans; + trans(context.template device_context(), x_ptr, + x_ptr + numel, o_ptr, PReluFunctor(alpha_ptr)); } }; @@ -76,7 +76,7 @@ class PReluGradFunctor { const T* alpha_; }; -template +template class PReluGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -92,9 +92,9 @@ class PReluGradKernel : public framework::OpKernel { const T* out_ptr = out->data(); int numel = dx->numel(); - Transform trans; - trans(context.device_context(), out_ptr, out_ptr + numel, dout_ptr, dx_ptr, - PReluGradFunctor(alpha_ptr)); + Transform trans; + trans(context.template 
device_context(), out_ptr, + out_ptr + numel, dout_ptr, dx_ptr, PReluGradFunctor(alpha_ptr)); // TODO(Zhuoyuan): add dalpha upgrade when GPU kernels ready } diff --git a/paddle/operators/proximal_adagrad_op.cc b/paddle/operators/proximal_adagrad_op.cc index 36e460103ab46bf6f1408840a0699793e2be134d..cc350f6d26e6d8bd6e59f2fda74a3b734df55247 100644 --- a/paddle/operators/proximal_adagrad_op.cc +++ b/paddle/operators/proximal_adagrad_op.cc @@ -114,4 +114,4 @@ REGISTER_OP_WITHOUT_GRADIENT(proximal_adagrad, ops::ProximalAdagradOp, ops::ProximalAdagradOpMaker); REGISTER_OP_CPU_KERNEL( proximal_adagrad, - ops::ProximalAdagradOpKernel); + ops::ProximalAdagradOpKernel); diff --git a/paddle/operators/proximal_adagrad_op.cu b/paddle/operators/proximal_adagrad_op.cu index d0ae0395184ae4f794565f2e28c57f960f0ccbeb..42a178f94b94c8e80ec8f9b5e6471b75878b65d1 100644 --- a/paddle/operators/proximal_adagrad_op.cu +++ b/paddle/operators/proximal_adagrad_op.cu @@ -15,6 +15,6 @@ specific language governing permissions and limitations under the License. */ #include "paddle/operators/proximal_adagrad_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( proximal_adagrad, - ops::ProximalAdagradOpKernel); + ops::ProximalAdagradOpKernel); diff --git a/paddle/operators/proximal_adagrad_op.h b/paddle/operators/proximal_adagrad_op.h index 7a1560e8cb339a306ab19513808aab165f82cc8a..523924d80e127d9ad2483e6b239fb948aa72200c 100644 --- a/paddle/operators/proximal_adagrad_op.h +++ b/paddle/operators/proximal_adagrad_op.h @@ -24,7 +24,7 @@ template using EigenVector = framework::EigenVector; -template +template class ProximalAdagradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -45,20 +45,20 @@ class ProximalAdagradOpKernel : public framework::OpKernel { auto p_out = EigenVector::Flatten(*param_out); auto m_out = EigenVector::Flatten(*moment_out); - auto place = ctx.GetEigenDevice(); + auto* place = ctx.template device_context().eigen_device(); Eigen::DSizes grad_dsize(grad->numel()); - m_out.device(place) = m + g * g; + m_out.device(*place) = m + g * g; auto prox_param = p - lr.broadcast(grad_dsize) * g / m_out.sqrt(); if (l1 > static_cast(0)) { - p_out.device(place) = + p_out.device(*place) = prox_param.sign() * (((prox_param.abs() - (lr * l1).broadcast(grad_dsize)) .cwiseMax(static_cast(0.0))) / (static_cast(1.0) + (lr * l2).broadcast(grad_dsize))); } else { - p_out.device(place) = + p_out.device(*place) = prox_param / (static_cast(1.0) + (lr * l2).broadcast(grad_dsize)); } } diff --git a/paddle/operators/proximal_gd_op.cc b/paddle/operators/proximal_gd_op.cc index 5693d0ec9ebf4c470dfa5141b6eeee431f24f2ea..0b26beb3ac3803c78f45cc2ce0a8f444bdc313b6 100644 --- a/paddle/operators/proximal_gd_op.cc +++ b/paddle/operators/proximal_gd_op.cc @@ -94,4 +94,5 @@ namespace ops = paddle::operators; REGISTER_OP_WITHOUT_GRADIENT(proximal_gd, ops::ProximalGDOp, ops::ProximalGDOpMaker); REGISTER_OP_CPU_KERNEL( - proximal_gd, ops::ProximalGDOpKernel); + proximal_gd, + ops::ProximalGDOpKernel); diff --git a/paddle/operators/proximal_gd_op.cu b/paddle/operators/proximal_gd_op.cu index 26f4ebaa0f43620fee7ece2d71755be94a0e01a5..b7dd840d19a13cd3329fb68563693a80d22291ca 100644 --- a/paddle/operators/proximal_gd_op.cu +++ b/paddle/operators/proximal_gd_op.cu @@ -15,5 +15,6 @@ specific language governing permissions and limitations under the License. 
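For reference, the ProximalAdagrad update applied by the kernel above, as read from its Eigen expressions (with $\lambda_1$, $\lambda_2$ denoting the l1/l2 attributes):

$$
m_{out} = m + g^2, \qquad
\hat{p} = p - \frac{lr \cdot g}{\sqrt{m_{out}}}, \qquad
p_{out} =
\begin{cases}
\dfrac{\operatorname{sign}(\hat{p}) \cdot \max(|\hat{p}| - lr\,\lambda_1,\ 0)}{1 + lr\,\lambda_2} & \lambda_1 > 0 \\[6pt]
\dfrac{\hat{p}}{1 + lr\,\lambda_2} & \text{otherwise}
\end{cases}
$$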
*/ #include "paddle/operators/proximal_gd_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( - proximal_gd, ops::ProximalGDOpKernel); +REGISTER_OP_CUDA_KERNEL( + proximal_gd, + ops::ProximalGDOpKernel); diff --git a/paddle/operators/proximal_gd_op.h b/paddle/operators/proximal_gd_op.h index bebda0204173ec5c3ec9a7a9da6fb623171f4cea..64648b3ccaf9615c995d65464607105d87c04198 100644 --- a/paddle/operators/proximal_gd_op.h +++ b/paddle/operators/proximal_gd_op.h @@ -24,7 +24,7 @@ template using EigenVector = framework::EigenVector; -template +template class ProximalGDOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -42,7 +42,7 @@ class ProximalGDOpKernel : public framework::OpKernel { auto lr = EigenVector::Flatten(*ctx.Input("LearningRate")); auto p_out = EigenVector::Flatten(*param_out); - auto place = ctx.GetEigenDevice(); + auto& place = *ctx.template device_context().eigen_device(); Eigen::DSizes grad_dsize(grad->numel()); diff --git a/paddle/operators/rank_loss_op.cc b/paddle/operators/rank_loss_op.cc index 061e82412ea5f4f17fd26a7094e68b97138cc09c..b80b175792f3fc56d689c187b7182198542d7345 100644 --- a/paddle/operators/rank_loss_op.cc +++ b/paddle/operators/rank_loss_op.cc @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -35,9 +35,10 @@ class RankLossOp : public framework::OperatorWithKernel { auto right_dims = ctx->GetInputDim("Right"); PADDLE_ENFORCE((label_dims == left_dims) && (left_dims == right_dims), - "All inputs must have the same size"); - PADDLE_ENFORCE((label_dims.size() == 2) && (label_dims[1] == 1), - "All inputs must be row vector with size batch_size x 1."); + "All inputs must have the same size."); + PADDLE_ENFORCE( + (label_dims.size() == 2) && (label_dims[1] == 1), + "All inputs must be 2-D tensors with shape [batch_size x 1]."); ctx->SetOutputDim("Out", label_dims); } }; @@ -48,10 +49,17 @@ class RankLossOpMaker : public framework::OpProtoAndCheckerMaker { framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Label", - "The label indicating A ranked higher than B or not, row vector."); - AddInput("Left", "The output of RankNet for doc A, vector."); - AddInput("Right", "The output of RankNet for doc B, vetor."); - AddOutput("Out", "The output loss of RankLoss operator, vector."); + "(2-D Tensor with shape [batch_size x 1]) " + "The label indicating A ranked higher than B or not."); + AddInput("Left", + "(2-D Tensor with shape [batch_size x 1]) " + "The output of RankNet for doc A."); + AddInput("Right", + "(2-D Tensor with shape [batch_size x 1]) " + "The output of RankNet for doc B."); + AddOutput("Out", + "(2-D Tensor with shape [batch_size x 1]) " + "The output loss of RankLoss operator."); AddComment(R"DOC( RankLoss Operator. @@ -65,16 +73,17 @@ P = {0, 1} or {0, 0.5, 1}, where 0.5 means no information about the rank of the input pair. 
The RankLoss operator takes three inputs: Left (o_i), Right (o_j) and Label -(P_{i,j}), which represent the output of RankNet for the two docs and the label, -respectively, and yields the rank loss C_{i,j} using the following equation: +(P_{i,j}), which represent the output score of RankNet for the two docs and +the label respectively, and yields the rank loss C_{i,j} using the following +equation: -\f$$ - C_{i,j} = -\tilde{P_{ij}} * o_{i,j} + log(1 + e^{o_{i,j}}) \\ +$$ + C_{i,j} = -\tilde{P_{ij}} * o_{i,j} + \log(1 + e^{o_{i,j}}) \\ o_{i,j} = o_i - o_j \\ \tilde{P_{i,j}} = \left \{0, 0.5, 1 \right \} \ or \ \left \{0, 1 \right \} -\f$$ +$$ -The operator can take inputs of one sample or in batch. +The operator can take batch inputs with size batch_size (batch_size >= 1). )DOC"); } @@ -114,7 +123,8 @@ namespace ops = paddle::operators; REGISTER_OP(rank_loss, ops::RankLossOp, ops::RankLossOpMaker, rank_loss_grad, ops::RankLossGradOp); -REGISTER_OP_CPU_KERNEL(rank_loss, - ops::RankLossKernel); REGISTER_OP_CPU_KERNEL( - rank_loss_grad, ops::RankLossGradKernel); + rank_loss, ops::RankLossKernel); +REGISTER_OP_CPU_KERNEL( + rank_loss_grad, + ops::RankLossGradKernel); diff --git a/paddle/operators/rank_loss_op.cu b/paddle/operators/rank_loss_op.cu index 779588ff36c792b8925a535d60f1cfbbe3c66d86..5aee66443d60c8e20625880ba2ec9606b8a007a0 100644 --- a/paddle/operators/rank_loss_op.cu +++ b/paddle/operators/rank_loss_op.cu @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -14,9 +14,9 @@ #include "paddle/operators/rank_loss_op.h" -REGISTER_OP_GPU_KERNEL( - rank_loss, - paddle::operators::RankLossKernel); -REGISTER_OP_GPU_KERNEL( - rank_loss_grad, - paddle::operators::RankLossGradKernel); +REGISTER_OP_CUDA_KERNEL(rank_loss, + paddle::operators::RankLossKernel< + paddle::platform::CUDADeviceContext, float>); +REGISTER_OP_CUDA_KERNEL(rank_loss_grad, + paddle::operators::RankLossGradKernel< + paddle::platform::CUDADeviceContext, float>); diff --git a/paddle/operators/rank_loss_op.h b/paddle/operators/rank_loss_op.h index f184d6efcb496a1d7f38540712b6c431f816482e..ea24b61fd94b57950e79b7c1ddb13fa165953538 100644 --- a/paddle/operators/rank_loss_op.h +++ b/paddle/operators/rank_loss_op.h @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,7 @@ namespace paddle { namespace operators { -template +template class RankLossKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { @@ -35,13 +35,13 @@ class RankLossKernel : public framework::OpKernel { auto left = framework::EigenVector::Flatten(*left_t); auto right = framework::EigenVector::Flatten(*right_t); - auto& dev = ctx.GetEigenDevice(); + auto& dev = *ctx.template device_context().eigen_device(); out.device(dev) = (1. 
+ (left - right).exp()).log() - label * (left - right); } }; -template +template class RankLossGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { @@ -55,7 +55,7 @@ class RankLossGradKernel : public framework::OpKernel { auto* left_t = ctx.Input("Left"); auto* right_t = ctx.Input("Right"); - auto& dev = ctx.GetEigenDevice(); + auto& dev = *ctx.template device_context().eigen_device(); auto d_out = framework::EigenVector::Flatten(*d_out_t); auto label = framework::EigenVector::Flatten(*label_t); auto left = framework::EigenVector::Flatten(*left_t); diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc index c976e22c7740ad11279ab5ee75e4d130be8fa0c5..29f91636438449f90ea3ffee8adc21595aabe202 100644 --- a/paddle/operators/recurrent_op.cc +++ b/paddle/operators/recurrent_op.cc @@ -408,7 +408,8 @@ class RecurrentGradOp : public RecurrentBase { attrs["value"] = 0.0f; auto zero_op = framework::OpRegistry::CreateOp( - "fill_constant", {}, {{"Out", {pg_names[param_id]}}}, attrs); + "fill_constant", framework::VariableNameMap{}, + {{"Out", {pg_names[param_id]}}}, attrs); zero_op->Run(scope, dev_ctx); } @@ -417,7 +418,7 @@ class RecurrentGradOp : public RecurrentBase { auto sum_op = framework::OpRegistry::CreateOp( "sum", {{"X", {pg_names[param_id], new_inside_name}}}, - {{"Out", {pg_names[param_id]}}}, {}); + {{"Out", {pg_names[param_id]}}}, framework::AttributeMap{}); sum_op->Run(cur_scope, dev_ctx); cur_scope.Rename(new_inside_name, inside_grad_name); @@ -599,7 +600,9 @@ class RecurrentGradOpShapeInference : public framework::InferShapeBase { std::vector output{kOutputs}; for (auto &s : input) { PADDLE_ENFORCE(ctx->HasInputs(s)); - PADDLE_ENFORCE(ctx->HasOutputs(framework::GradVarName(s))); + PADDLE_ENFORCE(ctx->HasOutputs(framework::GradVarName(s)), + "Cannot find the gradient variable %s", + framework::GradVarName(s)); } for (auto &s : output) { PADDLE_ENFORCE(ctx->HasInputs(s)); diff --git a/paddle/operators/recv_op.cc b/paddle/operators/recv_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..eed482c1b458cd442ede523838b400d85c23a155 --- /dev/null +++ b/paddle/operators/recv_op.cc @@ -0,0 +1,123 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
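The rank loss equation quoted in the RankLoss comment above is easy to sanity-check numerically. Below is a minimal standalone C++ sketch of that formula, using plain vectors instead of the operator's [batch_size x 1] tensors; the function name and values are illustrative only.

```cpp
#include <cmath>
#include <cstdio>
#include <vector>

// Standalone check of C_{i,j} = -P~_{ij} * o_{i,j} + log(1 + exp(o_{i,j})),
// with o_{i,j} = o_i - o_j, mirroring the RankLoss comment above.
std::vector<double> RankLoss(const std::vector<double>& left,
                             const std::vector<double>& right,
                             const std::vector<double>& label) {
  std::vector<double> out(left.size());
  for (size_t i = 0; i < left.size(); ++i) {
    double o = left[i] - right[i];  // o_{i,j}
    out[i] = -label[i] * o + std::log(1.0 + std::exp(o));
  }
  return out;
}

int main() {
  // One pair where doc A should rank higher (label 1) and one tie (label 0.5).
  auto loss = RankLoss({2.0, 1.0}, {0.5, 1.0}, {1.0, 0.5});
  std::printf("%.4f %.4f\n", loss[0], loss[1]);  // ~0.2014 and ~0.6931
  return 0;
}
```

The second pair has o_{i,j} = 0 and label 0.5, so its loss reduces to log(2), consistent with the Eigen expression `(1. + (left - right).exp()).log() - label * (left - right)` used by the CPU and CUDA kernels in this hunk.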
*/ + +#include +#include +#include +#include + +#include + +#include "paddle/framework/data_type.h" +#include "paddle/framework/executor.h" +#include "paddle/framework/framework.pb.h" +#include "paddle/framework/lod_tensor.h" +#include "paddle/framework/op_registry.h" +#include "paddle/operators/detail/send_recv_impl.h" +#include "paddle/operators/detail/simple_block_queue.h" + +namespace paddle { +namespace operators { + +void RunServer(Server **rpc_server, + std::shared_ptr service, + const std::string &server_address) { + ServerBuilder builder; + builder.AddListeningPort(server_address, grpc::InsecureServerCredentials()); + builder.RegisterService(service.get()); + std::unique_ptr server(builder.BuildAndStart()); + *rpc_server = server.get(); + LOG(INFO) << "Server listening on " << server_address << std::endl; + server->Wait(); +} + +class RecvOp : public framework::OperatorBase { + public: + RecvOp(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorBase(type, inputs, outputs, attrs) { + if (!rpc_service_) { + rpc_service_.reset(new detail::SendRecvServerImpl()); + std::string endpoint = Attr("endpoint"); + server_thread_.reset( + new std::thread(RunServer, &rpc_server_, rpc_service_, endpoint)); + } + } + + virtual ~RecvOp() { + rpc_server_->Shutdown(); + server_thread_->join(); + } + + void Run(const framework::Scope &scope, + const platform::DeviceContext &dev_ctx) const override { + // blocking get one var from client. + const framework::LoDTensor &t = rpc_service_->Get(); + framework::Scope &recv_scope = scope.NewScope(); + // set graph input var + auto *var = recv_scope.Var(Input("RX")); + auto *tensor = var->GetMutable(); + // FIXME(typhoonzero): do not copy + framework::CopyFrom(t, dev_ctx.GetPlace(), dev_ctx, tensor); + + std::string program_str = Attr("OptimizeProgram"); + framework::ProgramDesc program_desc; + program_desc.ParseFromString(program_str); + framework::ProgramDescBind program(program_desc); + framework::Executor executor(dev_ctx); + // Run sub graph to get optimized tensor + executor.Run(program, &recv_scope, 0, /*global_block*/ + false /*create_local_scope*/); + + auto *out_var = recv_scope.FindVar("Out"); + // push back + rpc_service_->Push(out_var->Get()); + } + + protected: + // grpc server instance to track status and gracefully shutdown. + // borrow an pointer from server thread. + Server *rpc_server_{nullptr}; + // grpc send/recv service implement to register. 
+ std::shared_ptr rpc_service_; + std::shared_ptr server_thread_; +}; + +class RecvOpMaker : public framework::OpProtoAndCheckerMaker { + public: + RecvOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("RX", "(Tensor) Input tensor to be saved"); + AddComment(R"DOC( +Recv operator + +This operator will recv tensor from send_op +)DOC"); + AddAttr("endpoint", + "(string, default 127.0.0.1:6164)" + "IP address to listen on.") + .SetDefault("127.0.0.1:6164") + .AddCustomChecker([](const std::string &ip) { return !ip.empty(); }); + AddAttr("OptimizeProgram", "type string", + "Serialized ProgramDesc string for recv to run."); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(recv, ops::RecvOp, ops::RecvOpMaker); diff --git a/paddle/operators/reduce_op.cc b/paddle/operators/reduce_op.cc index 2589a54cfc7fc5bc11ae983797d480a134e0eb25..b754637bf29225615f129d7423d60518e053ca18 100644 --- a/paddle/operators/reduce_op.cc +++ b/paddle/operators/reduce_op.cc @@ -180,12 +180,13 @@ REGISTER_OP(reduce_max, ops::ReduceOp, ops::ReduceMaxOpMaker, reduce_max_grad, REGISTER_OP(reduce_min, ops::ReduceOp, ops::ReduceMinOpMaker, reduce_min_grad, ops::ReduceGradOp); -#define REGISTER_REDUCE_CPU_KERNEL(reduce_type, functor, grad_functor) \ - REGISTER_OP_CPU_KERNEL( \ - reduce_type, \ - ops::ReduceKernel); \ - REGISTER_OP_CPU_KERNEL(reduce_type##_grad, \ - ops::ReduceGradKernel); +#define REGISTER_REDUCE_CPU_KERNEL(reduce_type, functor, grad_functor) \ + REGISTER_OP_CPU_KERNEL(reduce_type, \ + ops::ReduceKernel); \ + REGISTER_OP_CPU_KERNEL( \ + reduce_type##_grad, \ + ops::ReduceGradKernel); FOR_EACH_KERNEL_FUNCTOR(REGISTER_REDUCE_CPU_KERNEL); diff --git a/paddle/operators/reduce_op.cu b/paddle/operators/reduce_op.cu index d306e1a24096d737438d71d4d4abc35328d160cb..a10ace5253b850db5855bef8384278edebc9e45f 100644 --- a/paddle/operators/reduce_op.cu +++ b/paddle/operators/reduce_op.cu @@ -17,12 +17,13 @@ namespace ops = paddle::operators; -#define REGISTER_REDUCE_GPU_KERNEL(reduce_type, functor, grad_functor) \ - REGISTER_OP_GPU_KERNEL( \ - reduce_type, \ - ops::ReduceKernel); \ - REGISTER_OP_GPU_KERNEL(reduce_type##_grad, \ - ops::ReduceGradKernel); +#define REGISTER_REDUCE_GPU_KERNEL(reduce_type, functor, grad_functor) \ + REGISTER_OP_CUDA_KERNEL( \ + reduce_type, ops::ReduceKernel); \ + REGISTER_OP_CUDA_KERNEL( \ + reduce_type##_grad, \ + ops::ReduceGradKernel); FOR_EACH_KERNEL_FUNCTOR(REGISTER_REDUCE_GPU_KERNEL); diff --git a/paddle/operators/reduce_op.h b/paddle/operators/reduce_op.h index dd6547542d16b0fe336184a0c09a8498027db6ea..47ce910f2821467c701a7f5e22a8dbe5c8c95c92 100644 --- a/paddle/operators/reduce_op.h +++ b/paddle/operators/reduce_op.h @@ -32,55 +32,55 @@ template ; struct SumFunctor { - template - void operator()(const Place& place, X& x, Y& y, const Dim& dim) { + template + void operator()(const DeviceContext& place, X& x, Y& y, const Dim& dim) { y.device(place) = x.sum(dim); } }; struct SumGradFunctor { - template - void operator()(const Place& place, X& x, Y& y, DX& dx, DY& dy, + template + void operator()(const DeviceContext& place, X& x, Y& y, DX& dx, DY& dy, const Dim& dim, int size) { dx.device(place) = dy.broadcast(dim); } }; struct MeanFunctor { - template - void operator()(const Place& place, X& x, Y& y, const Dim& dim) { + template + void operator()(const DeviceContext& place, X& x, Y& y, const Dim& dim) { y.device(place) = x.mean(dim); } }; 
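For readers less familiar with the Eigen Tensor expressions these reduce functors wrap, the following small sketch (assuming Eigen's unsupported CXX11 Tensor module is available; it is not PaddlePaddle code) shows what `x.sum(dim)` and `x.mean(dim)` evaluate to for a 2 x 3 tensor reduced along its second dimension:

```cpp
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 2> x(2, 3);
  x.setValues({{1, 2, 3}, {4, 5, 6}});

  // Reduce along dimension 1, as SumFunctor/MeanFunctor do via y.device(place) = x.sum(dim).
  Eigen::array<int, 1> dim = {1};
  Eigen::Tensor<float, 1> sum = x.sum(dim);    // [6, 15]
  Eigen::Tensor<float, 1> mean = x.mean(dim);  // [2, 5]

  std::cout << sum(0) << " " << sum(1) << " "
            << mean(0) << " " << mean(1) << "\n";  // prints "6 15 2 5"
  return 0;
}
```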
struct MeanGradFunctor { - template - void operator()(const Place& place, X& x, Y& y, DX& dx, DY& dy, + template + void operator()(const DeviceContext& place, X& x, Y& y, DX& dx, DY& dy, const Dim& dim, int size) { dx.device(place) = dy.broadcast(dim) / dx.constant(size); } }; struct MaxFunctor { - template - void operator()(const Place& place, X& x, Y& y, const Dim& dim) { + template + void operator()(const DeviceContext& place, X& x, Y& y, const Dim& dim) { y.device(place) = x.maximum(dim); } }; struct MinFunctor { - template - void operator()(const Place& place, X& x, Y& y, const Dim& dim) { + template + void operator()(const DeviceContext& place, X& x, Y& y, const Dim& dim) { y.device(place) = x.minimum(dim); } }; struct MaxOrMinGradFunctor { - template - void operator()(const Place& place, X& x, Y& y, DX& dx, DY& dy, + template + void operator()(const DeviceContext& place, X& x, Y& y, DX& dx, DY& dy, const Dim& dim, int size) { auto equals = x == y.broadcast(dim); auto ones = dx.constant(1); @@ -91,7 +91,7 @@ struct MaxOrMinGradFunctor { } }; -template +template class ReduceKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -139,7 +139,8 @@ class ReduceKernel : public framework::OpKernel { dims = framework::make_ddim(dims_vector); } - auto& place = context.GetEigenDevice(); + auto& place = + *context.template device_context().eigen_device(); Functor functor; if (D == 1) { @@ -152,7 +153,7 @@ class ReduceKernel : public framework::OpKernel { } }; -template +template class ReduceGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -201,7 +202,8 @@ class ReduceGradKernel : public framework::OpKernel { Eigen::array broadcast_dim; for (size_t i = 0; i < D; ++i) broadcast_dim[i] = 1; broadcast_dim[dim] = input0->dims()[dim]; - auto& place = context.GetEigenDevice(); + auto& place = + *context.template device_context().eigen_device(); Functor functor; functor(place, x, x_reduce, x_grad, x_reduce_grad, broadcast_dim, broadcast_dim[dim]); diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc index ba774ec2160c0460867de42f7ad9d5cd65ad8d6a..7fd33bf662a1d0b7b6fa4e772bdadbf34b2f4fdd 100644 --- a/paddle/operators/reshape_op.cc +++ b/paddle/operators/reshape_op.cc @@ -1,11 +1,10 @@ - /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
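The change repeated across these kernels, from `ctx.GetEigenDevice<Place>()` to `*ctx.template device_context<DeviceContext>().eigen_device()`, re-templates each kernel on a device-context type that owns the Eigen device rather than on the Eigen place itself. A minimal sketch of that pattern follows; `FakeCPUDeviceContext` and `Scale` are hypothetical names for illustration, not the actual framework classes.

```cpp
#include <unsupported/Eigen/CXX11/Tensor>

// Hypothetical context type: it owns an Eigen device and hands it to kernels,
// mirroring the DeviceContext::eigen_device() calls introduced in this patch.
struct FakeCPUDeviceContext {
  Eigen::DefaultDevice* eigen_device() { return &device_; }
  Eigen::DefaultDevice device_;
};

// Kernel templated on the context type instead of an Eigen "Place".
template <typename DeviceContext, typename T>
void Scale(DeviceContext& ctx, const Eigen::Tensor<T, 1>& in,
           Eigen::Tensor<T, 1>& out, T scale) {
  auto& place = *ctx.eigen_device();
  out.device(place) = in * scale;  // same Eigen expression style as the kernels above
}

int main() {
  FakeCPUDeviceContext ctx;
  Eigen::Tensor<float, 1> in(3), out(3);
  in.setValues({1.f, 2.f, 3.f});
  Scale(ctx, in, out, 2.f);  // out = [2, 4, 6]
  return 0;
}
```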
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -38,8 +37,8 @@ class ReshapeOp : public framework::OperatorWithKernel { // TODO(qiao) change batch_size for (size_t i = 1; i < shape.size(); ++i) { PADDLE_ENFORCE(shape[i] > 0, - "Each dimension of shape " - "must be positiv except the first."); + "Each dimension of Attr(shape) " + "must be positive except the first one."); } if (shape[0] < 0) { shape[0] = x_dims[0]; @@ -85,9 +84,9 @@ Given a 2-D tensor X with 2 rows and 2 columns [[1, 2], [3, 4]] and target shape = [1, 4], the reshape operator will transform -the tensor X into a 1-D tensor: +the tensor X into a 2-D tensor: - [1, 2, 3, 4] + [[1, 2, 3, 4]] )DOC"); } diff --git a/paddle/operators/reshape_op.cu.cc b/paddle/operators/reshape_op.cu similarity index 88% rename from paddle/operators/reshape_op.cu.cc rename to paddle/operators/reshape_op.cu index 23dbe089d3b37aabedf9ef166f7bbfbf67da7e0a..b7329238c0ea8ebb374d35bd7cddced3dfee1a2c 100644 --- a/paddle/operators/reshape_op.cu.cc +++ b/paddle/operators/reshape_op.cu @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -14,9 +14,9 @@ #include "paddle/operators/reshape_op.h" -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( reshape, paddle::operators::ReshapeKernel); -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( reshape_grad, paddle::operators::ReshapeGradKernel); diff --git a/paddle/operators/reshape_op.h b/paddle/operators/reshape_op.h index 0e98c8b4f443f88ecba044f2f79228227695e182..92d8cbbb56e224fe67e630bdfcb16d7df44f2846 100644 --- a/paddle/operators/reshape_op.h +++ b/paddle/operators/reshape_op.h @@ -4,7 +4,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -20,7 +20,7 @@ namespace paddle { namespace operators { -template +template class ReshapeKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { @@ -33,7 +33,7 @@ class ReshapeKernel : public framework::OpKernel { } }; -template +template class ReshapeGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { diff --git a/paddle/operators/rmsprop_op.cc b/paddle/operators/rmsprop_op.cc index a9c45f639c6728ff2fd6de6fcdadfe5032a705d7..fc3f9b8988ec7fe0093ef6b09a105747b0025ec1 100644 --- a/paddle/operators/rmsprop_op.cc +++ b/paddle/operators/rmsprop_op.cc @@ -116,5 +116,5 @@ http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf) namespace ops = paddle::operators; REGISTER_OP_WITHOUT_GRADIENT(rmsprop, ops::RmspropOp, ops::RmspropOpMaker); -REGISTER_OP_CPU_KERNEL(rmsprop, - ops::RmspropOpKernel); +REGISTER_OP_CPU_KERNEL( + rmsprop, ops::RmspropOpKernel); diff --git a/paddle/operators/rmsprop_op.cu b/paddle/operators/rmsprop_op.cu index 52634a54816bcd5ad0ba82a56f1df95110112265..2a9fd6e1044e923b9ccffab834ff64df0f7cf5d7 100644 --- a/paddle/operators/rmsprop_op.cu +++ b/paddle/operators/rmsprop_op.cu @@ -16,5 +16,5 @@ #include "paddle/operators/rmsprop_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(rmsprop, - ops::RmspropOpKernel); +REGISTER_OP_CUDA_KERNEL( + rmsprop, ops::RmspropOpKernel); diff --git a/paddle/operators/rmsprop_op.h b/paddle/operators/rmsprop_op.h index 7bf2129010f994966d79ef11d5cec30159b47068..16a561835d02457cf2268f713289001773e63d6c 100644 --- a/paddle/operators/rmsprop_op.h +++ b/paddle/operators/rmsprop_op.h @@ -24,7 +24,7 @@ template using EigenVector = framework::EigenVector; -template +template class RmspropOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -51,7 +51,7 @@ class RmspropOpKernel : public framework::OpKernel { auto p_out = EigenVector::Flatten(*param_out); auto mom_out = EigenVector::Flatten(*moment_out); auto ms_out = EigenVector::Flatten(*mean_square_out); - auto place = ctx.GetEigenDevice(); + auto& place = *ctx.template device_context().eigen_device(); Eigen::DSizes grad_dsize(grad->numel()); diff --git a/paddle/operators/roi_pool_op.cc b/paddle/operators/roi_pool_op.cc index 2b5e66c96b726a3c1fdb2596a244c5395db85279..75fcea8401fbbc2943c0d6a50ca81288268823d8 100644 --- a/paddle/operators/roi_pool_op.cc +++ b/paddle/operators/roi_pool_op.cc @@ -157,9 +157,10 @@ namespace ops = paddle::operators; REGISTER_OP(roi_pool, ops::ROIPoolOp, ops::ROIPoolOpMaker, roi_pool_grad, ops::ROIPoolGradOp); REGISTER_OP_CPU_KERNEL( - roi_pool, ops::CPUROIPoolOpKernel, - ops::CPUROIPoolOpKernel); + roi_pool, + ops::CPUROIPoolOpKernel, + ops::CPUROIPoolOpKernel); REGISTER_OP_CPU_KERNEL( roi_pool_grad, - ops::CPUROIPoolGradOpKernel, - ops::CPUROIPoolOpKernel); + ops::CPUROIPoolGradOpKernel, + ops::CPUROIPoolOpKernel); diff --git a/paddle/operators/roi_pool_op.cu b/paddle/operators/roi_pool_op.cu index 9a4c8ca752bb7abc4f44d4815743769bc989703a..a874befe4d12029afa9ce55230da22cb048000aa 100644 --- a/paddle/operators/roi_pool_op.cu +++ b/paddle/operators/roi_pool_op.cu @@ -177,7 +177,7 @@ class GPUROIPoolGradOpKernel : 
public framework::OpKernel { if (x_grad) { x_grad->mutable_data(ctx.GetPlace()); math::SetConstant set_zero; - set_zero(ctx.device_context(), x_grad, static_cast(0)); + set_zero(ctx.cuda_device_context(), x_grad, static_cast(0)); int output_grad_size = out_grad->numel(); int blocks = NumBlocks(output_grad_size); @@ -199,10 +199,11 @@ class GPUROIPoolGradOpKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( - roi_pool, ops::GPUROIPoolOpKernel, - ops::GPUROIPoolOpKernel); -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( + roi_pool, + ops::GPUROIPoolOpKernel, + ops::GPUROIPoolOpKernel); +REGISTER_OP_CUDA_KERNEL( roi_pool_grad, - ops::GPUROIPoolGradOpKernel, - ops::GPUROIPoolOpKernel); + ops::GPUROIPoolGradOpKernel, + ops::GPUROIPoolOpKernel); diff --git a/paddle/operators/roi_pool_op.h b/paddle/operators/roi_pool_op.h index 1691eb482b03eab9fc793974ba1f39fbf17beafa..09a9d3d870c1066f1c6f780c4b3203679e9e7505 100644 --- a/paddle/operators/roi_pool_op.h +++ b/paddle/operators/roi_pool_op.h @@ -19,7 +19,7 @@ limitations under the License. */ namespace paddle { namespace operators { -template +template class CPUROIPoolOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -126,60 +126,55 @@ class CPUROIPoolOpKernel : public framework::OpKernel { } }; -template +template class CPUROIPoolGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input("X"); auto* rois = ctx.Input("ROIs"); auto* argmax = ctx.Input("Argmax"); - auto* out_grad = ctx.Input(framework::GradVarName("Out")); - auto* x_grad = ctx.Output(framework::GradVarName("X")); + auto* in_grad = ctx.Output(framework::GradVarName("X")); auto pooled_height = ctx.Attr("pooled_height"); auto pooled_width = ctx.Attr("pooled_width"); - if (x_grad) { - int channels = in->dims()[1]; + if (in_grad) { + const int64_t* rois_data = rois->data(); + const T* out_grad_data = out_grad->data(); + const int64_t* argmax_data = argmax->data(); + T* in_grad_data = in_grad->mutable_data(ctx.GetPlace()); + math::SetConstant set_zero; + set_zero(ctx.template device_context(), in_grad, + static_cast(0)); + auto in_stride = framework::stride(in->dims()); + auto argmax_stride = framework::stride(argmax->dims()); auto roi_stride = framework::stride(rois->dims()); + auto out_stride = framework::stride(out_grad->dims()); - const int64_t* rois_data = rois->data(); int rois_num = rois->dims()[0]; + int channels = in->dims()[1]; - T* x_grad_data = x_grad->mutable_data(ctx.GetPlace()); - math::SetConstant set_zero; - set_zero(ctx.device_context(), x_grad, static_cast(0)); - - size_t roi_offset = roi_stride[0]; - size_t batch_offset = in_stride[0]; - size_t channel_offset = in_stride[1]; - - const T* out_grad_data = out_grad->data(); - size_t pool_channel_offset = pooled_height * pooled_width; - const int64_t* argmax_data = argmax->data(); - - for (size_t n = 0; n < rois_num; ++n) { - size_t roi_batch_idx = rois_data[0]; - T* batch_grad_data = x_grad_data + batch_offset * roi_batch_idx; + for (int n = 0; n < rois_num; ++n) { + int roi_batch_idx = rois_data[0]; + T* batch_grad_data = in_grad_data + roi_batch_idx * in_stride[0]; for (int c = 0; c < channels; ++c) { for (int ph = 0; ph < pooled_height; ++ph) { for (int pw = 0; pw < pooled_width; ++pw) { - size_t pool_index = ph * pooled_width + pw; - + int pool_index = ph * pooled_width + pw; if 
(argmax_data[pool_index] >= 0) { - size_t index = static_cast(argmax_data[pool_index]); + auto index = argmax_data[pool_index]; batch_grad_data[index] += out_grad_data[pool_index]; } } } - batch_grad_data += channel_offset; - out_grad_data += pool_channel_offset; - argmax_data += pool_channel_offset; + batch_grad_data += in_stride[1]; + out_grad_data += out_stride[1]; + argmax_data += argmax_stride[1]; } - rois_data += roi_offset; + rois_data += roi_stride[0]; } } } diff --git a/paddle/operators/row_conv_op.cc b/paddle/operators/row_conv_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..5203a5079c8b125f8dc156202f70ce76711a1e30 --- /dev/null +++ b/paddle/operators/row_conv_op.cc @@ -0,0 +1,260 @@ +/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/operators/row_conv_op.h" +#include "paddle/framework/eigen.h" + +namespace paddle { +namespace operators { + +using LoDTensor = framework::LoDTensor; +using framework::Tensor; + +template +using EigenMatrix = framework::EigenMatrix; + +class RowConvOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of RowConvOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Filter"), + "Input(Filter) of RowConvOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of RowConvOp should not be null."); + + auto x_dims = ctx->GetInputDim("X"); + auto filter_dims = ctx->GetInputDim("Filter"); + PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank should be 2."); + PADDLE_ENFORCE_EQ(filter_dims.size(), 2, "Input(Y)'s rank should be 2."); + PADDLE_ENFORCE_EQ( + x_dims[1], filter_dims[1], + "The 2nd dimension of Input(X) and Input(Filter) should be same."); + ctx->SetOutputDim("Out", x_dims); + ctx->ShareLoD("X", "Out"); + } +}; + +class RowConvGradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Filter"), + "Input(Filter) should not be null."); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), + "Gradient of output(Out) should not be null."); + + auto x_grad_name = framework::GradVarName("X"); + if (ctx->HasOutput(x_grad_name)) { + auto x_dims = ctx->GetInputDim("X"); + ctx->SetOutputDim(x_grad_name, x_dims); + } + + auto filter_grad_name = framework::GradVarName("Filter"); + if (ctx->HasOutput(filter_grad_name)) { + auto filter_dims = ctx->GetInputDim("Filter"); + ctx->SetOutputDim(filter_grad_name, filter_dims); + } + } +}; + +class RowConvOpMaker : public framework::OpProtoAndCheckerMaker { + public: + RowConvOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : 
framework::OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", + "(LoDTensor), the input(X) is a LodTensor, which supports " + "variable time-length input sequences. The underlying tensor " + "in this LoDTensor is a matrix with shape (T x N), where T " + "is the total time steps in this mini-batch and N is the input " + "data dimension."); + AddInput("Filter", + "(Tensor), the input(Filter) is a learnable parameter. It " + "is a 2-D tensor with shape (future_context x N), where, " + "future_context is the future context length and N is the data " + "dimension."); + AddOutput("Out", + "(LoDTensor), the output(Out) is a LodTensor, which supports " + "variable time-length input sequences. The underlying tensor " + "in this LodTensor is a matrix with shape T x N, i.e., the " + "same shape as X."); + AddComment(R"DOC( +Row-convolution Operator. + +The row convolution is called lookahead convolution. This operator was +introduced in the following paper for DeepSpeech2: +http://www.cs.cmu.edu/~dyogatam/papers/wang+etal.iclrworkshop2016.pdf + +The main motivation is that a bidirectional RNN, useful in DeepSpeech +like speech models, learns representation for a sequence by performing a +forward and a backward pass through the entire sequence. However, unlike +unidirectional RNNs, bidirectional RNNs are challenging to deploy in an online +and low-latency setting. The lookahead convolution incorporates information +from future subsequences in a computationally efficient manner to improve +unidirectional recurrent neural networks. The row convolution operator is +different from the 1D sequence convolution, and is computed as follows: + +Given an input sequence $in$ of length $t$ and input dimension $d$, +and a filter ($W$) of size $context \times d$, +the output sequence is convolved as: + +$$ +out_{i, :} = \sum_{j=i}^{i + context} in_{j,:} \dot W_{i-j, :} +$$ + +)DOC"); + } +}; + +template +class RowConvKernel + : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override { + auto *x = context.Input("X"); + auto *filter = context.Input("Filter"); + auto *out = context.Output("Out"); + + out->mutable_data(context.GetPlace()); + + auto batch_indices = x->lod()[0]; + auto input_dim = x->dims()[1]; // 'in' is of size T x N + size_t num_sequence = batch_indices.size() - 1; + + auto future_context = filter->dims()[0]; + auto weights = EigenMatrix::From(*filter); + + for (size_t i = 0; i < num_sequence; i++) { + int start = static_cast(batch_indices[i]); + int end = static_cast(batch_indices[i + 1]); + int current_timesteps = end - start; + Tensor cur_input_sequence = + x->Slice(start, end); // Current input sequence + Tensor cur_output_sequence = + out->Slice(start, end); // Current output sequence + auto cip_seq = EigenMatrix::From(cur_input_sequence); + auto cot_seq = EigenMatrix::From(cur_output_sequence); + + for (int k = 0; k < current_timesteps; + k++) { // For different time steps in the same sequence + for (int w = 0; (w < future_context) && ((k + w) < current_timesteps); + w++) { + for (int d = 0; d < input_dim; d++) { + if (w == 0) { + cot_seq(k, d) = weights(w, d) * cip_seq(k + w, d); + } else { + cot_seq(k, d) += weights(w, d) * cip_seq(k + w, d); + } + } + } + } + } + } +}; + +template +class RowConvGradKernel + : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override { + auto *x = context.Input("X"); + auto *filter = context.Input("Filter"); + auto *d_out = 
context.Input(framework::GradVarName("Out")); + auto *dx = context.Output(framework::GradVarName("X")); + auto *d_filter = context.Output(framework::GradVarName("Filter")); + + auto input_dim = x->dims()[1]; // 'x' is of size T x N + auto batch_indices = x->lod()[0]; + size_t num_sequence = batch_indices.size() - 1; + auto future_context = filter->dims()[0]; + + if (d_filter) { + d_filter->mutable_data(context.GetPlace()); + auto dweights = + EigenMatrix::From(*d_filter); // Gradient of weight matrix + dweights.setZero(); + + for (size_t i = 0; i < num_sequence; i++) { // For different sequences + int start = static_cast(batch_indices[i]); + int end = static_cast(batch_indices[i + 1]); + + Tensor cur_input = x->Slice(start, end); // Current input sequence + Tensor cur_doutput = + d_out->Slice(start, end); // Current output grad sequence + + auto cur_ip = EigenMatrix::From(cur_input); + auto cur_dout = EigenMatrix::From(cur_doutput); + int current_timesteps = end - start; + + for (int k = 0; k < current_timesteps; + k++) { // For different time steps in the same sequence + for (int w = 0; (w < future_context) && ((k + w) < current_timesteps); + w++) { + // For dweights (Updating the gradient of weight matrix) + for (int d = 0; d < input_dim; d++) { + dweights(w, d) += cur_ip(k + w, d) * cur_dout(k, d); + } + } + } + } + } + + if (dx) { + dx->mutable_data(context.GetPlace()); + auto weights = EigenMatrix::From(*filter); + for (size_t i = 0; i < num_sequence; i++) { // For different sequences + int start = static_cast(batch_indices[i]); + int end = static_cast(batch_indices[i + 1]); + + Tensor cur_doutput = + d_out->Slice(start, end); // Current output grad sequence + Tensor cur_dinput = + dx->Slice(start, end); // Current input grad sequence + + auto cur_dout = EigenMatrix::From(cur_doutput); + auto cur_dip = EigenMatrix::From(cur_dinput); + cur_dip.setZero(); + int current_timesteps = end - start; + + for (int k = 0; k < current_timesteps; + k++) { // For different time steps in the same sequence + for (int w = 0; (w < future_context) && ((k + w) < current_timesteps); + w++) { + // For dinput (Updating the gradient wrt input) + for (int d = 0; d < input_dim; d++) { + cur_dip(k + w, d) += weights(w, d) * cur_dout(k, d); + } + } + } + } + } + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(row_conv, ops::RowConvOp, ops::RowConvOpMaker, row_conv_grad, + ops::RowConvGradOp); +REGISTER_OP_CPU_KERNEL( + row_conv, ops::RowConvKernel); +REGISTER_OP_CPU_KERNEL( + row_conv_grad, + ops::RowConvGradKernel); diff --git a/paddle/operators/row_conv_op.cu b/paddle/operators/row_conv_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..56a98ff299e8263179306756631949761e386f70 --- /dev/null +++ b/paddle/operators/row_conv_op.cu @@ -0,0 +1,410 @@ +/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
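The lookahead (row) convolution described in the RowConv comment above accumulates, for each time step k and feature d, the products filter[w][d] * in[k + w][d] over the future context. Below is a standalone sketch on one short sequence, mirroring the loop structure of the CPU kernel but using plain vectors rather than LoDTensors; the sizes and values are illustrative only.

```cpp
#include <cstdio>
#include <vector>

// Plain-C++ sketch of the row (lookahead) convolution computed by the CPU kernel
// above: out[k][d] = sum over w of filter[w][d] * in[k + w][d], for k + w < T.
using Matrix = std::vector<std::vector<double>>;

Matrix RowConv(const Matrix& in, const Matrix& filter) {
  const size_t T = in.size(), D = in[0].size(), context = filter.size();
  Matrix out(T, std::vector<double>(D, 0.0));
  for (size_t k = 0; k < T; ++k)
    for (size_t w = 0; w < context && k + w < T; ++w)
      for (size_t d = 0; d < D; ++d)
        out[k][d] += filter[w][d] * in[k + w][d];
  return out;
}

int main() {
  // T = 3 time steps, D = 2 features, future_context = 2.
  Matrix in = {{1, 1}, {2, 2}, {3, 3}};
  Matrix filter = {{1, 1}, {0.5, 0.5}};
  Matrix out = RowConv(in, filter);
  std::printf("%g %g %g\n", out[0][0], out[1][0], out[2][0]);  // 2 3.5 3
  return 0;
}
```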
*/ + +#include "paddle/operators/math/math_function.h" +#include "paddle/operators/row_conv_op.h" +#include "paddle/platform/cuda_helper.h" + +namespace paddle { +namespace operators { + +using LoDTensor = framework::LoDTensor; +using framework::Tensor; + +namespace { + +inline int DivUp(int x, int y) { return (x + y - 1) / y; } + +// Forward prop (shared memory version, for small future_context) +template +__global__ void RowConvForwardSharedMemory(const T *in, const T *wt, + int num_sequence, int input_dim, + int future_context, + const size_t *batch_indices, + T *out) { + int blx = blockDim.x; + int bly = blockDim.y; + int thx = threadIdx.x; + int thy = threadIdx.y; + int d = blockIdx.x * blx + thx; // index along input dim + + extern __shared__ T mem[]; + T *sw = mem; + + if (thy < future_context) { + sw[thy * blx + thx] = + (d < input_dim) ? wt[thy * input_dim + d] : static_cast(0); + } + __syncthreads(); + + for (size_t i = 0; i < num_sequence; i++) { + int start = static_cast(batch_indices[i]); + int end = static_cast(batch_indices[i + 1]); + int current_timesteps = end - start; + for (int k = thy; k < current_timesteps; k += bly) { + T sum = 0; + for (int w = 0; (w < future_context) && ((k + w) < current_timesteps); + w++) { + sum += (d < input_dim) + ? sw[w * blx + thx] * in[(start + k + w) * input_dim + d] + : static_cast(0); + } + if (d < input_dim) { + out[(start + k) * input_dim + d] = sum; + } + } + } +} + +// Forward prop (naive version) +template +__global__ void RowConvForward(const T *in, const T *wt, int num_sequence, + int input_dim, int future_context, + const size_t *batch_indices, T *out) { + int d = blockIdx.x * blockDim.x + threadIdx.x; // index along input_dim + int bly = blockDim.y; + int thy = threadIdx.y; + + if (d >= input_dim) return; + + for (size_t i = 0; i < num_sequence; i++) { + int start = static_cast(batch_indices[i]); + int end = static_cast(batch_indices[i + 1]); + int current_timesteps = end - start; + for (int k = thy; k < current_timesteps; k += bly) { + T sum = 0; + for (int w = 0; (w < future_context) && ((k + w) < current_timesteps); + w++) { + sum += (wt[w * input_dim + d] * in[(start + k + w) * input_dim + d]); + } + out[(start + k) * input_dim + d] = sum; + } + } +} + +// Compute input gradient (shared memory version, for small future_context) +template +__global__ void RowConvGradInputSharedMemory(const T *dout, const T *wt, + int num_sequence, int input_dim, + int future_context, + const size_t *batch_indices, + T *din) { + int blx = blockDim.x; + int bly = blockDim.y; + int thx = threadIdx.x; + int thy = threadIdx.y; + int d = blockIdx.x * blx + thx; // index along input dim + + extern __shared__ T mem[]; + T *sw = mem; + if (thy < future_context) { + sw[thy * blx + thx] = + (d < input_dim) ? wt[thy * input_dim + d] : static_cast(0); + } + __syncthreads(); + + for (int i = 0; i < num_sequence; i++) { + int start = static_cast(batch_indices[i]); + int end = static_cast(batch_indices[i + 1]); + int current_timesteps = end - start; + for (int k = thy; k < current_timesteps; k += bly) { + T sum = 0; + for (int w = 0; (w < future_context) && ((k - w) >= 0); w++) { + sum += (d < input_dim) + ? 
(sw[w * blx + thx] * dout[(k + start - w) * input_dim + d]) + : static_cast(0); + } + if (d < input_dim) { + din[(k + start) * input_dim + d] = sum; + } + } + } +} + +// Compute input gradient (Naive version) +template +__global__ void RowConvGradInput(const T *dout, const T *wt, int num_sequence, + int input_dim, int future_context, + const size_t *batch_indices, T *din) { + int d = blockIdx.x * blockDim.x + threadIdx.x; // index along input_dim + int bly = blockDim.y; + int thy = threadIdx.y; + + if (d >= input_dim) return; + for (int i = 0; i < num_sequence; i++) { + int start = static_cast(batch_indices[i]); + int end = static_cast(batch_indices[i + 1]); + int current_timesteps = end - start; + for (int k = thy; k < current_timesteps; k += bly) { + T sum = 0; + for (int w = 0; (w < future_context) && ((k - w) >= 0); w++) { + sum += (wt[w * input_dim + d] * dout[(k + start - w) * input_dim + d]); + } + din[(k + start) * input_dim + d] = sum; + } + } +} + +// Compute W gradient (small future_context version) +template +__global__ void RowConvGradFilterImproved(const T *in, const T *dout, + int num_sequence, int input_dim, + int future_context, int block_x, + int block_y, + const size_t *batch_indices, + T *dfilter) { + int blx = blockDim.x; + int bly = blockDim.y; + int thx = threadIdx.x; + int thy = threadIdx.y; + int gx = blockIdx.x * blx; + int d = gx + thx; // index along input dim + + extern __shared__ T mem[]; + + int xdim_sh_in = block_y; + int xdim_sh_dout = block_y; + // int xdim_sh_dfilter = future_context; + int ydim_sh_in = block_x; + int ydim_sh_dout = block_x + future_context - 1; + int ydim_sh_dfilter = block_y; + + T *sh_in = mem; + T *sh_dout = &mem[xdim_sh_in * ydim_sh_in]; + T *sh_dfilter = &mem[xdim_sh_in * ydim_sh_in + xdim_sh_dout * ydim_sh_dout]; + + if (thy < future_context) { + sh_dfilter[thy * ydim_sh_dfilter + thx] = static_cast(0); + } + __syncthreads(); + + for (int i = 0; i < num_sequence; i++) { + int start = static_cast(batch_indices[i]); + int end = static_cast(batch_indices[i + 1]); + int current_timesteps = end - start; + int scaled_cur_steps = + ((current_timesteps + block_x - 1) / block_x) * block_x; + + for (int k = thy; k < scaled_cur_steps; k += block_x) { + int pos = start + k; + sh_in[thx * ydim_sh_in + thy] = + (d < input_dim && pos < end) ? in[pos * input_dim + d] : T(0); + sh_dout[thx * ydim_sh_dout + thy + future_context - 1] = + (d < input_dim && pos < end) ? dout[pos * input_dim + d] : T(0); + __syncthreads(); + + if (thy < future_context - 1) { + int pos_offset = pos - future_context + 1; + sh_dout[thx * ydim_sh_dout + thy] = + (d < input_dim && pos_offset >= start) + ? dout[pos_offset * input_dim + d] + : T(0); + } + __syncthreads(); + + for (int w = 0; w < future_context; w++) { + T val = sh_in[thy * ydim_sh_in + thx] * + sh_dout[thy * ydim_sh_dout + thx + future_context - 1 - w]; + __syncthreads(); + + for (int offset = 16; offset > 0; + offset = offset / 2) { // blockDim.x is 32. 
+ val += __shfl_down(val, offset); + } + __syncthreads(); + + if (thx == 0) { + sh_dfilter[w * ydim_sh_dfilter + thy] += val; + } + __syncthreads(); + } + } + } + for (int w = thy; (w < future_context) && (d < input_dim); w += bly) { + dfilter[w * input_dim + d] += sh_dfilter[w * ydim_sh_dfilter + thx]; + } +} + +// Compute weight(filter) gradient +template +__global__ void RowConvGradFilter(const T *in, const T *dout, int num_sequence, + int input_dim, int future_context, + int block_x, int block_y, + const size_t *batch_indices, T *dfilter) { + int blx = blockDim.x; + int thx = threadIdx.x; + int thy = threadIdx.y; + int gx = blockIdx.x * blx; + int d = gx + thx; // index along input dim + extern __shared__ T mem[]; + T *sh_in = mem; + T *sh_dout = &mem[block_x * block_y]; + + for (int i = 0; i < num_sequence; i++) { + int start = static_cast(batch_indices[i]); + int end = static_cast(batch_indices[i + 1]); + int current_timesteps = end - start; + int scaled_cur_steps = + ((current_timesteps + block_x - 1) / block_x) * block_x; + + for (int k = thy; k < scaled_cur_steps; k += block_x) { + int pos = start + k; + sh_in[thx * block_y + thy] = + (d < input_dim && pos < end) ? in[pos * input_dim + d] : 0.0; + __syncthreads(); + + for (int w = 0; w < future_context; w++) { + sh_dout[thx * block_y + thy] = + (d < input_dim && (k - w) >= 0 && (k - w) < current_timesteps) + ? dout[(pos - w) * input_dim + d] + : 0.0; + __syncthreads(); + + T val = sh_in[thy * block_y + thx] * sh_dout[thy * block_y + thx]; + __syncthreads(); + + for (int offset = 16; offset > 0; + offset = offset / 2) { // blockDim.x is 32. + val += __shfl_down(val, offset); + } + __syncthreads(); + + if (thx == 0 && (gx + thy) < input_dim) { + dfilter[w * input_dim + gx + thy] += val; + } + } + } + } +} + +} // namespace + +template +class RowConvKernel + : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override { + auto *X = context.Input("X"); + auto *Filter = context.Input("Filter"); + auto *Out = context.Output("Out"); + + const T *in = X->data(); + const T *weight = Filter->data(); + T *out = Out->mutable_data(context.GetPlace()); + + auto batch_indices = X->lod()[0]; + int input_dim = X->dims()[1]; + int num_sequence = batch_indices.size() - 1; + int future_context = Filter->dims()[0]; + size_t *idx = batch_indices.data(); + auto stream = context.cuda_device_context().stream(); + + if (future_context <= 32) { + dim3 block_dim = dim3(32, 32); + dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); + int mem_per_block = (future_context * block_dim.x) * sizeof(T); + RowConvForwardSharedMemory< + T><<>>( + in, weight, num_sequence, input_dim, future_context, idx, out); + } else { + dim3 block_dim = dim3(32, 32); + dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); + RowConvForward<<>>( + in, weight, num_sequence, input_dim, future_context, idx, out); + } + } +}; + +template +class RowConvGradKernel + : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override { + auto *X = context.Input("X"); + auto *Filter = context.Input("Filter"); + auto *dOut = context.Input(framework::GradVarName("Out")); + const T *in = X->data(); + const T *weights = Filter->data(); + const T *dout = dOut->data(); + + Tensor *dX = context.Output(framework::GradVarName("X")); + Tensor *dFilter = context.Output(framework::GradVarName("Filter")); + + auto batch_indices = X->lod()[0]; + int input_dim = X->dims()[1]; + int num_sequence = 
batch_indices.size() - 1; + int future_context = Filter->dims()[0]; + size_t *idx = batch_indices.data(); + + auto &device_ctx = context.cuda_device_context(); + math::SetConstant zero; + + if (dFilter) { + T *dfilter = dFilter->mutable_data(context.GetPlace()); + zero(device_ctx, dFilter, static_cast(0.0)); + + if (future_context <= 32) { + dim3 block_dim = dim3(32, 32); + dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); + int block_x = block_dim.x; + int block_y = block_dim.y; + int mem_per_block = + (block_y * block_x + block_y * (block_x + future_context - 1) + + future_context * block_y) * + sizeof(T); + RowConvGradFilterImproved< + T><<>>( + in, dout, num_sequence, input_dim, future_context, block_x, block_y, + idx, dfilter); + } else { + dim3 block_dim = dim3(32, 32); + dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); + int block_x = block_dim.x; + int block_y = block_dim.y; + int mem_per_block = + (block_x * block_y * 2) * sizeof(T); // For 2 arrays of size 32x32 + RowConvGradFilter< + T><<>>( + in, dout, num_sequence, input_dim, future_context, block_x, block_y, + idx, dfilter); + } + } + + if (dX) { + T *din = dX->mutable_data(context.GetPlace()); + if (future_context <= 32) { + dim3 block_dim = dim3(32, 32); + dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); + int mem_per_block = (future_context * block_dim.x) * sizeof(T); + RowConvGradInputSharedMemory< + T><<>>( + dout, weights, num_sequence, input_dim, future_context, idx, din); + } else { + dim3 block_dim = dim3(32, 32); + dim3 grid_dim = dim3(DivUp(input_dim, block_dim.x), 1); + RowConvGradInput<<>>( + dout, weights, num_sequence, input_dim, future_context, idx, din); + } + } + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + row_conv, ops::RowConvKernel); +REGISTER_OP_CUDA_KERNEL( + row_conv_grad, + ops::RowConvGradKernel); diff --git a/paddle/operators/row_conv_op.h b/paddle/operators/row_conv_op.h new file mode 100644 index 0000000000000000000000000000000000000000..80912ad8f73b3581efa9e263427e99304208d581 --- /dev/null +++ b/paddle/operators/row_conv_op.h @@ -0,0 +1,33 @@ +/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
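As a rough check of the launch configuration above: with the fixed 32 x 32 block, the improved filter-gradient kernel requests dynamic shared memory for three staging buffers (sh_in, sh_dout, sh_dfilter). The sketch below reproduces that arithmetic for an assumed future_context of 16 and T = float; the numbers are illustrative only.

```cpp
#include <cstdio>

// Reproduces the mem_per_block arithmetic from the RowConvGradKernel launch code above
// for the "improved" (shared-memory) filter-gradient path.
int main() {
  const int block_x = 32, block_y = 32;   // fixed dim3(32, 32) block
  const int future_context = 16;          // this path is taken when future_context <= 32
  const int elem = sizeof(float);

  int sh_in = block_y * block_x;                           // input tile
  int sh_dout = block_y * (block_x + future_context - 1);  // output-grad tile plus halo
  int sh_dfilter = future_context * block_y;               // per-block filter gradient
  int mem_per_block = (sh_in + sh_dout + sh_dfilter) * elem;

  // (1024 + 1504 + 512) * 4 = 12160 bytes, well under typical 48 KB per-block limits.
  std::printf("%d bytes of dynamic shared memory per block\n", mem_per_block);
  return 0;
}
```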
*/ + +#pragma once +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +template +class RowConvKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override; +}; + +template +class RowConvGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override; +}; +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/save_op.cc b/paddle/operators/save_op.cc index 56909fb65f44ad00314103e21bee9535fbd59317..d4921cb80c8d78c52ae1887c36819b52621470eb 100644 --- a/paddle/operators/save_op.cc +++ b/paddle/operators/save_op.cc @@ -88,73 +88,7 @@ class SaveOp : public framework::OperatorBase { "SaveOp only support LoDTensor, %s has wrong type", iname); auto &tensor = var->Get(); - - { // the 1st field, uint32_t version - constexpr uint32_t version = 0; - fout.write(reinterpret_cast(&version), sizeof(version)); - } - { // the 2nd field, tensor description - // int32_t size - // void* protobuf message - framework::TensorDesc desc; - desc.set_data_type(framework::ToDataType(tensor.type())); - auto dims = framework::vectorize(tensor.dims()); - auto *pb_dims = desc.mutable_dims(); - pb_dims->Resize(static_cast(dims.size()), 0); - std::copy(dims.begin(), dims.end(), pb_dims->begin()); - int32_t size = desc.ByteSize(); - fout.write(reinterpret_cast(&size), sizeof(size)); - auto out = desc.SerializeAsString(); - fout.write(out.data(), size); - } - { // the 3rd field, tensor data - uint64_t size = tensor.memory_size(); - auto *data_ptr = tensor.data(); - PADDLE_ENFORCE(size < std::numeric_limits::max(), - "Index overflow when writing tensor"); - if (platform::is_gpu_place(tensor.place())) { -#ifdef PADDLE_WITH_CUDA - constexpr size_t kBufSize = 1024 * 1024 * 64; // 64MB - std::unique_ptr buf(new char[kBufSize]); - auto &gpu_dev_ctx = - static_cast(dev_ctx); - platform::CPUPlace cpu; - uintptr_t data = reinterpret_cast(data_ptr); - while (size != 0) { - size_t size_to_write = std::min(kBufSize, static_cast(size)); - memory::Copy(cpu, buf.get(), - boost::get(tensor.place()), - reinterpret_cast(data), size_to_write, - gpu_dev_ctx.stream()); - gpu_dev_ctx.Wait(); - fout.write(buf.get(), size_to_write); - data += size_to_write; - size -= size_to_write; - } -#else - PADDLE_THROW("Unexpected branch"); -#endif - } else { - fout.write(static_cast(data_ptr), - static_cast(size)); - } - } - { // the 4th field, lod information - // uint64_t lod_level - // uint64_t lod_level_1 size in byte. - // int* lod_level_1 data - // ... 
- auto lod = tensor.lod(); - uint64_t size = lod.size(); - fout.write(reinterpret_cast(&size), sizeof(size)); - - for (auto &each : lod) { - size = each.size() * sizeof(framework::LoD::value_type::value_type); - fout.write(reinterpret_cast(&size), sizeof(size)); - fout.write(reinterpret_cast(each.data()), - static_cast(size)); - } - } + framework::SerializeToStream(fout, tensor, dev_ctx); } }; diff --git a/paddle/operators/scale_op.cc b/paddle/operators/scale_op.cc index 5745580504fb9bda551f21665bff5c65ae82aeb9..d848be823e602e595f66138f4b5dfd6e38dd85a1 100644 --- a/paddle/operators/scale_op.cc +++ b/paddle/operators/scale_op.cc @@ -75,6 +75,8 @@ namespace ops = paddle::operators; REGISTER_OPERATOR(scale, ops::ScaleOp, ops::ScaleOpMaker, ops::ScaleGradMaker); -REGISTER_OP_CPU_KERNEL(scale, - ops::ScaleKernel, - ops::ScaleKernel); +REGISTER_OP_CPU_KERNEL( + scale, ops::ScaleKernel, + ops::ScaleKernel, + ops::ScaleKernel, + ops::ScaleKernel); diff --git a/paddle/operators/scale_op.cu b/paddle/operators/scale_op.cu index 820fd4e6855bb192ec3292ea6983d5ecae73b6e6..0c7980430f31e2720c7af97aa14cf146c7dfc009 100644 --- a/paddle/operators/scale_op.cu +++ b/paddle/operators/scale_op.cu @@ -14,6 +14,10 @@ #include "paddle/operators/scale_op.h" -REGISTER_OP_GPU_KERNEL( - scale, paddle::operators::ScaleKernel, - paddle::operators::ScaleKernel); +REGISTER_OP_CUDA_KERNEL( + scale, + paddle::operators::ScaleKernel, + paddle::operators::ScaleKernel, + paddle::operators::ScaleKernel, + paddle::operators::ScaleKernel); diff --git a/paddle/operators/scale_op.h b/paddle/operators/scale_op.h index 4931294c9d3661f4c53798bd0895a5cd38ae4501..02a8c97a83f5b6f95bbd4079c453dfdc7b7c1481 100644 --- a/paddle/operators/scale_op.h +++ b/paddle/operators/scale_op.h @@ -19,7 +19,7 @@ namespace paddle { namespace operators { -template +template class ScaleKernel : public framework::OpKernel { public: virtual void Compute(const framework::ExecutionContext& context) const { @@ -31,7 +31,8 @@ class ScaleKernel : public framework::OpKernel { auto eigen_out = framework::EigenVector::Flatten(*tensor); auto eigen_in = framework::EigenVector::Flatten(*in); - auto& dev = context.GetEigenDevice(); + auto& dev = + *context.template device_context().eigen_device(); eigen_out.device(dev) = scale * eigen_in; } }; diff --git a/paddle/operators/scatter_op.cc b/paddle/operators/scatter_op.cc index ce4b794bc35aca0912d89a4ae81a9aa0c73a2104..573bbcd1875c86a2d843b6c5e9c1af4d48a5cb18 100644 --- a/paddle/operators/scatter_op.cc +++ b/paddle/operators/scatter_op.cc @@ -87,10 +87,15 @@ class ScatterOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("Updates", "The updated value of updates op"); AddOutput("Out", "The output of add op"); AddComment(R"DOC( -Scatter Operator by selecting from the first axis, +Scatter Operator. 
-Out = Ref +This operator obtains output by updating the input on selected indices on the first axis: + +$$ +Out = Ref \\ Out[Index] = Ref[Index] + Updates +$$ + )DOC"); } }; diff --git a/paddle/operators/scatter_op.cu b/paddle/operators/scatter_op.cu index 3b32ae2fb77a5d3d4c558742ec469c74d15eee07..6b43a1389f98bf268cb3b70d7e61409f361e0063 100644 --- a/paddle/operators/scatter_op.cu +++ b/paddle/operators/scatter_op.cu @@ -59,5 +59,5 @@ class ScatterGradOpCUDAKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(scatter, ops::ScatterOpCUDAKernel); -REGISTER_OP_GPU_KERNEL(scatter_grad, ops::ScatterGradOpCUDAKernel); +REGISTER_OP_CUDA_KERNEL(scatter, ops::ScatterOpCUDAKernel); +REGISTER_OP_CUDA_KERNEL(scatter_grad, ops::ScatterGradOpCUDAKernel); diff --git a/paddle/operators/send_op.cc b/paddle/operators/send_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..a3059847f2d420359b347e3a5d514d8a3829a4e2 --- /dev/null +++ b/paddle/operators/send_op.cc @@ -0,0 +1,84 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include + +#include "paddle/framework/data_type.h" +#include "paddle/framework/framework.pb.h" +#include "paddle/framework/lod_tensor.h" +#include "paddle/framework/op_registry.h" + +#include "paddle/operators/detail/send_recv_impl.h" +#include "paddle/operators/detail/simple_block_queue.h" + +namespace paddle { +namespace operators { + +// TODO(typhoonzero): this is a simple implementation which only send +// one tensor +class SendOp : public framework::OperatorBase { + public: + SendOp(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorBase(type, inputs, outputs, attrs) { + // init client when the operator is created at runtime. + if (!client_) { + std::string endpoint = Attr("endpoint"); + client_.reset(new detail::RPCClient( + grpc::CreateChannel(endpoint, grpc::InsecureChannelCredentials()))); + // TODO(typhoonzero): how to call InitVariables + } + } + void Run(const framework::Scope &scope, + const platform::DeviceContext &dev_ctx) const override { + auto iname = Input("X"); + auto oname = Output("Out"); + // TODO(typhoonzero): currently it's non-blocking, + // should block until server responds. 
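Returning to the update rule in the scatter_op.cc comment above: Out = Ref followed by Out[Index] = Ref[Index] + Updates only rewrites the rows selected by Index along the first axis. A minimal standalone sketch, with illustrative values:

```cpp
#include <cstdio>
#include <vector>

// Standalone illustration of the Scatter rule documented above:
// copy Ref into Out, then add Updates[i] into the row Index[i].
int main() {
  std::vector<std::vector<float>> ref = {{1, 1}, {2, 2}, {3, 3}};
  std::vector<int> index = {0, 2};
  std::vector<std::vector<float>> updates = {{10, 10}, {20, 20}};

  auto out = ref;  // Out = Ref
  for (size_t i = 0; i < index.size(); ++i)
    for (size_t d = 0; d < out[index[i]].size(); ++d)
      out[index[i]][d] = ref[index[i]][d] + updates[i][d];  // Out[Index] = Ref[Index] + Updates

  std::printf("%g %g %g\n", out[0][0], out[1][0], out[2][0]);  // 11 2 23
  return 0;
}
```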
+ bool ret = client_->SendVariable(scope, iname, oname); + if (!ret) { + LOG(ERROR) << "send variable error"; + } + } + + protected: + std::shared_ptr client_{nullptr}; +}; + +class SendOpMaker : public framework::OpProtoAndCheckerMaker { + public: + SendOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "(Tensor) Input tensor to be sent"); + AddOutput("Out", "(Tensor) Output fetched from server"); + AddComment(R"DOC( +Send operator + +This operator will send tensor to recv_op +)DOC"); + AddAttr("endpoint", + "(string, default 127.0.0.1:6164)" + "IP address of the server to connect to.") + .SetDefault("127.0.0.1:6164") + .AddCustomChecker([](const std::string &ip) { return !ip.empty(); }); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(send, ops::SendOp, ops::SendOpMaker); diff --git a/paddle/operators/send_recv_op_test.cc b/paddle/operators/send_recv_op_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..3e2e2051afacb748877e3b0c3dec8d6662ac4e72 --- /dev/null +++ b/paddle/operators/send_recv_op_test.cc @@ -0,0 +1,128 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +// TODO(typhoonzero): add python bindings for this test as +// a RemoteOptimizer. + +#include +#include + +#include "gtest/gtest.h" +#include "paddle/framework/op_registry.h" +#include "paddle/framework/operator.h" +#include "paddle/framework/program_desc.h" + +USE_NO_KERNEL_OP(send); +USE_NO_KERNEL_OP(recv); +USE_OP(sum); + +// global for simplicity.
+std::unique_ptr recv_op; + +void InitTensorsInScope(paddle::framework::Scope &scope, + paddle::platform::CPUPlace &place) { + paddle::platform::CPUDeviceContext ctx(place); + auto var = scope.Var("X"); + auto tensor = var->GetMutable(); + tensor->Resize({10, 10}); + float *expect = tensor->mutable_data(place); + for (int64_t i = 0; i < tensor->numel(); ++i) { + expect[i] = static_cast(i); + } + + auto out_var = scope.Var("Out"); + auto out_tensor = out_var->GetMutable(); + out_tensor->Resize({10, 10}); + tensor->mutable_data(place); // allocate +} + +void AddOp(const std::string &type, + const paddle::framework::VariableNameMap &inputs, + const paddle::framework::VariableNameMap &outputs, + paddle::framework::AttributeMap attrs, + paddle::framework::BlockDescBind *block) { + // insert output + for (auto kv : outputs) { + for (auto v : kv.second) { + auto var = block->Var(v); + var->SetDataType(paddle::framework::DataType::FP32); + } + } + + // insert op + auto op = block->AppendOp(); + op->SetType(type); + for (auto &kv : inputs) { + op->SetInput(kv.first, kv.second); + } + for (auto &kv : outputs) { + op->SetOutput(kv.first, kv.second); + } + op->SetAttrMap(attrs); +} + +void StartServerNet() { + paddle::framework::Scope scope; + paddle::platform::CPUPlace place; + InitTensorsInScope(scope, place); + + // sub program run in recv_op, for simple test we use sum + paddle::framework::ProgramDescBind program; + paddle::framework::BlockDescBind *block = program.MutableBlock(0); + // X for server side tensors, RX for received tensers, must be of same shape. + AddOp("sum", {{"X", {"X", "RX"}}}, {{"Out", {"Out"}}}, {}, block); + + paddle::framework::AttributeMap attrs; + attrs.insert({"endpoint", std::string("127.0.0.1:6174")}); + std::string program_proto; + PADDLE_ENFORCE(program.Proto()->SerializeToString(&program_proto)); + + attrs.insert({"OptimizeProgram", program_proto}); + recv_op = paddle::framework::OpRegistry::CreateOp("recv", {{"RX", {"RX"}}}, + {{"Out", {"Out"}}}, attrs); + paddle::platform::CPUDeviceContext ctx(place); + recv_op->Run(scope, ctx); +} + +TEST(SendRecvOp, CPU) { + std::thread server_thread(StartServerNet); + sleep(5); // wait server to start + // local net + paddle::framework::Scope scope; + paddle::platform::CPUPlace place; + InitTensorsInScope(scope, place); + + paddle::framework::AttributeMap attrs; + attrs.insert({"endpoint", std::string("127.0.0.1:6174")}); + + auto send_op = paddle::framework::OpRegistry::CreateOp( + "send", {{"X", {"X"}}}, {{"Out", {"Out"}}}, attrs); + paddle::platform::CPUDeviceContext ctx(place); + send_op->Run(scope, ctx); + + auto in_var = scope.Var("X"); + auto tensor = in_var->GetMutable(); + float *expected = tensor->data(); + + auto out_var = scope.Var("Out"); + auto target = out_var->GetMutable(); + // send fail cause output is none. + EXPECT_NE(target->memory_size(), size_t(0)); + float *actual = target->data(); + for (int64_t i = 0; i < target->numel(); ++i) { + EXPECT_EQ(expected[i] * 2, actual[i]); + } + recv_op.reset(); // dtor can shutdown and join server thread. 
+ server_thread.join(); +} diff --git a/paddle/operators/seq_expand_op.cc b/paddle/operators/seq_expand_op.cc index b862056ad400290a60e8a75a23dceeb1d4422ea4..ede9754697429a4d24c51cf494b0ea8f4e408b44 100644 --- a/paddle/operators/seq_expand_op.cc +++ b/paddle/operators/seq_expand_op.cc @@ -148,8 +148,9 @@ class SeqExpandOpGrad : public framework::OperatorWithKernel { namespace ops = paddle::operators; REGISTER_OP(seq_expand, ops::SeqExpandOp, ops::SeqExpandOpMaker, seq_expand_grad, ops::SeqExpandOpGrad); -REGISTER_OP_CPU_KERNEL(seq_expand, - ops::SeqExpandKernel); +REGISTER_OP_CPU_KERNEL( + seq_expand, + ops::SeqExpandKernel); REGISTER_OP_CPU_KERNEL( seq_expand_grad, - ops::SeqExpandGradKernel); + ops::SeqExpandGradKernel); diff --git a/paddle/operators/seq_expand_op.cu b/paddle/operators/seq_expand_op.cu index f1e4b82a76e628c4d9fb83bc93f3dcfd2f98ea5b..8e67ce9ccb29497a957508a9ecdc6b810a7de543 100644 --- a/paddle/operators/seq_expand_op.cu +++ b/paddle/operators/seq_expand_op.cu @@ -16,8 +16,9 @@ #include "paddle/operators/seq_expand_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(seq_expand, - ops::SeqExpandKernel); -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( + seq_expand, + ops::SeqExpandKernel); +REGISTER_OP_CUDA_KERNEL( seq_expand_grad, - ops::SeqExpandGradKernel); + ops::SeqExpandGradKernel); diff --git a/paddle/operators/seq_expand_op.h b/paddle/operators/seq_expand_op.h index 4ef0d02cf85c43e95335660be65a67df66b4f55c..fbee0db454f9701e3f58a41008efd24e728d0600 100644 --- a/paddle/operators/seq_expand_op.h +++ b/paddle/operators/seq_expand_op.h @@ -23,7 +23,7 @@ namespace operators { using LoDTensor = framework::LoDTensor; -template +template class SeqExpandKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -37,7 +37,8 @@ class SeqExpandKernel : public framework::OpKernel { "The size of last lod level in Input(Y)" "must be equal to dims[0] of Input(X)."); out->set_lod(y->lod()); - auto place = context.GetEigenDevice(); + auto* place = + context.template device_context().eigen_device(); size_t element_len = framework::product(x_dims) / x_dims[0]; T* out_data = out->mutable_data(context.GetPlace()); auto out_starts = out->lod().back(); @@ -50,7 +51,7 @@ class SeqExpandKernel : public framework::OpKernel { Eigen::TensorMap> out_t(out_data, scale, element_len); Eigen::array cast({{scale, 1}}); - out_t.device(place) = x_t.broadcast(cast); + out_t.device(*place) = x_t.broadcast(cast); x_data += element_len; out_data += element_len * scale; } @@ -69,7 +70,7 @@ class SeqExpandKernel : public framework::OpKernel { * Grad(X).lod = Input(X).lod * * */ -template +template class SeqExpandGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -89,8 +90,9 @@ class SeqExpandGradKernel : public framework::OpKernel { d_out_t(d_out_data, static_cast(repeat), element_len); Eigen::TensorMap> d_x_t(d_x_data, static_cast(element_len)); - auto place = context.GetEigenDevice(); - d_x_t.device(place) = d_out_t.sum(Eigen::array({{0}})); + auto place = + context.template device_context().eigen_device(); + d_x_t.device(*place) = d_out_t.sum(Eigen::array({{0}})); d_out_data += (repeat * element_len); d_x_data += element_len; } diff --git a/paddle/operators/sequence_concat_op.cc b/paddle/operators/sequence_concat_op.cc index d1de0b444712a8c304c33bd194e306dfe3c41f02..9c7e5456e8238af70f920aaaa9cc652d5d12d3e9 100644 --- 
a/paddle/operators/sequence_concat_op.cc +++ b/paddle/operators/sequence_concat_op.cc @@ -129,7 +129,7 @@ REGISTER_OP(sequence_concat, ops::SequenceConcatOp, ops::SequenceConcatOpMaker, sequence_concat_grad, ops::SequenceConcatGradOp); REGISTER_OP_CPU_KERNEL( sequence_concat, - ops::SequenceConcatOpKernel); + ops::SequenceConcatOpKernel); REGISTER_OP_CPU_KERNEL( sequence_concat_grad, - ops::SequenceConcatGradOpKernel); + ops::SequenceConcatGradOpKernel); diff --git a/paddle/operators/sequence_concat_op.cu.cc b/paddle/operators/sequence_concat_op.cu.cc index 9ca99c2258f547e6f9c23be0d394bc3ea2bb6678..144bdb5af635b0cb75bcd1f654700041186dae46 100644 --- a/paddle/operators/sequence_concat_op.cu.cc +++ b/paddle/operators/sequence_concat_op.cu.cc @@ -15,9 +15,9 @@ limitations under the License. */ #include "paddle/operators/sequence_concat_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( sequence_concat, - ops::SequenceConcatOpKernel); -REGISTER_OP_GPU_KERNEL( - sequence_concat_grad, - ops::SequenceConcatGradOpKernel); + ops::SequenceConcatOpKernel); +REGISTER_OP_CUDA_KERNEL(sequence_concat_grad, + ops::SequenceConcatGradOpKernel< + paddle::platform::CUDADeviceContext, float>); diff --git a/paddle/operators/sequence_concat_op.h b/paddle/operators/sequence_concat_op.h index 09212070aa90b0f080f6140a312924229162aaec..8445224f46aba6110280783c9080ed4691266b8b 100644 --- a/paddle/operators/sequence_concat_op.h +++ b/paddle/operators/sequence_concat_op.h @@ -59,7 +59,7 @@ LoD ConcatLoD(const std::vector ins, const size_t level) { return out_lod; } -template +template class SequenceConcatOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -119,7 +119,7 @@ class SequenceConcatOpKernel : public framework::OpKernel { } }; -template +template class SequenceConcatGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { diff --git a/paddle/operators/sequence_conv_op.cc b/paddle/operators/sequence_conv_op.cc index c5533732d44737bb8cc71fd8ac46f3c36c72ada1..f5c4f1c13331f45183d2810a95f773ad52aca13b 100644 --- a/paddle/operators/sequence_conv_op.cc +++ b/paddle/operators/sequence_conv_op.cc @@ -179,9 +179,10 @@ REGISTER_OP(sequence_conv, ops::SequenceConvOp, ops::SequenceConvOpMaker, sequence_conv_grad, ops::SequenceConvGradOp); REGISTER_OP_CPU_KERNEL( - sequence_conv, ops::SequenceConvKernel, - ops::SequenceConvKernel); + sequence_conv, + ops::SequenceConvKernel, + ops::SequenceConvKernel); REGISTER_OP_CPU_KERNEL( sequence_conv_grad, - ops::SequenceConvGradKernel, - ops::SequenceConvGradKernel); + ops::SequenceConvGradKernel, + ops::SequenceConvGradKernel); diff --git a/paddle/operators/sequence_conv_op.cu.cc b/paddle/operators/sequence_conv_op.cu.cc index c8136dbcb35be4f1236dddc3d24546f9d91670c8..eacba79ace3e60a408d5f5e21a6fe2658da56ca7 100644 --- a/paddle/operators/sequence_conv_op.cu.cc +++ b/paddle/operators/sequence_conv_op.cu.cc @@ -15,10 +15,11 @@ #include "paddle/operators/sequence_conv_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( - sequence_conv, ops::SequenceConvKernel, - ops::SequenceConvKernel); -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( + sequence_conv, + ops::SequenceConvKernel, + ops::SequenceConvKernel); +REGISTER_OP_CUDA_KERNEL( sequence_conv_grad, - ops::SequenceConvGradKernel, - ops::SequenceConvGradKernel); + ops::SequenceConvGradKernel, + ops::SequenceConvGradKernel); diff --git 
a/paddle/operators/sequence_conv_op.h b/paddle/operators/sequence_conv_op.h index b8fbe2647c4338a2fa16aa655ebab64dd8d5417d..bb584b7bfa5fb8f6eb0a452468d24ca034be6f1b 100644 --- a/paddle/operators/sequence_conv_op.h +++ b/paddle/operators/sequence_conv_op.h @@ -23,7 +23,7 @@ namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; -template +template class SequenceConvKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -56,21 +56,23 @@ class SequenceConvKernel : public framework::OpKernel { Tensor col; col.mutable_data(col_shape, context.GetPlace()); // Because if padding_trainable is false, padding data should be zeros. - math::SetConstant set_zero; - set_zero(context.device_context(), &col, static_cast(0)); + math::SetConstant set_zero; + auto& dev_ctx = context.template device_context(); + set_zero(dev_ctx, &col, static_cast(0)); - math::ContextProjectFunctor seq_project_functor; + math::ContextProjectFunctor seq_project_functor; - seq_project_functor(context.device_context(), *in, *padding_data, - padding_trainable, context_start, context_length, - context_stride, up_pad, down_pad, &col); + seq_project_functor(dev_ctx, *in, *padding_data, padding_trainable, + context_start, context_length, context_stride, up_pad, + down_pad, &col); - math::matmul(context.device_context(), col, false, filter, false, - static_cast(1.0), out, static_cast(0.0)); + math::matmul(dev_ctx, col, false, filter, false, + static_cast(1.0), out, + static_cast(0.0)); } }; -template +template class SequenceConvGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -95,7 +97,8 @@ class SequenceConvGradKernel : public framework::OpKernel { int down_pad = std::max(0, context_start + context_length - 1); int sequence_width = static_cast(in->dims()[1]); - math::SetConstant set_zero; + math::SetConstant set_zero; + auto& dev_ctx = context.template device_context(); // use col_shape in the im2col calculation framework::DDim col_shape = {in->dims()[0], sequence_width * context_length}; @@ -104,38 +107,36 @@ class SequenceConvGradKernel : public framework::OpKernel { if (in_g || filter_g || (padding_trainable && padding_data_g)) { col.mutable_data(col_shape, context.GetPlace()); // Because if padding_trainable is false, padding data should be zeros. 
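// The hunks above and below all apply the same mechanical conversion: kernels that
// were templated on a Place (and fetched the Eigen device via GetEigenDevice<Place>()
// or used context.device_context()) become templated on a DeviceContext, and GPU
// registrations move from REGISTER_OP_GPU_KERNEL to REGISTER_OP_CUDA_KERNEL.
// A minimal sketch of the resulting kernel shape, modelled on sign_op.h further below
// (illustrative only; "scale_two"/ScaleTwoKernel are made-up names, not part of the patch):

#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"

namespace paddle {
namespace operators {

template <typename DeviceContext, typename T>
class ScaleTwoKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* in = context.Input<framework::Tensor>("X");
    auto* out = context.Output<framework::Tensor>("Out");
    out->mutable_data<T>(context.GetPlace());

    auto eigen_in = framework::EigenVector<T>::Flatten(*in);
    auto eigen_out = framework::EigenVector<T>::Flatten(*out);
    // The Eigen device now comes from the templated device-context accessor
    // instead of context.GetEigenDevice<Place>().
    auto& place =
        *context.template device_context<DeviceContext>().eigen_device();
    eigen_out.device(place) = eigen_in * static_cast<T>(2);
  }
};

}  // namespace operators
}  // namespace paddle

// Registration then names the device context rather than the place:
//   REGISTER_OP_CPU_KERNEL(scale_two,
//       ops::ScaleTwoKernel<paddle::platform::CPUDeviceContext, float>);
//   REGISTER_OP_CUDA_KERNEL(scale_two,
//       ops::ScaleTwoKernel<paddle::platform::CUDADeviceContext, float>);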
- set_zero(context.device_context(), &col, static_cast(0)); - math::matmul(context.device_context(), *out_g, false, *filter, - true, T(1.0), &col, T(1.0)); + set_zero(dev_ctx, &col, static_cast(0)); + math::matmul(dev_ctx, *out_g, false, *filter, true, + T(1.0), &col, T(1.0)); } - math::ContextProjectFunctor seq_project_functor; - math::ContextProjectGradFunctor seq_project_grad_functor; + math::ContextProjectFunctor seq_project_functor; + math::ContextProjectGradFunctor seq_project_grad_functor; if (in_g) { in_g->mutable_data(context.GetPlace()); in_g->set_lod(in->lod()); - set_zero(context.device_context(), in_g, static_cast(0)); + set_zero(dev_ctx, in_g, static_cast(0)); - seq_project_grad_functor(context.device_context(), *in_g, - padding_trainable, context_start, context_length, - context_stride, up_pad, down_pad, false, true, - padding_data_g, &col); + seq_project_grad_functor(dev_ctx, *in_g, padding_trainable, context_start, + context_length, context_stride, up_pad, down_pad, + false, true, padding_data_g, &col); } if (padding_trainable && padding_data_g) { padding_data_g->mutable_data(context.GetPlace()); - set_zero(context.device_context(), padding_data_g, static_cast(0)); + set_zero(dev_ctx, padding_data_g, static_cast(0)); LoDTensor* input = const_cast(in); - seq_project_grad_functor(context.device_context(), *input, - padding_trainable, context_start, context_length, - context_stride, up_pad, down_pad, true, false, - padding_data_g, &col); + seq_project_grad_functor( + dev_ctx, *input, padding_trainable, context_start, context_length, + context_stride, up_pad, down_pad, true, false, padding_data_g, &col); } if (filter_g) { filter_g->mutable_data(context.GetPlace()); - set_zero(context.device_context(), filter_g, static_cast(0)); + set_zero(dev_ctx, filter_g, static_cast(0)); Tensor filter_grad = *filter_g; LoDTensor out_grad = *out_g; @@ -145,12 +146,12 @@ class SequenceConvGradKernel : public framework::OpKernel { padding_data = context.Input("PaddingData"); } - seq_project_functor(context.device_context(), *in, *padding_data, - padding_trainable, context_start, context_length, - context_stride, up_pad, down_pad, &col); + seq_project_functor(dev_ctx, *in, *padding_data, padding_trainable, + context_start, context_length, context_stride, up_pad, + down_pad, &col); - math::matmul(context.device_context(), col, true, out_grad, - false, T(1.0), &filter_grad, T(1.0)); + math::matmul(dev_ctx, col, true, out_grad, false, + T(1.0), &filter_grad, T(1.0)); } } }; diff --git a/paddle/operators/sequence_pool_op.cc b/paddle/operators/sequence_pool_op.cc index 2a000ac60b176737277605c3ac812ea65a0e03fc..3526e45a1b6565bc21413d381d15c02f08c587bd 100644 --- a/paddle/operators/sequence_pool_op.cc +++ b/paddle/operators/sequence_pool_op.cc @@ -58,12 +58,12 @@ Sequence Pool Operator. The SequencePoolOp pools features of all time-steps of each instance. It supports six pooling types: -1. AVERAGE: Out[i] = $$avg(X_i)$$ -2. SUM: Out[i] = $$\sum_jX_{ij}$$ -3. SQRT: Out[i] = $$\frac{\sum_jX_{ij}}{\sqrt{len(X_i)}}$$ +1. AVERAGE: $$Out[i] = \frac{\sum_i X_i}{N}$$ +2. SUM: $$Out[i] = \sum_jX_{ij}$$ +3. SQRT: $$Out[i] = \frac{\sum_jX_{ij}}{\sqrt{len(X_i)}}$$ 4. LAST: Out[i] = last instance in i-th sequence X[i] 5. FIRST: Out[i] = first instance in i-th sequence X[i] -6. MAX: Out[i] = $$max(X_i)$$ +6. 
MAX: $$Out[i] = max(X_i)$$ The following example explains how this works: For a mini-batch of 3 variable-length sentences, @@ -104,6 +104,7 @@ class SequencePoolGradOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(og_dims[i], x_dims[i], "The dimension mismatch."); } ctx->SetOutputDim(framework::GradVarName("X"), x_dims); + ctx->ShareLoD("X", framework::GradVarName("X")); } protected: @@ -122,7 +123,8 @@ namespace ops = paddle::operators; REGISTER_OP(sequence_pool, ops::SequencePoolOp, ops::SequencePoolOpMaker, sequence_pool_grad, ops::SequencePoolGradOp); REGISTER_OP_CPU_KERNEL( - sequence_pool, ops::SequencePoolKernel); + sequence_pool, + ops::SequencePoolKernel); REGISTER_OP_CPU_KERNEL( sequence_pool_grad, - ops::SequencePoolGradKernel); + ops::SequencePoolGradKernel); diff --git a/paddle/operators/sequence_pool_op.cu b/paddle/operators/sequence_pool_op.cu index 66850772d501f873cf754205c19e9d0c0090370a..fcd65084353744dc836ff1dc5a3aa4b03a205130 100644 --- a/paddle/operators/sequence_pool_op.cu +++ b/paddle/operators/sequence_pool_op.cu @@ -17,8 +17,9 @@ #include "paddle/operators/sequence_pool_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( - sequence_pool, ops::SequencePoolKernel); -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( + sequence_pool, + ops::SequencePoolKernel); +REGISTER_OP_CUDA_KERNEL( sequence_pool_grad, - ops::SequencePoolGradKernel); + ops::SequencePoolGradKernel); diff --git a/paddle/operators/sequence_pool_op.h b/paddle/operators/sequence_pool_op.h index 7f136d8cf0e1eaae7b4de32988b60ae8a5034cc6..7519aa1d7208b9832f7a3d3afbc59a2eb4e8e13a 100644 --- a/paddle/operators/sequence_pool_op.h +++ b/paddle/operators/sequence_pool_op.h @@ -30,7 +30,7 @@ template using EigenMatrix = framework::EigenMatrix; -template +template class SequencePoolKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -54,17 +54,18 @@ class SequencePoolKernel : public framework::OpKernel { auto lod_level_0 = lod[0]; out->mutable_data(context.GetPlace()); - + auto& dev_ctx = context.template device_context(); if (pooltype == "MAX") { - math::MaxSeqPoolFunctor max_pool; + math::MaxSeqPoolFunctor max_pool; auto* index = context.Output("MaxIndex"); index->Resize({dims}); index->mutable_data(context.GetPlace()); - max_pool(context.device_context(), *in, out, index); + max_pool(dev_ctx, *in, out, index); return; } - auto place = context.GetEigenDevice(); + auto& place = + *context.template device_context().eigen_device(); for (int i = 0; i < static_cast(lod_level_0.size()) - 1; ++i) { Tensor in_t = in->Slice(static_cast(lod_level_0[i]), static_cast(lod_level_0[i + 1])); @@ -91,7 +92,7 @@ class SequencePoolKernel : public framework::OpKernel { } }; -template +template class SequencePoolGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -105,20 +106,23 @@ class SequencePoolGradKernel : public framework::OpKernel { int64_t w = in->numel() / dims[0]; in_g->mutable_data(context.GetPlace()); + auto& dev_ctx = context.template device_context(); if (pooltype == "MAX") { - math::MaxSeqPoolGradFunctor max_pool_grad; + math::MaxSeqPoolGradFunctor max_pool_grad; auto* index = context.Input("MaxIndex"); - max_pool_grad(context.device_context(), *out_g, *index, in_g); + max_pool_grad(dev_ctx, *out_g, *index, in_g); return; } if (pooltype == "LAST" || pooltype == "FIRST") { // set X@Grad be zero at first when pooltype is LAST/FIRST - 
math::SetConstant functor; - functor(context.device_context(), in_g, 0); + math::SetConstant functor; + functor(dev_ctx, in_g, 0); } - auto place = context.GetEigenDevice(); + auto& place = + *context.template device_context().eigen_device(); + for (int i = 0; i < static_cast(lod.size()) - 1; ++i) { auto in_g_t = in_g->Slice(static_cast(lod[i]), static_cast(lod[i + 1])); diff --git a/paddle/operators/sequence_slice_op.cc b/paddle/operators/sequence_slice_op.cc index 255683a572c0e8d54791cb0c905d85239920d992..481db8f9e548de68c102210035d4ff037ab56261 100644 --- a/paddle/operators/sequence_slice_op.cc +++ b/paddle/operators/sequence_slice_op.cc @@ -125,7 +125,7 @@ REGISTER_OP(sequence_slice, ops::SequenceSliceOp, ops::SequenceSliceOpMaker, sequence_slice_grad, ops::SequenceSliceGradOp); REGISTER_OP_CPU_KERNEL( sequence_slice, - ops::SequenceSliceOpKernel); + ops::SequenceSliceOpKernel); REGISTER_OP_CPU_KERNEL( sequence_slice_grad, - ops::SequenceSliceGradOpKernel); + ops::SequenceSliceGradOpKernel); diff --git a/paddle/operators/sequence_slice_op.cu b/paddle/operators/sequence_slice_op.cu index a9f59dadba74d900fa5cc0601fb5b264ea19e34d..43a21d619f4116874c329eb968f09dc230975c05 100755 --- a/paddle/operators/sequence_slice_op.cu +++ b/paddle/operators/sequence_slice_op.cu @@ -15,9 +15,9 @@ limitations under the License. */ #include "paddle/operators/sequence_slice_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( sequence_slice, - ops::SequenceSliceOpKernel); -REGISTER_OP_GPU_KERNEL( + ops::SequenceSliceOpKernel); +REGISTER_OP_CUDA_KERNEL( sequence_slice_grad, - ops::SequenceSliceGradOpKernel); + ops::SequenceSliceGradOpKernel); diff --git a/paddle/operators/sequence_slice_op.h b/paddle/operators/sequence_slice_op.h index 6411e0a46630beb0a9abb6aa5e517978b25a5254..14bcaebbb402cb47507f1bf60035bc2d37f9baf7 100644 --- a/paddle/operators/sequence_slice_op.h +++ b/paddle/operators/sequence_slice_op.h @@ -39,7 +39,7 @@ inline LoD SequenceSliceLoD(const T& in, const int64_t* offset_data, return out_lod; } -template +template class SequenceSliceOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -54,10 +54,10 @@ class SequenceSliceOpKernel : public framework::OpKernel { PADDLE_ENFORCE_EQ(lod.size(), 1UL, "Only support one level sequence now."); PADDLE_ENFORCE_EQ( n, static_cast(length->dims()[0]), - "The size of input-sequence and length-array should be the same") + "The size of input-sequence and length-array should be the same"); PADDLE_ENFORCE_EQ( n, static_cast(offset->dims()[0]), - "The size of input-sequence and offset-array should be the same") + "The size of input-sequence and offset-array should be the same"); const int64_t* offset_data = offset->data(); const int64_t* length_data = length->data(); @@ -78,11 +78,11 @@ class SequenceSliceOpKernel : public framework::OpKernel { for (size_t i = 0; i < n; ++i) { PADDLE_ENFORCE_LT(0, offset_data[i], - "The offset[%d] must greater than zero.", i) + "The offset[%d] must greater than zero.", i); PADDLE_ENFORCE_LT(0, length_data[i], - "The length[%d] must greater than zero.", i) + "The length[%d] must greater than zero.", i); PADDLE_ENFORCE_LT(lod[0][i] + offset_data[i] + length_data[i], - lod[0][i + 1], "The target tensor's length overflow.") + lod[0][i + 1], "The target tensor's length overflow."); } out->mutable_data(ctx.GetPlace()); @@ -108,7 +108,7 @@ class SequenceSliceOpKernel : public framework::OpKernel { } }; -template +template class 
SequenceSliceGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -143,8 +143,9 @@ class SequenceSliceGradOpKernel : public framework::OpKernel { if (x_grad) { x_grad->mutable_data(ctx.GetPlace()); x_grad->set_lod(in->lod()); - math::SetConstant set_zero; - set_zero(ctx.device_context(), x_grad, static_cast(0)); + math::SetConstant set_zero; + set_zero(ctx.template device_context(), x_grad, + static_cast(0)); auto out_grad_stride = framework::stride(out_grad->dims()); diff --git a/paddle/operators/sequence_softmax_op.cc b/paddle/operators/sequence_softmax_op.cc index 32c15025660ebf0baf317e269a33c047e6844219..37d5452e6ba59411f9ab2e1460fc8584583f0321 100644 --- a/paddle/operators/sequence_softmax_op.cc +++ b/paddle/operators/sequence_softmax_op.cc @@ -99,7 +99,7 @@ REGISTER_OP(sequence_softmax, ops::SequenceSoftmaxOp, ops::SequenceSoftmaxGradOp); REGISTER_OP_CPU_KERNEL( sequence_softmax, - ops::SequenceSoftmaxKernel); + ops::SequenceSoftmaxKernel); REGISTER_OP_CPU_KERNEL( sequence_softmax_grad, - ops::SequenceSoftmaxGradKernel); + ops::SequenceSoftmaxGradKernel); diff --git a/paddle/operators/sequence_softmax_op.cu.cc b/paddle/operators/sequence_softmax_op.cu.cc index 7023795a3b5777c250a9323a304a54849d763e9e..5f65b4daf97cf025b975d2d95212375b5fca01f8 100644 --- a/paddle/operators/sequence_softmax_op.cu.cc +++ b/paddle/operators/sequence_softmax_op.cu.cc @@ -15,9 +15,9 @@ limitations under the License. */ #include "paddle/operators/sequence_softmax_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( sequence_softmax, - ops::SequenceSoftmaxKernel) -REGISTER_OP_GPU_KERNEL( + ops::SequenceSoftmaxKernel) +REGISTER_OP_CUDA_KERNEL( sequence_softmax_grad, - ops::SequenceSoftmaxGradKernel); + ops::SequenceSoftmaxGradKernel); diff --git a/paddle/operators/sequence_softmax_op.h b/paddle/operators/sequence_softmax_op.h index 1b68dd0662ddfffc57b187945fe131e202c55174..e889e88cb34719b6648e3032754645fbb2807741 100644 --- a/paddle/operators/sequence_softmax_op.h +++ b/paddle/operators/sequence_softmax_op.h @@ -23,7 +23,7 @@ namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; -template +template class SequenceSoftmaxKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -52,12 +52,13 @@ class SequenceSoftmaxKernel : public framework::OpKernel { framework::DDim dims_i = framework::make_ddim({1UL, end_pos - start_pos}); x_i.Resize(dims_i); out_i.Resize(dims_i); - math::SoftmaxFunctor()(ctx.device_context(), &x_i, &out_i); + math::SoftmaxFunctor()( + ctx.template device_context(), &x_i, &out_i); } } }; -template +template class SequenceSoftmaxGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -83,8 +84,9 @@ class SequenceSoftmaxGradKernel : public framework::OpKernel { out_i.Resize(dims_i); out_grad_i.Resize(dims_i); x_grad_i.Resize(dims_i); - math::SoftmaxGradFunctor()(ctx.device_context(), &out_i, - &out_grad_i, &x_grad_i); + math::SoftmaxGradFunctor()( + ctx.template device_context(), &out_i, &out_grad_i, + &x_grad_i); } } }; diff --git a/paddle/operators/sgd_op.cc b/paddle/operators/sgd_op.cc index 72f4e4d5cbcd692423fa2a3e9ec8e7033b552c3c..121bf60b27c62c1b0dd4c34c12962b7098e29ae2 100644 --- a/paddle/operators/sgd_op.cc +++ b/paddle/operators/sgd_op.cc @@ -55,15 +55,15 @@ SGD operator This operator implements one step of 
the stochastic gradient descent algorithm. -$$param_out = param - learning_rate * grad$$ +$$param\_out = param - learning\_rate * grad$$ )DOC"); } }; template -struct SparseSGDFunctor { - void operator()(const platform::DeviceContext& context, +struct SparseSGDFunctor { + void operator()(const platform::CPUDeviceContext& context, const framework::SelectedRows& input, const framework::Tensor& learning_rate, framework::Tensor* output) { @@ -90,13 +90,14 @@ struct SparseSGDFunctor { } }; -template struct SparseSGDFunctor; -template struct SparseSGDFunctor; +template struct SparseSGDFunctor; +template struct SparseSGDFunctor; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_WITHOUT_GRADIENT(sgd, ops::SGDOp, ops::SGDOpMaker); -REGISTER_OP_CPU_KERNEL(sgd, ops::SGDOpKernel, - ops::SGDOpKernel); +REGISTER_OP_CPU_KERNEL( + sgd, ops::SGDOpKernel, + ops::SGDOpKernel); diff --git a/paddle/operators/sgd_op.cu b/paddle/operators/sgd_op.cu index 7b6c5ec30628b521b594ceaa3b7f1e0e03e497e4..a3c0db7e50ecaabd6d4b83c43e5436e6be491676 100644 --- a/paddle/operators/sgd_op.cu +++ b/paddle/operators/sgd_op.cu @@ -41,8 +41,8 @@ __global__ void SparseSGDFunctorKernel(const T* selected_rows, } // namespace template -struct SparseSGDFunctor { - void operator()(const platform::DeviceContext& context, +struct SparseSGDFunctor { + void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& input, const framework::Tensor& learning_rate, framework::Tensor* output) { @@ -62,21 +62,19 @@ struct SparseSGDFunctor { const int block_size = 256; dim3 threads(block_size, 1); dim3 grid(1, in_rows.size()); - SparseSGDFunctorKernel< - T, 256><<(context) - .stream()>>>(in_data, in_rows.data(), - learning_rate.data(), out_data, - in_row_numel); + SparseSGDFunctorKernel<<>>( + in_data, in_rows.data(), learning_rate.data(), out_data, + in_row_numel); } }; -template struct SparseSGDFunctor; -template struct SparseSGDFunctor; +template struct SparseSGDFunctor; +template struct SparseSGDFunctor; } // namespace operators } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(sgd, ops::SGDOpKernel, - ops::SGDOpKernel); +REGISTER_OP_CUDA_KERNEL( + sgd, ops::SGDOpKernel, + ops::SGDOpKernel); diff --git a/paddle/operators/sgd_op.h b/paddle/operators/sgd_op.h index 78b595fc6c63d775b627f23cafa9458f1dadd4e5..c920025a91cd0b68019bcb05558398093f31e206 100644 --- a/paddle/operators/sgd_op.h +++ b/paddle/operators/sgd_op.h @@ -20,15 +20,15 @@ limitations under the License. */ namespace paddle { namespace operators { -template +template struct SparseSGDFunctor { - void operator()(const platform::DeviceContext& context, + void operator()(const DeviceContext& context, const framework::SelectedRows& input, const framework::Tensor& learning_rate, framework::Tensor* output); }; -template +template class SGDOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -46,7 +46,8 @@ class SGDOpKernel : public framework::OpKernel { auto g = framework::EigenVector::Flatten(*grad); auto o = framework::EigenVector::Flatten(*param_out); auto lr = framework::EigenVector::Flatten(*learning_rate); - auto place = ctx.GetEigenDevice(); + auto& place = + *ctx.template device_context().eigen_device(); Eigen::DSizes grad_dsize(grad->numel()); o.device(place) = p - lr.broadcast(grad_dsize) * g; @@ -56,8 +57,9 @@ class SGDOpKernel : public framework::OpKernel { // It's better to find a more elegant solution. 
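// Editorial sketch (not part of the patch): the dense branch above realises the update
// documented in SGDOpMaker, param_out = param - learning_rate * grad, only expressed
// through Eigen on the chosen device. Element by element it amounts to:

#include <cstddef>

inline void SgdUpdate(const float* param, const float* grad, float learning_rate,
                      float* param_out, std::size_t n) {
  for (std::size_t i = 0; i < n; ++i) {
    // One SGD step per element; the scalar learning rate is applied to every
    // gradient entry, matching lr.broadcast(grad_dsize) in the kernel above.
    param_out[i] = param[i] - learning_rate * grad[i];
  }
}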
PADDLE_ENFORCE_EQ(param, param_out); auto* grad = ctx.Input("Grad"); - SparseSGDFunctor functor; - functor(ctx.device_context(), *grad, *learning_rate, param_out); + SparseSGDFunctor functor; + functor(ctx.template device_context(), *grad, + *learning_rate, param_out); } else { PADDLE_THROW("Unsupported Variable Type of Grad"); } diff --git a/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc b/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc index d9e40546523c60b0a7eec2e0593446258996ba58..b8a1bf122a78df1e0d8291c77a61b3f917d40960 100644 --- a/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc +++ b/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc @@ -25,20 +25,19 @@ class SigmoidCrossEntropyWithLogitsOp : public framework::OperatorWithKernel { void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); - PADDLE_ENFORCE(ctx->HasInput("Labels"), - "Input(Labels) should be not null."); + PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should be not null."); auto x_dims = ctx->GetInputDim("X"); - auto labels_dims = ctx->GetInputDim("Labels"); + auto labels_dims = ctx->GetInputDim("Label"); PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank should be 2."); PADDLE_ENFORCE_EQ(labels_dims.size(), 2, - "Input(Labels)'s rank should be 2."); + "Input(Label)'s rank should be 2."); PADDLE_ENFORCE_EQ(x_dims[0], labels_dims[0], - "The 1st dimension of Input(X) and Input(Labels) should " + "The 1st dimension of Input(X) and Input(Label) should " "be equal."); PADDLE_ENFORCE_EQ(x_dims[1], labels_dims[1], - "The 2nd dimension of Input(X) and Input(Labels) should " + "The 2nd dimension of Input(X) and Input(Label) should " "be equal."); ctx->SetOutputDim("Out", x_dims); @@ -53,26 +52,25 @@ class SigmoidCrossEntropyWithLogitsGradOp void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); - PADDLE_ENFORCE(ctx->HasInput("Labels"), - "Input(Labels) should be not null."); + PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null."); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), "Input(Out@GRAD) shoudl be not null."); PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), "Output(X@GRAD) should be not null."); auto x_dims = ctx->GetInputDim("X"); - auto labels_dims = ctx->GetInputDim("Labels"); + auto labels_dims = ctx->GetInputDim("Label"); auto dout_dims = ctx->GetInputDim(framework::GradVarName("Out")); PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank should be 2."); PADDLE_ENFORCE_EQ(labels_dims.size(), 2, - "Input(Labels)'s rank should be 2."); + "Input(Label)'s rank should be 2."); PADDLE_ENFORCE_EQ(dout_dims.size(), 2, "Input(Out@Grad)'s rank should be 2."); PADDLE_ENFORCE_EQ(x_dims[0], labels_dims[0], - "The 1st dimension of Input(X) and Input(Labels) should " + "The 1st dimension of Input(X) and Input(Label) should " "be equal."); PADDLE_ENFORCE_EQ(x_dims[1], labels_dims[1], - "The 2nd dimension of Input(X) and Input(Labels) should " + "The 2nd dimension of Input(X) and Input(Label) should " "be equal."); PADDLE_ENFORCE_EQ(x_dims[0], dout_dims[0], "The 1st dimension of Input(X) and Input(Out@Grad) " @@ -97,7 +95,7 @@ class SigmoidCrossEntropyWithLogitsOpMaker "This input is a tensor of logits computed by the previous " " operator. 
Logits are unscaled log probabilities given as " "log(p/(1-p))."); - AddInput("Labels", + AddInput("Label", "(Tensor, default Tensor), a 2-D tensor of the same type " "and shape as X. This input is a tensor of probabalistic labels " "for each logit"); @@ -144,7 +142,7 @@ REGISTER_OP(sigmoid_cross_entropy_with_logits, ops::SigmoidCrossEntropyWithLogitsGradOp); REGISTER_OP_CPU_KERNEL(sigmoid_cross_entropy_with_logits, ops::SigmoidCrossEntropyWithLogitsKernel< - paddle::platform::CPUPlace, float>); + paddle::platform::CPUDeviceContext, float>); REGISTER_OP_CPU_KERNEL(sigmoid_cross_entropy_with_logits_grad, ops::SigmoidCrossEntropyWithLogitsGradKernel< - paddle::platform::CPUPlace, float>); + paddle::platform::CPUDeviceContext, float>); diff --git a/paddle/operators/sigmoid_cross_entropy_with_logits_op.cu b/paddle/operators/sigmoid_cross_entropy_with_logits_op.cu index 32a39956a14a206373b7b4c141dad19577d171f0..1b569c93ed9568a26824defef0d25bb1c3dadad4 100644 --- a/paddle/operators/sigmoid_cross_entropy_with_logits_op.cu +++ b/paddle/operators/sigmoid_cross_entropy_with_logits_op.cu @@ -16,9 +16,9 @@ #include "paddle/operators/sigmoid_cross_entropy_with_logits_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(sigmoid_cross_entropy_with_logits, - ops::SigmoidCrossEntropyWithLogitsKernel< - paddle::platform::GPUPlace, float>); -REGISTER_OP_GPU_KERNEL(sigmoid_cross_entropy_with_logits_grad, - ops::SigmoidCrossEntropyWithLogitsGradKernel< - paddle::platform::GPUPlace, float>); +REGISTER_OP_CUDA_KERNEL(sigmoid_cross_entropy_with_logits, + ops::SigmoidCrossEntropyWithLogitsKernel< + paddle::platform::CUDADeviceContext, float>); +REGISTER_OP_CUDA_KERNEL(sigmoid_cross_entropy_with_logits_grad, + ops::SigmoidCrossEntropyWithLogitsGradKernel< + paddle::platform::CUDADeviceContext, float>); diff --git a/paddle/operators/sigmoid_cross_entropy_with_logits_op.h b/paddle/operators/sigmoid_cross_entropy_with_logits_op.h index 41c619f181c878f08959a8ca461c60af5ffdff2a..8fe7c5ba8224f8dac5de8d7ee772ebc71f987d69 100644 --- a/paddle/operators/sigmoid_cross_entropy_with_logits_op.h +++ b/paddle/operators/sigmoid_cross_entropy_with_logits_op.h @@ -20,20 +20,19 @@ namespace paddle { namespace operators { // Out = max(X, 0) - X * Labels + log(1 + exp(-abs(X))) -template +template class SigmoidCrossEntropyWithLogitsKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { const framework::Tensor *X = context.Input("X"); - const framework::Tensor *Labels = - context.Input("Labels"); + const framework::Tensor *Labels = context.Input("Label"); framework::Tensor *Out = context.Output("Out"); Out->mutable_data(context.GetPlace()); auto x = framework::EigenVector::Flatten(*X); auto labels = framework::EigenVector::Flatten(*Labels); auto out = framework::EigenVector::Flatten(*Out); - auto place = context.GetEigenDevice(); + auto &place = *context.device_context().eigen_device(); // term1 = max(x, 0) auto term1 = x.cwiseMax(static_cast(0)); @@ -47,13 +46,12 @@ class SigmoidCrossEntropyWithLogitsKernel : public framework::OpKernel { }; // dX = sigmoid(X) - labels -template +template class SigmoidCrossEntropyWithLogitsGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { const framework::Tensor *X = context.Input("X"); - const framework::Tensor *Labels = - context.Input("Labels"); + const framework::Tensor *Labels = context.Input("Label"); const framework::Tensor *dOut = 
context.Input(framework::GradVarName("Out")); framework::Tensor *dX = @@ -64,7 +62,8 @@ class SigmoidCrossEntropyWithLogitsGradKernel : public framework::OpKernel { auto labels = framework::EigenVector::Flatten(*Labels); auto dout = framework::EigenVector::Flatten(*dOut); auto dx = framework::EigenVector::Flatten(*dX); - auto place = context.GetEigenDevice(); + auto &place = + *context.template device_context().eigen_device(); auto sigmoid_x = static_cast(1) / (static_cast(1) + (-x).exp()); dx.device(place) = dout * (sigmoid_x - labels); diff --git a/paddle/operators/sign_op.cc b/paddle/operators/sign_op.cc index 08bf2e4e7cc101a3bcc907d3b40ee82347b39f80..d5a7ccb77e7d9ad3a93702861dbab295c4ab5bce 100644 --- a/paddle/operators/sign_op.cc +++ b/paddle/operators/sign_op.cc @@ -67,5 +67,5 @@ namespace ops = paddle::operators; REGISTER_OPERATOR(sign, ops::SignOp, ops::SignOpMaker, ops::SignGradMaker); -REGISTER_OP_CPU_KERNEL(sign, - ops::SignKernel); +REGISTER_OP_CPU_KERNEL( + sign, ops::SignKernel); diff --git a/paddle/operators/sign_op.cu b/paddle/operators/sign_op.cu index 4d0638cb97d84bf650fb23e4d2a201adc51a4b68..9bc1c65d214ba8f988dec3b7b11da9e1ec3a6581 100644 --- a/paddle/operators/sign_op.cu +++ b/paddle/operators/sign_op.cu @@ -14,5 +14,6 @@ #include "paddle/operators/sign_op.h" -REGISTER_OP_GPU_KERNEL( - sign, paddle::operators::SignKernel); +REGISTER_OP_CUDA_KERNEL( + sign, + paddle::operators::SignKernel); diff --git a/paddle/operators/sign_op.h b/paddle/operators/sign_op.h index ab5cd4bac019d602c63ea51629fb85fa7e206841..2e476ed6658491b3dcec3cf1388ccc4a0813449c 100644 --- a/paddle/operators/sign_op.h +++ b/paddle/operators/sign_op.h @@ -19,7 +19,7 @@ namespace paddle { namespace operators { -template +template class SignKernel : public framework::OpKernel { public: virtual void Compute(const framework::ExecutionContext& context) const { @@ -29,7 +29,8 @@ class SignKernel : public framework::OpKernel { auto eigen_out = framework::EigenVector::Flatten(*out); auto eigen_in = framework::EigenVector::Flatten(*in); - auto& place = context.GetEigenDevice(); + auto& place = + *context.template device_context().eigen_device(); eigen_out.device(place) = eigen_in.sign(); } }; diff --git a/paddle/operators/smooth_l1_loss_op.cc b/paddle/operators/smooth_l1_loss_op.cc index ebf7b43700a7498aa18b5f648b0b8c2c4e7b442b..56e8d9058fcc035c28e74daff778c4e034f46b44 100644 --- a/paddle/operators/smooth_l1_loss_op.cc +++ b/paddle/operators/smooth_l1_loss_op.cc @@ -22,22 +22,20 @@ class SmoothL1LossOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "X must be initialized."); - PADDLE_ENFORCE(ctx->HasInput("Y"), "Y must be initialized."); + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null."); auto x_dims = ctx->GetInputDim("X"); auto y_dims = ctx->GetInputDim("Y"); - PADDLE_ENFORCE_EQ(x_dims, y_dims, "The shape of X and Y must be the same."); + PADDLE_ENFORCE_EQ(x_dims, y_dims); PADDLE_ENFORCE_GE(x_dims.size(), 2, - "The tensor rank of X must be at least 2."); + "The tensor rank of Input(X) should not be less than 2."); if (ctx->HasInput("InsideWeight")) { PADDLE_ENFORCE(ctx->HasInput("OutsideWeight"), "If weights are provided, must specify both " "inside and outside weights."); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("InsideWeight"), x_dims, - "The shape of InsideWeight must be 
same as X."); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("OutsideWeight"), x_dims, - "The shape of OutsideWeight must be same as X."); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("InsideWeight"), x_dims); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("OutsideWeight"), x_dims); } ctx->SetOutputDim("Diff", x_dims); @@ -53,25 +51,29 @@ class SmoothL1LossOpMaker : public framework::OpProtoAndCheckerMaker { framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", - "The input tensor of smooth l1 loss op." - "The rank should be greater or equal to 2 with shape " - "[batch_size, value_dim1, value_dim2, ..., value_dimN]"); + "(Tensor, default Tensor) A tensor with rank at least 2. " + "The input value of smooth l1 loss op with shape " + "[batch_size, dim1, ..., dimN]."); AddInput("Y", - "The target tensor of smooth l1 loss op " - "with the same shape as X."); + "(Tensor, default Tensor) A tensor with rank at least 2. " + "The target value of smooth l1 loss op with same shape as X."); AddInput("InsideWeight", - "Optional input tensor of smooth l1 loss op with the same shape " - "as X. If provided, the result of (X - Y) will be multiplied " + "(Tensor, default Tensor) A tensor with rank at least 2. " + "This input is optional and should have same shape with X. " + "If provided, the result of (X - Y) will be multiplied " "by this tensor element by element.") .AsDispensable(); AddInput("OutsideWeight", - "Optinal input of smooth l1 loss op with the same shape as X." - "If provided, the output smooth l1 loss will be multiplied by " - "this tensor element by element.") + "(Tensor, default Tensor) A tensor with rank at least 2. " + "This input is optional and should have same shape with X. " + "If provided, the out smooth l1 loss will be multiplied by this " + "tensor element by element.") .AsDispensable(); - AddOutput("Diff", "Intermediate variable to cache InsideWeight*(X-Y).") + AddOutput("Diff", "Intermediate variable to cache InsideWeight * (X - Y).") .AsIntermediate(); - AddOutput("Out", "Smooth l1 loss."); + AddOutput("Out", + "(Tensor, default Tensor) A tensor with rank be 2. " + "The output smooth l1 loss with shape [batch_size, 1]."); AddAttr("sigma", "Hyper parameter of smooth l1 loss op." "A float scalar with default value 3.0.") @@ -79,15 +81,23 @@ class SmoothL1LossOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( Smooth L1 Loss Operator. -This operator computes the smooth l1 loss for input and target. -The operator takes the first dimension of input as the batch size. +This operator computes the smooth l1 loss for X and Y. +The operator takes the first dimension of X and Y as batch size. For each instance, it computes the smooth l1 loss element by element first -and then sums all the losses. So the resulting output shape -is [batch_size, 1]. +and then sums all the losses. So the shape of Out is [batch_size, 1]. The equation is: -loss = $$0.5 * (\sigma * (x-y))^2$$ if $$|x - y| < 1 /({\sigma}^2)$$ - $$\frac{|x - y| - 0.5}{{\sigma}^2}$$ otherwise +$$ +Out_{\sigma}(X, Y)_i = \begin{cases} +0.5 * (\sigma * (X_i - Y_i)) ^ 2 +\quad |X_i - Y_i| \lt \frac{1} {{\sigma} ^ 2} \\ +\frac{|X_i - Y_i| - 0.5}{{\sigma}^2}, +\quad otherwise +\end{cases} +$$ + +In the above equation, $Out_{\sigma}(X, Y)_i$, $X_i$ and $Y_i$ represent the ith +element of Out, X and Y. 
)DOC"); } @@ -128,7 +138,8 @@ REGISTER_OP(smooth_l1_loss, ops::SmoothL1LossOp, ops::SmoothL1LossOpMaker, smooth_l1_loss_grad, ops::SmoothL1LossGradOp); REGISTER_OP_CPU_KERNEL( - smooth_l1_loss, ops::SmoothL1LossKernel); + smooth_l1_loss, + ops::SmoothL1LossKernel); REGISTER_OP_CPU_KERNEL( smooth_l1_loss_grad, - ops::SmoothL1LossGradKernel); + ops::SmoothL1LossGradKernel); diff --git a/paddle/operators/smooth_l1_loss_op.cu b/paddle/operators/smooth_l1_loss_op.cu index 1c3172f43867741cd1f26979a366b2425f326321..8e94ebac644d1047920827250c4313c657b22ea0 100644 --- a/paddle/operators/smooth_l1_loss_op.cu +++ b/paddle/operators/smooth_l1_loss_op.cu @@ -17,8 +17,9 @@ #include "paddle/operators/smooth_l1_loss_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( - smooth_l1_loss, ops::SmoothL1LossKernel); -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( + smooth_l1_loss, + ops::SmoothL1LossKernel); +REGISTER_OP_CUDA_KERNEL( smooth_l1_loss_grad, - ops::SmoothL1LossGradKernel); + ops::SmoothL1LossGradKernel); diff --git a/paddle/operators/smooth_l1_loss_op.h b/paddle/operators/smooth_l1_loss_op.h index 39d0070b6c8909b8f433de48038240e851d9d6cf..1a70c9c63c340d66b6bf0db97cc8ab35a663f816 100644 --- a/paddle/operators/smooth_l1_loss_op.h +++ b/paddle/operators/smooth_l1_loss_op.h @@ -44,7 +44,7 @@ struct SmoothL1LossForward { T sigma2; }; -template +template class SmoothL1LossKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -57,7 +57,8 @@ class SmoothL1LossKernel : public framework::OpKernel { out0->mutable_data(context.GetPlace()); out1->mutable_data(context.GetPlace()); - auto place = context.GetEigenDevice(); + auto* place = + context.template device_context().eigen_device(); auto sigma = static_cast(context.Attr("sigma")); T sigma2 = sigma * sigma; @@ -67,12 +68,12 @@ class SmoothL1LossKernel : public framework::OpKernel { auto y = EigenVector::Flatten(*in1); auto diff = EigenVector::Flatten(*out0); - diff.device(place) = x - y; + diff.device(*place) = x - y; // multiply inside weight if (has_weight) { auto inside_weight = EigenVector::Flatten(*in2); // cache diff, reused in bp - diff.device(place) = diff * inside_weight; + diff.device(*place) = diff * inside_weight; } auto in_counts = in0->numel(); @@ -81,12 +82,12 @@ class SmoothL1LossKernel : public framework::OpKernel { context.GetPlace()); auto errors = EigenVector::Flatten(ptensor_errors); // apply smooth l1 forward - errors.device(place) = diff.unaryExpr(SmoothL1LossForward(sigma2)); + errors.device(*place) = diff.unaryExpr(SmoothL1LossForward(sigma2)); // multiply outside weight if (has_weight) { auto outside_weight = EigenVector::Flatten(*in3); - errors.device(place) = errors * outside_weight; + errors.device(*place) = errors * outside_weight; } auto loss = EigenVector::Flatten(*out1); // first dimension of 'X' is the number of samples @@ -94,7 +95,7 @@ class SmoothL1LossKernel : public framework::OpKernel { framework::make_ddim({static_cast(in0->dims()[0]), static_cast(in_counts / in0->dims()[0])}); auto errors_mat_view = EigenMatrix::From(ptensor_errors, mat_dims); - loss.device(place) = errors_mat_view.sum(Eigen::array({{1}})); + loss.device(*place) = errors_mat_view.sum(Eigen::array({{1}})); } }; @@ -114,7 +115,7 @@ struct SmoothL1LossBackward { T sigma2; }; -template +template class SmoothL1LossGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -126,7 +127,8 @@ class 
SmoothL1LossGradKernel : public framework::OpKernel { T sigma2 = sigma * sigma; bool has_weight = (in0 != nullptr) && (in1 != nullptr); - auto place = context.GetEigenDevice(); + auto* place = + context.template device_context().eigen_device(); auto in_dims = in2->dims(); auto counts = in2->numel(); @@ -139,7 +141,7 @@ class SmoothL1LossGradKernel : public framework::OpKernel { context.GetPlace()); auto diff = EigenVector::Flatten(ptensor_diff); // apply smooth l1 backwoard - diff.device(place) = EigenVector::Flatten(*in2).unaryExpr( + diff.device(*place) = EigenVector::Flatten(*in2).unaryExpr( SmoothL1LossBackward(sigma2)); // compute weights @@ -147,11 +149,11 @@ class SmoothL1LossGradKernel : public framework::OpKernel { ptensor_weights.mutable_data(mat_dims, context.GetPlace()); auto weights = EigenMatrix::From(ptensor_weights); // initialize to 1.0 - weights.device(place) = weights.constant(static_cast(1.0)); + weights.device(*place) = weights.constant(static_cast(1.0)); if (has_weight) { auto inside_weight = EigenMatrix::From(*in0, mat_dims); auto outside_weight = EigenMatrix::From(*in1, mat_dims); - weights.device(place) = inside_weight * outside_weight; + weights.device(*place) = inside_weight * outside_weight; } // compute gradients @@ -167,13 +169,13 @@ class SmoothL1LossGradKernel : public framework::OpKernel { if (out0) { out0->mutable_data(context.GetPlace()); auto x_grad = EigenMatrix::From(*out0, mat_dims); - x_grad.device(place) = gradients; + x_grad.device(*place) = gradients; } if (out1) { out1->mutable_data(context.GetPlace()); auto y_grad = EigenMatrix::From(*out1, mat_dims); - y_grad.device(place) = -1 * gradients; + y_grad.device(*place) = -1 * gradients; } } }; diff --git a/paddle/operators/softmax_op.cc b/paddle/operators/softmax_op.cc index 93e0525badc26808f0dca70cc1153ac728f1fe9c..0988c83d43535d7ee1bcef87bf506e5db1a3ecc0 100644 --- a/paddle/operators/softmax_op.cc +++ b/paddle/operators/softmax_op.cc @@ -89,7 +89,8 @@ namespace ops = paddle::operators; REGISTER_OP(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker, softmax_grad, ops::SoftmaxOpGrad); -REGISTER_OP_CPU_KERNEL(softmax, - ops::SoftmaxKernel); REGISTER_OP_CPU_KERNEL( - softmax_grad, ops::SoftmaxGradKernel); + softmax, ops::SoftmaxKernel); +REGISTER_OP_CPU_KERNEL( + softmax_grad, + ops::SoftmaxGradKernel); diff --git a/paddle/operators/softmax_op.cu.cc b/paddle/operators/softmax_op.cu.cc index 013ace19ae3d4a1af29b570ba33fea3e4595fe5b..7b9882cbcfe1a0381541386f76867c6bb0f1fe55 100644 --- a/paddle/operators/softmax_op.cu.cc +++ b/paddle/operators/softmax_op.cu.cc @@ -16,7 +16,8 @@ namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(softmax, - ops::SoftmaxKernel); -REGISTER_OP_GPU_KERNEL( - softmax_grad, ops::SoftmaxGradKernel); +REGISTER_OP_CUDA_KERNEL( + softmax, ops::SoftmaxKernel); +REGISTER_OP_CUDA_KERNEL( + softmax_grad, + ops::SoftmaxGradKernel); diff --git a/paddle/operators/softmax_op.h b/paddle/operators/softmax_op.h index 44d1e63f1bb4798144218cd1caf01f133825bcff..0f8998b99e93b5ed6c9b43ad7adabc2d515c1ff1 100644 --- a/paddle/operators/softmax_op.h +++ b/paddle/operators/softmax_op.h @@ -21,7 +21,7 @@ namespace operators { using Tensor = framework::Tensor; -template +template class SoftmaxKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -31,11 +31,12 @@ class SoftmaxKernel : public framework::OpKernel { // allocate memory on device. 
Y->mutable_data(context.GetPlace()); - math::SoftmaxFunctor()(context.device_context(), X, Y); + math::SoftmaxFunctor()( + context.template device_context(), X, Y); } }; -template +template class SoftmaxGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -46,7 +47,8 @@ class SoftmaxGradKernel : public framework::OpKernel { // allocate memory on device. dX->mutable_data(context.GetPlace()); - math::SoftmaxGradFunctor()(context.device_context(), Y, dY, dX); + math::SoftmaxGradFunctor()( + context.template device_context(), Y, dY, dX); } }; diff --git a/paddle/operators/softmax_with_cross_entropy_op.cc b/paddle/operators/softmax_with_cross_entropy_op.cc index fc027d6f95cdbc24af59ef1188b6f16f6a93e85c..0c302288637ad1713e133d37faa0fb338e1f7022 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/operators/softmax_with_cross_entropy_op.cc @@ -13,7 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/operators/softmax_with_cross_entropy_op.h" -#include namespace paddle { namespace operators { diff --git a/paddle/operators/softmax_with_cross_entropy_op.cu b/paddle/operators/softmax_with_cross_entropy_op.cu index b1faddac3fd21aaf817caf9d3e57e664f4e0e2d5..6100c63f9aba006d9739173a8a5a2fb398187e55 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cu +++ b/paddle/operators/softmax_with_cross_entropy_op.cu @@ -69,10 +69,10 @@ class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel { softmax->mutable_data(context.GetPlace()); loss->mutable_data(context.GetPlace()); - math::SoftmaxFunctor()(context.device_context(), - logits, softmax); - math::CrossEntropyFunctor()( - context.device_context(), loss, softmax, labels, + math::SoftmaxFunctor()( + context.cuda_device_context(), logits, softmax); + math::CrossEntropyFunctor()( + context.cuda_device_context(), loss, softmax, labels, context.Attr("soft_label")); } }; @@ -98,18 +98,18 @@ class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel { if (context.Attr("soft_label")) { const T* label_data = labels->data(); - SoftCrossEntropyGradientKernel<<< - grid, block, 0, reinterpret_cast( - context.device_context()) - .stream()>>>(logit_grad_data, loss_grad_data, - label_data, batch_size, class_num); + SoftCrossEntropyGradientKernel< + T><<() + .stream()>>>(logit_grad_data, loss_grad_data, label_data, + batch_size, class_num); } else { const int64_t* label_data = labels->data(); - CrossEntropyGrad<<< - grid, block, 0, reinterpret_cast( - context.device_context()) - .stream()>>>(logit_grad_data, loss_grad_data, - label_data, batch_size, class_num); + CrossEntropyGrad< + T><<() + .stream()>>>(logit_grad_data, loss_grad_data, label_data, + batch_size, class_num); } } }; @@ -118,9 +118,9 @@ class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(softmax_with_cross_entropy, - ops::SoftmaxWithCrossEntropyCUDAKernel, - ops::SoftmaxWithCrossEntropyCUDAKernel); -REGISTER_OP_GPU_KERNEL(softmax_with_cross_entropy_grad, - ops::SoftmaxWithCrossEntropyGradCUDAKernel, - ops::SoftmaxWithCrossEntropyGradCUDAKernel); +REGISTER_OP_CUDA_KERNEL(softmax_with_cross_entropy, + ops::SoftmaxWithCrossEntropyCUDAKernel, + ops::SoftmaxWithCrossEntropyCUDAKernel); +REGISTER_OP_CUDA_KERNEL(softmax_with_cross_entropy_grad, + ops::SoftmaxWithCrossEntropyGradCUDAKernel, + 
ops::SoftmaxWithCrossEntropyGradCUDAKernel); diff --git a/paddle/operators/softmax_with_cross_entropy_op.h b/paddle/operators/softmax_with_cross_entropy_op.h index c4ab3f74b4b07d13957d99e01aa4868fac719f61..9c3431605b2f2285b2e7d71c5ff2f4a53c6c6f30 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.h +++ b/paddle/operators/softmax_with_cross_entropy_op.h @@ -40,11 +40,12 @@ class SoftmaxWithCrossEntropyKernel : public framework::OpKernel { softmax->mutable_data(context.GetPlace()); loss->mutable_data(context.GetPlace()); - math::SoftmaxFunctor()(context.device_context(), - logits, softmax); - math::CrossEntropyFunctor()( - context.device_context(), loss, softmax, labels, - context.Attr("soft_label")); + auto& dev_ctx = + context.template device_context(); + math::SoftmaxFunctor()(dev_ctx, logits, + softmax); + math::CrossEntropyFunctor()( + dev_ctx, loss, softmax, labels, context.Attr("soft_label")); } }; @@ -62,14 +63,15 @@ class SoftmaxWithCrossEntropyGradKernel : public framework::OpKernel { const int class_num = logit_grad->dims()[1]; auto out_grad_mat = EigenMatrix::From(*out_grad); auto logit_grad_mat = EigenMatrix::From(*logit_grad); - + auto& place = *context.template device_context() + .eigen_device(); if (context.Attr("soft_label")) { auto lbl_mat = EigenMatrix::From(*labels); - logit_grad_mat.device(context.GetEigenDevice()) = + logit_grad_mat.device(place) = out_grad_mat.broadcast(Eigen::DSizes(1, class_num)) * (logit_grad_mat - lbl_mat); } else { - logit_grad_mat.device(context.GetEigenDevice()) = + logit_grad_mat.device(place) = logit_grad_mat * out_grad_mat.broadcast(Eigen::DSizes(1, class_num)); diff --git a/paddle/operators/split_op.cu.cc b/paddle/operators/split_op.cu.cc index 93d1fc3c44cbc146c945c51af1abe6494572d1ae..dbad0bbf68d7924cfba80721bb3294b7e0cfac00 100644 --- a/paddle/operators/split_op.cu.cc +++ b/paddle/operators/split_op.cu.cc @@ -14,5 +14,5 @@ limitations under the License. */ #include "paddle/operators/split_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(split, - ops::SplitOpKernel); +REGISTER_OP_CUDA_KERNEL( + split, ops::SplitOpKernel); diff --git a/paddle/operators/split_op.h b/paddle/operators/split_op.h index fa26e5f677b18c84b45dd583004d02cab4c1d375..a38c435d531d7da2a1a60eb2c455bc1782c1cd4c 100644 --- a/paddle/operators/split_op.h +++ b/paddle/operators/split_op.h @@ -21,7 +21,7 @@ limitations under the License. 
*/ namespace paddle { namespace operators { -template +template class SplitOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { diff --git a/paddle/operators/squared_l2_distance_op.cc b/paddle/operators/squared_l2_distance_op.cc index bec2a2c18ae8da892ee7d71f45afe53c887c0f57..50bc6da196e642e3860874cfb883390dd2e93215 100644 --- a/paddle/operators/squared_l2_distance_op.cc +++ b/paddle/operators/squared_l2_distance_op.cc @@ -115,7 +115,7 @@ REGISTER_OP(squared_l2_distance, ops::SquaredL2DistanceOp, ops::SquaredL2DistanceGradOp); REGISTER_OP_CPU_KERNEL( squared_l2_distance, - ops::SquaredL2DistanceKernel); -REGISTER_OP_CPU_KERNEL( - squared_l2_distance_grad, - ops::SquaredL2DistanceGradKernel); + ops::SquaredL2DistanceKernel); +REGISTER_OP_CPU_KERNEL(squared_l2_distance_grad, + ops::SquaredL2DistanceGradKernel< + paddle::platform::CPUDeviceContext, float>); diff --git a/paddle/operators/squared_l2_distance_op.cu b/paddle/operators/squared_l2_distance_op.cu index 3fe62f1a9cb56722ea544b0fed052ac384e799aa..ecc82ed1e49501b05e0cf54e5b44114db150a427 100644 --- a/paddle/operators/squared_l2_distance_op.cu +++ b/paddle/operators/squared_l2_distance_op.cu @@ -17,9 +17,9 @@ #include "paddle/operators/squared_l2_distance_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( squared_l2_distance, - ops::SquaredL2DistanceKernel); -REGISTER_OP_GPU_KERNEL( - squared_l2_distance_grad, - ops::SquaredL2DistanceGradKernel); + ops::SquaredL2DistanceKernel); +REGISTER_OP_CUDA_KERNEL(squared_l2_distance_grad, + ops::SquaredL2DistanceGradKernel< + paddle::platform::CUDADeviceContext, float>); diff --git a/paddle/operators/squared_l2_distance_op.h b/paddle/operators/squared_l2_distance_op.h index 259ef4029646914f83a112b9c6d7fdf8401483f6..5bd5f4819a35966b73038f433d38c06031e18715 100644 --- a/paddle/operators/squared_l2_distance_op.h +++ b/paddle/operators/squared_l2_distance_op.h @@ -27,7 +27,7 @@ template using EigenMatrix = framework::EigenMatrix; -template +template class SquaredL2DistanceKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -51,7 +51,8 @@ class SquaredL2DistanceKernel : public framework::OpKernel { auto sub_result = EigenMatrix::From(*out0); auto z = EigenVector::Flatten(*out1); - auto place = context.GetEigenDevice(); + auto& place = + *context.template device_context().eigen_device(); auto x_dims = x.dimensions(); auto y_dims = y.dimensions(); // buffer the substraction result @@ -67,7 +68,7 @@ class SquaredL2DistanceKernel : public framework::OpKernel { } }; -template +template class SquaredL2DistanceGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -89,7 +90,8 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel { sub_result; // propagate back to input - auto eigen_place = context.GetEigenDevice(); + auto& eigen_place = + *context.template device_context().eigen_device(); if (x_g) { x_g->mutable_data(context.GetPlace()); // eigen matrix diff --git a/paddle/operators/squared_l2_norm_op.cc b/paddle/operators/squared_l2_norm_op.cc index 3c10e6159f44bc8c21b1e79aefaa962c7a2b64ed..3cff61a02f71fadf99f73787e2b2c179f7d441a8 100644 --- a/paddle/operators/squared_l2_norm_op.cc +++ b/paddle/operators/squared_l2_norm_op.cc @@ -72,7 +72,7 @@ REGISTER_OP(squared_l2_norm, ops::SquaredL2NormOp, ops::SquaredL2NormOpMaker, squared_l2_norm_grad, 
ops::SquaredL2NormGradOp); REGISTER_OP_CPU_KERNEL( squared_l2_norm, - ops::SquaredL2NormKernel); + ops::SquaredL2NormKernel); REGISTER_OP_CPU_KERNEL( squared_l2_norm_grad, - ops::SquaredL2NormGradKernel); + ops::SquaredL2NormGradKernel); diff --git a/paddle/operators/squared_l2_norm_op.cu b/paddle/operators/squared_l2_norm_op.cu index d384e9c28c9150fa901404478739ff809f29126f..2d6567d090a96a43cbda203fb8176041d719e55f 100644 --- a/paddle/operators/squared_l2_norm_op.cu +++ b/paddle/operators/squared_l2_norm_op.cu @@ -16,9 +16,9 @@ #include "paddle/operators/squared_l2_norm_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( squared_l2_norm, - ops::SquaredL2NormKernel); -REGISTER_OP_GPU_KERNEL( + ops::SquaredL2NormKernel); +REGISTER_OP_CUDA_KERNEL( squared_l2_norm_grad, - ops::SquaredL2NormGradKernel); + ops::SquaredL2NormGradKernel); diff --git a/paddle/operators/squared_l2_norm_op.h b/paddle/operators/squared_l2_norm_op.h index 48d7b1c2d56882f04330dbf27b0a92e37cb8874c..0ced7e7d70ab3627a337d70890db6842ba0f7768 100644 --- a/paddle/operators/squared_l2_norm_op.h +++ b/paddle/operators/squared_l2_norm_op.h @@ -20,7 +20,7 @@ namespace paddle { namespace operators { // Out = sum(square(X)) -template +template class SquaredL2NormKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { @@ -30,14 +30,15 @@ class SquaredL2NormKernel : public framework::OpKernel { auto x = framework::EigenVector::Flatten(*X); auto out = framework::EigenScalar::From(*Out); - auto place = context.GetEigenDevice(); + auto *place = + context.template device_context().eigen_device(); - out.device(place) = x.square().sum(); + out.device(*place) = x.square().sum(); } }; // dX = X -template +template class SquaredL2NormGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { @@ -53,10 +54,11 @@ class SquaredL2NormGradKernel : public framework::OpKernel { auto x = framework::EigenVector::Flatten(*X); auto dout = framework::EigenVector::Flatten(*dOut); auto dx = framework::EigenVector::Flatten(*dX); - auto place = context.GetEigenDevice(); + auto *place = + context.template device_context().eigen_device(); Eigen::DSizes x_dsize(X->numel()); - dx.device(place) = (dout.broadcast(x_dsize) * x) * static_cast(2.0); + dx.device(*place) = (dout.broadcast(x_dsize) * x) * static_cast(2.0); } }; diff --git a/paddle/operators/sum_op.cc b/paddle/operators/sum_op.cc index ddc210c26e69566fef9baa20f49ba1052e993b3f..cd52672f78e3e5826e8dfff92fb8e4668c5c6dcd 100644 --- a/paddle/operators/sum_op.cc +++ b/paddle/operators/sum_op.cc @@ -37,10 +37,16 @@ class SumOp : public framework::OperatorWithKernel { size_t N = x_dims.size(); PADDLE_ENFORCE_GT(N, 1, "Input tensors count should > 1."); - auto in_dim = x_dims[0]; - for (size_t i = 1; i < N; i++) { - auto dim = x_dims[i]; - PADDLE_ENFORCE_EQ(in_dim, dim, "Input tensors must have same shape"); + framework::DDim in_dim({0}); + for (auto& x_dim : x_dims) { + if (framework::product(x_dim) == 0) { + continue; + } + if (framework::product(in_dim) == 0) { + in_dim = x_dim; + } else { + PADDLE_ENFORCE_EQ(in_dim, x_dim, "Input tensors must have same shape"); + } } ctx->SetOutputDim("Out", in_dim); ctx->ShareLoD("X", /*->*/ "Out"); @@ -51,9 +57,23 @@ class SumOp : public framework::OperatorWithKernel { const framework::ExecutionContext& ctx) const override { auto x_vars = ctx.MultiInputVar("X"); if (x_vars[0]->IsType()) { - return 
framework::OpKernelType( - framework::ToDataType(x_vars[0]->Get().type()), - ctx.device_context()); + int dtype = -1; + for (auto& x_var : x_vars) { + auto& lod_tensor = x_var->Get(); + if (lod_tensor.numel() == 0) { + continue; + } + if (dtype == -1) { + dtype = framework::ToDataType(lod_tensor.type()); + } else { + PADDLE_ENFORCE_EQ(dtype, framework::ToDataType(lod_tensor.type())); + } + } + PADDLE_ENFORCE_NE(dtype, -1, + "Sum operator should have at least one tensor"); + + return framework::OpKernelType(static_cast(dtype), + ctx.device_context()); } else if (x_vars[0]->IsType()) { return framework::OpKernelType( framework::ToDataType( @@ -175,7 +195,8 @@ namespace ops = paddle::operators; REGISTER_OPERATOR(sum, ops::SumOp, ops::SumOpMaker, ops::SumGradMaker, ops::SumOpVarTypeInference); -REGISTER_OP_CPU_KERNEL(sum, ops::SumKernel, - ops::SumKernel, - ops::SumKernel, - ops::SumKernel); +REGISTER_OP_CPU_KERNEL( + sum, ops::SumKernel, + ops::SumKernel, + ops::SumKernel, + ops::SumKernel); diff --git a/paddle/operators/sum_op.cu b/paddle/operators/sum_op.cu index 5c30dd4d470c2e0acecef18524a4a81f9eb786a9..873155076c179d5280a418e25fd39fdaf4b0a2b2 100644 --- a/paddle/operators/sum_op.cu +++ b/paddle/operators/sum_op.cu @@ -13,7 +13,8 @@ limitations under the License. */ #include "paddle/operators/sum_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(sum, ops::SumKernel, - ops::SumKernel, - ops::SumKernel, - ops::SumKernel); +REGISTER_OP_CUDA_KERNEL( + sum, ops::SumKernel, + ops::SumKernel, + ops::SumKernel, + ops::SumKernel); diff --git a/paddle/operators/sum_op.h b/paddle/operators/sum_op.h index 4afec03ecef168077c9964f5cb1da7cd61861f40..eaa36aa1aea53e0b37ef6c578d8bb1cda230ded0 100644 --- a/paddle/operators/sum_op.h +++ b/paddle/operators/sum_op.h @@ -26,7 +26,7 @@ template using EigenVector = framework::EigenVector; -template +template class SumKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { @@ -43,21 +43,26 @@ class SumKernel : public framework::OpKernel { auto result = EigenVector::Flatten(*out); if (!in_place) { - math::SetConstant constant_functor; - constant_functor(context.device_context(), out, 0.0); + math::SetConstant constant_functor; + constant_functor(context.template device_context(), out, + 0.0); } - math::SelectedRowsAddToTensor functor; - auto place = context.GetEigenDevice(); + math::SelectedRowsAddToTensor functor; + auto &place = + *context.template device_context().eigen_device(); // If in_place, just skip the first tensor for (int i = in_place ? 
1 : 0; i < N; i++) { if (in_vars[i]->IsType()) { auto &in_t = in_vars[i]->Get(); + if (in_t.numel() == 0) { + continue; + } auto in = EigenVector::Flatten(in_t); result.device(place) = result + in; } else if (in_vars[i]->IsType()) { auto &in_t = in_vars[i]->Get(); - functor(context.device_context(), in_t, out); + functor(context.template device_context(), in_t, out); } else { PADDLE_THROW("Variable type must be LoDTensor/SelectedRows."); } @@ -79,14 +84,14 @@ class SumKernel : public framework::OpKernel { out_value->Resize(framework::make_ddim(in_dim_vec)); out_value->mutable_data(context.GetPlace()); - math::SelectedRowsAddTo functor; + math::SelectedRowsAddTo functor; int64_t offset = 0; for (int i = 0; i < N; i++) { PADDLE_ENFORCE_EQ(out->height(), - in_vars[i]->Get().height()) - functor(context.device_context(), in_vars[i]->Get(), - offset, out); + in_vars[i]->Get().height()); + functor(context.template device_context(), + in_vars[i]->Get(), offset, out); offset += in_vars[i]->Get().value().numel(); } } else if (out_var->IsType()) { @@ -109,7 +114,8 @@ class SumKernel : public framework::OpKernel { PADDLE_ENFORCE(out_array[i].lod() == in_array[i].lod()); auto in = EigenVector::Flatten(in_array[i]); auto result = EigenVector::Flatten(out_array[i]); - result.device(context.GetEigenDevice()) = result + in; + result.device(*context.template device_context() + .eigen_device()) = result + in; } } } diff --git a/paddle/operators/tensor_array_read_write_op.cc b/paddle/operators/tensor_array_read_write_op.cc index ad09fb53ce8c9bf0187e595fe3cdcb6685ab9889..2835b84f75cad6c8fb01d02b93bb9ff79edb1088 100644 --- a/paddle/operators/tensor_array_read_write_op.cc +++ b/paddle/operators/tensor_array_read_write_op.cc @@ -27,7 +27,7 @@ class WriteToArrayOp : public ArrayOp { void Run(const framework::Scope &scope, const platform::DeviceContext &dev_ctx) const override { auto *x = scope.FindVar(Input("X")); - PADDLE_ENFORCE(x != nullptr, "X must be set"); + if (x == nullptr) return; auto &x_tensor = x->Get(); size_t offset = GetOffset(scope, dev_ctx); auto *out = @@ -37,9 +37,15 @@ class WriteToArrayOp : public ArrayOp { << " to " << offset + 1; out->resize(offset + 1); } - auto *out_tensor = &out->at(offset); - CopyFrom(x_tensor, dev_ctx.GetPlace(), dev_ctx, out_tensor); - out_tensor->set_lod(x_tensor.lod()); + if (x_tensor.memory_size() > 0) { + auto *out_tensor = &out->at(offset); + CopyFrom(x_tensor, dev_ctx.GetPlace(), dev_ctx, out_tensor); + out_tensor->set_lod(x_tensor.lod()); + } else { + VLOG(10) << "WARNING: The input tensor 'x_tensor' holds no memory, so " + "nothing has been written to output array[" + << offset << "]."; + } } }; @@ -54,12 +60,16 @@ class WriteToArrayOpProtoMaker : public framework::OpProtoAndCheckerMaker { "(Tensor) the subscript index in tensor array. The number of element " "should be 1"); AddOutput("Out", "(TensorArray) the tensor array will be written"); - AddComment(R"DOC(Write a LoDTensor to a LoDTensor array. + AddComment(R"DOC( +WriteToArray Operator. + +This operator writes a LoDTensor to a LoDTensor array. -Assume T is LoDTensor, i is the subscript of the array, and A is the array. The +Assume $T$ is LoDTensor, $i$ is the subscript of the array, and $A$ is the array. 
The equation is -A[i] = T +$$A[i] = T$$ + )DOC"); } }; @@ -70,7 +80,9 @@ class WriteToArrayInferShape : public framework::InferShapeBase { PADDLE_ENFORCE(context->HasInput("I"), "Must set the subscript index"); PADDLE_ENFORCE_EQ(framework::product(context->GetInputDim("I")), 1, "The number of element of subscript index must be 1"); - PADDLE_ENFORCE(context->HasInput("X"), NotHasXError()); + if (!context->HasInput("X")) { + return; + } PADDLE_ENFORCE(context->HasOutput("Out"), NotHasOutError()); context->SetOutputDim("Out", context->GetInputDim("X")); } @@ -93,9 +105,10 @@ class WriteToArrayInferVarType : public framework::VarTypeInference { auto &out = detail::Ref(block->FindRecursiveOrCreateVar(out_name), "Cannot found %s", out_name); out.SetType(framework::VarDesc::LOD_TENSOR_ARRAY); - auto &x = - detail::Ref(block->FindVarRecursive(x_name), "Cannot found %s", x_name); - out.SetDataType(x.GetDataType()); + auto *x = block->FindVarRecursive(x_name); + if (x != nullptr) { + out.SetDataType(x->GetDataType()); + } } }; @@ -115,10 +128,13 @@ class ReadFromArrayOp : public ArrayOp { PADDLE_ENFORCE(out != nullptr, "Out must be set"); auto *out_tensor = out->GetMutable(); size_t offset = GetOffset(scope, dev_ctx); - PADDLE_ENFORCE_LT(offset, x_array.size()); - framework::CopyFrom(x_array[offset], dev_ctx.GetPlace(), dev_ctx, - out_tensor); - out_tensor->set_lod(x_array[offset].lod()); + if (offset < x_array.size()) { + framework::CopyFrom(x_array[offset], dev_ctx.GetPlace(), dev_ctx, + out_tensor); + out_tensor->set_lod(x_array[offset].lod()); + } else { + VLOG(10) << "offset " << offset << " >= " << x_array.size(); + } } }; @@ -132,12 +148,16 @@ class ReadFromArrayProtoMaker : public framework::OpProtoAndCheckerMaker { "(Tensor) the subscript index in tensor array. The number of " "element should be 1"); AddOutput("Out", "(LoDTensor) the tensor will be read from."); - AddComment(R"DOC(Read a LoDTensor from a LoDTensor Array + AddComment(R"DOC( +ReadFromArray Operator. -Assume T is LoDTensor, i is th e subscript of the array, and A is the array. The +Read a LoDTensor from a LoDTensor Array. + +Assume $T$ is LoDTensor, $i$ is the subscript of the array, and $A$ is the array. 
The equation is -T = A[i] +$$T = A[i]$$ + )DOC"); } }; diff --git a/paddle/operators/top_k_op.cu b/paddle/operators/top_k_op.cu index 7851c71bbe9fe73402968ce14f6db0df523cd6d3..453bd07267e3a6e33211117368dd9aff10a9e23f 100644 --- a/paddle/operators/top_k_op.cu +++ b/paddle/operators/top_k_op.cu @@ -317,4 +317,4 @@ class TopkOpCUDAKernel : public framework::OpKernel { } // namespace operators } // namespace paddle -REGISTER_OP_GPU_KERNEL(top_k, paddle::operators::TopkOpCUDAKernel); +REGISTER_OP_CUDA_KERNEL(top_k, paddle::operators::TopkOpCUDAKernel); diff --git a/paddle/operators/top_k_op.h b/paddle/operators/top_k_op.h index bc8563717a21bd5b3d8fc87f689657990066957b..e9cd9bbd4d964c28f305fb4ab4c4733ed27ebfff 100644 --- a/paddle/operators/top_k_op.h +++ b/paddle/operators/top_k_op.h @@ -27,7 +27,7 @@ template using EigenMatrix = framework::EigenMatrix; -template +template class TopkKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { diff --git a/paddle/operators/transpose_op.cc b/paddle/operators/transpose_op.cc index 94de3d5069017a7ca818e246ad574c4db92d8006..de5ff561add6183828f6bb4c44e30f6bb13079fa 100644 --- a/paddle/operators/transpose_op.cc +++ b/paddle/operators/transpose_op.cc @@ -112,8 +112,8 @@ class TransposeOpGrad : public framework::OperatorWithKernel { namespace ops = paddle::operators; REGISTER_OP(transpose, ops::TransposeOp, ops::TransposeOpMaker, transpose_grad, ops::TransposeOpGrad); -REGISTER_OP_CPU_KERNEL(transpose, - ops::TransposeKernel); +REGISTER_OP_CPU_KERNEL( + transpose, ops::TransposeKernel); REGISTER_OP_CPU_KERNEL( transpose_grad, - ops::TransposeGradKernel); + ops::TransposeGradKernel); diff --git a/paddle/operators/transpose_op.cu.cc b/paddle/operators/transpose_op.cu.cc index af3f581462c919bbd2dd1067e536cc638f9c267d..7d23f1493ec2d548438aeb2493fda8a4ff8c6e80 100644 --- a/paddle/operators/transpose_op.cu.cc +++ b/paddle/operators/transpose_op.cu.cc @@ -15,8 +15,9 @@ #include "paddle/operators/transpose_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(transpose, - ops::TransposeKernel); -REGISTER_OP_GPU_KERNEL( +REGISTER_OP_CUDA_KERNEL( + transpose, + ops::TransposeKernel); +REGISTER_OP_CUDA_KERNEL( transpose_grad, - ops::TransposeGradKernel); + ops::TransposeGradKernel); diff --git a/paddle/operators/transpose_op.h b/paddle/operators/transpose_op.h index e296032f4147f9f8338148f9e4fef100c7cf816f..d995271a6be3266e05c742ab18c34636da384e66 100644 --- a/paddle/operators/transpose_op.h +++ b/paddle/operators/transpose_op.h @@ -20,33 +20,33 @@ namespace paddle { namespace operators { -template -inline void TransCompute(const int dim, const platform::DeviceContext& dev_ctx, +template +inline void TransCompute(const int dim, const DeviceContext& dev_ctx, const framework::Tensor& in, framework::Tensor* out, const std::vector& axis) { switch (dim) { case 1: - math::Transpose trans1; + math::Transpose trans1; trans1(dev_ctx, in, out, axis); break; case 2: - math::Transpose trans2; + math::Transpose trans2; trans2(dev_ctx, in, out, axis); break; case 3: - math::Transpose trans3; + math::Transpose trans3; trans3(dev_ctx, in, out, axis); break; case 4: - math::Transpose trans4; + math::Transpose trans4; trans4(dev_ctx, in, out, axis); break; case 5: - math::Transpose trans5; + math::Transpose trans5; trans5(dev_ctx, in, out, axis); break; case 6: - math::Transpose trans6; + math::Transpose trans6; trans6(dev_ctx, in, out, axis); break; default: @@ -54,7 +54,7 @@ inline void TransCompute(const int dim, 
const platform::DeviceContext& dev_ctx, } } -template +template class TransposeKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -64,12 +64,12 @@ class TransposeKernel : public framework::OpKernel { std::vector axis = context.Attr>("axis"); int ndims = axis.size(); - auto& dev_ctx = context.device_context(); - TransCompute(ndims, dev_ctx, *x, out, axis); + auto& dev_ctx = context.template device_context(); + TransCompute(ndims, dev_ctx, *x, out, axis); } }; -template +template class TransposeGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -88,8 +88,9 @@ class TransposeGradKernel : public framework::OpKernel { } int ndims = axis.size(); - auto& dev_ctx = context.device_context(); - TransCompute(ndims, dev_ctx, *out_grad, x_grad, reversed_axis); + auto& dev_ctx = context.template device_context(); + TransCompute(ndims, dev_ctx, *out_grad, x_grad, + reversed_axis); } }; diff --git a/paddle/operators/uniform_random_op.cc b/paddle/operators/uniform_random_op.cc index fff1dc7ccddf1d8cee0c8311828fd38888283cd1..2a49ee471f67cda87415db0e1440a4992c0cd088 100644 --- a/paddle/operators/uniform_random_op.cc +++ b/paddle/operators/uniform_random_op.cc @@ -67,7 +67,7 @@ class UniformRandomOp : public framework::OperatorWithKernel { const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( static_cast(ctx.Attr("dtype")), - ctx.device_context()); + ctx.GetPlace()); } }; diff --git a/paddle/operators/uniform_random_op.cu b/paddle/operators/uniform_random_op.cu index 8b20bb8287807aca673817c503fee6db04b55753..cfe9d293cff2108cf25749d0e78e2e86e6e198a5 100644 --- a/paddle/operators/uniform_random_op.cu +++ b/paddle/operators/uniform_random_op.cu @@ -63,6 +63,6 @@ class GPUUniformRandomKernel : public framework::OpKernel { } // namespace operators } // namespace paddle -REGISTER_OP_GPU_KERNEL(uniform_random, - paddle::operators::GPUUniformRandomKernel, - paddle::operators::GPUUniformRandomKernel); +REGISTER_OP_CUDA_KERNEL(uniform_random, + paddle::operators::GPUUniformRandomKernel, + paddle::operators::GPUUniformRandomKernel); diff --git a/paddle/operators/unpool_op.cc b/paddle/operators/unpool_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..49df2a530cd0c5c13f08db4b1e7db62679081e9b --- /dev/null +++ b/paddle/operators/unpool_op.cc @@ -0,0 +1,144 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +Indicesou may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/unpool_op.h" +namespace paddle { +namespace operators { + +class Unpool2dOpMaker : public framework::OpProtoAndCheckerMaker { + public: + Unpool2dOpMaker(framework::OpProto* proto, + framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput( + "X", + "(Tensor) The input tensor of unpool operator. " + "The format of input tensor is NCHW. 
Where N is batch size, C is the " + "number of channels, H and W is the height and width of feature."); + AddInput( + "Indices", + "(Tensor) The input tensor of the indices given out by MaxPool2d. " + "The format of input tensor is NCHW. Where N is batch size, C is the " + "number of channels, H and W is the height and width of feature."); + AddOutput("Out", + "(Tensor) The output tensor of unpool operator." + "The format of output tensor is also NCHW." + "Where N is batch size, C is " + "the number of channels, H and W is the height and " + "width of feature."); + AddAttr>( + "ksize", + "(vector), the unpooling window size(height, width) " + "of unpooling operator."); + AddAttr>("strides", + "(vector, default:{1, 1}), " + "strides (height, width) of unpooling operator.") + .SetDefault({1, 1}); + AddAttr>("paddings", + "(vector defalut:{0,0}), " + "paddings (height, width) of unpooling operator.") + .SetDefault({0, 0}); + AddAttr( + "unpooling_type", + "(string), unpooling type, can be \"max\" for max-unpooling ") + .InEnum({"max"}); + AddComment(R"DOC( + "Input shape: $(N, C_{in}, H_{in}, W_{in})$ + Output shape: $(N, C_{out}, H_{out}, W_{out})$ + Where + $$ + H_{out} = (H_{in}−1) * strides[0] − 2 * paddings[0] + ksize[0] \\ + W_{out} = (W_{in}−1) * strides[1] − 2 * paddings[1] + ksize[1] + $$ + Paper: http://www.matthewzeiler.com/wp-content/uploads/2017 + /07/iccv2011.pdf + )DOC"); + } +}; + +int OutputSize(int input_size, int ksize, int padding, int stride) { + int output_size = (input_size - 1) * stride - 2 * padding + ksize; + return output_size; +} + +class UnpoolOp : public framework::OperatorWithKernel { + protected: + framework::OpKernelType GetKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), + ctx.device_context()); + } + + public: + using framework::OperatorWithKernel::OperatorWithKernel; + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of UnpoolOp" + "should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Indices"), + "Input(Indices) of UnpoolOp" + "should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of UnpoolOp should not be null."); + auto in_x_dims = ctx->GetInputDim("X"); + auto in_y_dims = ctx->GetInputDim("Indices"); + std::string unpooling_type = + ctx->Attrs().Get("unpooling_type"); + std::vector ksize = ctx->Attrs().Get>("ksize"); + std::vector strides = ctx->Attrs().Get>("strides"); + std::vector paddings = ctx->Attrs().Get>("paddings"); + PADDLE_ENFORCE(in_x_dims.size() == 4, + "Unpooling intput must be of 4-dimensional."); + PADDLE_ENFORCE_EQ(in_x_dims, in_y_dims); + std::vector output_shape({in_x_dims[0], in_x_dims[1]}); + for (size_t i = 0; i < ksize.size(); ++i) { + output_shape.push_back( + OutputSize(in_x_dims[i + 2], ksize[i], paddings[i], strides[i])); + } + ctx->SetOutputDim("Out", framework::make_ddim(output_shape)); + } +}; + +class UnpoolOpGrad : public framework::OperatorWithKernel { + protected: + framework::OpKernelType GetKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), + ctx.device_context()); + } + + public: + using framework::OperatorWithKernel::OperatorWithKernel; + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null."); + 
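Aside (illustrative only, not part of the patch): the Doc comment above states the unpooling output-size rule H_out = (H_in - 1) * strides[0] - 2 * paddings[0] + ksize[0] (and likewise for W_out), which is what OutputSize()/InferShape apply per spatial dimension. A minimal standalone sketch of that rule, with hypothetical sizes, assuming a 2x2 max-pool with stride 2 and no padding:

// Sketch only: mirrors the OutputSize() rule used by UnpoolOp::InferShape.
#include <cstdio>
#include <vector>

// Same formula as the operator: out = (in - 1) * stride - 2 * padding + ksize.
int UnpoolOutputSize(int input_size, int ksize, int padding, int stride) {
  return (input_size - 1) * stride - 2 * padding + ksize;
}

int main() {
  // Hypothetical example: a 16x16 feature map pooled to 8x8 by a 2x2/stride-2
  // max-pool; unpooling restores the original 16x16 spatial size.
  std::vector<int> in = {8, 8}, ksize = {2, 2}, strides = {2, 2}, paddings = {0, 0};
  for (size_t i = 0; i < in.size(); ++i) {
    std::printf("dim %zu: %d -> %d\n", i, in[i],
                UnpoolOutputSize(in[i], ksize[i], paddings[i], strides[i]));
  }
  return 0;  // prints "8 -> 16" for both dimensions
}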
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), + "Input(X@GRAD) should not be null."); + ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(unpool, ops::UnpoolOp, ops::Unpool2dOpMaker, unpool_grad, + ops::UnpoolOpGrad); +REGISTER_OP_CPU_KERNEL( + unpool, ops::UnpoolKernel, + ops::UnpoolKernel); +REGISTER_OP_CPU_KERNEL( + unpool_grad, + ops::UnpoolGradKernel, + ops::UnpoolGradKernel); diff --git a/paddle/operators/unpool_op.cu.cc b/paddle/operators/unpool_op.cu.cc new file mode 100644 index 0000000000000000000000000000000000000000..9b002e35c434db561114dbbafce2f3f934daaf6a --- /dev/null +++ b/paddle/operators/unpool_op.cu.cc @@ -0,0 +1,24 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +Indicesou may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/unpool_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + unpool, ops::UnpoolKernel, + ops::UnpoolKernel); +REGISTER_OP_CUDA_KERNEL( + unpool_grad, + ops::UnpoolGradKernel, + ops::UnpoolGradKernel); diff --git a/paddle/operators/unpool_op.h b/paddle/operators/unpool_op.h new file mode 100644 index 0000000000000000000000000000000000000000..ee18b118c957c7933890000bbe934e6ffdc9e56f --- /dev/null +++ b/paddle/operators/unpool_op.h @@ -0,0 +1,71 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +Indicesou may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include "paddle/framework/op_registry.h" +#include "paddle/operators/math/math_function.h" +#include "paddle/operators/math/unpooling.h" + +namespace paddle { +namespace operators { +template +class UnpoolKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + const framework::Tensor* in_x = context.Input("X"); + const framework::Tensor* in_y = context.Input("Indices"); + auto* out = context.Output("Out"); + std::string unpooling_type = context.Attr("unpooling_type"); + std::vector ksize = context.Attr>("ksize"); + std::vector strides = context.Attr>("strides"); + std::vector paddings = context.Attr>("paddings"); + T* output_data = out->mutable_data(context.GetPlace()); + auto& dev_ctx = context.template device_context(); + if (output_data) { + math::SetConstant set_zero; + set_zero(dev_ctx, out, static_cast(0)); + } + math::Unpool2dMaxFunctor unpool2d_max_forward; + unpool2d_max_forward(dev_ctx, *in_x, *in_y, out); + } +}; +template +class UnpoolGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + const framework::Tensor* in_x = context.Input("X"); + const framework::Tensor* in_y = context.Input("Indices"); + const framework::Tensor* out = context.Input("Out"); + const framework::Tensor* out_grad = + context.Input(framework::GradVarName("Out")); + framework::Tensor* in_x_grad = + context.Output(framework::GradVarName("X")); + std::string unpooling_type = context.Attr("unpooling_type"); + std::vector ksize = context.Attr>("ksize"); + std::vector strides = context.Attr>("strides"); + std::vector paddings = context.Attr>("paddings"); + + auto& device_ctx = context.template device_context(); + math::SetConstant zero; + if (in_x_grad) { + in_x_grad->mutable_data(context.GetPlace()); + zero(device_ctx, in_x_grad, static_cast(0)); + } + math::Unpool2dMaxGradFunctor unpool2d_max_backward; + unpool2d_max_backward(device_ctx, *in_x, *in_y, *out, *out_grad, in_x_grad); + } +}; +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/while_op.cc b/paddle/operators/while_op.cc index 68b4f7705995e5ecb6c9b8216db7373c1777a31e..b8e44bcc5a99380fdf08cc2819b20045695eaf87 100644 --- a/paddle/operators/while_op.cc +++ b/paddle/operators/while_op.cc @@ -98,8 +98,6 @@ class WhileGradOp : public framework::OperatorBase { void Run(const framework::Scope &scope, const platform::DeviceContext &dev_ctx) const override { - // PADDLE_ENFORCE(...) 
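As context for the UnpoolKernel/UnpoolGradKernel defined in unpool_op.h above: the kernel zero-fills the output and then math::Unpool2dMaxFunctor scatters each pooled value back to the position recorded in Indices. The following is a self-contained sketch of that scatter idea on flat arrays for a single channel; the data and helper name are hypothetical and this is not the functor's actual implementation:

#include <cstdio>
#include <vector>

// Scatter pooled values back to their recorded positions inside one channel.
// indices[i] is the flat offset in the output feature map that the i-th pooled
// value came from; every other output element stays zero.
void Unpool2dMaxChannel(const std::vector<float>& input,
                        const std::vector<int>& indices,
                        std::vector<float>* output) {
  for (size_t i = 0; i < input.size(); ++i) {
    (*output)[indices[i]] = input[i];
  }
}

int main() {
  // Hypothetical 2x2 pooled input restored into a zero-filled 4x4 output.
  std::vector<float> input = {9.f, 8.f, 7.f, 6.f};
  std::vector<int> indices = {0, 7, 9, 14};  // flat offsets within the 4x4 map
  std::vector<float> output(16, 0.f);
  Unpool2dMaxChannel(input, indices, &output);
  for (int r = 0; r < 4; ++r) {
    std::printf("%g %g %g %g\n", output[r * 4], output[r * 4 + 1],
                output[r * 4 + 2], output[r * 4 + 3]);
  }
  return 0;
}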
- framework::Executor executor(dev_ctx); auto *block = Attr(kStepBlock); auto *program = block->Program(); @@ -124,8 +122,12 @@ class WhileGradOp : public framework::OperatorBase { auto inside_og_name = inside_og_names[i]; VLOG(10) << "Linking outside " << outside_og_name << " --> inside " << inside_og_name; - auto &og_outside = detail::Ref(scope.FindVar(outside_og_name)); - auto &og_inside = detail::Ref(cur_scope.Var(inside_og_name)); + auto &og_outside = + detail::Ref(scope.FindVar(outside_og_name), + "Cannot find Outside Gradient %s", outside_og_name); + auto &og_inside = + detail::Ref(cur_scope.Var(inside_og_name), + "Cannot find inside gradient %s", inside_og_name); if (og_outside.Type().hash_code() == typeid(framework::LoDTensor).hash_code()) { auto &outside_tensor = og_outside.Get(); @@ -160,7 +162,7 @@ class WhileGradOp : public framework::OperatorBase { PADDLE_ENFORCE_EQ(pg_names.size(), p_names.size()); for (size_t param_id = 0; param_id < pg_names.size(); ++param_id) { if (pg_names[param_id] == framework::kEmptyVarName) { - continue; // iterator doesn't have gradient + continue; // parameter doesn't have gradient } auto inside_grad_name = framework::GradVarName(p_names[param_id]); @@ -185,16 +187,16 @@ class WhileGradOp : public framework::OperatorBase { attrs["value"] = 0.0f; auto zero_op = framework::OpRegistry::CreateOp( - "fill_constant", {}, {{"Out", {pg_names[param_id]}}}, attrs); + "fill_constant", framework::VariableNameMap{}, + {{"Out", {pg_names[param_id]}}}, attrs); zero_op->Run(scope, dev_ctx); } } - // sum gradient auto new_inside_name = cur_scope.Rename(inside_grad_name); auto sum_op = framework::OpRegistry::CreateOp( "sum", {{"X", {pg_names[param_id], new_inside_name}}}, - {{"Out", {pg_names[param_id]}}}, {}); + {{"Out", {pg_names[param_id]}}}, framework::AttributeMap{}); sum_op->Run(cur_scope, dev_ctx); cur_scope.Rename(new_inside_name, inside_grad_name); } @@ -207,18 +209,35 @@ class WhileGradOpDescMaker : public framework::SingleGradOpDescMaker { using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - virtual std::unique_ptr Apply() const { + std::unique_ptr Apply() const override { auto *grad = new framework::OpDescBind(); grad->SetType("while_grad"); grad->SetInput(kParameters, Input(kParameters)); - grad->SetOutput( - framework::GradVarName(kParameters), - InputGrad(kParameters, /*do not drop empty gradient*/ false)); + + // Not all of IGs will be generated by inner gradient operators of while op. + // Ignore IGs that is not generated by the inside block. + auto igs = InputGrad(kParameters, /*do not drop empty gradient*/ false); + std::unordered_set all_outs; + for (size_t i = 0; i < grad_block_[0]->OpSize(); ++i) { + for (auto &oname : grad_block_[0]->Op(i)->OutputArgumentNames()) { + all_outs.insert(oname); + } + } + for (auto &each_ig : igs) { + if (all_outs.find(each_ig) == all_outs.end()) { + VLOG(10) << "Ignore " << each_ig; + each_ig = framework::kEmptyVarName; + } + } + + grad->SetOutput(framework::GradVarName(kParameters), igs); + grad->SetInput(kOutputs, Output(kOutputs)); // OG should be re-calculated by step blocks, since many outputs of while op // do not need to calculate gradients. 
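To make the new filtering step in WhileGradOpDescMaker easier to follow: any requested parameter gradient whose name is never produced by an op inside the gradient block is replaced with the empty variable name, so it is skipped downstream instead of failing. A small sketch of that set-membership idiom follows; the names and data are made up, and the stand-in constant below only approximates framework::kEmptyVarName (the real code walks grad_block_[0]->Op(i)->OutputArgumentNames()):

#include <cstdio>
#include <string>
#include <unordered_set>
#include <vector>

int main() {
  // Hypothetical output names collected from the ops inside the gradient block.
  std::unordered_set<std::string> all_outs = {"w@GRAD", "b@GRAD"};
  // Candidate input gradients requested for the while op's parameters.
  std::vector<std::string> igs = {"w@GRAD", "b@GRAD", "embedding@GRAD"};
  const std::string kEmptyVarName = "@EMPTY@";  // stand-in for framework::kEmptyVarName

  // Any requested gradient the block never writes is mapped to the empty name,
  // mirroring the "ignore IGs not generated by the inside block" step above.
  for (auto& each_ig : igs) {
    if (all_outs.find(each_ig) == all_outs.end()) {
      each_ig = kEmptyVarName;
    }
  }
  for (const auto& name : igs) {
    std::printf("%s\n", name.c_str());  // embedding@GRAD becomes the empty name
  }
  return 0;
}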
std::unordered_set block_ins; + auto *fwd_block = this->grad_block_[0]->ParentBlock(); { for (auto &p : Input(kParameters)) { block_ins.insert(p); @@ -233,6 +252,13 @@ class WhileGradOpDescMaker : public framework::SingleGradOpDescMaker { if (block_ins.find(input_name) != block_ins.end()) { continue; } + + // If the input of Op is generated by the forward block, do not make it + // as input again. + if (fwd_block->FindVar(input_name) != nullptr) { + continue; + } + extra_inputs.insert(input_name); } @@ -287,7 +313,6 @@ class WhileGradOpShapeInference : public framework::InferShapeBase { auto p_names = ctx->Inputs(kParameters); auto pg_names = ctx->Outputs(kParamGrads); - auto dims = ctx->GetInputsDim(kParameters); auto var_types = ctx->GetInputsVarType(kParameters); std::vector names_to_set; std::vector dims_to_set; @@ -295,13 +320,14 @@ class WhileGradOpShapeInference : public framework::InferShapeBase { if (pg_names[i] == framework::kEmptyVarName) { continue; } + auto dims = ctx->GetInputsElementDim(kParameters, i); if (var_types[i] == framework::VarDesc::LOD_TENSOR) { names_to_set.push_back(pg_names[i]); - dims_to_set.push_back(dims[i]); + dims_to_set.push_back(dims); } else if (var_types[i] == framework::VarDesc::LOD_TENSOR_ARRAY) { // not sure how to set the dim of LOD_TENSOR_ARRAY names_to_set.push_back(pg_names[i]); - dims_to_set.push_back(dims[i]); + dims_to_set.push_back(dims); } } ctx->SetDims(names_to_set, dims_to_set); diff --git a/paddle/optimizer/parameter_optimizer_test.cc b/paddle/optimizer/parameter_optimizer_test.cc index f29e5317120642e3790a6f6c1976bdda67093a0c..83757a391784453341f22eca73bc73c14ce4174f 100644 --- a/paddle/optimizer/parameter_optimizer_test.cc +++ b/paddle/optimizer/parameter_optimizer_test.cc @@ -127,8 +127,3 @@ TEST_F(OptimizerTest, TestGetWeight) { TestGetWeight(); } TEST_F(OptimizerTest, TestUpdate) { TestUpdate(); } TEST_F(OptimizerTest, TestCheckPoint) { TestCheckPoint(); } - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/paddle/optimizer/serialization_test.cc b/paddle/optimizer/serialization_test.cc index 4c416f55ee0bd70f9ec6e288b08a5399d8b2bf39..940e941e9042d8a37363311867df5bb477b3dac0 100644 --- a/paddle/optimizer/serialization_test.cc +++ b/paddle/optimizer/serialization_test.cc @@ -46,8 +46,3 @@ TEST(TensorToProto, Case2) { EXPECT_EQ(t1[i], t[i]); } } - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/paddle/parameter/FirstOrderOptimizer.h b/paddle/parameter/FirstOrderOptimizer.h index f157188a4f736319ea187052b90a17f8be9e9edb..5b0c52a30dfbc34f0db4a03366e31da54b86c9fc 100644 --- a/paddle/parameter/FirstOrderOptimizer.h +++ b/paddle/parameter/FirstOrderOptimizer.h @@ -38,7 +38,7 @@ public: real torch_learningRate = optConfig_.learning_method() == "torch_momentum" ? 1.0 - paraConfig.momentum() : 1.0; -#ifdef PADDLE_USE_MKLDNN +#ifdef PADDLE_WITH_MKLDNN sgdUpdate(learningRate_ * paraConfig.learning_rate() * (firstTime_ ? 
1.0 : torch_learningRate), paraConfig.momentum(), diff --git a/paddle/parameter/ParameterUpdateFunctions.cpp b/paddle/parameter/ParameterUpdateFunctions.cpp index 1898598e49652a2829e57329bab6017304cec662..d60cb363830ff26a1f5054fb4cebf37afdfd1d03 100644 --- a/paddle/parameter/ParameterUpdateFunctions.cpp +++ b/paddle/parameter/ParameterUpdateFunctions.cpp @@ -30,7 +30,7 @@ void sgdUpdateCpu(real learningRate, const real* grad, real* momentumVec) { decayRate *= learningRate; -#ifdef PADDLE_USE_MKLML +#ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (size_t i = 0; i < size; ++i) { diff --git a/paddle/platform/cuda_profiler.h b/paddle/platform/cuda_profiler.h new file mode 100644 index 0000000000000000000000000000000000000000..b6311cb23d695c3cd851bcca120c24cced7fdd62 --- /dev/null +++ b/paddle/platform/cuda_profiler.h @@ -0,0 +1,53 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include +#include +#include +#include + +namespace paddle { +namespace platform { + +void CudaProfilerInit(std::string output_file, std::string output_mode, + std::vector config_flags) { + std::array buf; + std::string tmpl = "/tmp/cuda_profile_config.XXXXXX"; + PADDLE_ENFORCE_LT(tmpl.size(), buf.size()); + memcpy(buf.data(), tmpl.data(), tmpl.size()); + auto result = mktemp(buf.data()); + PADDLE_ENFORCE(strlen(result) != 0); + std::string config_file = result; + + { + std::ofstream ofs(config_file, std::ios::out | std::ios::trunc); + PADDLE_ENFORCE(ofs.is_open(), "ofstream: ", ofs.rdstate()); + for (const auto& line : config_flags) { + ofs << line << std::endl; + } + } + + PADDLE_ENFORCE(output_mode == "kvp" || output_mode == "csv"); + cudaOutputMode_t mode = output_mode == "csv" ? cudaCSV : cudaKeyValuePair; + PADDLE_ENFORCE( + cudaProfilerInitialize(config_file.c_str(), output_file.c_str(), mode)); +} + +void CudaProfilerStart() { PADDLE_ENFORCE(cudaProfilerStart()); } + +void CudaProfilerStop() { PADDLE_ENFORCE(cudaProfilerStop()); } + +} // namespace platform +} // namespace paddle diff --git a/paddle/platform/device_context.cc b/paddle/platform/device_context.cc index 7afcdfce9371e29aad968a1729931173fb2309b5..2c7f96421621b9a34d1ec96c13d9c354a0d4012c 100644 --- a/paddle/platform/device_context.cc +++ b/paddle/platform/device_context.cc @@ -15,12 +15,6 @@ limitations under the License. 
*/ namespace paddle { namespace platform { -template <> -Eigen::DefaultDevice* DeviceContext::GetEigenDevice< - platform::CPUPlace, Eigen::DefaultDevice>() const { - return reinterpret_cast(this)->eigen_device(); -} - CPUDeviceContext::CPUDeviceContext() { eigen_device_.reset(new Eigen::DefaultDevice()); } @@ -37,12 +31,6 @@ Place CPUDeviceContext::GetPlace() const { return CPUPlace(); } #ifdef PADDLE_WITH_CUDA -template <> -Eigen::GpuDevice* -DeviceContext::GetEigenDevice() const { - return reinterpret_cast(this)->eigen_device(); -} - class EigenCudaStreamDevice : public Eigen::StreamInterface { public: EigenCudaStreamDevice() : scratch_(nullptr), semaphore_(nullptr) { @@ -122,10 +110,6 @@ Place CUDADeviceContext::GetPlace() const { return place_; } void CUDADeviceContext::Wait() const { PADDLE_ENFORCE(cudaStreamSynchronize(stream_)); -} - -void CUDADeviceContext::Finish() const { - Wait(); PADDLE_ENFORCE(cudaGetLastError()); } diff --git a/paddle/platform/device_context.h b/paddle/platform/device_context.h index 526d089e35da9c9f89a3852095ad3a4c82d4d85d..596d9d0bba420a47fc10cc9dd96a755daa35dbac 100644 --- a/paddle/platform/device_context.h +++ b/paddle/platform/device_context.h @@ -27,27 +27,12 @@ limitations under the License. */ namespace paddle { namespace platform { -template -struct EigenDeviceConverter; - -template <> -struct EigenDeviceConverter { - using EigenDeviceType = Eigen::DefaultDevice; -}; - class DeviceContext { public: virtual ~DeviceContext() {} virtual Place GetPlace() const = 0; - template ::EigenDeviceType> - DeviceType* GetEigenDevice() const; - virtual void Wait() const {} - - virtual void Finish() const {} }; class CPUDeviceContext : public DeviceContext { @@ -64,10 +49,6 @@ class CPUDeviceContext : public DeviceContext { }; #ifdef PADDLE_WITH_CUDA -template <> -struct EigenDeviceConverter { - using EigenDeviceType = Eigen::GpuDevice; -}; class EigenCudaStreamDevice; @@ -79,9 +60,6 @@ class CUDADeviceContext : public DeviceContext { /*! \brief Wait for all operations completion in the stream. */ void Wait() const override; - /*! \brief Check potential errors for the cuda kernel calls. */ - void Finish() const override; - /*! \brief Return place in the device context. */ Place GetPlace() const override; diff --git a/paddle/platform/device_context_test.cc b/paddle/platform/device_context_test.cc index 8bf5174c4a5579f6f5602dd38e5a87ed3ef444a7..4893cd92f6a74f7992c279ebd51232049f29e853 100644 --- a/paddle/platform/device_context_test.cc +++ b/paddle/platform/device_context_test.cc @@ -22,9 +22,8 @@ TEST(Device, Init) { int count = paddle::platform::GetCUDADeviceCount(); for (int i = 0; i < count; i++) { - DeviceContext* device_context = new CUDADeviceContext(GPUPlace(i)); - Eigen::GpuDevice* gpu_device = - device_context->template GetEigenDevice(); + CUDADeviceContext* device_context = new CUDADeviceContext(GPUPlace(i)); + Eigen::GpuDevice* gpu_device = device_context->eigen_device(); ASSERT_NE(nullptr, gpu_device); delete device_context; } diff --git a/paddle/platform/dynload/cudnn.cc b/paddle/platform/dynload/cudnn.cc index d3e4cb567d71b987724366b6a0896f5df0eb6055..76ec82e10840751a654c7d7f57da8d5570d2a9ce 100644 --- a/paddle/platform/dynload/cudnn.cc +++ b/paddle/platform/dynload/cudnn.cc @@ -12,7 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include +#include "paddle/platform/dynload/cudnn.h" +#include "paddle/platform/enforce.h" namespace paddle { namespace platform { @@ -37,6 +38,25 @@ CUDNN_DNN_ROUTINE_EACH_AFTER_R4(DEFINE_WRAP); CUDNN_DNN_ROUTINE_EACH_R5(DEFINE_WRAP); #endif +#ifdef CUDNN_DNN_ROUTINE_EACH_R7 +CUDNN_DNN_ROUTINE_EACH_R7(DEFINE_WRAP); +#endif + +#ifdef PADDLE_USE_DSO +bool HasCUDNN() { + std::call_once(cudnn_dso_flag, GetCudnnDsoHandle, &cudnn_dso_handle); + return cudnn_dso_handle != nullptr; +} + +void EnforceCUDNNLoaded(const char* fn_name) { + PADDLE_ENFORCE(cudnn_dso_handle != nullptr, + "Cannot load cudnn shared library. Cannot invoke method %s", + fn_name); +} +#else +bool HasCUDNN() { return true; } +#endif + } // namespace dynload } // namespace platform } // namespace paddle diff --git a/paddle/platform/dynload/cudnn.h b/paddle/platform/dynload/cudnn.h index b2d69da93bcd4a5c8e694a18ca648ddc4bd947af..8c937b37d714a06c623f4e204bd572fdd200ea5d 100644 --- a/paddle/platform/dynload/cudnn.h +++ b/paddle/platform/dynload/cudnn.h @@ -25,9 +25,11 @@ namespace dynload { extern std::once_flag cudnn_dso_flag; extern void* cudnn_dso_handle; +extern bool HasCUDNN(); #ifdef PADDLE_USE_DSO +extern void EnforceCUDNNLoaded(const char* fn_name); #define DECLARE_DYNAMIC_LOAD_CUDNN_WRAP(__name) \ struct DynLoad__##__name { \ template \ @@ -36,6 +38,7 @@ extern void* cudnn_dso_handle; std::call_once(cudnn_dso_flag, \ paddle::platform::dynload::GetCudnnDsoHandle, \ &cudnn_dso_handle); \ + EnforceCUDNNLoaded(#__name); \ void* p_##__name = dlsym(cudnn_dso_handle, #__name); \ return reinterpret_cast(p_##__name)(args...); \ } \ @@ -135,6 +138,12 @@ CUDNN_DNN_ROUTINE_EACH_AFTER_R4(DECLARE_DYNAMIC_LOAD_CUDNN_WRAP) CUDNN_DNN_ROUTINE_EACH_R5(DECLARE_DYNAMIC_LOAD_CUDNN_WRAP) #endif +#if CUDNN_VERSION >= 7001 +#define CUDNN_DNN_ROUTINE_EACH_R7(__macro) \ + __macro(cudnnSetConvolutionGroupCount); +CUDNN_DNN_ROUTINE_EACH_R7(DECLARE_DYNAMIC_LOAD_CUDNN_WRAP) +#endif + } // namespace dynload } // namespace platform } // namespace paddle diff --git a/paddle/platform/dynload/dynamic_loader.cc b/paddle/platform/dynload/dynamic_loader.cc index 6feba42c0d9d618d27da12e6a6752058b296995e..7a82d06a0acbfa44386d40df97f6b0e43ed46577 100644 --- a/paddle/platform/dynload/dynamic_loader.cc +++ b/paddle/platform/dynload/dynamic_loader.cc @@ -78,12 +78,11 @@ static inline void GetDsoHandleFromDefaultPath(std::string& dso_path, *dso_handle = dlopen(dso_path.c_str(), dynload_flags); if (nullptr == *dso_handle) { if (dso_path == "libcudnn.dylib") { - PADDLE_ENFORCE(true, - "Note: [Recommend] copy cudnn into /usr/local/cuda/ \n " - "For instance, sudo tar -xzf " - "cudnn-7.5-osx-x64-v5.0-ga.tgz -C /usr/local \n sudo " - "chmod a+r /usr/local/cuda/include/cudnn.h " - "/usr/local/cuda/lib/libcudnn*"); + LOG(WARNING) << "Note: [Recommend] copy cudnn into /usr/local/cuda/ \n " + "For instance, sudo tar -xzf " + "cudnn-7.5-osx-x64-v5.0-ga.tgz -C /usr/local \n sudo " + "chmod a+r /usr/local/cuda/include/cudnn.h " + "/usr/local/cuda/lib/libcudnn*"; } } } @@ -92,7 +91,8 @@ static inline void GetDsoHandleFromDefaultPath(std::string& dso_path, static inline void GetDsoHandleFromSearchPath(const std::string& search_root, const std::string& dso_name, - void** dso_handle) { + void** dso_handle, + bool throw_on_error = true) { int dynload_flags = RTLD_LAZY | RTLD_LOCAL; *dso_handle = nullptr; @@ -111,15 +111,19 @@ static inline void GetDsoHandleFromSearchPath(const std::string& search_root, GetDsoHandleFromDefaultPath(dlPath, dso_handle, dynload_flags); } } - 
PADDLE_ENFORCE(nullptr != *dso_handle, - "Failed to find dynamic library: %s ( %s ) \n Please specify " - "its path correctly using following ways: \n Method. set " - "environment variable LD_LIBRARY_PATH on Linux or " - "DYLD_LIBRARY_PATH on Mac OS. \n For instance, issue command: " - "export LD_LIBRARY_PATH=... \n Note: After Mac OS 10.11, " - "using the DYLD_LIBRARY_PATH is impossible unless System " - "Integrity Protection (SIP) is disabled.", - dlPath, dlerror()); + auto error_msg = + "Failed to find dynamic library: %s ( %s ) \n Please specify " + "its path correctly using following ways: \n Method. set " + "environment variable LD_LIBRARY_PATH on Linux or " + "DYLD_LIBRARY_PATH on Mac OS. \n For instance, issue command: " + "export LD_LIBRARY_PATH=... \n Note: After Mac OS 10.11, " + "using the DYLD_LIBRARY_PATH is impossible unless System " + "Integrity Protection (SIP) is disabled."; + if (throw_on_error) { + PADDLE_ENFORCE(nullptr != *dso_handle, error_msg, dlPath, dlerror()); + } else if (nullptr == *dso_handle) { + LOG(WARNING) << string::Sprintf(error_msg, dlPath, dlerror()); + } } void GetCublasDsoHandle(void** dso_handle) { @@ -132,9 +136,10 @@ void GetCublasDsoHandle(void** dso_handle) { void GetCudnnDsoHandle(void** dso_handle) { #if defined(__APPLE__) || defined(__OSX__) - GetDsoHandleFromSearchPath(FLAGS_cudnn_dir, "libcudnn.dylib", dso_handle); + GetDsoHandleFromSearchPath(FLAGS_cudnn_dir, "libcudnn.dylib", dso_handle, + false); #else - GetDsoHandleFromSearchPath(FLAGS_cudnn_dir, "libcudnn.so", dso_handle); + GetDsoHandleFromSearchPath(FLAGS_cudnn_dir, "libcudnn.so", dso_handle, false); #endif } diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h index 415020ab965fa976c37870b7ad5794aab947fb4e..5abd4d4a345ed2750231841325f2b19a2ee8c4c9 100644 --- a/paddle/platform/enforce.h +++ b/paddle/platform/enforce.h @@ -234,16 +234,24 @@ inline void throw_on_error(T e) { __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, <, >=, __VA_ARGS__) #define PADDLE_ENFORCE_LE(__VAL0, __VAL1, ...) \ __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, <=, >, __VA_ARGS__) -#define PADDLE_ENFORCE_NOT_NULL(__VAL, ...) \ - PADDLE_ENFORCE(nullptr != (__VAL), #__VAL " should not be null\n%s", \ - paddle::string::Sprintf("" __VA_ARGS__)); - -#define __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, __CMP, __INV_CMP, ...) \ - PADDLE_ENFORCE(__VAL0 __CMP __VAL1, \ - "enforce %s " #__CMP " %s failed, %s " #__INV_CMP " %s\n%s", \ - #__VAL0, #__VAL1, paddle::string::to_string(__VAL0), \ - paddle::string::to_string(__VAL1), \ - paddle::string::Sprintf("" __VA_ARGS__)); +#define PADDLE_ENFORCE_NOT_NULL(__VAL, ...) \ + do { \ + if (UNLIKELY(nullptr == (__VAL))) { \ + PADDLE_THROW(#__VAL " should not be null\n%s", \ + paddle::string::Sprintf("" __VA_ARGS__)); \ + } \ + } while (0) + +#define __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, __CMP, __INV_CMP, ...) \ + do { \ + if (UNLIKELY(!((__VAL0)__CMP(__VAL1)))) { \ + PADDLE_THROW("enforce %s " #__CMP " %s failed, %s " #__INV_CMP \ + " %s\n%s", \ + #__VAL0, #__VAL1, paddle::string::to_string(__VAL0), \ + paddle::string::to_string(__VAL1), \ + paddle::string::Sprintf("" __VA_ARGS__)); \ + } \ + } while (0) } // namespace platform } // namespace paddle diff --git a/paddle/platform/gpu_info.cc b/paddle/platform/gpu_info.cc index 36b216d872138d49bfd5ab6e3499d15d49ebd0ca..4fa2eaed31c6e9368459c2da6f8b0667b453d58c 100644 --- a/paddle/platform/gpu_info.cc +++ b/paddle/platform/gpu_info.cc @@ -18,8 +18,8 @@ limitations under the License. 
*/ #include "paddle/platform/enforce.h" -DEFINE_double(fraction_of_gpu_memory_to_use, 0.95, - "Default use 95% of GPU memory for PaddlePaddle," +DEFINE_double(fraction_of_gpu_memory_to_use, 0.92, + "Default use 92% of GPU memory for PaddlePaddle," "reserve the rest for page tables, etc"); namespace paddle { @@ -75,15 +75,19 @@ size_t GpuMaxChunkSize() { GpuMemoryUsage(available, total); // Reserving the rest memory for page tables, etc. - size_t reserving = (1 - FLAGS_fraction_of_gpu_memory_to_use) * total; + size_t reserving = 0.05 * total; // If available less than minimum chunk size, no usable memory exists. - available = std::max(available, GpuMinChunkSize()) - GpuMinChunkSize(); + available = + std::max(std::max(available, GpuMinChunkSize()) - GpuMinChunkSize(), + reserving) - + reserving; - // If available less than reserving, no usable memory exists. - size_t usable = std::max(available, reserving) - reserving; + size_t allocating = FLAGS_fraction_of_gpu_memory_to_use * total; - return usable; + PADDLE_ENFORCE_LT(allocating, available); + + return allocating; } void GpuMemcpyAsync(void *dst, const void *src, size_t count, diff --git a/paddle/platform/transform.h b/paddle/platform/transform.h index bb9d59ec0a18ce013632f128c9b5d230255f1ac4..148ebaed3d893cd03df8cf27b1309d07afd9aa4a 100644 --- a/paddle/platform/transform.h +++ b/paddle/platform/transform.h @@ -31,7 +31,7 @@ namespace paddle { namespace platform { // Transform on host or device. It provides the same API in std library. -template +template struct Transform { template void operator()(const DeviceContext& context, InputIter first, InputIter last, @@ -45,16 +45,16 @@ struct Transform { }; template <> -struct Transform { +struct Transform { template - void operator()(const DeviceContext& context, InputIter first, InputIter last, - OutputIter result, UnaryOperation op) { + void operator()(const platform::CPUDeviceContext& context, InputIter first, + InputIter last, OutputIter result, UnaryOperation op) { std::transform(first, last, result, op); } template - void operator()(const DeviceContext& context, InputIter1 first1, + void operator()(const platform::CPUDeviceContext& context, InputIter1 first1, InputIter1 last1, InputIter2 first2, OutputIter result, BinaryOperation op) { std::transform(first1, last1, first2, result, op); @@ -63,27 +63,25 @@ struct Transform { #ifdef __NVCC__ template <> -struct Transform { +struct Transform { template - void operator()(const DeviceContext& context, InputIter first, InputIter last, - OutputIter result, UnaryOperation op) { + void operator()(const platform::CUDADeviceContext& context, InputIter first, + InputIter last, OutputIter result, UnaryOperation op) { auto place = context.GetPlace(); PADDLE_ENFORCE(is_gpu_place(place), "It must use GPU place."); - auto& ctx = reinterpret_cast(context); - thrust::transform(thrust::cuda::par.on(ctx.stream()), + thrust::transform(thrust::cuda::par.on(context.stream()), details::DevPtrCast(first), details::DevPtrCast(last), details::DevPtrCast(result), op); } template - void operator()(const DeviceContext& context, InputIter1 first1, + void operator()(const platform::CUDADeviceContext& context, InputIter1 first1, InputIter1 last1, InputIter2 first2, OutputIter result, BinaryOperation op) { auto place = context.GetPlace(); PADDLE_ENFORCE(is_gpu_place(place), "It must use GPU place."); - auto& ctx = reinterpret_cast(context); - thrust::transform(thrust::cuda::par.on(ctx.stream()), + thrust::transform(thrust::cuda::par.on(context.stream()), 
details::DevPtrCast(first1), details::DevPtrCast(last1), details::DevPtrCast(first2), details::DevPtrCast(result), op); diff --git a/paddle/platform/transform_test.cu b/paddle/platform/transform_test.cu index c76cab80e4b0e8df98a7be15f86699cfb6f93af2..d36eac8379ebedb284b36012a46186cd3ac43b91 100644 --- a/paddle/platform/transform_test.cu +++ b/paddle/platform/transform_test.cu @@ -39,7 +39,7 @@ TEST(Transform, CPUUnary) { using namespace paddle::platform; CPUDeviceContext ctx; float buf[4] = {0.1, 0.2, 0.3, 0.4}; - Transform trans; + Transform trans; trans(ctx, buf, buf + 4, buf, Scale(10)); for (int i = 0; i < 4; ++i) { ASSERT_NEAR(buf[i], static_cast(i + 1), 1e-5); @@ -54,7 +54,7 @@ TEST(Transform, GPUUnary) { float cpu_buf[4] = {0.1, 0.2, 0.3, 0.4}; float* gpu_buf = static_cast(Alloc(gpu0, sizeof(float) * 4)); Copy(gpu0, gpu_buf, CPUPlace(), cpu_buf, sizeof(cpu_buf)); - Transform trans; + Transform trans; trans(ctx, gpu_buf, gpu_buf + 4, gpu_buf, Scale(10)); ctx.Wait(); Copy(CPUPlace(), cpu_buf, gpu0, gpu_buf, sizeof(cpu_buf)); @@ -68,7 +68,7 @@ TEST(Transform, CPUBinary) { using namespace paddle::platform; using namespace paddle::memory; int buf[4] = {1, 2, 3, 4}; - Transform trans; + Transform trans; CPUDeviceContext ctx; trans(ctx, buf, buf + 4, buf, buf, Multiply()); for (int i = 0; i < 4; ++i) { @@ -84,7 +84,7 @@ TEST(Transform, GPUBinary) { CUDADeviceContext ctx(gpu0); int* gpu_buf = static_cast(Alloc(gpu0, sizeof(buf))); Copy(gpu0, gpu_buf, CPUPlace(), buf, sizeof(buf)); - Transform trans; + Transform trans; trans(ctx, gpu_buf, gpu_buf + 4, gpu_buf, gpu_buf, Multiply()); ctx.Wait(); Copy(CPUPlace(), buf, gpu0, gpu_buf, sizeof(buf)); diff --git a/paddle/pserver/CMakeLists.txt b/paddle/pserver/CMakeLists.txt index ccfc0e76020c7b4f54a493cc4048e7571379ec1a..f75475a88f7224ee3889827795088c8aa920b63b 100644 --- a/paddle/pserver/CMakeLists.txt +++ b/paddle/pserver/CMakeLists.txt @@ -49,7 +49,7 @@ if(WITH_TESTING) add_subdirectory(test) endif() -if(NOT WITH_C_API) +if(NOT MOBILE_INFERENCE) add_executable(paddle_pserver_main ${PSERVER_MAIN_SOURCES}) link_paddle_exe(paddle_pserver_main) diff --git a/paddle/pybind/CMakeLists.txt b/paddle/pybind/CMakeLists.txt index a54dc0d9fdb3c30391b01966ad493540c8ad1375..fd55f410d3f0fee418e7efffa927e46c38d23a07 100644 --- a/paddle/pybind/CMakeLists.txt +++ b/paddle/pybind/CMakeLists.txt @@ -5,4 +5,6 @@ if(WITH_PYTHON) ${GLOB_OP_LIB}) endif(WITH_PYTHON) -cc_binary(print_operators_doc SRCS print_operators_doc.cc DEPS ${GLOB_OP_LIB}) +if(WITH_DOC) + cc_binary(print_operators_doc SRCS print_operators_doc.cc DEPS ${GLOB_OP_LIB}) +endif(WITH_DOC) diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index f55a1edce31ccf2498dcfcf0b30ba1012d7a7d1a..c16d3e0cbe01f90a5aa9a5d7a523cd4e282e4771 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -37,6 +37,7 @@ limitations under the License. */ #ifdef PADDLE_WITH_CUDA #include "paddle/operators/nccl/nccl_gpu_common.h" +#include "paddle/platform/cuda_profiler.h" #include "paddle/platform/gpu_info.h" #endif @@ -460,6 +461,10 @@ All parameter, weight, gradient are variables in Paddle. 
m.def("op_support_gpu", OpSupportGPU); #ifdef PADDLE_WITH_CUDA m.def("get_cuda_device_count", platform::GetCUDADeviceCount); + + m.def("nvprof_init", platform::CudaProfilerInit); + m.def("nvprof_start", platform::CudaProfilerStart); + m.def("nvprof_stop", platform::CudaProfilerStop); #endif return m.ptr(); diff --git a/paddle/scripts/check_env.sh b/paddle/scripts/check_env.sh new file mode 100755 index 0000000000000000000000000000000000000000..af16b84ca8a18151f0fa36d39fd201d3cab21a5f --- /dev/null +++ b/paddle/scripts/check_env.sh @@ -0,0 +1,261 @@ +#!/bin/bash + +if [ "`uname -s`" != "Linux" ]; then + echo "Current scenario only support in Linux yet!" + exit 0 +fi + +echo "========================= Hardware Information =========================" +sockets=`grep 'physical id' /proc/cpuinfo | sort -u | wc -l` +cores_per_socket=`grep 'core id' /proc/cpuinfo | sort -u | wc -l` +ht=`lscpu |grep "per core" |awk -F':' '{print $2}'|xargs` +physical_cores=$((sockets * cores_per_socket)) +virtual_cores=`grep 'processor' /proc/cpuinfo | sort -u | wc -l` +numa_nodes=`lscpu |grep "NUMA node(s)"|awk -F':' '{print $2}'|xargs` +echo "CPU Name : `cat /proc/cpuinfo |grep -i "model name" |uniq |awk -F ':' '{print $2}'|xargs`" +echo "CPU Family : `lscpu |grep \"CPU family\" |awk -F':' '{print $2}'|xargs`" +echo "Socket Number : $sockets" +echo "Cores Per Socket : $cores_per_socket" +echo "Total Physical Cores : $physical_cores" +echo "Total Virtual Cores : $virtual_cores" +if [ $ht -eq 1 ]; then + echo "Hyper Threading : OFF" + if [ $physical_cores -ne $virtual_cores ]; then + echo "Error: HT logical error" + fi +else + echo "Hyper Threading : ON" + if [ $physical_cores -ge $virtual_cores ]; then + echo "Error: HT logical error" + fi +fi +echo "NUMA Nodes : $numa_nodes" +if [ $numa_nodes -lt $sockets ]; then + echo "Warning: NUMA node is not enough for the best performance,\ + at least $sockets" +fi + +echo "-------------------------- Memory Information --------------------------" +# dmidecode support start from 2.11 +dmi_ver=`dmidecode --version|awk -F '.' 
'{print $1}'|xargs` +if [ $dmi_ver -lt 2 ]; then + echo "Error: dmidecode unknown or version is too old" + exit 0 +fi +if [ `dmidecode | grep -ic "Permission denied"` -ne 0 ]; then + echo "Error: need root to run dmidecode" + exit 0 +fi +max_dimms=0 +num_dimms_installed=0 +for dimm_id in `dmidecode |grep Locator|sort -u | awk -F ':' '{print $2}'`; do + num_refered=`dmidecode |grep -wc "$dimm_id"` + # the actual dimm id should be refered only once + if [ $num_refered -eq 1 ]; then + num_unknown=`dmidecode | awk '/'$dimm_id'/ {s=1; f=0}; + /Unknown/ {f=1}; + /Manufacturer/ {if (s==1) {print f; exit 0;}};'` + if [ $num_unknown -eq 0 ]; then + dimms_installed="$dimms_installed \n $dimm_id" + ((num_dimms_installed++)) + else + dimms_uninstalled="$dimms_uninstalled \n $dimm_id" + fi + ((max_dimms++)) + fi +done +echo "Installed DIMM number : $num_dimms_installed" +num_dimms_mapped=`dmidecode | grep "Memory Device Mapped" | wc -l` +if [ $num_dimms_installed -ne $num_dimms_mapped ]; then + echo "Error: The installed DIMMs number does ont match the mapped memory device: $num_dimms_mapped" +fi +num_clock_configed=`dmidecode | grep -i "Configured Clock Speed" |grep -ic "Hz"` +if [ $num_dimms_installed -ne $num_clock_configed ]; then + echo "Error: The installed DIMMs number does ont match configured clocks: $num_clock_configed" +fi +echo -e "Installed DIMMs Locator: $dimms_installed" +echo -e "Not installed DIMMs : $dimms_uninstalled" +max_dimm_slots=`dmidecode | grep -c "Bank Locator"` +echo "DIMMs max slots : $max_dimm_slots" +if [ $max_dimms -ne $max_dimm_slots ]; then + echo "Error: The max dimm slots do not match the max dimms: $max_dimms" +fi +free_ver_main=`free -V|awk -F ' ' '{print $NF}'|awk -F '.' '{print $1}'` +free_ver_sub=`free -V|awk -F ' ' '{print $NF}'|awk -F '.' 
'{print $2}'` +if [ $free_ver_main -lt 3 ] || [ $free_ver_sub -lt 3 ]; then + mem_sz=`free |grep -i mem |awk -F' ' '{print $2}'|xargs` + swap_sz=`free |grep -i swap |awk -F' ' '{print $2}'|xargs` + total_sz=`free -t |grep -i total |tail -n 1| awk -F' ' '{print $2}'|xargs` + mem_sz="`awk 'BEGIN{printf "%.1f\n",('$mem_sz'/1024/1024)}'` GB" + swap_sz="`awk 'BEGIN{printf "%.1f\n",('$swap_sz'/1024/1024)}'` GB" + total_sz="`awk 'BEGIN{printf "%.1f\n",('$total_sz'/1024/1024)}'` GB" +else + mem_sz=`free -h |grep -i mem |awk -F' ' '{print $2}'|xargs` + swap_sz=`free -h |grep -i swap |awk -F' ' '{print $2}'|xargs` + total_sz=`free -th |grep -i total |tail -n 1| awk -F' ' '{print $2}'|xargs` +fi +echo "Memory Size : $mem_sz" +echo "Swap Memory Size : $swap_sz" +echo "Total Memory Size : $total_sz" +echo "Max Memory Capacity : `dmidecode |grep -i \"maximum capacity\"|sort -u|awk -F':' '{print $2}'|xargs`" +# DIMMs fequency +clock_speeds=`dmidecode | grep -i "Configured Clock Speed" | grep -i "Hz" |sort -u | awk -F':' '{print $2}'|xargs` +echo "Configed Clock Speed : $clock_speeds" +num_clock_type=`dmidecode | grep -i "Configured Clock Speed" | grep -i "Hz" |sort -u | wc -l` +if [ $num_clock_type -ne 1 ]; then + echo "Warning: Have more than 1 speed type, all DIMMs should have same fequency: $clock_speeds" +fi + +echo "-------------------------- Turbo Information --------------------------" +scaling_drive=`cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_driver` +echo "Scaling Driver : $scaling_drive" +if [ $scaling_drive == "intel_pstate" ] && [ -e /sys/devices/system/cpu/intel_pstate/no_turbo ]; then + turbo=`cat /sys/devices/system/cpu/intel_pstate/no_turbo` + if [ $turbo -eq 1 ]; then + echo "Turbo Status : OFF" + else + echo "Turbo Status : ON" + fi +else + echo "Warning: Scaling driver is not intel_pstarte, maybe should enable it in BIOS" + echo "Turbo Status : Unknown" +fi +# cpu frequency +num_max_freq=`cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_max_freq| sort -u |wc -l` +num_min_freq=`cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_min_freq| sort -u |wc -l` +if [ $num_max_freq -ne 1 ]; then + echo "Error: the max_frequency of all CPU should be equal" +fi +if [ $num_min_freq -ne 1 ]; then + echo "Error: the min_frequency of all CPU should be equal" +fi +max_freq=`cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_max_freq| uniq|xargs` # kHz +max_freq=`awk 'BEGIN{printf "%.2f",('$max_freq' / 1000000)}'` # GHz +min_freq=`cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_min_freq| uniq|xargs` # kHz +min_freq=`awk 'BEGIN{printf "%.2f",('$min_freq' / 1000000)}'` # GHz +echo "CPU Max Frequency : $max_freq GHz" +echo "CPU Min Frequency : $min_freq GHz" +# cpu governor +num_governor=`cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor| sort -u |wc -l` +if [ $num_governor -ne 1 ]; then + echo "Error: the governor of all CPU should be the same" +fi +governor=`cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor| sort -u |uniq` +echo "CPU Freq Governor : $governor" + + +echo "========================= Software Information =========================" +echo "BIOS Release Date : `dmidecode | grep "Release Date"|awk -F ':' '{print $2}'|xargs`" +echo "OS Version : `cat /etc/redhat-release`" +echo "Kernel Release Version : `uname -r`" +echo "Kernel Patch Version : `uname -v`" +echo "GCC Version :`gcc --version | head -n 1|awk -F '\\\(GCC\\\)' '{print $2}'`" +if command -v cmake >/dev/null 2>&1; then + cmake_ver=`cmake --version | head -n 1 | awk -F 'version' '{print $2}'` +else + cmake_ver=" Not 
installed" +fi +echo "CMake Version :$cmake_ver" +echo "------------------ Environment Variables Information -------------------" +kmp_affinity=`env | grep KMP_AFFINITY` +omp_dynamic=`env | grep OMP_DYNAMIC` +omp_nested=`env | grep OMP_NESTED` +omp_num_threads=`env | grep OMP_NUM_THREADS` +mkl_num_threads=`env | grep MKL_NUM_THREADS` +mkl_dynamic=`env | grep MKL_DYNAMIC` +if [ ! $kmp_affinity ]; then kmp_affinity="unset"; fi +if [ ! $omp_dynamic ]; then omp_dynamic="unset"; fi +if [ ! $omp_nested ]; then omp_nested="unset"; fi +if [ ! $omp_num_threads ]; then omp_num_threads="unset"; fi +if [ ! $mkl_num_threads ]; then mkl_num_threads="unset"; fi +if [ ! $mkl_dynamic ]; then mkl_dynamic="unset"; fi +echo "KMP_AFFINITY : $kmp_affinity" +echo "OMP_DYNAMIC : $omp_dynamic" +echo "OMP_NESTED : $omp_nested" +echo "OMP_NUM_THREADS : $omp_num_threads" +echo "MKL_NUM_THREADS : $mkl_num_threads" +echo "MKL_DYNAMIC : $mkl_dynamic" +# Check if any MKL related libraries have been installed in LD_LIBRARY_PATH +for path in `echo $LD_LIBRARY_PATH | awk -F ':' '{for(i=1;i<=NF;++i)print $i}'`; do + mkldnn_found=`find $path -name "libmkldnn.so"` + if [ "$mkldnn_found" ]; then + echo "Found MKL-DNN : $mkldnn_found" + fi + mklml_found=`find $path -name "libmklml_intel.so"` + if [ "$mklml_found" ]; then + echo "Found MKLML : $mklml_found" + fi + iomp_found=`find $path -name "libiomp5.so"` + if [ "$iomp_found" ]; then + echo "Found IOMP : $iomp_found" + fi +done + +# dump all details for fully check +lscpu > lscpu.dump +dmidecode > dmidecode.dump + +# The expected result would be like: +# ========================= Hardware Information ========================= +# CPU Name : Intel(R) Xeon(R) Gold 6148M CPU @ 2.40GHz +# CPU Family : 6 +# Socket Number : 2 +# Cores Per Socket : 20 +# Total Physical Cores : 40 +# Total Virtual Cores : 40 +# Hyper Threading : OFF +# NUMA Nodes : 2 +# -------------------------- Memory Information -------------------------- +# Installed DIMM number : 12 +# Installed DIMMs Locator: +# CPU1_DIMM_A1 +# CPU1_DIMM_B1 +# CPU1_DIMM_C1 +# CPU1_DIMM_D1 +# CPU1_DIMM_E1 +# CPU1_DIMM_F1 +# CPU2_DIMM_A1 +# CPU2_DIMM_B1 +# CPU2_DIMM_C1 +# CPU2_DIMM_D1 +# CPU2_DIMM_E1 +# CPU2_DIMM_F1 +# Not installed DIMMs : +# CPU1_DIMM_A2 +# CPU1_DIMM_B2 +# CPU1_DIMM_C2 +# CPU1_DIMM_D2 +# CPU1_DIMM_E2 +# CPU1_DIMM_F2 +# CPU2_DIMM_A2 +# CPU2_DIMM_B2 +# CPU2_DIMM_C2 +# CPU2_DIMM_D2 +# CPU2_DIMM_E2 +# CPU2_DIMM_F2 +# DIMMs max slots : 24 +# Memory Size : 376G +# Swap Memory Size : 4.0G +# Total Memory Size : 380G +# Max Memory Capacity : 2304 GB +# Configed Clock Speed : 2666 MHz +# -------------------------- Turbo Information -------------------------- +# Scaling Driver : intel_pstate +# Turbo Status : ON +# CPU Max Frequency : 3.70 GHz +# CPU Min Frequency : 1.00 GHz +# CPU Freq Governor : performance +# ========================= Software Information ========================= +# BIOS Release Date : 03/10/2017 +# OS Version : CentOS Linux release 7.3.1611 (Core) +# Kernel Release Version : 3.10.0-514.el7.x86_64 +# Kernel Patch Version : #1 SMP Tue Nov 22 16:42:41 UTC 2016 +# GCC Version : 4.8.5 20150623 (Red Hat 4.8.5-11) +# CMake Version : 3.5.2 +# ------------------ Environment Variables Information ------------------- +# KMP_AFFINITY : unset +# OMP_DYNAMIC : unset +# OMP_NESTED : unset +# OMP_NUM_THREADS : unset +# MKL_NUM_THREADS : unset +# MKL_DYNAMIC : unset diff --git a/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/Dockerfile b/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/Dockerfile index 
1a2d19e823541750830fcaa25f65b2f8e1ea2b49..c2f631bdf4ed52a5dfa3fbcf1157d0abbdeadb9b 100644 --- a/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/Dockerfile +++ b/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/Dockerfile @@ -1,7 +1,7 @@ # Build this image: docker build -t mpi . # -FROM paddledev/paddle:0.10.0rc3 +FROM paddlepaddle/paddle:0.10.0rc3 ENV DEBIAN_FRONTEND noninteractive diff --git a/paddle/scripts/docker/README.md b/paddle/scripts/docker/README.md index f3a6f1dba7588c6b29c1dcae26ec134c1a7f937d..f0620498cfa6775ce2949cc02fa9f6c9529dec2e 100644 --- a/paddle/scripts/docker/README.md +++ b/paddle/scripts/docker/README.md @@ -20,7 +20,7 @@ binaries. ## Run The Build -### Build Evironments +### Build Environments The pre-built build environment images are: @@ -192,7 +192,7 @@ For developers who are interested in the C++ source code, please use -e "WOBOQ=O - The following command builds PaddlePaddle, generates HTML pages from C++ source code, and writes HTML pages into `$HOME/woboq_out` on the host: ```bash -docker run -v $PWD:/paddle -v $HOME/woboq_out:/woboq_out -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "WITH_TEST=ON" -e "WOBOQ=ON" paddlepaddle/paddle:latest-dev +docker run -v $PWD:/paddle -v $HOME/woboq_out:/woboq_out -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "WITH_TESTING=ON" -e "WOBOQ=ON" paddlepaddle/paddle:latest-dev ``` - You can open the generated HTML files in your Web browser. Or, if you want to run a Nginx container to serve them for a wider audience, you can run: diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index fda2a2f1b764106a7a108e8c56bc90ce3459e9b5..e43b9c218a3ecb9e7f20fb7e8b14a85a29947eef 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -16,11 +16,13 @@ function cmake_gen() { echo "using python abi: $1" if [ "$1" == "cp27-cp27m" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs2/lib:${LD_LIBRARY_PATH#/opt/_internal/cpython-2.7.11-ucs4/lib:} + export PATH=/opt/python/cp27-cp27m/bin/:${PATH} PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/python/cp27-cp27m/bin/python -DPYTHON_INCLUDE_DIR:PATH=/opt/python/cp27-cp27m/include/python2.7 -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-2.7.11-ucs2/lib/libpython2.7.so" elif [ "$1" == "cp27-cp27mu" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs4/lib:${LD_LIBRARY_PATH#/opt/_internal/cpython-2.7.11-ucs2/lib:} + export PATH=/opt/python/cp27-cp27mu/bin/:${PATH} PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/python/cp27-cp27mu/bin/python -DPYTHON_INCLUDE_DIR:PATH=/opt/python/cp27-cp27mu/include/python2.7 -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-2.7.11-ucs4/lib/libpython2.7.so" @@ -34,6 +36,7 @@ function cmake_gen() { ${PYTHON_FLAGS} -DWITH_DOC=OFF -DWITH_GPU=${WITH_GPU:-OFF} + -DWITH_DISTRIBUTE=${WITH_DISTRIBUTE:-OFF} -DWITH_MKL=${WITH_MKL:-ON} -DWITH_AVX=${WITH_AVX:-OFF} -DWITH_GOLANG=${WITH_GOLANG:-ON} @@ -55,6 +58,7 @@ EOF ${PYTHON_FLAGS} \ -DWITH_DOC=OFF \ -DWITH_GPU=${WITH_GPU:-OFF} \ + -DWITH_DISTRIBUTE=${WITH_DISTRIBUTE:-OFF} \ -DWITH_MKL=${WITH_MKL:-ON} \ -DWITH_AVX=${WITH_AVX:-OFF} \ -DWITH_GOLANG=${WITH_GOLANG:-ON} \ @@ -109,7 +113,10 @@ EOF -DWITH_SWIG_PY=ON \ -DWITH_STYLE_CHECK=OFF make -j `nproc` gen_proto_py + make -j `nproc` paddle_python make -j `nproc` paddle_docs paddle_docs_cn + make -j `nproc` print_operators_doc + paddle/pybind/print_operators_doc > doc/en/html/operators.json popd fi @@ -171,7 +178,7 @@ EOF # run paddle version to install python packages first RUN apt-get update &&\ 
${NCCL_DEPS}\ - apt-get install -y wget python-pip && pip install -U pip && \ + apt-get install -y wget python-pip dmidecode && pip install -U pip && \ pip install /*.whl; apt-get install -f -y && \ apt-get clean -y && \ rm -f /*.whl && \ diff --git a/paddle/scripts/submit_local.sh.in b/paddle/scripts/submit_local.sh.in index d71cb84df3785008ea5793519fc26a174e1b95f7..43d2d1b410fa86dc0ab213cba0c2a488770ea1c7 100755 --- a/paddle/scripts/submit_local.sh.in +++ b/paddle/scripts/submit_local.sh.in @@ -140,7 +140,11 @@ else: sys.exit(0) EOF -cpu_config +if [ "`uname -s`" == "Linux" ]; then + # only support on linux yet, with mac can use v2 + cpu_config +fi + # echo $KMP_AFFINITY $OMP_DYNAMIC case "$1" in diff --git a/paddle/scripts/tools/build_docs/build_docs.sh b/paddle/scripts/tools/build_docs/build_docs.sh index c6cbbc4eef94fb2e2fc3c1ce71734fbb23fc22d7..f9bc8bf63ae9afdfca1ff660bc83e62e71f03005 100755 --- a/paddle/scripts/tools/build_docs/build_docs.sh +++ b/paddle/scripts/tools/build_docs/build_docs.sh @@ -5,4 +5,4 @@ docker run --rm \ -e "WITH_AVX=ON" \ -e "WITH_DOC=ON" \ -e "WOBOQ=ON" \ - ${1:-"paddledev/paddle:dev"} + ${1:-"paddlepaddle/paddle:latest-dev"} diff --git a/paddle/scripts/travis/build_doc.sh b/paddle/scripts/travis/build_doc.sh index 28d82343ed32273740d0c52d0451681e43b3675e..ff0bac6a0740111dfa1a1440daaf1ceaf3a7b0d8 100755 --- a/paddle/scripts/travis/build_doc.sh +++ b/paddle/scripts/travis/build_doc.sh @@ -8,11 +8,15 @@ cd $TRAVIS_BUILD_DIR/build # Compile Documentation only. cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_MKL=OFF -DWITH_DOC=ON make -j `nproc` gen_proto_py +make -j `nproc` paddle_python make -j `nproc` paddle_docs paddle_docs_cn +make -j `nproc` print_operators_doc +paddle/pybind/print_operators_doc > doc/en/html/operators.json # check websites for broken links -linkchecker doc/en/html/index.html -linkchecker doc/cn/html/index.html +# It will be failed now! +#linkchecker doc/en/html/index.html +#linkchecker doc/cn/html/index.html # Parse Github URL REPO=`git config remote.origin.url` diff --git a/paddle/testing/CMakeLists.txt b/paddle/testing/CMakeLists.txt index 4245df5ab72bf0fd67261818b307f0babdb5d685..8132742749e4a622720c66692c8d09815714ebea 100644 --- a/paddle/testing/CMakeLists.txt +++ b/paddle/testing/CMakeLists.txt @@ -5,4 +5,8 @@ if(WITH_TESTING) add_dependencies(paddle_test_main paddle_proto ${external_project_dependencies}) add_library(paddle_test_util STATIC TestUtil.cpp) add_dependencies(paddle_test_util paddle_proto ${external_project_dependencies}) + if(NOT MOBILE_INFERENCE) + add_library(paddle_gtest_main STATIC paddle_gtest_main.cc) + add_dependencies(paddle_gtest_main paddle_memory gtest gflags) + endif() endif() diff --git a/paddle/testing/paddle_gtest_main.cc b/paddle/testing/paddle_gtest_main.cc new file mode 100644 index 0000000000000000000000000000000000000000..a491322b7e533f7a9c263a249494440269391003 --- /dev/null +++ b/paddle/testing/paddle_gtest_main.cc @@ -0,0 +1,39 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "gflags/gflags.h" +#include "gtest/gtest.h" +#include "paddle/memory/memory.h" + +int main(int argc, char** argv) { + std::vector new_argv; + std::string gflags_env; + new_argv.push_back(argv[0]); +#ifdef PADDLE_WITH_CUDA + new_argv.push_back( + strdup("--tryfromenv=fraction_of_gpu_memory_to_use,use_pinned_memory")); +#else + new_argv.push_back(strdup("--tryfromenv=use_pinned_memory")); +#endif + int new_argc = static_cast(new_argv.size()); + char** new_argv_address = new_argv.data(); + google::ParseCommandLineFlags(&new_argc, &new_argv_address, false); + testing::InitGoogleTest(&argc, argv); + paddle::memory::Used(paddle::platform::CPUPlace()); +#ifdef PADDLE_WITH_CUDA + paddle::memory::Used(paddle::platform::GPUPlace(0)); +#endif + return RUN_ALL_TESTS(); +} diff --git a/paddle/trainer/CMakeLists.txt b/paddle/trainer/CMakeLists.txt index 3d471a0c01ca17cb98272159baf6d489c18824d5..72911695bd4959d73d783897b0c5e674454c30bc 100644 --- a/paddle/trainer/CMakeLists.txt +++ b/paddle/trainer/CMakeLists.txt @@ -54,7 +54,7 @@ if(WITH_TESTING) add_subdirectory(tests) endif() -if(NOT WITH_C_API) +if(NOT MOBILE_INFERENCE) add_paddle_exe(paddle_trainer TrainerMain.cpp) add_paddle_exe(paddle_merge_model MergeModel.cpp) @@ -74,7 +74,5 @@ endif() if(WITH_GOLANG) add_dependencies(paddle_trainer_lib paddle_pserver_cclient) target_link_libraries(paddle_trainer_lib paddle_pserver_cclient) - if(NOT WITH_C_API) - target_link_libraries(paddle_trainer paddle_pserver_cclient) - endif() + target_link_libraries(paddle_trainer paddle_pserver_cclient) endif(WITH_GOLANG) diff --git a/paddle/trainer/tests/CMakeLists.txt b/paddle/trainer/tests/CMakeLists.txt index 2739878b7f2936ea2da689da0b4caa780516ccc1..bd518d8598f5aa7c32298ed2110a96a2743536b3 100644 --- a/paddle/trainer/tests/CMakeLists.txt +++ b/paddle/trainer/tests/CMakeLists.txt @@ -1,19 +1,17 @@ -################# test_Compare ############################ -add_unittest_without_exec(test_Compare - test_Compare.cpp) -add_test(NAME test_Compare - COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python - ${CMAKE_CURRENT_BINARY_DIR}/test_Compare - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/) +set(PYTHON_PATH + ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d + ${PADDLE_SOURCE_DIR}/python/:${PADDLE_SOURCE_DIR}/paddle/trainer/tests) +function(trainer_test TARGET) + add_unittest_without_exec(${TARGET} ${TARGET}.cpp) + add_test(NAME ${TARGET} + COMMAND ${PYTHON_PATH} ${CMAKE_CURRENT_BINARY_DIR}/${TARGET} + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/) +endfunction() -################# test_Trainer ########################### -add_unittest_without_exec(test_Trainer - test_Trainer.cpp) -add_test(NAME test_Trainer - COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python/ - ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python/ - ${CMAKE_CURRENT_BINARY_DIR}/test_Trainer - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/) +trainer_test(test_Compare) +trainer_test(test_PyDataProviderWrapper) +trainer_test(test_recurrent_machine_generation) +trainer_test(test_Trainer) ############### test_TrainerOnePass ########################## if(WITH_PYTHON) @@ -22,32 +20,13 @@ if(WITH_PYTHON) add_unittest_without_exec(test_TrainerOnePass test_TrainerOnePass.cpp) add_test(NAME test_TrainerOnePass - COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d - 
${PADDLE_SOURCE_DIR}/python/:${PADDLE_SOURCE_DIR}/paddle/trainer/tests - ${PADDLE_SOURCE_DIR}/paddle/.set_port.sh -p port ${CMAKE_CURRENT_BINARY_DIR}/test_TrainerOnePass + COMMAND ${PYTHON_PATH} ${PADDLE_SOURCE_DIR}/paddle/.set_port.sh -p port + ${CMAKE_CURRENT_BINARY_DIR}/test_TrainerOnePass WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/) endif() -################# test_recurrent_machine_generation ############### -add_unittest_without_exec(test_recurrent_machine_generation - test_recurrent_machine_generation.cpp) -add_test(NAME test_recurrent_machine_generation - COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python/ - ${CMAKE_CURRENT_BINARY_DIR}/test_recurrent_machine_generation - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/) - -#################### test_PyDataProviderWrapper ######################### -add_unittest_without_exec(test_PyDataProviderWrapper - test_PyDataProviderWrapper.cpp) - -add_test(NAME test_PyDataProviderWrapper - COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d - ${PADDLE_SOURCE_DIR}/python/:${PADDLE_SOURCE_DIR}/paddle/trainer/tests - ${CMAKE_CURRENT_BINARY_DIR}/test_PyDataProviderWrapper - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/) - #################### test_config_parser ######################### add_test(NAME test_config_parser - COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python/ - ${PYTHON_EXECUTABLE} ${PADDLE_SOURCE_DIR}/paddle/trainer/tests/config_parser_test.py + COMMAND ${PYTHON_PATH} ${PYTHON_EXECUTABLE} + ${PADDLE_SOURCE_DIR}/paddle/trainer/tests/config_parser_test.py WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/) diff --git a/paddle/utils/Flags.cpp b/paddle/utils/Flags.cpp index 8f100f02e90bcbc7fdcf6f053aec6f95cfb09c1a..9a7dc0e35622383a190f8b3a80736e6b42c9c959 100644 --- a/paddle/utils/Flags.cpp +++ b/paddle/utils/Flags.cpp @@ -20,7 +20,7 @@ DEFINE_bool(use_gpu, false, "Only support CPU training"); DEFINE_bool(use_gpu, true, "Whether to use GPU for training"); #endif -#ifdef PADDLE_USE_MKLDNN +#ifdef PADDLE_WITH_MKLDNN // TODO(TJ): change to true when MKLDNN layers support multi-inputs DEFINE_bool(use_mkldnn, false, "Default still keep use CPU training"); #else diff --git a/proto/ModelConfig.proto b/proto/ModelConfig.proto index e2f5592248fd0b6166c2d11af02cef7815673def..1fbdd5bbd82a0ae15f620fa11a14ac3126e52838 100644 --- a/proto/ModelConfig.proto +++ b/proto/ModelConfig.proto @@ -139,6 +139,8 @@ message PoolConfig { optional uint32 output_z = 16 [ default = 1 ]; optional uint32 img_size_z = 17 [ default = 1 ]; optional uint32 padding_z = 18 [ default = 1 ]; + + optional bool exclude_mode = 19; } message SppConfig { @@ -544,6 +546,9 @@ message LayerConfig { // for batch normalization layer // The small constant added to the variance to improve numeric stability. 
optional double epsilon = 60 [ default = 0.00001 ]; + + // for factorization machine layer + optional uint32 factor_size = 61; } message EvaluatorConfig { diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index c8632295a25b160513a8e154bf1a5453c0005031..6f589e916979584897863db72924c885c258d4b2 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -33,6 +33,12 @@ if(WITH_MKLDNN) list(APPEND MKL_DEPENDS mkldnn) endif() +if(WITH_GPU) + SET(PACKAGE_NAME "paddlepaddle-gpu") +else() + SET(PACKAGE_NAME "paddlepaddle") +endif() + configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in ${CMAKE_CURRENT_BINARY_DIR}/setup.py) diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py index f662d6826321eb840739382558f76327d27b5847..1030c94e16376c326cb8b32926b8c47625cd38f0 100644 --- a/python/paddle/__init__.py +++ b/python/paddle/__init__.py @@ -11,3 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +try: + from version import full_version as __version__ + from version import commit as __git_commit__ +except ImportError: + import sys + sys.stderr.write('''Warning with import paddle: you should not + import paddle from the source directory; please install paddlepaddle*.whl firstly.''' + ) diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index cfe2a34a1f34a9c828486a7a6dbe320f230bb986..239fe4204b20a37a0869ba1e0e99adf4293dac7e 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -1233,7 +1233,7 @@ def parse_bilinear(bilinear, input_layer_name, bilinear_conf): bilinear_conf.out_size_y = bilinear.out_size_y -def parse_pool(pool, input_layer_name, pool_conf, ceil_mode): +def parse_pool(pool, input_layer_name, pool_conf, ceil_mode, exclude_mode): pool_conf.pool_type = pool.pool_type config_assert(pool.pool_type in [ 'max-projection', 'avg-projection', 'max-pool-with-mask', 'cudnn-max-pool', 'cudnn-avg-pool' @@ -1262,6 +1262,8 @@ def parse_pool(pool, input_layer_name, pool_conf, ceil_mode): pool_conf.output_y = cnn_output_size(pool_conf.img_size_y, pool_conf.size_y, pool_conf.padding_y, pool_conf.stride_y, not ceil_mode) + if exclude_mode != None: + pool_conf.exclude_mode = exclude_mode def parse_pool3d(pool, input_layer_name, pool_conf, ceil_mode): @@ -2287,11 +2289,17 @@ class Conv3DLayer(Conv3DLayerBase): class NormLayer(LayerBase): def __init__(self, name, inputs, **xargs): super(NormLayer, self).__init__(name, 'norm', 0, inputs=inputs, **xargs) + use_mkldnn = bool(int(g_command_config_args.get("use_mkldnn", 0))) + use_mkldnn = True if use_mkldnn and self.inputs[ + 0].norm.norm_type == 'cmrnorm-projection' else False + self.config.type = 'mkldnn_lrn' if use_mkldnn else self.config.type for input_index in xrange(len(self.inputs)): input_layer = self.get_input_layer(input_index) norm_conf = self.config.inputs[input_index].norm_conf parse_norm(self.inputs[input_index].norm, input_layer.name, norm_conf) + norm_conf.scale = self.inputs[ + input_index].norm.scale if use_mkldnn else norm_conf.scale self.set_cnn_layer(name, norm_conf.output_y, norm_conf.output_x, norm_conf.channels, False) if norm_conf.norm_type == "cross-channel-norm": @@ -2303,7 +2311,8 @@ class NormLayer(LayerBase): class PoolLayer(LayerBase): layer_type = 'pool' - def __init__(self, name, inputs, ceil_mode=True, **xargs): + def __init__(self, name, inputs, ceil_mode=True, exclude_mode=None, + 
**xargs): use_mkldnn = int(g_command_config_args.get("use_mkldnn", 0)) if self.layer_type == "mkldnn_pool": config_assert(use_mkldnn, "mkldnn_pool only support MKLDNN") @@ -2314,7 +2323,7 @@ class PoolLayer(LayerBase): input_layer = self.get_input_layer(input_index) pool_conf = self.config.inputs[input_index].pool_conf parse_pool(self.inputs[input_index].pool, input_layer.name, - pool_conf, ceil_mode) + pool_conf, ceil_mode, exclude_mode) self.set_cnn_layer(name, pool_conf.output_y, pool_conf.output_x, pool_conf.channels) @@ -2400,6 +2409,14 @@ class CropLayer(LayerBase): image_conf.img_size_y = input_layer.height image_conf.channels = input_layer.size / (input_layer.width * input_layer.height) + # only support for 4-dims inputs and NCHW order + if (len(self.config.inputs) == 2): + self.set_layer_height_width( + self.get_input_layer(1).height, self.get_input_layer(1).width) + self.set_layer_size(self.get_input_layer(1).size) + else: + self.set_layer_height_width(shape[-2], shape[-1]) + self.set_layer_size(reduce(lambda x, y: x * y, shape[1:])) @config_layer('batch_norm') @@ -3849,6 +3866,26 @@ class SwitchOrderLayer(LayerBase): name, 'switch_order', 0, inputs=inputs, **xargs) self.config.reshape_conf.height_axis.extend(reshape['height']) self.config.reshape_conf.width_axis.extend(reshape['width']) + input_layer = self.get_input_layer(0) + if reshape is None: + self.set_layer_size(input_layer.size) + else: + in_h = input_layer.height + in_w = input_layer.width + out_dims = None + if input_layer.has_depth(): + in_d = input_layer.depth + in_c = input_layer.size / in_h / in_w / in_d + # batch_size, depth, height, width, channel + out_dims = [0, in_d, in_h, in_w, in_c] + else: + in_c = input_layer.size / in_h / in_w + # batch_size, height, width, channel + out_dims = [0, in_h, in_w, in_c] + # Because (reshape['width'][0] > 0) always be true. + # So out_dims[0] won't be used. 
+ size = reduce(lambda x, y: x * y, out_dims[reshape['width'][0]:]) + self.set_layer_size(size) @config_layer('scale_sub_region') @@ -3870,6 +3907,21 @@ class ScaleSubRegionLayer(LayerBase): image_conf.channels) +@config_layer('factorization_machine') +class FactorizationMachineLayer(LayerBase): + def __init__(self, name, inputs, factor_size, **xargs): + super(FactorizationMachineLayer, self).__init__( + name, 'factorization_machine', size=1, inputs=inputs, **xargs) + config_assert( + len(self.inputs) == 1, + 'factorization machine layer must have one and only one input.') + self.config.factor_size = factor_size + input_layer = self.get_input_layer(0) + psize = input_layer.size * factor_size + dims = [input_layer.size, factor_size] + self.create_input_parameter(0, psize, dims) + + # Deprecated, use a new layer specific class instead @config_func def Layer(name, type, **xargs): diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 469e667e80900b26578db6199e6426be8d0e5945..7e118b24a4330af27ea6aa893fd87985b4443cdb 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -21,7 +21,7 @@ from .activations import LinearActivation, SigmoidActivation, TanhActivation, \ ReluActivation, IdentityActivation, SoftmaxActivation, BaseActivation from .evaluators import * from .poolings import MaxPooling, AvgPooling, MaxWithMaskPooling, BasePoolingType, \ - CudnnAvgPooling, CudnnMaxPooling + CudnnAvgPooling, CudnnAvgInclPadPooling, CudnnMaxPooling from .attrs import * from .default_decorators import * @@ -148,6 +148,7 @@ __all__ = [ 'resize_layer', 'sub_seq_layer', 'scale_sub_region_layer', + 'factorization_machine', ] @@ -264,6 +265,8 @@ class LayerType(object): SCALE_SUB_REGION_LAYER = 'scale_sub_region' + FACTORIZATION_MACHINE = 'factorization_machine' + @staticmethod def is_layer_type(type_name): """ @@ -1516,34 +1519,33 @@ def lstmemory(input, NOTE: This is a low level user interface. You can use network.simple_lstm to config a simple plain lstm layer. - Please refer to **Generating Sequences With Recurrent Neural Networks** for - more details about LSTM. - - Link_ goes as below. - - .. _Link: http://arxiv.org/abs/1308.0850 + Reference: + `Generating Sequences With Recurrent Neural Networks + `_ - :param name: The lstmemory layer name. + :param name: The name of this layer. It is optional. :type name: basestring - :param size: DEPRECATED. size of the lstm cell + :param size: DEPRECATED. The dimension of the lstm cell. :type size: int :param input: The input of this layer. :type input: LayerOutput - :param reverse: is sequence process reversed or not. + :param reverse: Whether the input sequence is processed in a reverse order. :type reverse: bool :param act: Activation type. TanhActivation is the default activation. :type act: BaseActivation - :param gate_act: gate activation type, SigmoidActivation by default. + :param gate_act: Activation type of this layer's gates. SigmoidActivation is the + default activation. :type gate_act: BaseActivation - :param state_act: state activation type, TanhActivation by default. + :param state_act: Activation type of the state. TanhActivation is the default activation. :type state_act: BaseActivation :param bias_attr: The bias attribute. If the parameter is set to False or an object whose type is not ParameterAttribute, no bias is defined. If the parameter is set to True, the bias is initialized to zero. 
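A minimal usage sketch for the lstmemory docstring above, assuming the v1 `trainer_config_helpers` API; the layer names and sizes are illustrative, and the 4x projection width follows the convention used by `network.simple_lstm`, which the note above points to:

```python
from paddle.trainer_config_helpers import *

hidden_dim = 128
data = data_layer(name='word_vector', size=256)
# lstmemory only implements the recurrent part; the input, forget, output
# gate and cell-input projections must be produced by a preceding layer,
# hence the 4 * hidden_dim width.
proj = fc_layer(input=data, size=hidden_dim * 4, act=LinearActivation())
lstm = lstmemory(input=proj,
                 act=TanhActivation(),
                 gate_act=SigmoidActivation(),
                 state_act=TanhActivation())
```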
:type bias_attr: ParameterAttribute | None | bool | Any - :param param_attr: Parameter Attribute. - :type param_attr: ParameterAttribute | None | False - :param layer_attr: Extra Layer attribute + :param param_attr: The parameter attribute. See ParameterAttribute for details. + :type param_attr: ParameterAttribute + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute | None :return: LayerOutput object. :rtype: LayerOutput @@ -1632,14 +1634,14 @@ def grumemory(input, h_t = (1 - z_t) h_{t-1} + z_t {\\tilde{h_t}} NOTE: In PaddlePaddle's implementation, the multiplication operations - :math:`W_{r}x_{t}`, :math:`W_{z}x_{t}` and :math:`W x_t` are not computed in - gate_recurrent layer. Consequently, an additional mixed_layer with + :math:`W_{r}x_{t}`, :math:`W_{z}x_{t}` and :math:`W x_t` are not performed + in gate_recurrent layer. Consequently, an additional mixed_layer with full_matrix_projection or a fc_layer must be included before grumemory is called. - More details can be found by referring to `Empirical Evaluation of Gated - Recurrent Neural Networks on Sequence Modeling. - `_ + Reference: + `Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling + `_ The simple usage is: @@ -1647,28 +1649,29 @@ def grumemory(input, gru = grumemory(input) - :param name: The gru layer name. - :type name: None | basestring + :param name: The name of this layer. It is optional. + :type name: basestring :param input: The input of this layer. :type input: LayerOutput. - :param size: DEPRECATED. size of the gru cell + :param size: DEPRECATED. The dimension of the gru cell. :type size: int - :param reverse: Whether sequence process is reversed or not. + :param reverse: Whether the input sequence is processed in a reverse order. :type reverse: bool :param act: Activation type, TanhActivation is the default. This activation affects the :math:`{\\tilde{h_t}}`. :type act: BaseActivation - :param gate_act: gate activation type, SigmoidActivation by default. - This activation affects the :math:`z_t` and :math:`r_t`. It is the - :math:`\\sigma` in the above formula. + :param gate_act: Activation type of this layer's two gates. SigmoidActivation is + the default activation. This activation affects the :math:`z_t` + and :math:`r_t`. It is the :math:`\\sigma` in the above formula. :type gate_act: BaseActivation :param bias_attr: The bias attribute. If the parameter is set to False or an object whose type is not ParameterAttribute, no bias is defined. If the parameter is set to True, the bias is initialized to zero. :type bias_attr: ParameterAttribute | None | bool | Any - :param param_attr: Parameter Attribute. - :type param_attr: ParameterAttribute | None | False - :param layer_attr: Extra Layer attribute + :param param_attr: The parameter attribute. See ParameterAttribute for details. + :type param_attr: ParameterAttribute + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute | None :return: LayerOutput object. :rtype: LayerOutput @@ -1712,10 +1715,10 @@ def last_seq(input, """ Get Last Timestamp Activation of a sequence. - If stride > 0, this layer slides a window whose size is determined by stride, - and return the last value of the window as the output. Thus, a long sequence - will be shorten. Note that for sequence with sub-sequence, the default value - of stride is -1. 
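The stride semantics of last_seq (and, further below, first_seq) can be illustrated with a few lines of plain Python; this is only a sketch of the behaviour the docstring describes, not PaddlePaddle code:

```python
def last_seq_with_stride(seq, stride):
    # Split the sequence into windows of `stride` steps (the final window may
    # be shorter) and keep the last value of each window.
    return [seq[min(i + stride, len(seq)) - 1] for i in range(0, len(seq), stride)]


def first_seq_with_stride(seq, stride):
    # first_seq keeps the first value of each window instead.
    return [seq[i] for i in range(0, len(seq), stride)]


# A 7-step sequence with stride=3 is shortened to 3 outputs.
print(last_seq_with_stride(list(range(7)), 3))   # [2, 5, 6]
print(first_seq_with_stride(list(range(7)), 3))  # [0, 3, 6]
```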
+ If stride > 0, this layer will slide a window whose size is determined by stride, + and return the last value of the sequence in the window as the output. Thus, a + long sequence will be shortened. Note that for sequence with sub-sequence, the + default value of stride is -1. The simple usage is: @@ -1724,14 +1727,16 @@ def last_seq(input, seq = last_seq(input=layer) :param agg_level: Aggregated level + :type agg_level: AggregateLevel :param name: The name of this layer. It is optional. :type name: basestring :param input: The input of this layer. :type input: LayerOutput :param stride: The step size between successive pooling regions. - :type stride: Int - :param layer_attr: extra layer attributes. - :type layer_attr: ExtraLayerAttribute. + :type stride: int + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. + :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput """ @@ -1768,10 +1773,10 @@ def first_seq(input, """ Get First Timestamp Activation of a sequence. - If stride > 0, this layer slides a window whose size is determined by stride, - and return the first value of the window as the output. Thus, a long sequence - will be shorten. Note that for sequence with sub-sequence, the default value - of stride is -1. + If stride > 0, this layer will slide a window whose size is determined by stride, + and return the first value of the sequence in the window as the output. Thus, a + long sequence will be shortened. Note that for sequence with sub-sequence, the + default value of stride is -1. The simple usage is: @@ -1780,13 +1785,15 @@ def first_seq(input, seq = first_seq(input=layer) :param agg_level: aggregation level + :type agg_level: AggregateLevel :param name: The name of this layer. It is optional. :type name: basestring :param input: The input of this layer. :type input: LayerOutput :param stride: The step size between successive pooling regions. - :type stride: Int - :param layer_attr: extra layer attributes. + :type stride: int + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute. :return: LayerOutput object. :rtype: LayerOutput @@ -1844,8 +1851,8 @@ def expand_layer(input, expand_level=ExpandLevel.FROM_NO_SEQUENCE, layer_attr=None): """ - A layer for "Expand Dense data or (sequence data where the length of each - sequence is one) to sequence data." + A layer for expanding dense data or (sequence data where the length of each + sequence is one) to sequence data. The example usage is: @@ -1857,7 +1864,9 @@ def expand_layer(input, :param input: The input of this layer. :type input: LayerOutput - :param expand_as: Expand as this layer's sequence info. + :param expand_as: Expand the input according to this layer's sequence infomation. And + after the operation, the input expanded will have the same number of + elememts as this layer. :type expand_as: LayerOutput :param name: The name of this layer. It is optional. :type name: basestring @@ -1865,9 +1874,10 @@ def expand_layer(input, whose type is not ParameterAttribute, no bias is defined. If the parameter is set to True, the bias is initialized to zero. :type bias_attr: ParameterAttribute | None | bool | Any - :param expand_level: whether input layer is timestep(default) or sequence. + :param expand_level: Whether the input layer is a sequence or the element of a sequence. :type expand_level: ExpandLevel - :param layer_attr: extra layer attributes. + :param layer_attr: The extra layer attribute. 
See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute. :return: LayerOutput object. :rtype: LayerOutput @@ -2706,7 +2716,8 @@ def img_pool_layer(input, pool_size_y=None, stride_y=None, padding_y=None, - ceil_mode=True): + ceil_mode=True, + exclude_mode=None): """ Image pooling Layer. @@ -2718,15 +2729,17 @@ def img_pool_layer(input, .. math:: - w = 1 + int(ceil(input\_width + 2 * padding - pool\_size) / float(stride)) - h = 1 + int(ceil(input\_height + 2 * padding\_y - pool\_size\_y) / float(stride\_y)) + w & = 1 + \\frac{ceil(input\_width + 2 * padding - pool\_size)}{stride} + + h & = 1 + \\frac{ceil(input\_height + 2 * padding\_y - pool\_size\_y)}{stride\_y} - ceil_mode=False: .. math:: - w = 1 + int(floor(input\_width + 2 * padding - pool\_size) / float(stride)) - h = 1 + int(floor(input\_height + 2 * padding\_y - pool\_size\_y) / float(stride\_y)) + w & = 1 + \\frac{floor(input\_width + 2 * padding - pool\_size)}{stride} + + h & = 1 + \\frac{floor(input\_height + 2 * padding\_y - pool\_size\_y)}{stride\_y} The example usage is: @@ -2770,10 +2783,15 @@ def img_pool_layer(input, :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for details. :type layer_attr: ExtraLayerAttribute - :param ceil_mode: Wether to use the ceil function to calculate output height and width. + :param ceil_mode: Whether to use the ceil function to calculate output height and width. True is the default. If it is set to False, the floor function will be used. :type ceil_mode: bool + :param exclude_mode: Whether to exclude the padding cells when calculating, but only + work when pool_type is AvgPooling. If None, also exclude the padding + cells. If use cudnn, use CudnnAvgPooling or CudnnAvgInclPadPooling + as pool_type to identify the mode. + :type exclude_mode: bool :return: LayerOutput object. :rtype: LayerOutput """ @@ -2787,7 +2805,7 @@ def img_pool_layer(input, pool_type.name = 'avg' assert type(pool_type) in [AvgPooling, MaxPooling, MaxWithMaskPooling, CudnnAvgPooling, - CudnnMaxPooling], \ + CudnnMaxPooling, CudnnAvgInclPadPooling], \ "only (Cudnn)AvgPooling, (Cudnn)MaxPooling, MaxWithMaskPooling are supported" type_name = pool_type.name + '-projection' \ @@ -2816,6 +2834,7 @@ def img_pool_layer(input, padding_y=padding_y)) ], ceil_mode=ceil_mode, + exclude_mode=exclude_mode, **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput( name, @@ -2853,17 +2872,21 @@ def img_pool3d_layer(input, .. math:: - w = 1 + int(ceil(input\_width + 2 * padding - pool\_size) / float(stride)) - h = 1 + int(ceil(input\_height + 2 * padding\_y - pool\_size\_y) / float(stride\_y)) - d = 1 + int(ceil(input\_depth + 2 * padding\_z - pool\_size\_z) / float(stride\_z)) + w & = 1 + \\frac{ceil(input\_width + 2 * padding - pool\_size)}{stride} + + h & = 1 + \\frac{ceil(input\_height + 2 * padding\_y - pool\_size\_y)}{stride\_y} + + d & = 1 + \\frac{ceil(input\_depth + 2 * padding\_z - pool\_size\_z)}{stride\_z} - ceil_mode=False: .. 
math:: - w = 1 + int(floor(input\_width + 2 * padding - pool\_size) / float(stride)) - h = 1 + int(floor(input\_height + 2 * padding\_y - pool\_size\_y) / float(stride\_y)) - d = 1 + int(floor(input\_depth + 2 * padding\_z - pool\_size\_z) / float(stride\_z)) + w & = 1 + \\frac{floor(input\_width + 2 * padding - pool\_size)}{stride} + + h & = 1 + \\frac{floor(input\_height + 2 * padding\_y - pool\_size\_y)}{stride\_y} + + d & = 1 + \\frac{floor(input\_depth + 2 * padding\_z - pool\_size\_z)}{stride\_z} The example usage is: @@ -2985,8 +3008,8 @@ def spp_layer(input, A layer performs spatial pyramid pooling. Reference: - Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition - https://arxiv.org/abs/1406.4729 + `Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition + `_ The example usage is: @@ -3087,8 +3110,8 @@ def img_cmrnorm_layer(input, Response normalization across feature maps. Reference: - ImageNet Classification with Deep Convolutional Neural Networks - http://www.cs.toronto.edu/~fritz/absps/imagenet.pdf + `ImageNet Classification with Deep Convolutional Neural Networks + `_ The example usage is: @@ -3154,9 +3177,9 @@ def batch_norm_layer(input, y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift Reference: - Batch Normalization: Accelerating Deep Network Training by Reducing + `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift - http://arxiv.org/abs/1502.03167 + `_ The example usage is: @@ -3294,7 +3317,7 @@ def row_l2_norm_layer(input, name=None, layer_attr=None): A layer for L2-normalization in each row. .. math:: - out[i] = \frac{in[i]}{\sqrt{\sum_{k=1}^N in[k]^{2}}} + out[i] = \\frac{in[i]} {\\sqrt{\\sum_{k=1}^N in[k]^{2}}} where the size of :math:`in` is (batchSize x dataDim) , and the size of :math:`out` is a (batchSize x dataDim) . @@ -5413,18 +5436,28 @@ def maxout_layer(input, groups, num_channels=None, name=None, layer_attr=None): to be devided by groups. Reference: - Maxout Networks - http://www.jmlr.org/proceedings/papers/v28/goodfellow13.pdf - Multi-digit Number Recognition from Street View Imagery using Deep Convolutional Neural Networks - https://arxiv.org/pdf/1312.6082v4.pdf + `Maxout Networks + `_ + `Multi-digit Number Recognition from Street View Imagery using Deep Convolutional Neural Networks + `_ + .. math:: - y_{si+j} = \max_k x_{gsi + sk + j} - g = groups - s = input.size / num_channels - 0 \le i < num_channels / groups - 0 \le j < s - 0 \le k < groups + + & out = \max_k (in[n, k, o_c , s]) + + & out_{i * s + j} = \max_k in_{ k * o_{c} * s + i * s + j} + + & s = \\frac{input.size}{ num\_channels} + + & o_{c} = \\frac{num\_channels}{groups} + + & 0 \le i < o_{c} + + & 0 \le j < s + + & 0 \le k < groups + The simple usage is: @@ -5481,9 +5514,9 @@ def ctc_layer(input, alignment between the inputs and the target labels is unknown. Reference: - Connectionist Temporal Classification: Labelling Unsegmented Sequence Data + `Connectionist Temporal Classification: Labelling Unsegmented Sequence Data with Recurrent Neural Networks - http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_GravesFGS06.pdf + `_ Note: Considering the 'blank' label needed by CTC, you need to use (num_classes + 1) @@ -5555,9 +5588,9 @@ def warp_ctc_layer(input, install it to :code:`third_party/install/warpctc` directory. 
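The pooling output-size formulas given above for img_pool_layer and img_pool3d_layer can be checked with a tiny helper (illustrative only; it simply evaluates the formulas as written):

```python
import math


def pooled_size(input_size, pool_size, padding, stride, ceil_mode=True):
    # ceil_mode=True : 1 + ceil((input + 2*padding - pool_size) / stride)
    # ceil_mode=False: 1 + floor((input + 2*padding - pool_size) / stride)
    rounding = math.ceil if ceil_mode else math.floor
    return 1 + int(rounding((input_size + 2 * padding - pool_size) / float(stride)))


# A 224-wide input, 3x3 pooling, padding 1, stride 2:
print(pooled_size(224, 3, 1, 2, ceil_mode=True))   # 113
print(pooled_size(224, 3, 1, 2, ceil_mode=False))  # 112
```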
Reference: - Connectionist Temporal Classification: Labelling Unsegmented Sequence Data + `Connectionist Temporal Classification: Labelling Unsegmented Sequence Data with Recurrent Neural Networks - http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_GravesFGS06.pdf + `_ Note: - Let num_classes represents the category number. Considering the 'blank' @@ -5777,8 +5810,8 @@ def nce_layer(input, Noise-contrastive estimation. Reference: - A fast and simple algorithm for training neural probabilistic language - models. https://www.cs.toronto.edu/~amnih/papers/ncelm.pdf + `A fast and simple algorithm for training neural probabilistic language + models. `_ The example usage is: @@ -5893,8 +5926,8 @@ def rank_cost(left, A cost Layer for learning to rank using gradient descent. Reference: - Learning to Rank using Gradient Descent - http://research.microsoft.com/en-us/um/people/cburges/papers/ICML_ranking.pdf + `Learning to Rank using Gradient Descent + `_ .. math:: @@ -6161,9 +6194,11 @@ def huber_regression_cost(input, Given a prediction f(x), a label y and :math:`\delta`, the loss function is defined as: - .. math: - loss = 0.5*\left ( y-f(x) \right )^2, \left | y-f(x) \right |\leq \delta - loss = \delta \left | y-f(x) \right |-0.5\delta ^2, otherwise + .. math:: + + loss = 0.5*(y-f(x))^{2}, | y-f(x) | < \delta + + loss = \delta | y-f(x) | - 0.5 \delta ^2, otherwise The example usage is: @@ -6210,12 +6245,14 @@ def huber_classification_cost(input, """ For classification purposes, a variant of the Huber loss called modified Huber is sometimes used. Given a prediction f(x) (a real-valued classifier score) and - a true binary class label :math:`y\in \left \{-1, 1 \right \}`, the modified Huber + a true binary class label :math:`y\in \{-1, 1 \}`, the modified Huber loss is defined as: .. math: - loss = \max \left ( 0, 1-yf(x) \right )^2, yf(x)\geq 1 - loss = -4yf(x), \text{otherwise} + + loss = \max ( 0, 1-yf(x) )^2, yf(x) \geq -1 + + loss = -4yf(x), otherwise The example usage is: @@ -6429,8 +6466,8 @@ def smooth_l1_cost(input, label, name=None, coeff=1.0, layer_attr=None): smooth_{L1}(x) = \\begin{cases} 0.5x^2& \\text{if} \\ |x| < 1 \\\\ |x|-0.5& \\text{otherwise} \end{cases} Reference: - Fast R-CNN - https://arxiv.org/pdf/1504.08083v2.pdf + `Fast R-CNN + `_ The example usage is: @@ -6636,8 +6673,8 @@ def prelu_layer(input, The Parametric Relu activation that actives outputs with a learnable weight. Reference: - Delving Deep into Rectifiers: Surpassing Human-Level Performance on - ImageNet Classification http://arxiv.org/pdf/1502.01852v1.pdf + `Delving Deep into Rectifiers: Surpassing Human-Level Performance on + ImageNet Classification `_ .. math:: z_i &\\quad if \\quad z_i > 0 \\\\ @@ -6733,8 +6770,8 @@ def gated_unit_layer(input, product between :match:`X'` and :math:`\sigma` is finally returned. Reference: - Language Modeling with Gated Convolutional Networks - https://arxiv.org/abs/1612.08083 + `Language Modeling with Gated Convolutional Networks + `_ .. math:: y=\\text{act}(X \cdot W + b)\otimes \sigma(X \cdot V + c) @@ -6870,6 +6907,7 @@ def crop_layer(input, offset, axis=2, shape=None, name=None, layer_attr=None): :param input: The input of this layer. If two inputs are given, the second one will be regarded as the reference. + And the input must be 4-dims and in NCHW order. :type input: LayerOutput | Sequence :param offset: The crop offset. :type offset: Sequence @@ -6959,7 +6997,7 @@ def clip_layer(input, min, max, name=None): .. 
math:: - out[i] = \min\left(\max\left(in[i],p_{1}\right),p_{2}\right) + out[i] = \min (\max (in[i],p_{1} ),p_{2} ) .. code-block:: python @@ -7403,3 +7441,73 @@ def scale_sub_region_layer(input, indices, value, name=None): parents=[input, indices], num_filters=input.num_filters, size=input.size) + + +@wrap_name_default() +@wrap_act_default(act=LinearActivation()) +@wrap_param_attr_default() +@layer_support() +def factorization_machine(input, + factor_size, + act=None, + name=None, + param_attr=None, + layer_attr=None): + """ + The Factorization Machine models pairwise feature interactions as inner + product of the learned latent vectors corresponding to each input feature. + The Factorization Machine can effectively capture feature interactions + especially when the input is sparse. + + This implementation only consider the 2-order feature interactions using + Factorization Machine with the formula: + + .. math:: + y = \sum_{i=1}^{n-1}\sum_{j=i+1}^n\langle v_i, v_j \rangle x_i x_j + + Note: + X is the input vector with size n. V is the factor matrix. Each row of V + is the latent vector corresponding to each input dimesion. The size of + each latent vector is k. + + For details of Factorization Machine, please refer to the paper: + Factorization machines. + + .. code-block:: python + first_order = paddle.layer.fc(input=input, + size=1, + act=paddle.activation.Linear()) + second_order = paddle.layer.factorization_machine(input=input, + factor_size=10) + fm = paddle.layer.addto(input=[first_order, second_order], + act=paddle.activation.Linear(), + bias_attr=False) + + :param input: The input layer. Supported input types: all input data types + on CPU, and only dense input types on GPU. + :type input: LayerOutput + :param factor_size: The hyperparameter that defines the dimensionality of + the latent vector size. + :type context_len: int + :param act: Activation Type. Default is linear activation. + :type act: BaseActivation + :param param_attr: The parameter attribute. See ParameterAttribute for + details. + :type param_attr: ParameterAttribute + :param layer_attr: Extra Layer config. + :type layer_attr: ExtraLayerAttribute|None + :return: LayerOutput object. + :rtype: LayerOutput + """ + assert isinstance(input, LayerOutput) + assert factor_size > 0, "the factor_size must be greater than 0." + + Layer( + inputs=[Input(input.name, **param_attr.attr)], + name=name, + factor_size=factor_size, + type=LayerType.FACTORIZATION_MACHINE, + active_type=act.name, + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput( + name, LayerType.FACTORIZATION_MACHINE, input, activation=act, size=1) diff --git a/python/paddle/trainer_config_helpers/poolings.py b/python/paddle/trainer_config_helpers/poolings.py index f45616551bcd4822c668234c3afaf6aa35cd2953..e0aeb311b3ae842aee337dbbf869e2f947d22bd9 100644 --- a/python/paddle/trainer_config_helpers/poolings.py +++ b/python/paddle/trainer_config_helpers/poolings.py @@ -16,7 +16,8 @@ __all__ = [ "BasePoolingType", "MaxPooling", "AvgPooling", "MaxWithMaskPooling", - "CudnnMaxPooling", "CudnnAvgPooling", "SumPooling", "SquareRootNPooling" + "CudnnMaxPooling", "CudnnAvgPooling", "CudnnAvgInclPadPooling", + "SumPooling", "SquareRootNPooling" ] @@ -88,6 +89,16 @@ class CudnnAvgPooling(BasePoolingType): BasePoolingType.__init__(self, "cudnn-avg-pool") +class CudnnAvgInclPadPooling(BasePoolingType): + """ + Cudnn average pooling only support GPU. Return the average value in the + pooling window taking into account the padding cells. 
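To make the factorization_machine formula above concrete, the second-order term can be evaluated either as the naive double sum or with the usual O(k·n) reformulation; the NumPy sketch below (not PaddlePaddle code) shows that the two agree:

```python
import numpy as np

n, k = 6, 3                       # input size and factor_size
rng = np.random.RandomState(0)
x = rng.rand(n)                   # input vector
V = rng.rand(n, k)                # factor matrix, one latent vector per input dim

# Naive pairwise sum: sum_{i<j} <v_i, v_j> x_i x_j
naive = sum(V[i].dot(V[j]) * x[i] * x[j]
            for i in range(n) for j in range(i + 1, n))

# Equivalent form: 0.5 * sum_f [ (sum_i v_{if} x_i)^2 - sum_i v_{if}^2 x_i^2 ]
fast = 0.5 * np.sum(np.square(x.dot(V)) - np.square(x).dot(np.square(V)))

print(np.allclose(naive, fast))   # True
```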
+ """ + + def __init__(self): + BasePoolingType.__init__(self, "cudnn-avg-incl-pad-pool") + + class AvgPooling(BasePoolingType): """ Average pooling. diff --git a/python/paddle/trainer_config_helpers/tests/configs/file_list.sh b/python/paddle/trainer_config_helpers/tests/configs/file_list.sh index a21f67a2d99e7eab39708e2a571d30d7e9f20ce6..10c941f707498ec45e79bed9d3f8054eea19887d 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/file_list.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/file_list.sh @@ -11,6 +11,7 @@ test_recursive_topology test_gated_unit_layer test_clip_layer test_row_l2_norm_l test_kmax_seq_socre_layer test_sub_nested_seq_select_layer test_scale_shift_layer test_seq_slice_layer test_cross_entropy_over_beam test_roi_pool_layer test_pooling3D_layer test_conv3d_layer test_deconv3d_layer test_BatchNorm3D test_resize_layer -test_scale_sub_region_layer test_dot_prod_layer test_l2_distance_layer) +test_scale_sub_region_layer test_dot_prod_layer test_l2_distance_layer +test_factorization_machine) export whole_configs=(test_split_datasource) diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_factorization_machine.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_factorization_machine.protostr new file mode 100644 index 0000000000000000000000000000000000000000..4f3002b19942ed58970bfd64e5978c1601273992 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_factorization_machine.protostr @@ -0,0 +1,39 @@ +type: "nn" +layers { + name: "data" + type: "data" + size: 1024 + active_type: "" +} +layers { + name: "__factorization_machine_0__" + type: "factorization_machine" + size: 1 + active_type: "" + inputs { + input_layer_name: "data" + input_parameter_name: "___factorization_machine_0__.w0" + } + factor_size: 10 +} +parameters { + name: "___factorization_machine_0__.w0" + size: 10240 + initial_mean: 0.0 + initial_std: 0.03125 + dims: 1024 + dims: 10 + initial_strategy: 0 + initial_smart: true +} +input_layer_names: "data" +output_layer_names: "__factorization_machine_0__" +sub_models { + name: "root" + layer_names: "data" + layer_names: "__factorization_machine_0__" + input_layer_names: "data" + output_layer_names: "__factorization_machine_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_factorization_machine.py b/python/paddle/trainer_config_helpers/tests/configs/test_factorization_machine.py new file mode 100644 index 0000000000000000000000000000000000000000..b249de0fee3c8ca4ad0520872fa2497c493d31b5 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_factorization_machine.py @@ -0,0 +1,7 @@ +from paddle.trainer_config_helpers import * + +data = data_layer(name='data', size=1024) + +fm = factorization_machine(input=data, factor_size=10) + +outputs(fm) diff --git a/python/paddle/v2/__init__.py b/python/paddle/v2/__init__.py index 33a0829ba8d635ebd68b50f3da07da958fb79dcb..70f61e84997efdbe3d6f268d249be8bac15b9ecd 100644 --- a/python/paddle/v2/__init__.py +++ b/python/paddle/v2/__init__.py @@ -83,11 +83,10 @@ def set_omp_mkl_env_vars(trainer_count): '''Get the number of physical cores''' if platform.system() == "Linux": num_sockets = int( - os.popen("lscpu |grep \"Socket\" |awk -F':' '{print $2}'|xargs") + os.popen("grep 'physical id' /proc/cpuinfo | sort -u | wc -l") .read()) num_cores_per_socket = int( - os.popen( - "lscpu |grep \"per socket\" |awk -F':' '{print $2}'|xargs") + os.popen("grep 
'core id' /proc/cpuinfo | sort -u | wc -l") .read()) return num_sockets * num_cores_per_socket else: diff --git a/python/paddle/v2/dataset/common.py b/python/paddle/v2/dataset/common.py index e31e501ce93c5dc20693a8724ee7dd864f9aef55..191d9ecfb127c1851a392bc9ec83734d630d0ac4 100644 --- a/python/paddle/v2/dataset/common.py +++ b/python/paddle/v2/dataset/common.py @@ -71,7 +71,7 @@ def download(url, module_name, md5sum): if retry < retry_limit: retry += 1 else: - raise RuntimeError("Cannot download {0} within retry limit {2}". + raise RuntimeError("Cannot download {0} within retry limit {1}". format(url, retry_limit)) print "Cache file %s not found, downloading %s" % (filename, url) r = requests.get(url, stream=True) diff --git a/python/paddle/v2/fluid/__init__.py b/python/paddle/v2/fluid/__init__.py index 9677c9568c6783921545364bca7b2c9c0041d823..59986c9f0ca8e4b793463db0e8c5da0489654ee9 100644 --- a/python/paddle/v2/fluid/__init__.py +++ b/python/paddle/v2/fluid/__init__.py @@ -13,20 +13,22 @@ import nets import optimizer import backward import regularizer - +from param_attr import ParamAttr +from data_feeder import DataFeeder from core import LoDTensor, CPUPlace, GPUPlace Tensor = LoDTensor __all__ = framework.__all__ + executor.__all__ + [ 'io', 'initializer', 'layers', 'nets', 'optimizer', 'backward', - 'regularizer', 'LoDTensor', 'CPUPlace', 'GPUPlace', 'Tensor' + 'regularizer', 'LoDTensor', 'CPUPlace', 'GPUPlace', 'Tensor', 'ParamAttr' + 'DataFeeder' ] def __read_gflags_from_env__(): """ Enable reading gflags from environment variables. - + Returns: None """ @@ -35,7 +37,8 @@ def __read_gflags_from_env__(): read_env_flags = ['use_pinned_memory'] if core.is_compile_gpu(): read_env_flags.append('fraction_of_gpu_memory_to_use') - core.init_gflags(sys.argv + ["--tryfromenv=" + ",".join(read_env_flags)]) + core.init_gflags([sys.argv[0]] + + ["--tryfromenv=" + ",".join(read_env_flags)]) __read_gflags_from_env__() diff --git a/python/paddle/v2/fluid/data_feeder.py b/python/paddle/v2/fluid/data_feeder.py new file mode 100644 index 0000000000000000000000000000000000000000..30a542af212926c93381aade426e25f2117e4662 --- /dev/null +++ b/python/paddle/v2/fluid/data_feeder.py @@ -0,0 +1,97 @@ +from __future__ import print_function +import core +import numpy +import six.moves as six + +from framework import Variable + +__all__ = ['DataFeeder'] + + +class DataToLoDTensorConverter(object): + def __init__(self, place, lod_level, shape, dtype): + self.place = place + self.lod_level = lod_level + self.shape = shape + if dtype == core.DataType.FP32: + self.dtype = 'float32' + elif dtype == core.DataType.INT64: + self.dtype = 'int64' + elif dtype == core.DataType.FP64: + self.dtype = 'float64' + elif dtype == core.DataType.INT32: + self.dtype = 'int32' + else: + raise ValueError("dtype must be any of [int32, float32, int64, " + "float64]") + + self.data = [] + self.lod = [] + + for i in six.range(lod_level): + self.lod.append([0]) + + def feed(self, data): + self._feed_impl_(data, self.lod, self.lod_level) + + def _feed_impl_(self, data, lod, lod_level): + if lod_level == 0: + self.data.append(data) + else: + cur_lod_len = len(data) + lod[-1].append(lod[-1][-1] + cur_lod_len) + for each_data in data: + self._feed_impl_(each_data, lod[:-1], lod_level - 1) + + def done(self): + arr = numpy.array(self.data, dtype=self.dtype).reshape(self.shape) + t = core.LoDTensor() + t.set(arr, self.place) + if self.lod_level > 0: + t.set_lod(self.lod) + return t + + +class DataFeeder(object): + def __init__(self, feed_list, 
place): + self.feed_dtypes = [] + self.feed_names = [] + self.feed_shapes = [] + self.feed_lod_level = [] + for each_var in feed_list: + if not isinstance(each_var, Variable): + raise TypeError("Feed list should contain a list of variable") + self.feed_dtypes.append(each_var.dtype) + self.feed_names.append(each_var.name) + shape = each_var.shape + batch_size_dim = -1 + for i, s in enumerate(shape): + if s < 0: + batch_size_dim = i + break + if batch_size_dim == -1: + raise ValueError("Variable {0} must has a batch size dimension", + each_var.name) + self.feed_lod_level.append(each_var.lod_level) + self.feed_shapes.append(shape) + + self.place = place + + def feed(self, iterable): + converter = [] + for lod_level, shape, dtype in six.zip( + self.feed_lod_level, self.feed_shapes, self.feed_dtypes): + converter.append( + DataToLoDTensorConverter( + place=self.place, + lod_level=lod_level, + shape=shape, + dtype=dtype)) + + for each_sample in iterable: + for each_converter, each_slot in six.zip(converter, each_sample): + each_converter.feed(each_slot) + ret_dict = {} + for each_name, each_converter in six.zip(self.feed_names, converter): + ret_dict[each_name] = each_converter.done() + return ret_dict diff --git a/python/paddle/v2/fluid/evaluator.py b/python/paddle/v2/fluid/evaluator.py index bd4a6fda1fd20e68d5a42e76f6ab516bb5c00cff..137c5736226b689340748d5098ca51659d5acff8 100644 --- a/python/paddle/v2/fluid/evaluator.py +++ b/python/paddle/v2/fluid/evaluator.py @@ -26,9 +26,9 @@ class Evaluator(object): name(str): The name of evaluator. such as, "accuracy". Used for generate temporary variable name. main_program(Program, optional): The evaluator should be added to this - main_program. Default g_main_program + main_program. Default default_main_program() startup_program(Program, optional):The parameter should be added to this - startup_program. Default g_startup_program + startup_program. Default default_startup_program() Attributes: states(list): The list of state variables. states will be reset to zero diff --git a/python/paddle/v2/fluid/executor.py b/python/paddle/v2/fluid/executor.py index 3e26d1b983a3c924ce2392c266bcd32e27c7b309..bdc82eede9d93a7cf904999a6b869ce2d23c90dc 100644 --- a/python/paddle/v2/fluid/executor.py +++ b/python/paddle/v2/fluid/executor.py @@ -1,6 +1,6 @@ import numpy as np from . import core -from framework import Program, g_main_program +from framework import Program, default_main_program __all__ = ['Executor', 'g_scope'] @@ -103,7 +103,7 @@ class Executor(object): fetch_list = [] if program is None: - program = g_main_program + program = default_main_program() if not isinstance(program, Program): raise TypeError() diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py index 6d6ea23f55eebc57cb120582a7c82d77eb1df45c..bf0cd275b62ae2c4d7312592b8a730291c59a071 100644 --- a/python/paddle/v2/fluid/framework.py +++ b/python/paddle/v2/fluid/framework.py @@ -3,19 +3,39 @@ import collections import numpy as np from . 
import core import proto.framework_pb2 as framework_pb2 +import google.protobuf.message +import contextlib __all__ = [ 'Block', 'Variable', 'Program', 'Operator', 'default_startup_program', - 'default_main_program', 'g_startup_program', 'g_main_program' + 'default_main_program', 'program_guard', 'switch_startup_program', + 'switch_main_program' ] def unique_name(prefix): + """ + Generate unique names with prefix + + Args: + prefix(str): The prefix of return string + + Returns(str): A unique string with the prefix + + """ uid = core.unique_integer(prefix) # unique during whole process. return "_".join([prefix, str(uid)]) def convert_np_dtype_to_dtype_(np_dtype): + """ + Convert the data type in numpy to the data type in Paddle + Args: + np_dtype(np.dtype): the data type in numpy + + Returns(core.DataType): the data type in Paddle + + """ dtype = np.dtype(np_dtype) if dtype == np.float32: return core.DataType.FP32 @@ -36,17 +56,33 @@ def convert_np_dtype_to_dtype_(np_dtype): def dtype_is_floating(dtype): + """ + Check the data type is floating or not. + Args: + dtype(np.dtype|core.DataType): data type. + Could be numpy format or Paddle format + + Returns(bool): True if data type is a float value + + """ if not isinstance(dtype, core.DataType): dtype = convert_np_dtype_to_dtype_(dtype) - if (dtype == core.DataType.FP16 or dtype == core.DataType.FP32 or - dtype == core.DataType.FP64): - return True - else: - return False + return dtype in [core.DataType.FP16, core.DataType.FP32, core.DataType.FP64] def _debug_string_(proto, throw_on_error=True): + """ + Get the debug string of a protobuf message. The message could be not + initialized. + Args: + proto(google.protobuf.message.Message): The protobuf message + throw_on_error(bool): True if raise an error when the protobuf message + is not initialized. + + Returns(str): The debug string of the protobuf message + + """ error_fields = list() if not proto.IsInitialized(error_fields) and throw_on_error: raise ValueError("{0} are not initialized\nThe message is {1}".format( @@ -55,6 +91,38 @@ def _debug_string_(proto, throw_on_error=True): class Variable(object): + """ + Python variable. Every input and output of an operator is a variable. Every + variable belongs to a block. The variable has a name and two variables in + different blocks could have the same name. + + There are many kinds of variables. Please reference the framework.proto for + details. + + Notes: The constructor of Variable should not be invoked directly. Please + use `Block.create_var` to create a variable. + + >>> cur_program = Program() + >>> cur_block = cur_program.current_block() + >>> new_variable = cur_block.create_var( + >>> name="X", shape=[-1, 23, 48], dtype='float32') + + Args: + block(Block): The associated block. It will be passed by + `Block.create_var` automatically. + type(core.VarDesc.VarType): Variable type. Please reference the + framework.proto for details. + shape(tuple|list|None): The shape of variable. -1 means the batch size. + Some kinds of variable do not contain shape, just set it to None. + dtype(np.dtype|core.DataType|str): The data type of variable. + lod_level(int): The level of lod tensor. 0 means there is not a time + series data. + persistable(bool): True if the variable should be saved as check point. + Defaults to False. + stop_gradient(bool): True if the variable will stop to calculate + gradients when backward. Defaults to False. 
+ """ + def __init__(self, block, type=core.VarDesc.VarType.LOD_TENSOR, @@ -138,6 +206,16 @@ class Variable(object): return self.to_string(True) def to_string(self, throw_on_error): + """ + Get debug string. + + Args: + throw_on_error(bool): True if raise an exception when self is not + intialized. + + Returns(str): The debug string. + + """ protostr = self.desc.serialize_to_string() proto = framework_pb2.VarDesc.FromString(str(protostr)) return _debug_string_(proto, throw_on_error) @@ -183,7 +261,9 @@ class Variable(object): def get_all_op_protos(): """ Get all registered op proto from PaddlePaddle C++ end. - :return: A list of registered OpProto. + + Returns(list): list of OpProto + """ protostrs = core.get_all_op_protos() ret_values = [] @@ -194,6 +274,10 @@ def get_all_op_protos(): class OpProtoHolder(object): + """ + A global variable to hold all OpProtos from C++ as a map + """ + @classmethod def instance(cls): if not hasattr(cls, '_instance'): @@ -210,12 +294,26 @@ class OpProtoHolder(object): self.op_proto_map[proto.type] = proto def get_op_proto(self, type): + """ + Get OpProto by a type string. + Args: + type(str): The type that operator registered in C++ side. + + Returns(framework_pb2.OpProto): The OpProto + + """ if type not in self.op_proto_map: raise ValueError("Operator \"%s\" has not been registered." % type) return self.op_proto_map[type] class Operator(object): + """ + Python Operator class. The operator represents the build in instructs in a + Block. Users can use the build in instructs to describe their neural + network. + """ + def __init__(self, block, desc, @@ -223,6 +321,30 @@ class Operator(object): inputs=None, outputs=None, attrs=None): + """ + Constructor. + + Notes: The constructor of operator should not be invoked directly. Use + Block.append_op or Block.prepend_op instead. + + >>> cur_program = Program() + >>> cur_block = cur_program.current_block() + >>> # var1 += var2 + var3 + >>> cur_block.append_op(type="sum", + >>> inputs={"X": [var1, var2, var3]}, + >>> outputs={"Out": [var1]}) + + Args: + block(Block): The block has the current operator + desc(core.OpDesc): The protobuf description + type(str): The type of operator. + inputs(dict): The input dictionary. Key is the input parameter name. + Value is a list of variables. + outputs(dict): The output dictionary. Has same format with inputs + attrs(dict): The attributes dictionary. Key is attribute name. Value + is the attribute value. The attribute type should be as same as + the type registered in C++ + """ self.block = block self.desc = desc if len(self.desc.type()) != 0: @@ -235,7 +357,7 @@ class Operator(object): def find_name(var_list, name): for var_name in var_list: - if var_name == name: + if var_list[var_name] is not None and var_name == name: return True return False @@ -309,6 +431,15 @@ class Operator(object): self.desc.infer_shape(self.block.desc) def to_string(self, throw_on_error): + """ + To debug string. + Args: + throw_on_error(bool): raise exception when self is not initialized + when throw_on_error is True + + Returns(str): The debug string. + + """ protostr = self.desc.serialize_to_string() proto = framework_pb2.OpDesc.FromString(str(protostr)) return _debug_string_(proto, throw_on_error) @@ -323,21 +454,55 @@ class Operator(object): return self.desc.type() def input(self, name): + """ + Get input arguments by the input parameter name + Args: + name(str): The input parameter name + + Returns(list): return the list of argument names associated with the + specific parameter name. 
+ + """ return self.desc.input(name) @property def input_names(self): + """ + Get all input parameter names + Returns(list): return a list of input parameter names + + """ return self.desc.input_names() def output(self, name): + """ + Get output arguments by the output parameter name + Args: + name(str): The output parameter name + + Returns(list): return the list of argument names associated with the + specific parameter name. + + """ return self.desc.output(name) @property def output_names(self): + """ + Get all output parameter names + Returns(list): return a list of output parameter names + + """ return self.desc.output_names() @property def idx(self): + """ + Return the array index of current operator. + Returns(int): The array index in block.ops array + Raises: + ValueError: when the operator is not found. + """ for i, op in enumerate(self.block.ops): if op == self: return i @@ -345,19 +510,57 @@ class Operator(object): "Can't find op itself in it's block. It could be a bug of Paddle.") def has_attr(self, name): + """ + operator has the attribute with name or not. + Args: + name(str): the attribute name + + Returns(bool): True if has this attribute. + + """ return self.desc.has_attr(name) def attr_type(self, name): + """ + Get the type of attribute by attribute name + Args: + name(str): the attribute name + + Returns(core.AttrType): the attribute type + + """ return self.desc.attr_type(name) @property def attr_names(self): + """ + Get all attribute names + Returns(list): The list of attribute name + + """ return self.desc.attr_names() def attr(self, name): + """ + Get attribute by name + Args: + name(str): the attribute name + + Returns(bool|int|str|float|list): The attribute value. The return value + can be any valid attribute type. + + """ return self.desc.attr(name) def block_attr(self, name): + """ + Get the block attribute by name + Args: + name(str): the attribute name + + Returns(int): the block index + + """ return self.desc.block_attr(name) @@ -477,7 +680,7 @@ class Block(object): """ Copy the information of parameters from other block Args: - other(Block): other block + other(Block): other block Returns: None @@ -510,6 +713,7 @@ class Program(object): self.desc = core.ProgramDesc() self.blocks = [Block(self, 0)] self.current_block_idx = 0 + self._seed = 0 def __str__(self): return self.to_string(True) @@ -562,6 +766,16 @@ class Program(object): p.sync_with_cpp() return p + @property + def random_seed(self): + return self._seed + + @random_seed.setter + def random_seed(self, seed): + if not isinstance(seed, int): + raise ValueError("Seed must be a integer.") + self._seed = seed + def __repr__(self): return str(self) @@ -610,7 +824,7 @@ class Program(object): def copy_param_info_from(self, other): """ - Copy the information of parameters from other program. + Copy the information of parameters from other program. Args: other(Program): Other program @@ -654,13 +868,88 @@ class Parameter(Variable): # program is a global instance. -g_main_program = Program() -g_startup_program = Program() +_main_program_ = Program() +_startup_program_ = Program() def default_startup_program(): - return g_startup_program + """ + Get default startup program. In startup program, Paddle will initialize + parameters, initialize nccl handle, etc. + + Returns: + Program: startup program + """ + return _startup_program_ def default_main_program(): - return g_main_program + """ + Get default main program. The main program is used for training or testing. 
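A minimal usage sketch, not part of the diff: the new Program.random_seed property and the default_main_program() accessor introduced above can be exercised roughly as follows, assuming the paddle.v2.fluid package layout of this branch.

from paddle.v2.fluid.framework import default_main_program

prog = default_main_program()
prog.random_seed = 42   # the setter rejects non-integer seeds with ValueError
assert prog.random_seed == 42
# initializers created afterwards pick this seed up when their own seed is 0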
+ + Returns: + Program: main program + """ + return _main_program_ + + +def switch_main_program(program): + """ + Switch the main program to a new program. + + Args: + program(Program): The new main program + + Returns: + Program: The previous main program + """ + global _main_program_ + prev_program = _main_program_ + _main_program_ = program + return prev_program + + +def switch_startup_program(program): + """ + Switch the startup program to a new program + Args: + program(Program): The new startup program + + Returns: + Program: The previous startup program + """ + global _startup_program_ + prev_program = _startup_program_ + _startup_program_ = program + return prev_program + + +@contextlib.contextmanager +def program_guard(main_program, startup_program=None): + """ + Switch program with `with` statement + + Examples: + >>> with program_guard(Program()): + >>> data = fluid.layers.data(...) + >>> hidden = fluid.layers.fc(...) + + Args: + main_program(Program): New main program inside `with` statement + startup_program(Program): New startup program inside `with` statement. + None means do not change startup program. + + Returns: + None + """ + if not isinstance(main_program, Program): + raise TypeError("main_program should be Program") + main_program = switch_main_program(main_program) + if startup_program is not None: + if not isinstance(startup_program, Program): + raise TypeError("startup_program should be Program") + startup_program = switch_startup_program(startup_program) + yield + switch_main_program(main_program) + if startup_program is not None: + switch_startup_program(startup_program) diff --git a/python/paddle/v2/fluid/initializer.py b/python/paddle/v2/fluid/initializer.py index d3f648f8460814a3f251d7aa9560d748af85235c..c0839caaf2bb5bc43a76a13b5782cc519a4afe63 100644 --- a/python/paddle/v2/fluid/initializer.py +++ b/python/paddle/v2/fluid/initializer.py @@ -132,6 +132,8 @@ class UniformInitializer(Initializer): assert isinstance(var, framework.Variable) assert isinstance(block, framework.Block) # Initialization Ops should be prepended and not appended + if self._seed == 0: + self._seed = block.program.random_seed op = block.prepend_op( type="uniform_random", outputs={"Out": var}, @@ -180,6 +182,8 @@ class NormalInitializer(Initializer): assert isinstance(var, framework.Variable) assert isinstance(block, framework.Block) # Initialization Ops should be prepended and not appended + if self._seed == 0: + self._seed = block.program.random_seed op = block.prepend_op( type="gaussian_random", outputs={"Out": var}, @@ -255,6 +259,9 @@ class XavierInitializer(Initializer): fan_in = f_in if self._fan_in is None else self._fan_in fan_out = f_out if self._fan_out is None else self._fan_out + if self._seed == 0: + self._seed = block.program.random_seed + if self._uniform: limit = np.sqrt(6.0 / float(fan_in + fan_out)) op = block.prepend_op( @@ -338,6 +345,9 @@ class MSRAInitializer(Initializer): # If fan_in is passed, use it fan_in = f_in if self._fan_in is None else self._fan_in + if self._seed == 0: + self._seed = block.program.random_seed + if self._uniform: limit = np.sqrt(6.0 / float(fan_in)) op = block.prepend_op( diff --git a/python/paddle/v2/fluid/io.py b/python/paddle/v2/fluid/io.py index e5b2aa3b919df4cec1091c0bbd39b7e400cc6867..e147ac22ad289eb00c83def66974d875fcdc31f8 100644 --- a/python/paddle/v2/fluid/io.py +++ b/python/paddle/v2/fluid/io.py @@ -1,8 +1,7 @@ import os import cPickle as pickle -from paddle.v2.fluid.framework import Program, Parameter, g_main_program, \ - 
Variable +from paddle.v2.fluid.framework import Program, Parameter, default_main_program, Variable __all__ = [ 'save_vars', 'save_params', 'save_persistables', 'load_vars', 'load_params', @@ -46,7 +45,7 @@ def save_vars(executor, dirname, main_program=None, vars=None, predicate=None): """ if vars is None: if main_program is None: - main_program = g_main_program + main_program = default_main_program() if not isinstance(main_program, Program): raise TypeError("program should be as Program type or None") @@ -98,7 +97,7 @@ def load_vars(executor, dirname, main_program=None, vars=None, predicate=None): :param executor: executor that save variable :param dirname: directory path :param main_program: program. If vars is None, then filter all variables in this - program which fit `predicate`. Default g_program. + program which fit `predicate`. Default default_main_program(). :param predicate: The Predicate describes a callable that returns a variable as a bool. If it returns true, the variables will be loaded. :param vars: variables need to be loaded. If specify vars, program & @@ -107,7 +106,7 @@ def load_vars(executor, dirname, main_program=None, vars=None, predicate=None): """ if vars is None: if main_program is None: - main_program = g_main_program + main_program = default_main_program() if not isinstance(main_program, Program): raise TypeError("program's type should be Program") @@ -154,7 +153,7 @@ def load_persistables(executor, dirname, main_program=None): def get_inference_program(target_vars, main_program=None): if main_program is None: - main_program = g_main_program + main_program = default_main_program() if not isinstance(target_vars, list): target_vars = [target_vars] @@ -177,12 +176,12 @@ def save_inference_model(dirname, :param target_vars: Variables from which we can get inference results. :param executor: executor that save inference model :param main_program: original program, which will be pruned to build the inference model. - Default g_main_program. + Default default_main_program(). :return: None """ if main_program is None: - main_program = g_main_program + main_program = default_main_program() if not isinstance(target_vars, list): target_vars = [target_vars] @@ -272,10 +271,10 @@ def get_parameter_value_by_name(name, executor, program=None): :param executor: executor for retrieving the value :param name: the name of the parameter :param program: the program where the variable is found - Default g_main_program. + Default default_main_program(). 
:return: the LoDTensor for the variable """ if program is None: - program = g_main_program + program = default_main_program() var = program.global_block().var(name) return get_parameter_value(var, executor) diff --git a/python/paddle/v2/fluid/layer_helper.py b/python/paddle/v2/fluid/layer_helper.py index 5f8855551114a9a9b671d1630c9e8a3f0cb5c04b..3963e1322230259230885c097d37b818edda6b13 100644 --- a/python/paddle/v2/fluid/layer_helper.py +++ b/python/paddle/v2/fluid/layer_helper.py @@ -1,9 +1,10 @@ import copy import itertools -from framework import Variable, g_main_program, \ - g_startup_program, unique_name, dtype_is_floating +from framework import Variable, Parameter, default_main_program, default_startup_program, \ + unique_name, dtype_is_floating from paddle.v2.fluid.initializer import Constant, Xavier +from param_attr import ParamAttr class LayerHelper(object): @@ -22,7 +23,7 @@ class LayerHelper(object): def main_program(self): prog = self.kwargs.get('main_program', None) if prog is None: - return g_main_program + return default_main_program() else: return prog @@ -30,7 +31,7 @@ class LayerHelper(object): def startup_program(self): prog = self.kwargs.get('startup_program', None) if prog is None: - return g_startup_program + return default_startup_program() else: return prog @@ -60,31 +61,15 @@ class LayerHelper(object): @property def param_attr(self): - default = {'name': None} - actual = self.kwargs.get('param_attr', None) - if actual is None: - actual = default - for default_field in default.keys(): - if default_field not in actual: - actual[default_field] = default[default_field] - return actual + return ParamAttr.to_attr(self.kwargs.get('param_attr', None)) @property def bias_attr(self): - default = {'name': None} - bias_attr = self.kwargs.get('bias_attr', None) - if bias_attr is None: - bias_attr = default - - if isinstance(bias_attr, dict): - for default_field in default.keys(): - if default_field not in bias_attr: - bias_attr[default_field] = default[default_field] - return bias_attr + return ParamAttr.to_attr(self.kwargs.get('bias_attr', None)) def multiple_param_attr(self, length): param_attr = self.param_attr - if isinstance(param_attr, dict): + if isinstance(param_attr, ParamAttr): param_attr = [param_attr] if len(param_attr) != 1 and len(param_attr) != length: @@ -112,23 +97,36 @@ class LayerHelper(object): raise ValueError("Data Type mismatch") return dtype - def create_parameter(self, attr, shape, dtype, suffix='w', - initializer=None): + def create_parameter(self, + attr, + shape, + dtype, + is_bias=False, + default_initializer=None): # Deepcopy the attr so that parameters can be shared in program - attr_copy = copy.deepcopy(attr) - if initializer is not None: - attr_copy['initializer'] = initializer + assert isinstance(attr, ParamAttr) + suffix = 'b' if is_bias else 'w' + + if default_initializer is None: + if is_bias: + attr.set_default_bias_initializer() + else: + attr.set_default_param_initializer() else: - attr_copy['initializer'] = self._get_default_initializer(dtype) - if attr_copy['name'] is None: - attr_copy['name'] = unique_name(".".join([self.name, suffix])) + attr.set_default_initializer(default_initializer) + if attr.name is None: + attr.name = unique_name(".".join([self.name, suffix])) + self.startup_program.global_block().create_parameter( - dtype=dtype, shape=shape, **attr_copy) + dtype=dtype, shape=shape, **attr.to_kwargs(with_initializer=True)) return self.main_program.global_block().create_parameter( - name=attr_copy['name'], - dtype=dtype, - 
shape=shape, - trainable=attr_copy.get('trainable', True)) + dtype=dtype, shape=shape, **attr.to_kwargs()) + + def get_parameter(self, name): + param = self.main_program.global_block().var(name) + if not isinstance(param, Parameter): + raise ValueError("no Parameter name %s found" % name) + return param def create_tmp_variable(self, dtype): return self.main_program.current_block().create_var( @@ -153,11 +151,14 @@ class LayerHelper(object): persistable=True, initializer=initializer) - def append_bias_op(self, - input_var, - bias_initializer, - dim_start=1, - dim_end=None): + @property + def to_kwargs(self): + return { + 'main_program': self.main_program, + 'startup_program': self.startup_program + } + + def append_bias_op(self, input_var, dim_start=1, dim_end=None): """ Append bias operator and return its output. If the user does not set bias_attr, append_bias_op will return input_var @@ -177,11 +178,7 @@ class LayerHelper(object): return input_var b = self.create_parameter( - attr=bias_attr, - shape=size, - dtype=input_var.dtype, - suffix='b', - initializer=bias_initializer) + attr=bias_attr, shape=size, dtype=input_var.dtype, is_bias=True) tmp = self.create_tmp_variable(dtype=input_var.dtype) self.append_op( type='elementwise_add', diff --git a/python/paddle/v2/fluid/layers.py b/python/paddle/v2/fluid/layers.py index 28bc3d214b559a089efb2bb736eb49cb1ba4de25..f67d6d08c7557d939f280d19c5b86914885490bd 100644 --- a/python/paddle/v2/fluid/layers.py +++ b/python/paddle/v2/fluid/layers.py @@ -1,25 +1,35 @@ -from . import core +import contextlib + import proto.framework_pb2 as framework_pb2 +import core from framework import OpProtoHolder, Variable, Program, Operator -from initializer import Constant, Normal, Xavier +from initializer import Constant, Normal, Xavier, Initializer from paddle.v2.fluid.layer_helper import LayerHelper, unique_name -import re -import cStringIO +from registry import register_layer +from param_attr import ParamAttr __all__ = [ 'fc', 'data', 'cross_entropy', 'conv2d', 'pool2d', 'embedding', 'concat', 'StaticRNN', 'cast', 'sequence_conv', 'sequence_pool', 'sums', 'cos_sim', - 'batch_norm', 'accuracy', 'split_lod_tensor' + 'batch_norm', 'accuracy', 'split_lod_tensor', 'While' +] + +_REGISTER_LAYER_FROM_OPS = [ + 'mean', 'mul', 'dropout', 'reshape', 'sigmoid', 'scale', 'transpose', + 'sigmoid_cross_entropy_with_logits', 'elementwise_add', 'elementwise_div', + 'elementwise_sub', 'elementwise_mul', 'clip', 'abs' ] +for _OP in set(_REGISTER_LAYER_FROM_OPS): + globals()[_OP] = register_layer(_OP) + __all__.append(_OP) + def fc(input, size, num_flatten_dims=1, param_attr=None, - param_initializer=None, bias_attr=None, - bias_initializer=None, act=None, name=None, main_program=None, @@ -32,11 +42,9 @@ def fc(input, size: The size of the layer num_flatten_dims: Number of columns in input param_attr: The parameters/weights to the FC Layer - param_initializer: Initializer used for the weight/parameter. - If None, XavierInitializer() is used + param_initializer: Initializer used for the weight/parameter. If None, XavierInitializer() is used bias_attr: The bias parameter for the FC layer - bias_initializer: Initializer used for the bias. - If None, then ConstantInitializer() is used + bias_initializer: Initializer used for the bias. If None, then ConstantInitializer() is used act: Activation to be applied to the output of FC layer name: Name/alias of the function main_program: Name of the main program that calls this @@ -54,23 +62,10 @@ def fc(input, to the LayerHelper constructor. 
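A hedged sketch, not part of the diff: with the ParamAttr-based interface above, an fc layer might be declared roughly as follows; the sizes, the parameter name 'fc.w0' and the string activation are illustrative assumptions.

import paddle.v2.fluid as fluid
from paddle.v2.fluid.param_attr import ParamAttr

# a 784-dim input projected to 128 hidden units with a named, trainable weight
image = fluid.layers.data(name='image', shape=[784], dtype='float32')
hidden = fluid.layers.fc(input=image,
                         size=128,
                         act='relu',
                         param_attr=ParamAttr(name='fc.w0', learning_rate=1.0))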
""" - - def _get_default_param_initializer(): - return Xavier() - - def _get_default_bias_initializer(): - return Constant() - helper = LayerHelper('fc', **locals()) dtype = helper.input_dtype() - if param_initializer is None: - param_initializer = _get_default_param_initializer() - - if bias_initializer is None: - bias_initializer = _get_default_bias_initializer() - mul_results = [] for input_var, param_attr in helper.iter_inputs_and_params(): input_shape = input_var.shape @@ -78,10 +73,7 @@ def fc(input, reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1) ] + [size] w = helper.create_parameter( - attr=param_attr, - initializer=param_initializer, - shape=param_shape, - dtype=dtype) + attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False) tmp = helper.create_tmp_variable(dtype) helper.append_op( type="mul", @@ -102,7 +94,7 @@ def fc(input, helper.append_op( type="sum", inputs={"X": mul_results}, outputs={"Out": pre_bias}) # add bias - pre_activation = helper.append_bias_op(pre_bias, bias_initializer) + pre_activation = helper.append_bias_op(pre_bias) # add activation return helper.append_activation(pre_activation) @@ -110,7 +102,6 @@ def fc(input, def embedding(input, size, is_sparse=False, - param_initializer=None, param_attr=None, dtype='float32', main_program=None, @@ -119,6 +110,7 @@ def embedding(input, Embedding Layer. Args: + param_initializer: input: The input to the function size: The size of the layer is_sparse: A flag that decleares whether the input is sparse @@ -136,15 +128,9 @@ def embedding(input, """ - def _get_default_param_initializer(): - return Xavier() - helper = LayerHelper('embedding', **locals()) w = helper.create_parameter( - attr=helper.param_attr, - shape=size, - dtype=dtype, - initializer=param_initializer or _get_default_param_initializer()) + attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False) tmp = helper.create_tmp_variable(dtype) helper.append_op( type='lookup_table', @@ -176,7 +162,7 @@ def dynamic_lstm(input, if not use_peepholes: bias_size[1] = 4 * size bias = helper.create_parameter( - attr=helper.bias_attr, shape=bias_size, dtype=dtype, suffix='b') + attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True) hidden = helper.create_tmp_variable(dtype) cell = helper.create_tmp_variable(dtype) @@ -204,10 +190,82 @@ def dynamic_lstm(input, return hidden, cell +def gru_unit(input, + hidden, + size, + weight=None, + bias=None, + activation='tanh', + gate_activation='sigmoid', + main_program=None, + startup_program=None): + """ + GRUUnit Operator implements partial calculations of the GRU unit as following: + + $$ + update \ gate: u_t = actGate(xu_t + W_u * h_{t-1} + b_u) \\ + reset \ gate: r_t = actGate(xr_t + W_r * h_{t-1} + b_r) \\ + output \ candidate: {h}_t = actNode(xc_t + W_c * dot(r_t, h_{t-1}) + b_c) \\ + output: h_t = dot((1 - u_t), h_{t-1}) + dot(u_t, {h}_t) + $$ + + which is same as one time step of GRU Operator. + + @note To implement the complete GRU unit, fully-connected operator must be + used before to feed xu, xr and xc as the Input of GRUUnit operator. 
+ + TODO(ChunweiYan) add more document here + """ + activation_dict = dict( + identity=0, + sigmoid=1, + tanh=2, + relu=3, ) + activation = activation_dict[activation] + gate_activation = activation_dict[gate_activation] + + helper = LayerHelper('gru_unit', **locals()) + dtype = helper.input_dtype() + size = size / 3 + + # create weight + if weight is None: + weight = helper.create_parameter( + attr=helper.param_attr, shape=[size, 3 * size], dtype=dtype) + + # create bias + if bias is None: + bias_size = [1, 3 * size] + bias = helper.create_parameter( + attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True) + + gate = helper.create_tmp_variable(dtype) + reset_hidden_pre = helper.create_tmp_variable(dtype) + updated_hidden = helper.create_tmp_variable(dtype) + + helper.append_op( + type='gru_unit', + inputs={'Input': input, + 'HiddenPrev': hidden, + 'Weight': weight}, + outputs={ + 'Gate': gate, + 'ResetHiddenPrev': reset_hidden_pre, + 'Hidden': updated_hidden, + }, + attrs={ + 'activation': 0, + 'gate_activation': 1, + }) + + return updated_hidden, reset_hidden_pre, gate + + def data(name, shape, append_batch_size=True, dtype='float32', + lod_level=0, type=core.VarDesc.VarType.LOD_TENSOR, main_program=None, startup_program=None, @@ -221,6 +279,7 @@ def data(name, append_batch_size: Whether or not to append the data as a batch. dtype: The type of data : float32, float_16, int etc type: The output type. By default it is LOD_TENSOR. + lod_level(int): The LoD Level. 0 means the input data is not a sequence. main_program: Name of the main program that calls this startup_program: Name of the startup program stop_gradient: A boolean that mentions whether gradient should flow. @@ -251,7 +310,8 @@ def data(name, shape=shape, dtype=dtype, type=type, - stop_gradient=stop_gradient) + stop_gradient=stop_gradient, + lod_level=lod_level) def create_tensor(dtype, name=None, main_program=None, startup_program=None): @@ -259,172 +319,6 @@ def create_tensor(dtype, name=None, main_program=None, startup_program=None): return helper.create_variable(name=helper.name, dtype=dtype) -def _convert_(name): - """ - Formatting. - - Args: - name: The name/alias - - This function takes in a name and converts it to a standard format of - group1_group2. Where as per the regular expression, group1 can have - alphabets and numbers and group2 has capital alphabets. 
- - """ - s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() - - -def _generate_doc_string_(op_proto): - """ - Generate docstring by OpProto - - Args: - op_proto (framework_pb2.OpProto): a protobuf message typed OpProto - - Returns: - str: the document string - """ - - def _type_to_str_(tp): - return framework_pb2.AttrType.Name(tp) - - if not isinstance(op_proto, framework_pb2.OpProto): - raise TypeError("OpProto should be `framework_pb2.OpProto`") - - buf = cStringIO.StringIO() - buf.write(op_proto.comment) - buf.write('\nArgs:\n') - for each_input in op_proto.inputs: - line_begin = ' {0}: '.format(_convert_(each_input.name)) - buf.write(line_begin) - buf.write(each_input.comment) - buf.write('\n') - buf.write(' ' * len(line_begin)) - buf.write('Duplicable: ') - buf.write(str(each_input.duplicable)) - buf.write(' Optional: ') - buf.write(str(each_input.dispensable)) - buf.write('\n') - - for each_attr in op_proto.attrs: - buf.write(' ') - buf.write(each_attr.name) - buf.write(' (') - buf.write(_type_to_str_(each_attr.type)) - buf.write('): ') - buf.write(each_attr.comment) - buf.write('\n') - - if len(op_proto.outputs) != 0: - buf.write('\nReturns:\n') - buf.write(' ') - for each_opt in op_proto.outputs: - if not each_opt.intermediate: - break - buf.write(each_opt.comment) - - return buf.getvalue() - - -def _create_op_func_(op_type): - """ - Create an Operator for a Function. - - Args: - op_type: The name of the operator to be created - - This function takes in the operator type (sigmoid, mean , average etc) and - creates the operator functionality. - - """ - op_proto = OpProtoHolder.instance().get_op_proto(op_type) - not_intermediate_outputs = \ - filter(lambda output: not output.intermediate, op_proto.outputs) - intermediate_outputs = \ - filter(lambda output: output.intermediate, op_proto.outputs) - - if len(not_intermediate_outputs) != 1: - raise ValueError("Only one non intermediate output operator can be", - "automatically generated") - - if not_intermediate_outputs[0].duplicable: - raise ValueError( - "Only non duplicable op can be automatically generated") - - for output in intermediate_outputs: - if output.duplicable: - raise ValueError("The op can be automatically generated only when ", - "all intermediate ops are not duplicable") - - o_name = not_intermediate_outputs[0].name - intermediate_output_names = [output.name for output in intermediate_outputs] - - def infer_and_check_dtype(op_proto, **kwargs): - """ - This function performs the sanity check for dtype and - instance type. 
- """ - dtype = None - for ipt in op_proto.inputs: - name = _convert_(ipt.name) - val = kwargs.pop(name, []) - if not isinstance(val, list) and not isinstance(val, tuple): - val = [val] - for each in val: - if not isinstance(each, Variable): - raise ValueError("input of {0} must be variable".format( - op_type)) - - if dtype is None: - dtype = each.dtype - elif dtype != each.dtype: - raise ValueError( - "operator {0} must input same dtype".format(op_type)) - - return dtype - - def func(**kwargs): - helper = LayerHelper(op_type, **kwargs) - - dtype = infer_and_check_dtype(op_proto, **kwargs) - - inputs = dict() - for ipt in op_proto.inputs: - name = _convert_(ipt.name) - val = kwargs.pop(name, []) - if not isinstance(val, list) and not isinstance(val, tuple): - val = [val] - inputs[ipt.name] = val - - outputs = dict() - out = helper.create_tmp_variable(dtype=dtype) - outputs[o_name] = [out] - for name in intermediate_output_names: - outputs[name] = [helper.create_tmp_variable(dtype=dtype)] - helper.append_op( - type=op_type, inputs=inputs, outputs=outputs, attrs=kwargs) - return helper.append_activation(out) - - func.__name__ = op_type - globals()[op_type] = func - func.__doc__ = _generate_doc_string_(op_proto) - global __all__ - __all__.append(op_type) - - -_create_op_func_('mean') -_create_op_func_('mul') -_create_op_func_('elementwise_add') -_create_op_func_('elementwise_div') -_create_op_func_('dropout') -_create_op_func_('reshape') -_create_op_func_('sigmoid') -_create_op_func_('scale') -_create_op_func_('reshape') -_create_op_func_('transpose') - - def cast(x, dtype, main_program=None): """ This function takes in the input with input_dtype @@ -471,19 +365,14 @@ def sums(input, out=None, main_program=None, startup_program=None): def linear_chain_crf(input, label, param_attr=None, - param_initializer=None, main_program=None, startup_program=None): - def _get_default_param_initializer(): - return Xavier() - helper = LayerHelper('linear_chain_crf', **locals()) size = input.shape[1] transition = helper.create_parameter( attr=helper.param_attr, shape=[size + 2, size], - dtype=helper.input_dtype(), - initializer=param_initializer or _get_default_param_initializer()) + dtype=helper.input_dtype()) alpha = helper.create_tmp_variable(dtype=helper.input_dtype()) emission_exps = helper.create_tmp_variable(dtype=helper.input_dtype()) transition_exps = helper.create_tmp_variable(dtype=helper.input_dtype()) @@ -503,6 +392,24 @@ def linear_chain_crf(input, return log_likelihood +def crf_decoding(input, + param_attr, + label=None, + main_program=None, + startup_program=None): + helper = LayerHelper('crf_decoding', **locals()) + transition = helper.get_parameter(param_attr.name) + viterbi_path = helper.create_tmp_variable(dtype=helper.input_dtype()) + helper.append_op( + type='crf_decoding', + inputs={"Emission": [input], + "Transition": transition, + "Label": label}, + outputs={"ViterbiPath": [viterbi_path]}) + + return viterbi_path + + def assign(input, output, main_program=None, startup_program=None): helper = LayerHelper('assign', **locals()) helper.append_op( @@ -640,15 +547,47 @@ def accuracy(input, label, k=1, correct=None, total=None, **kwargs): return acc_out +def chunk_eval(input, + label, + chunk_scheme, + num_chunk_types, + excluded_chunk_types=None, + **kwargs): + """ + This function computes the accuracy using the input and label. + The output is the top_k inputs and their indices. 
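A hedged sketch, not part of the diff: as the outputs appended below show, chunk_eval returns precision, recall and F1 for sequence chunking rather than top-k accuracy; the IOB scheme, the number of chunk types and the data layers are illustrative assumptions.

import paddle.v2.fluid as fluid

predicted_tags = fluid.layers.data(name='predicted_tags', shape=[1], dtype='int64', lod_level=1)
gold_tags = fluid.layers.data(name='gold_tags', shape=[1], dtype='int64', lod_level=1)
precision, recall, f1 = fluid.layers.chunk_eval(
    input=predicted_tags,
    label=gold_tags,
    chunk_scheme='IOB',
    num_chunk_types=3)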
+ """ + helper = LayerHelper("chunk_eval", **kwargs) + + # prepare output + precision = helper.create_tmp_variable(dtype="float32") + recall = helper.create_tmp_variable(dtype="float32") + f1_score = helper.create_tmp_variable(dtype="float32") + + helper.append_op( + type="chunk_eval", + inputs={"Inference": [input], + "Label": [label]}, + outputs={ + "Precision": [precision], + "Recall": [recall], + "F1-Score": [f1_score] + }, + attrs={ + "num_chunk_types": num_chunk_types, + 'chunk_scheme': chunk_scheme, + 'excluded_chunk_types': excluded_chunk_types or [] + }) + return precision, recall, f1_score + + def sequence_conv(input, num_filters, filter_size=3, filter_stride=1, padding=None, bias_attr=None, - bias_initializer=None, param_attr=None, - param_initializer=None, act=None, main_program=None, startup_program=None): @@ -658,37 +597,22 @@ def sequence_conv(input, in the input parameters to the function. """ - def _get_default_bias_initializer(): - return Constant() - - def _get_default_param_initializer(): - return Xavier() - # FIXME(dzh) : want to unify the argument of python layer # function. So we ignore some unecessary attributes. # such as, padding_trainable, context_start. helper = LayerHelper('sequence_conv', **locals()) dtype = helper.input_dtype() - - if param_initializer is None: - param_initializer = _get_default_param_initializer() - if bias_initializer is None: - bias_initializer = _get_default_bias_initializer() - filter_shape = [filter_size * input.shape[1], num_filters] - filter = helper.create_parameter( - attr=helper.param_attr, - shape=filter_shape, - dtype=dtype, - initializer=param_initializer) + filter_param = helper.create_parameter( + attr=helper.param_attr, shape=filter_shape, dtype=dtype) pre_bias = helper.create_tmp_variable(dtype) helper.append_op( type='sequence_conv', inputs={ 'X': [input], - 'Filter': [filter], + 'Filter': [filter_param], }, outputs={"Out": pre_bias}, attrs={ @@ -696,20 +620,18 @@ def sequence_conv(input, 'contextStart': -int(filter_size / 2), 'contextLength': filter_size }) - pre_act = helper.append_bias_op(pre_bias, bias_initializer) + pre_act = helper.append_bias_op(pre_bias) return helper.append_activation(pre_act) def conv2d(input, num_filters, filter_size, - stride=[1, 1], + stride=None, padding=None, groups=None, param_attr=None, - param_initializer=None, bias_attr=None, - bias_initializer=None, act=None, name=None, main_program=None, @@ -722,13 +644,8 @@ def conv2d(input, conv-2d output, if mentioned in the input parameters. 
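A hedged sketch, not part of the diff: a plain conv2d call under the defaults shown above (stride falling back to [1, 1], an MSRA-style Normal initializer for the filter); the image shape and filter count are illustrative.

import paddle.v2.fluid as fluid

img = fluid.layers.data(name='img', shape=[3, 32, 32], dtype='float32')
conv = fluid.layers.conv2d(input=img, num_filters=16, filter_size=[3, 3], act='relu')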
""" - def _get_default_bias_initializer(): - return Constant() - - def _get_default_param_initializer(filter_size, num_channels): - std = (2.0 / (filter_size[0]**2 * num_channels))**0.5 - return Normal(0.0, std, 0) - + if stride is None: + stride = [1, 1] helper = LayerHelper('conv2d', **locals()) dtype = helper.input_dtype() @@ -750,32 +667,30 @@ def conv2d(input, input_shape = input.shape filter_shape = [num_filters, num_filter_channels] + filter_size - if param_initializer is None: - param_initializer = _get_default_param_initializer(filter_size, - num_channels) - if bias_initializer is None: - bias_initializer = _get_default_bias_initializer() + def _get_default_param_initializer(): + std = (2.0 / (filter_size[0]**2 * num_channels))**0.5 + return Normal(0.0, std, 0) - filter = helper.create_parameter( + filter_param = helper.create_parameter( attr=helper.param_attr, shape=filter_shape, dtype=dtype, - initializer=param_initializer) + default_initializer=_get_default_param_initializer()) + pre_bias = helper.create_tmp_variable(dtype) helper.append_op( - type='conv2d', + type='conv2d_cudnn', inputs={ 'Input': input, - 'Filter': filter, + 'Filter': filter_param, }, outputs={"Output": pre_bias}, attrs={'strides': stride, 'paddings': padding, 'groups': groups}) - pre_act = helper.append_bias_op( - pre_bias, bias_initializer, dim_start=1, dim_end=2) + pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2) return helper.append_activation(pre_act) @@ -804,8 +719,8 @@ def sequence_pool(input, pool_type, **kwargs): def pool2d(input, pool_size, pool_type, - pool_stride=[1, 1], - pool_padding=[0, 0], + pool_stride=None, + pool_padding=None, global_pooling=False, main_program=None, startup_program=None): @@ -813,6 +728,10 @@ def pool2d(input, This function adds the operator for pooling in 2 dimensions, using the pooling configurations mentioned in input parameters. """ + if pool_padding is None: + pool_padding = [0, 0] + if pool_stride is None: + pool_stride = [1, 1] if pool_type not in ["max", "avg"]: raise ValueError( "Unknown pool_type: '%s'. It can only be 'max' or 'avg'.", @@ -876,12 +795,10 @@ def batch_norm(input, attr=helper.param_attr, shape=param_shape, dtype=dtype, - initializer=Constant(1.0)) + default_initializer=Constant(1.0)) + bias = helper.create_parameter( - attr=helper.param_attr, - shape=param_shape, - dtype=dtype, - initializer=Constant(0.0)) + attr=helper.param_attr, shape=param_shape, dtype=dtype, is_bias=True) mean = helper.create_global_variable( dtype=input.dtype, shape=param_shape, persistable=True) @@ -1356,7 +1273,7 @@ def lod_rank_table(x, level=0, main_program=None): def max_sequence_len(rank_table, main_program=None): """ - This function creates an operator to calculate the length of + This function creates an operator to calculate the length of max seqence through input rank_table(should be a lod_rank_table) """ helper = LayerHelper("max_seqence_len", **locals()) @@ -1399,7 +1316,7 @@ def lod_tensor_to_array(x, table, main_program=None): return array -def array_to_lod_tensor(x, table, main_program=None): +def array_to_lod_tensor(x, table, main_program=None, startup_program=None): """ This function creates an operator to convert an array to a LOD_Tensor. 
@@ -1480,7 +1397,11 @@ def zeros(shape, dtype, main_program=None): return fill_constant(value=0.0, **locals()) -def increment(x, value=1.0, in_place=True, main_program=None): +def increment(x, + value=1.0, + in_place=True, + main_program=None, + startup_program=None): """ This function creates an operator to increment each value in the input `x` by an amount: `value` as mentioned in the input parameter. This @@ -1495,11 +1416,11 @@ def increment(x, value=1.0, in_place=True, main_program=None): type='increment', inputs={'X': [x]}, outputs={'Out': [out]}, - attrs={'step': value}) + attrs={'step': float(value)}) return out -def array_write(x, i, array=None, main_program=None): +def array_write(x, i, array=None, main_program=None, startup_program=None): """ This function creates an operator to write the data out as a LOD_TENSOR_ARRAY. @@ -1538,7 +1459,7 @@ def less_than(x, y, cond=None, main_program=None, **ignored): return cond -def array_read(array, i, main_program=None): +def array_read(array, i, main_program=None, startup_program=None): """ This function creates an operator to read the data in as a LOD_TENSOR_ARRAY. @@ -1557,7 +1478,7 @@ def array_read(array, i, main_program=None): return out -def shrink_memory(x, i, table, main_program=None): +def shrink_memory(x, i, table, main_program=None, startup_program=None): """ This function creates an operator to shrink_rnn_memory using the RankTable as mentioned in the input parameter. @@ -1587,6 +1508,95 @@ def array_length(array, main_program=None): return tmp +def conv2d_transpose(input, + num_filters, + output_size=None, + filter_size=None, + padding=None, + stride=None, + param_attr=None, + main_program=None, + startup_program=None): + """ + The transpose of conv2d layer. + + This layer is also known as deconvolution layer. + + Args: + input(Variable): The input image with [N, C, H, W] format. + num_filters(int): The number of filter. It is as same as the output + image channel. + output_size(int|tuple|None): The output image size. If output size is a + tuple, it must contain two integers, (image_H, image_W). This + parameter only works when filter_size is None. + filter_size(int|tuple|None): The filter size. If filter_size is a tuple, + it must contain two integers, (filter_size_H, filter_size_W). + Otherwise, the filter will be a square. None if use output size to + calculate filter_size + padding(int|tuple): The padding size. If padding is a tuple, it must + contain two integers, (padding_H, padding_W). Otherwise, the + padding_H = padding_W = padding. + stride(int|tuple): The stride size. If stride is a tuple, it must + contain two integers, (stride_H, stride_W). Otherwise, the + stride_H = stride_W = stride. + param_attr: Parameter Attribute. + main_program(Program): the main program + startup_program(Program): the startup program + + Returns: + Variable: Output image. 
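A hedged sketch, not part of the diff: conv2d_transpose above can derive filter_size from a requested output_size; with a 16x16 input, stride [2, 2], zero padding and output_size 32 it derives a 2x2 filter, since out = (in - 1) * stride + filter under zero padding. The shapes are illustrative.

import paddle.v2.fluid as fluid

feat = fluid.layers.data(name='feat', shape=[8, 16, 16], dtype='float32')
upsampled = fluid.layers.conv2d_transpose(
    input=feat, num_filters=4, output_size=32, stride=[2, 2])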
+ """ + helper = LayerHelper("conv2d_transpose", **locals()) + if not isinstance(input, Variable): + raise TypeError("Input of conv2d_transpose must be Variable") + input_channel = input.shape[1] + + op_attr = dict() + + if isinstance(padding, int): + op_attr['paddings'] = [padding, padding] + elif padding is not None: + op_attr['paddings'] = padding + + if isinstance(stride, int): + op_attr['strides'] = stride + elif stride is not None: + op_attr['strides'] = stride + + if filter_size is None: + if output_size is None: + raise ValueError("output_size must be set when filter_size is None") + if isinstance(output_size, int): + output_size = [output_size, output_size] + + padding = op_attr.get('paddings', [0, 0]) + stride = op_attr.get('strides', [1, 1]) + + h_in = input.shape[2] + w_in = input.shape[3] + filter_size_h = output_size[0] - \ + (h_in - 1) * stride[0] + 2 * padding[0] + filter_size_w = output_size[1] - \ + (w_in - 1) * stride[1] + 2 * padding[1] + filter_size = [filter_size_h, filter_size_w] + elif isinstance(filter_size, int): + filter_size = [filter_size, filter_size] + + filter_shape = [input_channel, num_filters] + filter_size + img_filter = helper.create_parameter( + dtype=input.dtype, shape=filter_shape, attr=helper.param_attr) + + out = helper.create_tmp_variable(dtype=input.dtype) + helper.append_op( + type='conv2d_transpose', + inputs={'Input': [input], + 'Filter': [img_filter]}, + outputs={'Output': out}, + attrs=op_attr) + + return out + + class ConditionalBlockGuard(BlockGuard): def __init__(self, block): if not isinstance(block, ConditionalBlock): @@ -1807,3 +1817,209 @@ class IfElse(object): main_program=self.helper.main_program, startup_program=self.helper.startup_program)) return rlist + + +class DynamicRNN(object): + BEFORE_RNN = 0 + IN_RNN = 1 + AFTER_RNN = 2 + + def __init__(self, name=None, main_program=None, startup_program=None): + self.helper = LayerHelper( + 'dynamic_rnn', + name=name, + main_program=main_program, + startup_program=startup_program) + self.status = DynamicRNN.BEFORE_RNN + self.lod_rank_table = None + self.max_seq_len = None + self.step_idx = None + self.zero_idx = fill_constant(shape=[1], value=0, dtype='int64') + self.mem_dict = dict() + self.output_array = [] + self.outputs = [] + self.cond = self.helper.create_tmp_variable(dtype='bool') + self.cond.stop_gradient = False + self.while_op = While(self.cond) + self.input_array = [] + self.mem_link = [] + + def step_input(self, x): + self._assert_in_rnn_block_("step_input") + if not isinstance(x, Variable): + raise TypeError( + "step_input() can only take a Variable as its input") + parent_block = self._parent_block_() + if self.lod_rank_table is None: + self.lod_rank_table = parent_block.create_var( + name=unique_name('lod_rank_table'), + type=core.VarDesc.VarType.LOD_RANK_TABLE) + self.lod_rank_table.stop_gradient = True + parent_block.append_op( + type='lod_rank_table', + inputs={"X": x}, + outputs={"Out": self.lod_rank_table}) + self.max_seq_len = parent_block.create_var( + name=unique_name('dynamic_rnn_max_seq_len'), dtype='int64') + self.max_seq_len.stop_gradient = False + parent_block.append_op( + type='max_sequence_len', + inputs={'RankTable': self.lod_rank_table}, + outputs={"Out": self.max_seq_len}) + self.cond.stop_gradient = True + parent_block.append_op( + type='less_than', + inputs={'X': self.step_idx, + 'Y': self.max_seq_len}, + outputs={'Out': self.cond}) + + input_array = parent_block.create_var( + name=unique_name('dynamic_rnn_input_array'), + 
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, + dtype=x.dtype) + self.input_array.append((input_array, x.dtype)) + parent_block.append_op( + type='lod_tensor_to_array', + inputs={'X': x, + 'RankTable': self.lod_rank_table}, + outputs={'Out': input_array}) + return array_read( + array=input_array, i=self.step_idx, **self.helper.to_kwargs) + + @contextlib.contextmanager + def block(self): + if self.status != DynamicRNN.BEFORE_RNN: + raise ValueError("rnn.block() can only be invoke once") + self.step_idx = fill_constant(shape=[1], dtype='int64', value=0) + self.step_idx.stop_gradient = False + self.status = DynamicRNN.IN_RNN + with self.while_op.block(): + yield + increment( + x=self.step_idx, + value=1.0, + in_place=True, + **self.helper.to_kwargs) + + for new_mem, mem_array in self.mem_link: + array_write( + x=new_mem, + i=self.step_idx, + array=mem_array, + **self.helper.to_kwargs) + + less_than( + x=self.step_idx, + y=self.max_seq_len, + cond=self.cond, + **self.helper.to_kwargs) + + self.status = DynamicRNN.AFTER_RNN + for each_array in self.output_array: + self.outputs.append( + array_to_lod_tensor( + x=each_array, + table=self.lod_rank_table, + **self.helper.to_kwargs)) + + def __call__(self, *args, **kwargs): + if self.status != DynamicRNN.AFTER_RNN: + raise ValueError( + "Dynamic RNN outputs can only be retrieved after rnn block") + if len(self.outputs) == 1: + return self.outputs[0] + else: + return self.outputs + + def memory(self, init=None, shape=None, value=0.0, dtype='float32'): + self._assert_in_rnn_block_('memory') + if init is not None: + if not isinstance(init, Variable): + raise TypeError( + "The input arg `init` of memory() must be a Variable") + parent_block = self._parent_block_() + mem_array = parent_block.create_var( + name=unique_name('dynamic_rnn_mem_array'), + type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, + dtype=init.dtype) + parent_block.append_op( + type='write_to_array', + inputs={'X': init, + 'I': self.zero_idx}, + outputs={'Out': mem_array}) + retv = array_read( + array=mem_array, i=self.step_idx, **self.helper.to_kwargs) + retv = shrink_memory( + x=retv, + i=self.step_idx, + table=self.lod_rank_table, + **self.helper.to_kwargs) + self.mem_dict[retv.name] = mem_array + return retv + else: + if len(self.input_array) == 0: + raise ValueError( + "step_input should be invoked before memory(shape=..., value=...)" + ) + parent_block = self._parent_block_() + init = parent_block.create_var( + name=unique_name('mem_init'), dtype=dtype) + arr, dtype = self.input_array[0] + in0 = parent_block.create_var(name=unique_name('in0'), dtype=dtype) + parent_block.append_op( + type='read_from_array', + inputs={'X': [arr], + 'I': [self.zero_idx]}, + outputs={'Out': [in0]}) + parent_block.append_op( + type='fill_constant_batch_size_like', + inputs={'Input': [in0]}, + outputs={'Out': [init]}, + attrs={ + 'shape': [-1] + shape, + 'value': float(value), + 'dtype': init.dtype + }) + return self.memory(init=init) + + def update_memory(self, ex_mem, new_mem): + self._assert_in_rnn_block_('update_memory') + if not isinstance(ex_mem, Variable): + raise TypeError("The input arg `ex_mem` of update_memory() must " + "be a Variable") + if not isinstance(new_mem, Variable): + raise TypeError("The input arg `new_mem` of update_memory() must " + "be a Variable") + + mem_array = self.mem_dict.get(ex_mem.name, None) + if mem_array is None: + raise ValueError("Please invoke memory before update_memory") + if self.lod_rank_table is None: + raise ValueError("Please invoke step_input before 
update_memory") + + self.mem_link.append((new_mem, mem_array)) + + def output(self, *outputs): + self._assert_in_rnn_block_('output') + parent_block = self._parent_block_() + for each in outputs: + outside_array = parent_block.create_var( + name=unique_name("_".join( + [self.helper.name, "output_array", each.name])), + type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, + dtype=each.dtype) + array_write(x=each, i=self.step_idx, array=outside_array) + self.output_array.append(outside_array) + + def _parent_block_(self): + prog = self.helper.main_program + parent_idx = prog.current_block().parent_idx + assert parent_idx >= 0 + parent_block = prog.block(parent_idx) + + return parent_block + + def _assert_in_rnn_block_(self, method): + if self.status != DynamicRNN.IN_RNN: + raise ValueError("{0} can only be invoked inside rnn block.".format( + method)) diff --git a/python/paddle/v2/fluid/nets.py b/python/paddle/v2/fluid/nets.py index 05728ad75a5bd1e87aa3c75ffcc4eac34b6b956c..7ef524318e637604cc22ba9d8d7cafe1b7505261 100644 --- a/python/paddle/v2/fluid/nets.py +++ b/python/paddle/v2/fluid/nets.py @@ -9,6 +9,7 @@ def simple_img_conv_pool(input, pool_size, pool_stride, act, + param_attr=None, pool_type='max', main_program=None, startup_program=None): @@ -16,6 +17,7 @@ def simple_img_conv_pool(input, input=input, num_filters=num_filters, filter_size=filter_size, + param_attr=param_attr, act=act, main_program=main_program, startup_program=startup_program) @@ -36,6 +38,7 @@ def img_conv_group(input, conv_padding=1, conv_filter_size=3, conv_act=None, + param_attr=None, conv_with_batchnorm=False, conv_batchnorm_drop_rate=None, pool_stride=1, @@ -57,6 +60,7 @@ def img_conv_group(input, conv_padding = __extend_list__(conv_padding) conv_filter_size = __extend_list__(conv_filter_size) + param_attr = __extend_list__(param_attr) conv_with_batchnorm = __extend_list__(conv_with_batchnorm) conv_batchnorm_drop_rate = __extend_list__(conv_batchnorm_drop_rate) @@ -70,6 +74,7 @@ def img_conv_group(input, num_filters=conv_num_filter[i], filter_size=conv_filter_size[i], padding=conv_padding[i], + param_attr=param_attr[i], act=local_conv_act, main_program=main_program, startup_program=startup_program) @@ -101,6 +106,7 @@ def img_conv_group(input, def sequence_conv_pool(input, num_filters, filter_size, + param_attr=None, act="sigmoid", pool_type="max", main_program=None, @@ -109,6 +115,7 @@ def sequence_conv_pool(input, input=input, num_filters=num_filters, filter_size=filter_size, + param_attr=param_attr, act=act, main_program=main_program, startup_program=startup_program) diff --git a/python/paddle/v2/fluid/optimizer.py b/python/paddle/v2/fluid/optimizer.py index 934e024742fd00bf05cc0d7caaaa870c18a68074..bbdfab2df9519b77e5df184c00aadf703ec765e0 100644 --- a/python/paddle/v2/fluid/optimizer.py +++ b/python/paddle/v2/fluid/optimizer.py @@ -18,8 +18,9 @@ class Optimizer(object): but need to use one of it's implementation. """ - def __init__(self, global_step=None): + def __init__(self, global_step=None, regularization=None): self._global_step = global_step + self.regularization = regularization # Dictionary of accumulators. Some optimizer subclasses need to # allocate and manage extra variables associated with the parameters # to train. These variables are called accumulators. @@ -197,10 +198,10 @@ class Optimizer(object): This method combines interface `append_backward_ops()` and `create_optimization_pass()` into one. 
""" - params_grads = append_backward_ops(loss, parameter_list, no_grad_set or - set()) + params_grads = append_backward_ops(loss, parameter_list, no_grad_set) # Add regularization if any - params_grads = append_regularization_ops(params_grads) + params_grads = append_regularization_ops(params_grads, + self.regularization) optimize_ops = self.create_optimization_pass(params_grads, loss, startup_program) return optimize_ops @@ -210,9 +211,9 @@ class SGDOptimizer(Optimizer): """ Simple SGD optimizer without any state. """ - def __init__(self, learning_rate, global_step=None): + def __init__(self, learning_rate, **kwargs): assert learning_rate is not None - super(SGDOptimizer, self).__init__(global_step) + super(SGDOptimizer, self).__init__(**kwargs) self.type = "sgd" self._learning_rate = learning_rate @@ -237,14 +238,10 @@ class MomentumOptimizer(Optimizer): """ _velocity_acc_str = "velocity" - def __init__(self, - learning_rate, - momentum, - use_nesterov=False, - global_step=None): + def __init__(self, learning_rate, momentum, use_nesterov=False, **kwargs): assert learning_rate is not None assert momentum is not None - super(MomentumOptimizer, self).__init__(global_step) + super(MomentumOptimizer, self).__init__(**kwargs) self.type = "momentum" self._learning_rate = learning_rate self._momentum = momentum @@ -285,10 +282,10 @@ class AdagradOptimizer(Optimizer): """ _moment_acc_str = "moment" - def __init__(self, learning_rate, epsilon=1.0e-6, global_step=None): + def __init__(self, learning_rate, epsilon=1.0e-6, **kwargs): assert learning_rate is not None assert epsilon is not None - super(AdagradOptimizer, self).__init__(global_step) + super(AdagradOptimizer, self).__init__(**kwargs) self.type = "adagrad" self._learning_rate = learning_rate self._epsilon = epsilon @@ -332,12 +329,12 @@ class AdamOptimizer(Optimizer): beta1=0.9, beta2=0.999, epsilon=1e-8, - global_step=None): + **kwargs): assert learning_rate is not None assert beta1 is not None assert beta2 is not None assert epsilon is not None - super(AdamOptimizer, self).__init__(global_step) + super(AdamOptimizer, self).__init__(**kwargs) self.type = "adam" self._learning_rate = learning_rate self._beta1 = beta1 @@ -437,12 +434,12 @@ class AdamaxOptimizer(Optimizer): beta1=0.9, beta2=0.999, epsilon=1e-8, - global_step=None): + **kwargs): assert learning_rate is not None assert beta1 is not None assert beta2 is not None assert epsilon is not None - super(AdamaxOptimizer, self).__init__() + super(AdamaxOptimizer, self).__init__(**kwargs) self.type = "adamax" self._learning_rate = learning_rate self._beta1 = beta1 @@ -515,16 +512,12 @@ class DecayedAdagradOptimizer(Optimizer): """ _moment_acc_str = "moment" - def __init__(self, - learning_rate, - decay=0.95, - epsilon=1.0e-6, - global_step=None): + def __init__(self, learning_rate, decay=0.95, epsilon=1.0e-6, **kwargs): assert learning_rate is not None assert decay is not None assert epsilon is not None - super(DecayedAdagradOptimizer, self).__init__(global_step) + super(DecayedAdagradOptimizer, self).__init__(**kwargs) self.type = "decayed_adagrad" self._learning_rate = learning_rate self._decay = decay diff --git a/python/paddle/v2/fluid/param_attr.py b/python/paddle/v2/fluid/param_attr.py new file mode 100644 index 0000000000000000000000000000000000000000..7952a5ea51c00f72664443fb26faa455e89da7be --- /dev/null +++ b/python/paddle/v2/fluid/param_attr.py @@ -0,0 +1,63 @@ +from initializer import Initializer, Xavier, Constant +from regularizer import WeightDecayRegularizer + + +class 
ParamAttr(object): + def __init__(self, + name=None, + initializer=None, + learning_rate=1.0, + regularizer=None, + trainable=True): + self.name = name + self.initializer = initializer + self.learning_rate = learning_rate + self.regularizer = regularizer + self.trainable = trainable + + def set_default_initializer(self, initializer): + if initializer is None: + if self.initializer is None: + raise ValueError("ParamAttr.initializer is not set") + return + + if self.initializer is not None: + return + + self.initializer = initializer + + def set_default_param_initializer(self): + self.set_default_initializer(Xavier()) + + def set_default_bias_initializer(self): + self.set_default_initializer(Constant(0.0)) + + @staticmethod + def to_attr(arg): + if arg is None: + return ParamAttr() + elif isinstance(arg, list) or isinstance(arg, tuple): + return [ParamAttr.to_attr(a) for a in arg] + elif isinstance(arg, ParamAttr): + return arg + elif isinstance(arg, str) or isinstance(arg, unicode): + return ParamAttr(name=arg) + elif isinstance(arg, Initializer): + return ParamAttr(initializer=arg) + elif isinstance(arg, WeightDecayRegularizer): + return ParamAttr(regularizer=arg) + elif isinstance(arg, bool): + return ParamAttr.to_attr(None) if arg else False + else: + raise TypeError("{0} cannot be cast to ParamAttr".format(type(arg))) + + def to_kwargs(self, with_initializer=False): + kwargs = { + 'name': self.name, + 'learning_rate': self.learning_rate, + 'regularizer': self.regularizer, + 'trainable': self.trainable + } + if with_initializer: + kwargs['initializer'] = self.initializer + return kwargs diff --git a/python/paddle/v2/fluid/profiler.py b/python/paddle/v2/fluid/profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..2069b713faf41c5c00ceaf47e030864b98c678da --- /dev/null +++ b/python/paddle/v2/fluid/profiler.py @@ -0,0 +1,46 @@ +import paddle.v2.fluid.core as core +from contextlib import contextmanager + +__all__ = ['cuda_profiler'] + +NVPROF_CONFIG = [ + "gpustarttimestamp", + "gpuendtimestamp", + "gridsize3d", + "threadblocksize", + "streamid", + "enableonstart 0", + "conckerneltrace", +] + + +@contextmanager +def cuda_profiler(output_file, output_mode=None, config=None): + """The CUDA profiler. + This function is used to profile a CUDA program through the CUDA runtime + application programming interface. The profiling result will be written into + `output_file` in Key-Value pair format or Comma separated values format. + The user can set the output mode by the `output_mode` argument and the + counters/options for profiling by the `config` argument. The default config + is ['gpustarttimestamp', 'gpuendtimestamp', 'gridsize3d', + 'threadblocksize', 'streamid', 'enableonstart 0', 'conckerneltrace']. + + Args: + output_file (string) : The output file name, the result will be + written into this file. + output_mode (string) : The output mode, either Key-Value pair format or + Comma separated values format; it should be 'kvp' or 'csv'. + config (list of string) : The profiler options and counters; refer to the + "Compute Command Line Profiler User Guide" for the available values. + """ + if output_mode is None: + output_mode = 'csv' + if output_mode not in ['kvp', 'csv']: + raise ValueError("The output mode must be 'kvp' or 'csv'.") + config = NVPROF_CONFIG if config is None else config + core.nvprof_init(output_file, output_mode, config) + # Enables profiler collection by the active CUDA profiling tool. + core.nvprof_start() + yield + # Disables profiler collection.
+ core.nvprof_stop() diff --git a/python/paddle/v2/fluid/registry.py b/python/paddle/v2/fluid/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..6f5dd365ded628ad49800f0a04f208ec49cca4c5 --- /dev/null +++ b/python/paddle/v2/fluid/registry.py @@ -0,0 +1,186 @@ +import re +import cStringIO +import warnings +import functools +import inspect + +import proto.framework_pb2 as framework_pb2 +from framework import OpProtoHolder, Variable, Program, Operator +from paddle.v2.fluid.layer_helper import LayerHelper, unique_name + +__all__ = ['deprecated', 'register_layer'] + + +def _convert_(name): + """ + Formatting. + + Args: + name: The name/alias + + This function takes in a name and converts it to a standard format of + group1_group2. Where as per the regular expression, group1 can have + alphabets and numbers and group2 has capital alphabets. + + """ + s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) + return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() + + +def _generate_doc_string_(op_proto): + """ + Generate docstring by OpProto + + Args: + op_proto (framework_pb2.OpProto): a protobuf message typed OpProto + + Returns: + str: the document string + """ + + def _type_to_str_(tp): + return framework_pb2.AttrType.Name(tp) + + if not isinstance(op_proto, framework_pb2.OpProto): + raise TypeError("OpProto should be `framework_pb2.OpProto`") + + buf = cStringIO.StringIO() + buf.write(op_proto.comment) + buf.write('\nArgs:\n') + for each_input in op_proto.inputs: + line_begin = ' {0}: '.format(_convert_(each_input.name)) + buf.write(line_begin) + buf.write(each_input.comment) + buf.write('\n') + buf.write(' ' * len(line_begin)) + buf.write('Duplicable: ') + buf.write(str(each_input.duplicable)) + buf.write(' Optional: ') + buf.write(str(each_input.dispensable)) + buf.write('\n') + + for each_attr in op_proto.attrs: + buf.write(' ') + buf.write(each_attr.name) + buf.write(' (') + buf.write(_type_to_str_(each_attr.type)) + buf.write('): ') + buf.write(each_attr.comment) + buf.write('\n') + + if len(op_proto.outputs) != 0: + buf.write('\nReturns:\n') + buf.write(' ') + for each_opt in op_proto.outputs: + if not each_opt.intermediate: + break + buf.write(each_opt.comment) + + return buf.getvalue() + + +def register_layer(op_type): + """ + Register an Python layer for an Operator + + Args: + op_type: The name of the operator to be created + + This function takes in the operator type (sigmoid, mean , average etc) and + creates the operator functionality. + + """ + op_proto = OpProtoHolder.instance().get_op_proto(op_type) + not_intermediate_outputs = \ + filter(lambda output: not output.intermediate, op_proto.outputs) + intermediate_outputs = \ + filter(lambda output: output.intermediate, op_proto.outputs) + + if len(not_intermediate_outputs) != 1: + raise ValueError("Only one non intermediate output operator can be", + "automatically generated") + + if not_intermediate_outputs[0].duplicable: + raise ValueError( + "Only non duplicable op can be automatically generated") + + for output in intermediate_outputs: + if output.duplicable: + raise ValueError("The op can be automatically generated only when ", + "all intermediate ops are not duplicable") + + o_name = not_intermediate_outputs[0].name + intermediate_output_names = [output.name for output in intermediate_outputs] + + def infer_and_check_dtype(op_proto, **kwargs): + """ + This function performs the sanity check for dtype and + instance type. 
+ """ + dtype = None + for ipt in op_proto.inputs: + name = _convert_(ipt.name) + val = kwargs.pop(name, []) + if not isinstance(val, list) and not isinstance(val, tuple): + val = [val] + for each in val: + if not isinstance(each, Variable): + raise ValueError("input of {0} must be variable".format( + op_type)) + + if dtype is None: + dtype = each.dtype + elif dtype != each.dtype: + raise ValueError( + "operator {0} must input same dtype. {1} vs {2}".format( + op_type, dtype, each.dtype)) + + return dtype + + def func(**kwargs): + helper = LayerHelper(op_type, **kwargs) + + dtype = infer_and_check_dtype(op_proto, **kwargs) + + inputs = dict() + for ipt in op_proto.inputs: + name = _convert_(ipt.name) + val = kwargs.pop(name, []) + if not isinstance(val, list) and not isinstance(val, tuple): + val = [val] + inputs[ipt.name] = val + + outputs = dict() + out = helper.create_tmp_variable(dtype=dtype) + outputs[o_name] = [out] + for name in intermediate_output_names: + outputs[name] = [helper.create_tmp_variable(dtype=dtype)] + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=kwargs) + return helper.append_activation(out) + + func.__name__ = op_type + func.__doc__ = _generate_doc_string_(op_proto) + return func + + +def deprecated(func_or_class): + """ + Deprecated warning decorator. It will result a warning message. + Should be used before class or function, member function + """ + + @functools.wraps(func) + def func_wrapper(*args, **kwargs): + """ + Wrap func with deprecated warning + """ + warnings.simplefilter('always', DeprecationWarning) #turn off filter + warnings.warn( + "Call to deprecated function {}.".format(func.__name__), + category=DeprecationWarning, + stacklevel=2) + warnings.simplefilter('default', DeprecationWarning) #reset filter + return func(*args, **kwargs) + + return func_wrapper diff --git a/python/paddle/v2/fluid/regularizer.py b/python/paddle/v2/fluid/regularizer.py index c2c18e1951234f7160ff9f92d6dd6922a56683dd..d1955b00479676448d99603a31249aa7ac6a0d3f 100644 --- a/python/paddle/v2/fluid/regularizer.py +++ b/python/paddle/v2/fluid/regularizer.py @@ -3,7 +3,7 @@ import framework __all__ = ['append_regularization_ops', 'L1Decay', 'L2Decay'] -def append_regularization_ops(parameters_and_grads): +def append_regularization_ops(parameters_and_grads, regularization=None): """Create and add backward regularization Operators Creates and adds backward regularization operators in the BlockDesc. @@ -14,6 +14,8 @@ def append_regularization_ops(parameters_and_grads): Args: parameters_and_grads: A list of (parameters, gradients) pairs that need to be regularized. + regularization: A global regularizer. If the parameter is not + set. It will be applied with regularizer. 
Returns: list of (parameters, gradients) pair with the regularized gradient @@ -23,14 +25,19 @@ def append_regularization_ops(parameters_and_grads): """ params_and_grads = [] for param, grad in parameters_and_grads: + regularization_term = None + if param.regularizer is not None: + # Add variable for regularization term in grad block + regularization_term = param.regularizer(param, grad.block) + elif regularization is not None: + regularization_term = regularization(param, grad.block) + # If no gradient or no regularization specified, # then we don't need to do anything - if grad is None or param.regularizer is None: + if grad is None or regularization_term is None: params_and_grads.append((param, grad)) continue - # Add variable for regularization term in grad block - regularization_term = param.regularizer(param, grad.block) assert grad.shape == regularization_term.shape grad.block.append_op( @@ -145,7 +152,7 @@ class L1DecayRegularizer(WeightDecayRegularizer): # import paddle.fluid as fluid # # hidden = fluid.layers.fc(..., -# param_attr=ParamAttr(fluid.regularizer.Xavier())) +# param_attr=fluid.regularizer.Xavier()) # # It is no need to add a `Regularizer` as the class suffix L1Decay = L1DecayRegularizer diff --git a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py index 9f98493adb21a03b8efde0f88c490e77c9d303e7..fbf46ac6cba8fa4981cc8a6e8f5434a510c52d7d 100644 --- a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py +++ b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py @@ -22,6 +22,7 @@ train_reader = paddle.batch( batch_size=BATCH_SIZE) place = fluid.CPUPlace() +feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) @@ -31,12 +32,8 @@ for pass_id in range(PASS_NUM): fluid.io.save_persistables(exe, "./fit_a_line.model/") fluid.io.load_persistables(exe, "./fit_a_line.model/") for data in train_reader(): - x_data = np.array(map(lambda _: _[0], data)).astype("float32") - y_data = np.array(map(lambda _: _[1], data)).astype("float32") - avg_loss_value, = exe.run(fluid.default_main_program(), - feed={'x': x_data, - 'y': y_data}, + feed=feeder.feed(data), fetch_list=[avg_cost]) if avg_loss_value[0] < 10.0: diff --git a/python/paddle/v2/fluid/tests/book/test_image_classification_train.py b/python/paddle/v2/fluid/tests/book/test_image_classification_train.py index cc45b10b90868858c61334a3a43acf65c3d4eaf5..4e71b6f345ea7a1e6d29bc4ad810bc5b5f99d456 100644 --- a/python/paddle/v2/fluid/tests/book/test_image_classification_train.py +++ b/python/paddle/v2/fluid/tests/book/test_image_classification_train.py @@ -69,8 +69,7 @@ def vgg16_bn_drop(input): drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5) fc1 = fluid.layers.fc(input=drop, size=512, act=None) - reshape1 = fluid.layers.reshape(x=fc1, shape=list(fc1.shape + (1, 1))) - bn = fluid.layers.batch_norm(input=reshape1, act='relu') + bn = fluid.layers.batch_norm(input=fc1, act='relu') drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5) fc2 = fluid.layers.fc(input=drop2, size=512, act=None) return fc2 @@ -114,23 +113,14 @@ train_reader = paddle.batch( place = fluid.CPUPlace() exe = fluid.Executor(place) - +feeder = fluid.DataFeeder(place=place, feed_list=[images, label]) exe.run(fluid.default_startup_program()) for pass_id in range(PASS_NUM): accuracy.reset(exe) for data in train_reader(): - img_data = np.array(map(lambda x: x[0].reshape(data_shape), - data)).astype("float32") - y_data = np.array(map(lambda x: 
x[1], data)).astype("int64") - batch_size = 1 - for i in y_data.shape: - batch_size = batch_size * i - y_data = y_data.reshape([batch_size, 1]) - loss, acc = exe.run(fluid.default_main_program(), - feed={"pixel": img_data, - "label": y_data}, + feed=feeder.feed(data), fetch_list=[avg_cost] + accuracy.metrics) pass_acc = accuracy.eval(exe) print("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + str( diff --git a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py index 93987a2b80dc9ca304a708d4799bc38b448a68c4..d2693b602ea5de9d2d60fbe114820b25119bfa3f 100644 --- a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py @@ -1,3 +1,5 @@ +import math + import numpy as np import paddle.v2 as paddle import paddle.v2.dataset.conll05 as conll05 @@ -28,23 +30,15 @@ def load_parameter(file_name, h, w): return np.fromfile(f, dtype=np.float32).reshape(h, w) -def db_lstm(): +def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, + **ignored): # 8 features - word = fluid.layers.data(name='word_data', shape=[1], dtype='int64') - predicate = fluid.layers.data(name='verb_data', shape=[1], dtype='int64') - ctx_n2 = fluid.layers.data(name='ctx_n2_data', shape=[1], dtype='int64') - ctx_n1 = fluid.layers.data(name='ctx_n1_data', shape=[1], dtype='int64') - ctx_0 = fluid.layers.data(name='ctx_0_data', shape=[1], dtype='int64') - ctx_p1 = fluid.layers.data(name='ctx_p1_data', shape=[1], dtype='int64') - ctx_p2 = fluid.layers.data(name='ctx_p2_data', shape=[1], dtype='int64') - mark = fluid.layers.data(name='mark_data', shape=[1], dtype='int64') - predicate_embedding = fluid.layers.embedding( input=predicate, size=[pred_len, word_dim], dtype='float32', is_sparse=IS_SPARSE, - param_attr={'name': 'vemb'}) + param_attr='vemb') mark_embedding = fluid.layers.embedding( input=mark, @@ -57,8 +51,8 @@ def db_lstm(): fluid.layers.embedding( size=[word_dict_len, word_dim], input=x, - param_attr={'name': embedding_name, - 'trainable': False}) for x in word_input + param_attr=fluid.ParamAttr( + name=embedding_name, trainable=False)) for x in word_input ] emb_layers.append(predicate_embedding) emb_layers.append(mark_embedding) @@ -120,25 +114,58 @@ def to_lodtensor(data, place): def main(): # define network topology - feature_out = db_lstm() - target = fluid.layers.data(name='target', shape=[1], dtype='int64') + word = fluid.layers.data( + name='word_data', shape=[1], dtype='int64', lod_level=1) + predicate = fluid.layers.data( + name='verb_data', shape=[1], dtype='int64', lod_level=1) + ctx_n2 = fluid.layers.data( + name='ctx_n2_data', shape=[1], dtype='int64', lod_level=1) + ctx_n1 = fluid.layers.data( + name='ctx_n1_data', shape=[1], dtype='int64', lod_level=1) + ctx_0 = fluid.layers.data( + name='ctx_0_data', shape=[1], dtype='int64', lod_level=1) + ctx_p1 = fluid.layers.data( + name='ctx_p1_data', shape=[1], dtype='int64', lod_level=1) + ctx_p2 = fluid.layers.data( + name='ctx_p2_data', shape=[1], dtype='int64', lod_level=1) + mark = fluid.layers.data( + name='mark_data', shape=[1], dtype='int64', lod_level=1) + feature_out = db_lstm(**locals()) + target = fluid.layers.data( + name='target', shape=[1], dtype='int64', lod_level=1) crf_cost = fluid.layers.linear_chain_crf( input=feature_out, label=target, - param_attr={"name": 'crfw', - "learning_rate": mix_hidden_lr}) + param_attr=fluid.ParamAttr( + name='crfw', learning_rate=mix_hidden_lr)) 
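# Editorial aside (not part of the diff above): the param_attr values used throughout
# these tests -- a plain string such as 'crfw', a fluid.ParamAttr instance, or a bare
# regularizer -- are all normalized by ParamAttr.to_attr() from the new param_attr.py
# added earlier in this change. A minimal, hedged sketch of that normalization,
# assuming the new paddle.v2.fluid.param_attr module is on the path:
from paddle.v2.fluid.param_attr import ParamAttr

assert ParamAttr.to_attr('crfw').name == 'crfw'          # str  -> ParamAttr(name='crfw')
assert ParamAttr.to_attr(None).name is None              # None -> default ParamAttr()
attrs = ParamAttr.to_attr(['user_table', 'job_table'])   # list -> list of ParamAttr
assert [a.name for a in attrs] == ['user_table', 'job_table']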
avg_cost = fluid.layers.mean(x=crf_cost) + # TODO(qiao) - # 1. add crf_decode_layer and evaluator - # 2. use other optimizer and check why out will be NAN + # check other optimizers and check why out will be NAN sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.0001) sgd_optimizer.minimize(avg_cost) + # TODO(qiao) + # add dependency track and move this config before optimizer + crf_decode = fluid.layers.crf_decoding( + input=feature_out, param_attr=fluid.ParamAttr(name='crfw')) + + precision, recall, f1_score = fluid.layers.chunk_eval( + input=crf_decode, + label=target, + chunk_scheme="IOB", + num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0))) + train_data = paddle.batch( paddle.reader.shuffle( paddle.dataset.conll05.test(), buf_size=8192), batch_size=BATCH_SIZE) place = fluid.CPUPlace() + feeder = fluid.DataFeeder( + feed_list=[ + word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, mark, target + ], + place=place) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) @@ -150,33 +177,19 @@ def main(): batch_id = 0 for pass_id in xrange(PASS_NUM): for data in train_data(): - word_data = to_lodtensor(map(lambda x: x[0], data), place) - ctx_n2_data = to_lodtensor(map(lambda x: x[1], data), place) - ctx_n1_data = to_lodtensor(map(lambda x: x[2], data), place) - ctx_0_data = to_lodtensor(map(lambda x: x[3], data), place) - ctx_p1_data = to_lodtensor(map(lambda x: x[4], data), place) - ctx_p2_data = to_lodtensor(map(lambda x: x[5], data), place) - verb_data = to_lodtensor(map(lambda x: x[6], data), place) - mark_data = to_lodtensor(map(lambda x: x[7], data), place) - target = to_lodtensor(map(lambda x: x[8], data), place) - outs = exe.run(fluid.default_main_program(), - feed={ - 'word_data': word_data, - 'ctx_n2_data': ctx_n2_data, - 'ctx_n1_data': ctx_n1_data, - 'ctx_0_data': ctx_0_data, - 'ctx_p1_data': ctx_p1_data, - 'ctx_p2_data': ctx_p2_data, - 'verb_data': verb_data, - 'mark_data': mark_data, - 'target': target - }, - fetch_list=[avg_cost]) + feed=feeder.feed(data), + fetch_list=[avg_cost, precision, recall, f1_score]) avg_cost_val = np.array(outs[0]) + precision_val = np.array(outs[1]) + recall_val = np.array(outs[2]) + f1_score_val = np.array(outs[3]) if batch_id % 10 == 0: print("avg_cost=" + str(avg_cost_val)) + print("precision_val=" + str(precision_val)) + print("recall_val:" + str(recall_val)) + print("f1_score_val:" + str(f1_score_val)) # exit early for CI exit(0) diff --git a/python/paddle/v2/fluid/tests/book/test_machine_translation.py b/python/paddle/v2/fluid/tests/book/test_machine_translation.py new file mode 100644 index 0000000000000000000000000000000000000000..80ffc5a544c201ed45a6de46b5a2addff82246b7 --- /dev/null +++ b/python/paddle/v2/fluid/tests/book/test_machine_translation.py @@ -0,0 +1,119 @@ +import numpy as np +import paddle.v2 as paddle +import paddle.v2.fluid as fluid +import paddle.v2.fluid.core as core +import paddle.v2.fluid.framework as framework +import paddle.v2.fluid.layers as layers +from paddle.v2.fluid.executor import Executor + +dict_size = 30000 +source_dict_dim = target_dict_dim = dict_size +src_dict, trg_dict = paddle.dataset.wmt14.get_dict(dict_size) +hidden_dim = 32 +word_dim = 16 +IS_SPARSE = True +batch_size = 10 +max_length = 50 +topk_size = 50 +trg_dic_size = 10000 + +decoder_size = hidden_dim + + +def encoder_decoder(): + # encoder + src_word_id = layers.data( + name="src_word_id", shape=[1], dtype='int64', lod_level=1) + src_embedding = layers.embedding( + input=src_word_id, + size=[dict_size, word_dim], + 
dtype='float32', + is_sparse=IS_SPARSE, + param_attr=fluid.ParamAttr(name='vemb')) + + fc1 = fluid.layers.fc(input=src_embedding, size=hidden_dim * 4, act='tanh') + lstm_hidden0, lstm_0 = layers.dynamic_lstm(input=fc1, size=hidden_dim * 4) + encoder_out = layers.sequence_pool(input=lstm_hidden0, pool_type="last") + + # decoder + trg_language_word = layers.data( + name="target_language_word", shape=[1], dtype='int64', lod_level=1) + trg_embedding = layers.embedding( + input=trg_language_word, + size=[dict_size, word_dim], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr=fluid.ParamAttr(name='vemb')) + + rnn = fluid.layers.DynamicRNN() + with rnn.block(): + current_word = rnn.step_input(trg_embedding) + mem = rnn.memory(init=encoder_out) + fc1 = fluid.layers.fc(input=[current_word, mem], + size=decoder_size, + act='tanh') + out = fluid.layers.fc(input=fc1, size=target_dict_dim, act='softmax') + rnn.update_memory(mem, fc1) + rnn.output(out) + + return rnn() + + +def to_lodtensor(data, place): + seq_lens = [len(seq) for seq in data] + cur_len = 0 + lod = [cur_len] + for l in seq_lens: + cur_len += l + lod.append(cur_len) + flattened_data = np.concatenate(data, axis=0).astype("int64") + flattened_data = flattened_data.reshape([len(flattened_data), 1]) + res = core.LoDTensor() + res.set(flattened_data, place) + res.set_lod([lod]) + return res + + +def main(): + rnn_out = encoder_decoder() + label = layers.data( + name="target_language_next_word", shape=[1], dtype='int64', lod_level=1) + cost = layers.cross_entropy(input=rnn_out, label=label) + avg_cost = fluid.layers.mean(x=cost) + + optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4) + optimizer.minimize(avg_cost) + + train_data = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.wmt14.train(dict_size), buf_size=1000), + batch_size=batch_size) + + place = core.CPUPlace() + exe = Executor(place) + + exe.run(framework.default_startup_program()) + + batch_id = 0 + for pass_id in xrange(2): + for data in train_data(): + word_data = to_lodtensor(map(lambda x: x[0], data), place) + trg_word = to_lodtensor(map(lambda x: x[1], data), place) + trg_word_next = to_lodtensor(map(lambda x: x[2], data), place) + outs = exe.run(framework.default_main_program(), + feed={ + 'src_word_id': word_data, + 'target_language_word': trg_word, + 'target_language_next_word': trg_word_next + }, + fetch_list=[avg_cost]) + avg_cost_val = np.array(outs[0]) + print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) + + " avg_cost=" + str(avg_cost_val)) + if batch_id > 3: + exit(0) + batch_id += 1 + + +if __name__ == '__main__': + main() diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits_conv.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits_conv.py index ba686b56f8603834c12f5ed24e0ef7308c78585d..35bf8da924dc76475df9bd5e6a4c04f4d204426a 100644 --- a/python/paddle/v2/fluid/tests/book/test_recognize_digits_conv.py +++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits_conv.py @@ -37,20 +37,14 @@ train_reader = paddle.batch( place = fluid.CPUPlace() exe = fluid.Executor(place) - +feeder = fluid.DataFeeder(feed_list=[images, label], place=place) exe.run(fluid.default_startup_program()) for pass_id in range(PASS_NUM): accuracy.reset(exe) for data in train_reader(): - img_data = np.array(map(lambda x: x[0].reshape([1, 28, 28]), - data)).astype("float32") - y_data = np.array(map(lambda x: x[1], data)).astype("int64") - y_data = y_data.reshape([BATCH_SIZE, 1]) - loss, acc = exe.run(fluid.default_main_program(), - feed={"pixel": 
img_data, - "label": y_data}, + feed=feeder.feed(data), fetch_list=[avg_cost] + accuracy.metrics) pass_acc = accuracy.eval(exe) print("pass_id=" + str(pass_id) + " acc=" + str(acc) + " pass_acc=" + diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py index 8ca45134dc01ec21e720ca46c8ad020128aa6e04..4dc2c50e1c963a189b727f0a7edcb6886abd9038 100644 --- a/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py +++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py @@ -6,24 +6,21 @@ import paddle.v2.fluid as fluid BATCH_SIZE = 128 image = fluid.layers.data(name='x', shape=[784], dtype='float32') -param_attr = { - 'name': None, - 'regularization': fluid.regularizer.L2Decay(0.0005 * BATCH_SIZE) -} +regularizer = fluid.regularizer.L2Decay(0.0005 * BATCH_SIZE) hidden1 = fluid.layers.fc(input=image, size=128, act='relu', - param_attr=param_attr) + param_attr=regularizer) hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu', - param_attr=param_attr) + param_attr=regularizer) predict = fluid.layers.fc(input=hidden2, size=10, act='softmax', - param_attr=param_attr) + param_attr=regularizer) label = fluid.layers.data(name='y', shape=[1], dtype='int64') @@ -51,40 +48,22 @@ test_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=128) place = fluid.CPUPlace() exe = fluid.Executor(place) - +feeder = fluid.DataFeeder(feed_list=[image, label], place=place) exe.run(fluid.default_startup_program()) PASS_NUM = 100 for pass_id in range(PASS_NUM): accuracy.reset(exe) for data in train_reader(): - x_data = np.array(map(lambda x: x[0], data)).astype("float32") - y_data = np.array(map(lambda x: x[1], data)).astype("int64") - y_data = np.expand_dims(y_data, axis=1) - - tensor_x = fluid.LoDTensor() - tensor_x.set(x_data, place) - - tensor_y = fluid.LoDTensor() - tensor_y.set(y_data, place) - - outs = exe.run(fluid.default_main_program(), - feed={'x': tensor_x, - 'y': tensor_y}, - fetch_list=[avg_cost] + accuracy.metrics) - out = np.array(outs[0]) - acc = np.array(outs[1]) + out, acc = exe.run(fluid.default_main_program(), + feed=feeder.feed(data), + fetch_list=[avg_cost] + accuracy.metrics) pass_acc = accuracy.eval(exe) test_accuracy.reset(exe) for data in test_reader(): - x_data = np.array(map(lambda x: x[0], data)).astype("float32") - y_data = np.array(map(lambda x: x[1], data)).astype("int64") - y_data = np.expand_dims(y_data, axis=1) - out, acc = exe.run(inference_program, - feed={'x': x_data, - 'y': y_data}, + feed=feeder.feed(data), fetch_list=[avg_cost] + test_accuracy.metrics) test_pass_acc = test_accuracy.eval(exe) diff --git a/python/paddle/v2/fluid/tests/book/test_recommender_system.py b/python/paddle/v2/fluid/tests/book/test_recommender_system.py index f8dc1518579d5a9d7a8d0498dcc5fd8a6d1692c4..db91ca4f9c7d17fb51fc5d65a0464e976d98523c 100644 --- a/python/paddle/v2/fluid/tests/book/test_recommender_system.py +++ b/python/paddle/v2/fluid/tests/book/test_recommender_system.py @@ -24,7 +24,7 @@ def get_usr_combined_features(): input=uid, dtype='float32', size=[USR_DICT_SIZE, 32], - param_attr={'name': 'user_table'}, + param_attr='user_table', is_sparse=IS_SPARSE) usr_fc = layers.fc(input=usr_emb, size=32) @@ -36,7 +36,7 @@ def get_usr_combined_features(): usr_gender_emb = layers.embedding( input=usr_gender_id, size=[USR_GENDER_DICT_SIZE, 16], - param_attr={'name': 'gender_table'}, + param_attr='gender_table', is_sparse=IS_SPARSE) usr_gender_fc = layers.fc(input=usr_gender_emb, size=16) 
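# Editorial aside (not part of the diff above): the recurring change across these test
# scripts replaces hand-built numpy arrays and LoDTensors with fluid.DataFeeder, which
# converts one mini-batch from a reader into the feed dict expected by Executor.run().
# A minimal sketch of just the conversion step (it mirrors test_data_feeder.py below);
# the layer shapes are illustrative only:
import paddle.v2.fluid as fluid

img = fluid.layers.data(name='image', shape=[1, 28, 28])
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
feeder = fluid.DataFeeder(feed_list=[img, label], place=fluid.CPUPlace())
# Each batch element is ordered like feed_list: (image, label).
feed_dict = feeder.feed([([0] * 784, [9]), ([1] * 784, [1])])
# feed_dict can then be passed straight through: exe.run(..., feed=feed_dict, fetch_list=[...])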
@@ -48,7 +48,7 @@ def get_usr_combined_features(): input=usr_age_id, size=[USR_AGE_DICT_SIZE, 16], is_sparse=IS_SPARSE, - param_attr={'name': 'age_table'}) + param_attr='age_table') usr_age_fc = layers.fc(input=usr_age_emb, size=16) @@ -58,7 +58,7 @@ def get_usr_combined_features(): usr_job_emb = layers.embedding( input=usr_job_id, size=[USR_JOB_DICT_SIZE, 16], - param_attr={'name': 'job_table'}, + param_attr='job_table', is_sparse=IS_SPARSE) usr_job_fc = layers.fc(input=usr_job_emb, size=16) @@ -81,7 +81,7 @@ def get_mov_combined_features(): input=mov_id, dtype='float32', size=[MOV_DICT_SIZE, 32], - param_attr={'name': 'movie_table'}, + param_attr='movie_table', is_sparse=IS_SPARSE) mov_fc = layers.fc(input=mov_emb, size=32) diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py index be875a952b7086ee64984525d70ffd3f1ecb5fae..f103358edca9bbd2e28c99afd249f97b1d8069ae 100644 --- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py +++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py @@ -4,10 +4,8 @@ import paddle.v2 as paddle import paddle.v2.fluid as fluid -def convolution_net(input_dim, class_dim=2, emb_dim=32, hid_dim=32): - data = fluid.layers.data(name="words", shape=[1], dtype="int64") - label = fluid.layers.data(name="label", shape=[1], dtype="int64") - +def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32, + hid_dim=32): emb = fluid.layers.embedding(input=data, size=[input_dim, emb_dim]) conv_3 = fluid.nets.sequence_conv_pool( input=emb, @@ -55,8 +53,11 @@ def main(): dict_dim = len(word_dict) class_dim = 2 + data = fluid.layers.data( + name="words", shape=[1], dtype="int64", lod_level=1) + label = fluid.layers.data(name="label", shape=[1], dtype="int64") cost, accuracy, acc_out = convolution_net( - input_dim=dict_dim, class_dim=class_dim) + data, label, input_dim=dict_dim, class_dim=class_dim) train_data = paddle.batch( paddle.reader.shuffle( @@ -64,25 +65,16 @@ def main(): batch_size=BATCH_SIZE) place = fluid.CPUPlace() exe = fluid.Executor(place) + feeder = fluid.DataFeeder(feed_list=[data, label], place=place) exe.run(fluid.default_startup_program()) for pass_id in xrange(PASS_NUM): accuracy.reset(exe) for data in train_data(): - tensor_words = to_lodtensor(map(lambda x: x[0], data), place) - - label = np.array(map(lambda x: x[1], data)).astype("int64") - label = label.reshape([BATCH_SIZE, 1]) - - tensor_label = fluid.LoDTensor() - tensor_label.set(label, place) - - cost_val, acc_val = exe.run( - fluid.default_main_program(), - feed={"words": tensor_words, - "label": tensor_label}, - fetch_list=[cost, acc_out]) + cost_val, acc_val = exe.run(fluid.default_main_program(), + feed=feeder.feed(data), + fetch_list=[cost, acc_out]) pass_acc = accuracy.eval(exe) print("cost=" + str(cost_val) + " acc=" + str(acc_val) + " pass_acc=" + str(pass_acc)) diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py index 094a3cdcda12eaee351476e99a388c44b3c81cd6..cd28f04b8574778316d70e7d8a03026f807c3e52 100644 --- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py +++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py @@ -3,14 +3,14 @@ import paddle.v2 as paddle import paddle.v2.fluid as fluid -def stacked_lstm_net(input_dim, +def stacked_lstm_net(data, + label, + input_dim, class_dim=2, 
emb_dim=128, hid_dim=512, stacked_num=3): assert stacked_num % 2 == 1 - data = fluid.layers.data(name="words", shape=[1], dtype="int64") - label = fluid.layers.data(name="label", shape=[1], dtype="int64") emb = fluid.layers.embedding(input=data, size=[input_dim, emb_dim]) # add bias attr @@ -65,8 +65,11 @@ def main(): dict_dim = len(word_dict) class_dim = 2 + data = fluid.layers.data( + name="words", shape=[1], dtype="int64", lod_level=1) + label = fluid.layers.data(name="label", shape=[1], dtype="int64") cost, accuracy, acc_out = stacked_lstm_net( - input_dim=dict_dim, class_dim=class_dim) + data, label, input_dim=dict_dim, class_dim=class_dim) train_data = paddle.batch( paddle.reader.shuffle( @@ -74,25 +77,16 @@ def main(): batch_size=BATCH_SIZE) place = fluid.CPUPlace() exe = fluid.Executor(place) + feeder = fluid.DataFeeder(feed_list=[data, label], place=place) exe.run(fluid.default_startup_program()) for pass_id in xrange(PASS_NUM): accuracy.reset(exe) for data in train_data(): - tensor_words = to_lodtensor(map(lambda x: x[0], data), place) - - label = np.array(map(lambda x: x[1], data)).astype("int64") - label = label.reshape([BATCH_SIZE, 1]) - - tensor_label = fluid.LoDTensor() - tensor_label.set(label, place) - - cost_val, acc_val = exe.run( - fluid.default_main_program(), - feed={"words": tensor_words, - "label": tensor_label}, - fetch_list=[cost, acc_out]) + cost_val, acc_val = exe.run(fluid.default_main_program(), + feed=feeder.feed(data), + fetch_list=[cost, acc_out]) pass_acc = accuracy.eval(exe) print("cost=" + str(cost_val) + " acc=" + str(acc_val) + " pass_acc=" + str(pass_acc)) diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py index b2479320330bde5771c3d4a8e2923b5ab1eecf2e..80f859967979ec07536d652d4ea620fd4ddb2daa 100644 --- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py +++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py @@ -8,7 +8,8 @@ def lstm_net(dict_dim, class_dim=2, emb_dim=32, seq_len=80, batch_size=50): name="words", shape=[seq_len * batch_size, 1], append_batch_size=False, - dtype="int64") + dtype="int64", + lod_level=1) label = fluid.layers.data( name="label", shape=[batch_size, 1], @@ -21,6 +22,7 @@ def lstm_net(dict_dim, class_dim=2, emb_dim=32, seq_len=80, batch_size=50): c_pre_init = fluid.layers.fill_constant( dtype=emb.dtype, shape=[batch_size, emb_dim], value=0.0) + c_pre_init.stop_gradient = False layer_1_out = fluid.layers.lstm( emb, c_pre_init=c_pre_init, hidden_dim=emb_dim) layer_1_out = fluid.layers.transpose(x=layer_1_out, axis=[1, 0, 2]) diff --git a/python/paddle/v2/fluid/tests/book/test_word2vec.py b/python/paddle/v2/fluid/tests/book/test_word2vec.py index b0cd1a518cd1be60474df126470573a5a5b81b70..8b928ff9eed41f8945c749058b4177fd023452ba 100644 --- a/python/paddle/v2/fluid/tests/book/test_word2vec.py +++ b/python/paddle/v2/fluid/tests/book/test_word2vec.py @@ -23,25 +23,25 @@ embed_first = fluid.layers.embedding( size=[dict_size, EMBED_SIZE], dtype='float32', is_sparse=IS_SPARSE, - param_attr={'name': 'shared_w'}) + param_attr='shared_w') embed_second = fluid.layers.embedding( input=second_word, size=[dict_size, EMBED_SIZE], dtype='float32', is_sparse=IS_SPARSE, - param_attr={'name': 'shared_w'}) + param_attr='shared_w') embed_third = fluid.layers.embedding( input=third_word, size=[dict_size, EMBED_SIZE], dtype='float32', is_sparse=IS_SPARSE, - param_attr={'name': 'shared_w'}) + param_attr='shared_w') 
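# Editorial aside (not part of the diff above): in this word2vec test the four
# embedding layers all pass param_attr='shared_w', so they resolve to one shared
# embedding table named 'shared_w'. A hedged sketch of the equivalent explicit form,
# with made-up sizes, assuming fluid.ParamAttr from this change:
import paddle.v2.fluid as fluid

word = fluid.layers.data(name='w', shape=[1], dtype='int64')
shared_attr = fluid.ParamAttr(name='shared_w')  # same effect as param_attr='shared_w'
emb_a = fluid.layers.embedding(input=word, size=[1000, 32], param_attr=shared_attr)
emb_b = fluid.layers.embedding(input=word, size=[1000, 32], param_attr=shared_attr)
# Both layers read (and update) the single parameter named 'shared_w'.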
embed_forth = fluid.layers.embedding( input=forth_word, size=[dict_size, EMBED_SIZE], dtype='float32', is_sparse=IS_SPARSE, - param_attr={'name': 'shared_w'}) + param_attr='shared_w') concat_embed = fluid.layers.concat( input=[embed_first, embed_second, embed_third, embed_forth], axis=1) @@ -57,28 +57,17 @@ train_reader = paddle.batch( place = fluid.CPUPlace() exe = fluid.Executor(place) - -# fix https://github.com/PaddlePaddle/Paddle/issues/5434 then remove -# below exit line. -exit(0) +feeder = fluid.DataFeeder( + feed_list=[first_word, second_word, third_word, forth_word, next_word], + place=place) exe.run(fluid.default_startup_program()) for pass_id in range(PASS_NUM): for data in train_reader(): - input_data = [[data_idx[idx] for data_idx in data] for idx in xrange(5)] - input_data = map(lambda x: np.array(x).astype("int64"), input_data) - input_data = map(lambda x: np.expand_dims(x, axis=1), input_data) - avg_cost_np = exe.run(fluid.default_main_program(), - feed={ - 'firstw': input_data[0], - 'secondw': input_data[1], - 'thirdw': input_data[2], - 'forthw': input_data[3], - 'nextw': input_data[4] - }, + feed=feeder.feed(data), fetch_list=[avg_cost]) - if avg_cost_np[0] < 10.0: + if avg_cost_np[0] < 5.0: exit(0) # if avg cost less than 10.0, we think our code is good. exit(1) diff --git a/python/paddle/v2/fluid/tests/demo/fc_gan.py b/python/paddle/v2/fluid/tests/demo/fc_gan.py new file mode 100644 index 0000000000000000000000000000000000000000..cae959593e855f11c04585341d86478b649d17c9 --- /dev/null +++ b/python/paddle/v2/fluid/tests/demo/fc_gan.py @@ -0,0 +1,157 @@ +import errno +import math +import os + +import matplotlib +import numpy + +import paddle.v2 as paddle +import paddle.v2.fluid as fluid + +matplotlib.use('Agg') +import matplotlib.pyplot as plt +import matplotlib.gridspec as gridspec + +NOISE_SIZE = 100 +NUM_PASS = 1000 +NUM_REAL_IMGS_IN_BATCH = 121 +NUM_TRAIN_TIMES_OF_DG = 3 +LEARNING_RATE = 2e-5 + + +def D(x): + hidden = fluid.layers.fc(input=x, + size=200, + act='relu', + param_attr='D.w1', + bias_attr='D.b1') + logits = fluid.layers.fc(input=hidden, + size=1, + act=None, + param_attr='D.w2', + bias_attr='D.b2') + return logits + + +def G(x): + hidden = fluid.layers.fc(input=x, + size=200, + act='relu', + param_attr='G.w1', + bias_attr='G.b1') + img = fluid.layers.fc(input=hidden, + size=28 * 28, + act='tanh', + param_attr='G.w2', + bias_attr='G.b2') + return img + + +def plot(gen_data): + gen_data.resize(gen_data.shape[0], 28, 28) + n = int(math.ceil(math.sqrt(gen_data.shape[0]))) + fig = plt.figure(figsize=(n, n)) + gs = gridspec.GridSpec(n, n) + gs.update(wspace=0.05, hspace=0.05) + + for i, sample in enumerate(gen_data): + ax = plt.subplot(gs[i]) + plt.axis('off') + ax.set_xticklabels([]) + ax.set_yticklabels([]) + ax.set_aspect('equal') + plt.imshow(sample.reshape(28, 28), cmap='Greys_r') + + return fig + + +def main(): + try: + os.makedirs("./out") + except OSError as e: + if e.errno != errno.EEXIST: + raise + + startup_program = fluid.Program() + d_program = fluid.Program() + dg_program = fluid.Program() + + with fluid.program_guard(d_program, startup_program): + img = fluid.layers.data(name='img', shape=[784], dtype='float32') + d_loss = fluid.layers.sigmoid_cross_entropy_with_logits( + x=D(img), + label=fluid.layers.data( + name='label', shape=[1], dtype='float32')) + d_loss = fluid.layers.mean(x=d_loss) + + with fluid.program_guard(dg_program, startup_program): + noise = fluid.layers.data( + name='noise', shape=[NOISE_SIZE], dtype='float32') + g_img = 
G(x=noise) + g_program = dg_program.clone() + dg_loss = fluid.layers.sigmoid_cross_entropy_with_logits( + x=D(g_img), + label=fluid.layers.fill_constant_batch_size_like( + input=noise, dtype='float32', shape=[-1, 1], value=1.0)) + dg_loss = fluid.layers.mean(x=dg_loss) + + opt = fluid.optimizer.Adam(learning_rate=LEARNING_RATE) + + opt.minimize(loss=d_loss, startup_program=startup_program) + opt.minimize( + loss=dg_loss, + startup_program=startup_program, + parameter_list=[ + p.name for p in g_program.global_block().all_parameters() + ]) + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(startup_program) + + num_true = NUM_REAL_IMGS_IN_BATCH + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.mnist.train(), buf_size=60000), + batch_size=num_true) + + for pass_id in range(NUM_PASS): + for batch_id, data in enumerate(train_reader()): + num_true = len(data) + n = numpy.random.uniform( + low=-1.0, high=1.0, + size=[num_true * NOISE_SIZE]).astype('float32').reshape( + [num_true, NOISE_SIZE]) + generated_img = exe.run(g_program, + feed={'noise': n}, + fetch_list={g_img})[0] + real_data = numpy.array(map(lambda x: x[0], data)).astype('float32') + real_data = real_data.reshape(num_true, 784) + total_data = numpy.concatenate([real_data, generated_img]) + total_label = numpy.concatenate([ + numpy.ones( + shape=[real_data.shape[0], 1], dtype='float32'), + numpy.zeros( + shape=[real_data.shape[0], 1], dtype='float32') + ]) + d_loss_np = exe.run(d_program, + feed={'img': total_data, + 'label': total_label}, + fetch_list={d_loss})[0] + for _ in xrange(NUM_TRAIN_TIMES_OF_DG): + n = numpy.random.uniform( + low=-1.0, high=1.0, + size=[2 * num_true * NOISE_SIZE]).astype('float32').reshape( + [2 * num_true, NOISE_SIZE, 1, 1]) + dg_loss_np = exe.run(dg_program, + feed={'noise': n}, + fetch_list={dg_loss})[0] + print("Pass ID={0}, Batch ID={1}, D-Loss={2}, DG-Loss={3}".format( + pass_id, batch_id, d_loss_np, dg_loss_np)) + # generate image each batch + fig = plot(generated_img) + plt.savefig( + 'out/{0}.png'.format(str(pass_id).zfill(3)), bbox_inches='tight') + plt.close(fig) + + +if __name__ == '__main__': + main() diff --git a/python/paddle/v2/fluid/tests/test_activation_op.py b/python/paddle/v2/fluid/tests/test_activation_op.py index bd52bef2605874d26e880fb09e589891fc1934d5..b052374dc7ec3c5684d6adfda6b9d000c5e19fe0 100644 --- a/python/paddle/v2/fluid/tests/test_activation_op.py +++ b/python/paddle/v2/fluid/tests/test_activation_op.py @@ -1,6 +1,7 @@ import unittest import numpy as np from op_test import OpTest +from scipy.special import expit class TestExp(OpTest): @@ -455,5 +456,20 @@ class TestHardSigmoid(OpTest): self.check_grad(['X'], 'Y', max_relative_error=0.002) +class TestSwish(OpTest): + def setUp(self): + self.op_type = "swish" + X = np.random.uniform(0.1, 1, [11, 17]).astype("float32") + self.inputs = {'X': X} + self.attrs = {'beta': 2.3} + self.outputs = {'Y': X * expit(self.attrs['beta'] * X)} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Y', max_relative_error=0.008) + + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_array_read_write_op.py b/python/paddle/v2/fluid/tests/test_array_read_write_op.py index b7790b01062d480cbd6c9e1a626d318385b4f61e..f6120aedecf1015c279b8f218f5e37f2e598ab91 100644 --- a/python/paddle/v2/fluid/tests/test_array_read_write_op.py +++ b/python/paddle/v2/fluid/tests/test_array_read_write_op.py @@ -3,7 +3,7 @@ import paddle.v2.fluid.core as core 
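# Editorial aside (not part of the diff above): the new TestSwish case checks the swish
# op against the closed form y = x * sigmoid(beta * x). A tiny numpy sketch of that
# reference computation, assuming scipy is available (the test imports it as well):
import numpy as np
from scipy.special import expit

beta = 2.3
x = np.random.uniform(0.1, 1, [11, 17]).astype("float32")
y_ref = x * expit(beta * x)  # expit(z) == 1 / (1 + exp(-z))
assert y_ref.shape == x.shape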
import paddle.v2.fluid.layers as layers from paddle.v2.fluid.executor import Executor from paddle.v2.fluid.backward import append_backward_ops -from paddle.v2.fluid.framework import g_main_program +from paddle.v2.fluid.framework import default_main_program import numpy @@ -66,7 +66,7 @@ class TestArrayReadWrite(unittest.TestCase): append_backward_ops(total_sum_scaled) - g_vars = map(g_main_program.global_block().var, + g_vars = map(default_main_program().global_block().var, [each_x.name + "@GRAD" for each_x in x]) g_out = [ item.sum() diff --git a/python/paddle/v2/fluid/tests/test_batch_norm_op.py b/python/paddle/v2/fluid/tests/test_batch_norm_op.py index 71f9599e0de83c86808f7e62547f80d3d50ffc7d..e766a68c0e338b07e47260e40edc544c98555382 100644 --- a/python/paddle/v2/fluid/tests/test_batch_norm_op.py +++ b/python/paddle/v2/fluid/tests/test_batch_norm_op.py @@ -21,6 +21,13 @@ def get_backward_op(scope, op, no_grad_set): def _reference_training(x, scale, offset, epsilon, data_format): + x_shape = x.shape + if len(x_shape) == 2: + if data_format == "NCHW": + x = np.reshape(x, (x.shape[0], x.shape[1], 1, 1)) + else: + x = np.reshape(x, (x.shape[0], 1, 1, x.shape[1])) + if data_format == "NCHW": n, c, h, w = x.shape x_square = x * x @@ -39,6 +46,8 @@ def _reference_training(x, scale, offset, epsilon, data_format): offset_tile = np.reshape(offset, (1, c, 1, 1)) offset_tile = np.reshape(offset_tile, (1, c, 1, 1)) y = normalized * scale_tile + offset_tile + if len(x_shape) == 2: + y = np.reshape(y, (y.shape[0], y.shape[1])) return y, mean, var elif data_format == "NHWC": x_square = x * x @@ -48,7 +57,10 @@ def _reference_training(x, scale, offset, epsilon, data_format): mean = x_sum / element_count var = x_square_sum / element_count - mean * mean normalized = (x - mean) / np.sqrt(var + epsilon) - return (normalized * scale + offset), mean, var + y = normalized * scale + offset + if len(x_shape) == 2: + y = np.reshape(y, x_shape) + return y, mean, var else: raise ValueError("Unknown data order.") @@ -65,6 +77,18 @@ def _reference_grad(x, grad_y, scale, mean, var, epsilon, data_format): # (x - mean) * sum(grad_y * (x - mean)) / (var + epsilon)) # transfer from (N, C, H, W) to (N, H, W, C) to simplify computation + x_shape = x.shape + + if len(x_shape) == 2: + if data_format == "NCHW": + x = np.reshape(x, (x.shape[0], x.shape[1], 1, 1)) + grad_y = np.reshape(grad_y, + (grad_y.shape[0], grad_y.shape[1], 1, 1)) + else: + x = np.reshape(x, (x.shape[0], 1, 1, x.shape[1])) + grad_y = np.reshape(grad_y, + (grad_y.shape[0], 1, 1, grad_y.shape[1])) + if data_format == "NCHW": x = np.transpose(x, (0, 2, 3, 1)) grad_y = np.transpose(grad_y, (0, 2, 3, 1)) @@ -83,6 +107,9 @@ def _reference_grad(x, grad_y, scale, mean, var, epsilon, data_format): grad_x = np.transpose(grad_x, (0, 3, 1, 2)) x = np.transpose(x, (0, 3, 1, 2)) grad_y = np.transpose(grad_y, (0, 3, 1, 2)) + + if len(x_shape) == 2: + grad_x = np.reshape(grad_x, x_shape) return grad_x, grad_scale, grad_offset @@ -127,7 +154,7 @@ class TestBatchNormOp(OpTest): momentum = 0.9 # N, H, W, C: 2, 3, 4, 2 - n, h, w, c = 2, 3, 4, 2 + n, h, w, c = 2, 3, 4, 5 x_shape = [n, h, w, c] scale_shape = [c] @@ -184,20 +211,23 @@ class TestBatchNormOp(OpTest): print 'python: NHWC, NCHW, backward checking passed' def test_forward_backward(self): - def test_with_place(place, tensor_format): + def test_with_place(place, tensor_format, shape): # attr epsilon = 0.00001 momentum = 0.9 - # N, H, W, C: 12, 3, 4, 2 - n, h, w, c = 2, 3, 4, 2 - - if data_format == "NHWC": - x_shape = 
[n, h, w, c] - elif data_format == "NCHW": - x_shape = [n, c, h, w] + if len(shape) == 2: + x_shape = shape + c = shape[1] else: - raise ValueError("Unknown data type.") + # n, h, w, c = 2, 3, 4, 2 + n, h, w, c = shape[0], shape[1], shape[2], shape[3] + if data_format == "NHWC": + x_shape = [n, h, w, c] + elif data_format == "NCHW": + x_shape = [n, c, h, w] + else: + raise ValueError("Unknown data type.") scale_shape = [c] x_val = np.random.random_sample(x_shape).astype(np.float32) @@ -219,7 +249,10 @@ class TestBatchNormOp(OpTest): # for gradient test # y_grad = np.ones(x_shape).astype(np.float32) y_grad = np.zeros(x_shape).astype(np.float32) - y_grad[0, 0, 0, 0] = 1. + if len(y_grad.shape) == 2: + y_grad[0, 0] = 1. + else: + y_grad[0, 0, 0, 0] = 1. # y_grad = np.random.random_sample(x_shape).astype(np.float32) x_grad_ref, scale_grad_ref, bias_grad_ref = _reference_grad( x_val, y_grad, scale_val, saved_mean, var_ref, epsilon, @@ -313,7 +346,8 @@ class TestBatchNormOp(OpTest): places.append(core.GPUPlace(0)) for place in places: for data_format in ["NCHW", "NHWC"]: - test_with_place(place, data_format) + test_with_place(place, data_format, [2, 3, 4, 5]) + test_with_place(place, data_format, [2, 3]) if __name__ == '__main__': diff --git a/python/paddle/v2/fluid/tests/test_chunk_eval_op.py b/python/paddle/v2/fluid/tests/test_chunk_eval_op.py index 48673296a67716c4de804da533f0fd2567f10e2e..819e65a653437f0c34e14403f76317ff3b7f37f4 100644 --- a/python/paddle/v2/fluid/tests/test_chunk_eval_op.py +++ b/python/paddle/v2/fluid/tests/test_chunk_eval_op.py @@ -120,7 +120,7 @@ class TestChunkEvalOp(OpTest): self.num_correct_chunks, self.num_infer_chunks, self.num_label_chunks = 4, 5, 9 def set_data(self): - infer = np.zeros((self.batch_size, )).astype('int32') + infer = np.zeros((self.batch_size, )).astype('int64') infer.fill(self.num_chunk_types * self.num_tag_types) label = np.copy(infer) starts = np.random.choice( diff --git a/python/paddle/v2/fluid/tests/test_conditional_block.py b/python/paddle/v2/fluid/tests/test_conditional_block.py index d953ee7ddc37d150d87cbd680379410a4d16f6b1..2b9d8f351a2836cd723d629d4790de1e068d0ea3 100644 --- a/python/paddle/v2/fluid/tests/test_conditional_block.py +++ b/python/paddle/v2/fluid/tests/test_conditional_block.py @@ -1,7 +1,7 @@ import unittest import paddle.v2.fluid.layers as layers import paddle.v2.fluid.core as core -from paddle.v2.fluid.framework import g_startup_program, g_main_program +from paddle.v2.fluid.framework import default_startup_program, default_main_program from paddle.v2.fluid.executor import Executor from paddle.v2.fluid.backward import append_backward_ops import numpy @@ -19,7 +19,7 @@ class ConditionalBlock(unittest.TestCase): cpu = core.CPUPlace() exe = Executor(cpu) - exe.run(g_startup_program) + exe.run(default_startup_program()) x = numpy.random.random(size=(10, 1)).astype('float32') @@ -29,7 +29,9 @@ class ConditionalBlock(unittest.TestCase): append_backward_ops(loss=loss) outs = exe.run( feed={'X': x}, - fetch_list=[g_main_program.block(0).var(data.name + "@GRAD")])[0] + fetch_list=[ + default_main_program().block(0).var(data.name + "@GRAD") + ])[0] print outs diff --git a/python/paddle/v2/fluid/tests/test_crf_decoding_op.py b/python/paddle/v2/fluid/tests/test_crf_decoding_op.py index ee2b996bf430d5a0edaa0de459a937adffd9f8f6..ab573da31dfb9d7b40e44a79465a61cdc6b62a46 100644 --- a/python/paddle/v2/fluid/tests/test_crf_decoding_op.py +++ b/python/paddle/v2/fluid/tests/test_crf_decoding_op.py @@ -20,14 +20,14 @@ class CRFDecoding(object): 
self.w = transition_weights[2:, :] self.track = np.zeros( - (seq_start_positions[-1], self.tag_num), dtype="int32") + (seq_start_positions[-1], self.tag_num), dtype="int64") self.decoded_path = np.zeros( - (seq_start_positions[-1], 1), dtype="int32") + (seq_start_positions[-1], 1), dtype="int64") def _decode_one_sequence(self, decoded_path, x): seq_len, tag_num = x.shape alpha = np.zeros((seq_len, tag_num), dtype="float64") - track = np.zeros((seq_len, tag_num), dtype="int32") + track = np.zeros((seq_len, tag_num), dtype="int64") for i in range(tag_num): alpha[0, i] = self.a[i] + x[0, i] @@ -125,10 +125,10 @@ class TestCRFDecodingOp2(OpTest): axis=0) labels = np.random.randint( - low=0, high=TAG_NUM, size=(lod[-1][-1], 1), dtype="int32") + low=0, high=TAG_NUM, size=(lod[-1][-1], 1), dtype="int64") predicted_labels = np.ones( - (lod[-1][-1], 1), dtype="int32") * (TAG_NUM - 1) - expected_output = (labels == predicted_labels).astype("int32") + (lod[-1][-1], 1), dtype="int64") * (TAG_NUM - 1) + expected_output = (labels == predicted_labels).astype("int64") self.inputs = { "Emission": (emission, lod), diff --git a/python/paddle/v2/fluid/tests/test_data_feeder.py b/python/paddle/v2/fluid/tests/test_data_feeder.py new file mode 100644 index 0000000000000000000000000000000000000000..454969320321b72342803f507f0054f79f276669 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_data_feeder.py @@ -0,0 +1,13 @@ +import paddle.v2.fluid as fluid + + +def test_converter(): + img = fluid.layers.data(name='image', shape=[1, 28, 28]) + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + feeder = fluid.DataFeeder([img, label], fluid.CPUPlace()) + result = feeder.feed([[[0] * 784, [9]], [[1] * 784, [1]]]) + print(result) + + +if __name__ == '__main__': + test_converter() diff --git a/python/paddle/v2/fluid/tests/test_dyn_rnn.py b/python/paddle/v2/fluid/tests/test_dyn_rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..034266c26f48197872a3419135d45b30a8120e8a --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_dyn_rnn.py @@ -0,0 +1,130 @@ +import paddle.v2.fluid as fluid +import paddle.v2 as paddle +import unittest +import numpy + + +class TestDynRNN(unittest.TestCase): + def setUp(self): + self.word_dict = paddle.dataset.imdb.word_dict() + self.BATCH_SIZE = 2 + self.train_data = paddle.batch( + paddle.dataset.imdb.train(self.word_dict), + batch_size=self.BATCH_SIZE) + + def test_plain_while_op(self): + main_program = fluid.Program() + startup_program = fluid.Program() + + with fluid.program_guard(main_program, startup_program): + sentence = fluid.layers.data( + name='word', shape=[1], dtype='int64', lod_level=1) + sent_emb = fluid.layers.embedding( + input=sentence, size=[len(self.word_dict), 32], dtype='float32') + + label = fluid.layers.data(name='label', shape=[1], dtype='float32') + + rank_table = fluid.layers.lod_rank_table(x=sent_emb) + + sent_emb_array = fluid.layers.lod_tensor_to_array( + x=sent_emb, table=rank_table) + + seq_len = fluid.layers.max_sequence_len(rank_table=rank_table) + i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) + i.stop_gradient = False + + boot_mem = fluid.layers.fill_constant_batch_size_like( + input=fluid.layers.array_read( + array=sent_emb_array, i=i), + value=0, + shape=[-1, 100], + dtype='float32') + boot_mem.stop_gradient = False + + mem_array = fluid.layers.array_write(x=boot_mem, i=i) + + cond = fluid.layers.less_than(x=i, y=seq_len) + cond.stop_gradient = False + while_op = fluid.layers.While(cond=cond) + out = 
fluid.layers.create_array(dtype='float32') + + with while_op.block(): + mem = fluid.layers.array_read(array=mem_array, i=i) + ipt = fluid.layers.array_read(array=sent_emb_array, i=i) + + mem = fluid.layers.shrink_memory(x=mem, i=i, table=rank_table) + + hidden = fluid.layers.fc(input=[mem, ipt], size=100, act='tanh') + + fluid.layers.array_write(x=hidden, i=i, array=out) + fluid.layers.increment(x=i, in_place=True) + fluid.layers.array_write(x=hidden, i=i, array=mem_array) + fluid.layers.less_than(x=i, y=seq_len, cond=cond) + + all_timesteps = fluid.layers.array_to_lod_tensor( + x=out, table=rank_table) + last = fluid.layers.sequence_pool( + input=all_timesteps, pool_type='last') + logits = fluid.layers.fc(input=last, size=1, act=None) + loss = fluid.layers.sigmoid_cross_entropy_with_logits( + x=logits, label=label) + loss = fluid.layers.mean(x=loss) + sgd = fluid.optimizer.SGD(1e-4) + sgd.minimize(loss=loss) + cpu = fluid.CPUPlace() + exe = fluid.Executor(cpu) + exe.run(startup_program) + feeder = fluid.DataFeeder(feed_list=[sentence, label], place=cpu) + + data = next(self.train_data()) + val = exe.run(main_program, feed=feeder.feed(data), + fetch_list=[loss])[0] + self.assertEqual((1, ), val.shape) + print(val) + self.assertFalse(numpy.isnan(val)) + + def test_train_dyn_rnn(self): + main_program = fluid.Program() + startup_program = fluid.Program() + with fluid.program_guard(main_program, startup_program): + sentence = fluid.layers.data( + name='word', shape=[1], dtype='int64', lod_level=1) + sent_emb = fluid.layers.embedding( + input=sentence, size=[len(self.word_dict), 32], dtype='float32') + + rnn = fluid.layers.DynamicRNN() + + with rnn.block(): + in_ = rnn.step_input(sent_emb) + mem = rnn.memory(shape=[100], dtype='float32') + out_ = fluid.layers.fc(input=[in_, mem], size=100, act='tanh') + rnn.update_memory(mem, out_) + rnn.output(out_) + + last = fluid.layers.sequence_pool(input=rnn(), pool_type='last') + logits = fluid.layers.fc(input=last, size=1, act=None) + label = fluid.layers.data(name='label', shape=[1], dtype='float32') + loss = fluid.layers.sigmoid_cross_entropy_with_logits( + x=logits, label=label) + loss = fluid.layers.mean(x=loss) + sgd = fluid.optimizer.Adam(1e-3) + sgd.minimize(loss=loss) + + cpu = fluid.CPUPlace() + exe = fluid.Executor(cpu) + exe.run(startup_program) + feeder = fluid.DataFeeder(feed_list=[sentence, label], place=cpu) + data = next(self.train_data()) + loss_0 = exe.run(main_program, + feed=feeder.feed(data), + fetch_list=[loss])[0] + for _ in xrange(100): + val = exe.run(main_program, + feed=feeder.feed(data), + fetch_list=[loss])[0] + # loss should be small after 100 mini-batch + self.assertLess(val[0], loss_0[0]) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_executor_and_mul.py b/python/paddle/v2/fluid/tests/test_executor_and_mul.py index 558273e30dff7fb74f78751f4fe569f79a453d0d..b1ef87c5cb1711c419b401c5950839816f7f4160 100644 --- a/python/paddle/v2/fluid/tests/test_executor_and_mul.py +++ b/python/paddle/v2/fluid/tests/test_executor_and_mul.py @@ -1,9 +1,10 @@ import unittest -from paddle.v2.fluid.layers import mul, data, sequence_pool + +import numpy import paddle.v2.fluid.core as core + from paddle.v2.fluid.executor import Executor -from paddle.v2.fluid.framework import g_main_program -import numpy +from paddle.v2.fluid.layers import mul, data class TestExecutor(unittest.TestCase): @@ -19,10 +20,7 @@ class TestExecutor(unittest.TestCase): a_np = numpy.random.random((100, 
784)).astype('float32') b_np = numpy.random.random((784, 100)).astype('float32') exe = Executor(place) - outs = exe.run(g_main_program, - feed={'a': a_np, - 'b': b_np}, - fetch_list=[out]) + outs = exe.run(feed={'a': a_np, 'b': b_np}, fetch_list=[out]) out = outs[0] self.assertEqual((100, 100), out.shape) self.assertTrue(numpy.allclose(out, numpy.dot(a_np, b_np))) diff --git a/python/paddle/v2/fluid/tests/test_fill_op.py b/python/paddle/v2/fluid/tests/test_fill_op.py new file mode 100644 index 0000000000000000000000000000000000000000..88337598c895a5a663ef45fd0800fa950fee1253 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_fill_op.py @@ -0,0 +1,24 @@ +import unittest +import numpy as np +from op_test import OpTest +import paddle.v2.fluid.core as core + + +class TestFillOp(OpTest): + def setUp(self): + self.op_type = "fill" + val = np.random.random(size=[100, 200]) + self.inputs = {} + self.attrs = { + 'value': val.flatten().tolist(), + 'shape': [100, 200], + 'dtype': int(core.DataType.FP64) + } + self.outputs = {'Out': val.astype('float64')} + + def test_check_output(self): + self.check_output() + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_hinge_loss_op.py b/python/paddle/v2/fluid/tests/test_hinge_loss_op.py new file mode 100644 index 0000000000000000000000000000000000000000..a8757a891faa01413dc6858451f1a988a3e030b5 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_hinge_loss_op.py @@ -0,0 +1,28 @@ +import unittest +import numpy as np +from op_test import OpTest + + +class TestHingeLossOp(OpTest): + def setUp(self): + self.op_type = 'hinge_loss' + samples_num = 64 + logits = np.random.uniform(-10, 10, (samples_num, 1)).astype('float32') + labels = np.random.randint(0, 2, (samples_num, 1)).astype('float32') + + self.inputs = { + 'Logits': logits, + 'Labels': labels, + } + loss = np.maximum(1.0 - (2 * labels - 1) * logits, 0) + self.outputs = {'Loss': loss} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['Logits'], 'Loss', max_relative_error=0.008) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_image_classification_layer.py b/python/paddle/v2/fluid/tests/test_image_classification_layer.py index 8e8e1b0a8c07a60cb1404462f976d10fe26e87f6..2fd609d4474e97ecd96adcd146f2f550e0772740 100644 --- a/python/paddle/v2/fluid/tests/test_image_classification_layer.py +++ b/python/paddle/v2/fluid/tests/test_image_classification_layer.py @@ -1,6 +1,6 @@ import unittest -import paddle.v2.fluid.layers as layers +import paddle.v2.fluid as fluid import paddle.v2.fluid.nets as nets from paddle.v2.fluid.framework import Program @@ -29,27 +29,35 @@ class TestLayer(unittest.TestCase): def test_batch_norm_layer(self): main_program = Program() startup_program = Program() - images = layers.data( + images = fluid.layers.data( name='pixel', shape=[3, 48, 48], dtype='float32', main_program=main_program) - layers.batch_norm( + hidden1 = fluid.layers.batch_norm( input=images, main_program=main_program, startup_program=startup_program) + hidden2 = fluid.layers.fc(input=hidden1, + size=128, + act='relu', + main_program=main_program) + hidden3 = fluid.layers.batch_norm( + input=hidden2, + main_program=main_program, + startup_program=startup_program) - # print str(main_program) + print str(main_program) def test_dropout_layer(self): main_program = Program() startup_program = Program() - images = layers.data( + images = fluid.layers.data( name='pixel', shape=[3, 
48, 48], dtype='float32', main_program=main_program) - layers.dropout( + fluid.layers.dropout( x=images, dropout_prob=0.5, main_program=main_program, @@ -61,7 +69,7 @@ class TestLayer(unittest.TestCase): main_program = Program() startup_program = Program() - images = layers.data( + images = fluid.layers.data( name='pixel', shape=[3, 48, 48], dtype='float32', @@ -77,19 +85,19 @@ class TestLayer(unittest.TestCase): def test_elementwise_add_with_act(self): main_program = Program() startup_program = Program() - image1 = layers.data( + image1 = fluid.layers.data( name='pixel1', shape=[3, 48, 48], dtype='float32', main_program=main_program, startup_program=startup_program) - image2 = layers.data( + image2 = fluid.layers.data( name='pixel2', shape=[3, 48, 48], dtype='float32', main_program=main_program, startup_program=startup_program) - out = layers.elementwise_add( + out = fluid.layers.elementwise_add( x=image1, y=image2, act='relu', diff --git a/python/paddle/v2/fluid/tests/test_initializer.py b/python/paddle/v2/fluid/tests/test_initializer.py index 6c20203f8eca02b3f68ed2aa8664bed29551c070..3175010f48229d04421fc0068af0f0ed90e63af4 100644 --- a/python/paddle/v2/fluid/tests/test_initializer.py +++ b/python/paddle/v2/fluid/tests/test_initializer.py @@ -60,6 +60,29 @@ class TestUniformInitializer(unittest.TestCase): self.assertAlmostEqual(init_op.attr('max'), 1.0, delta=DELTA) self.assertEqual(init_op.attr('seed'), 0) + def test_uniform_initializer_random_seed(self): + """Test the uniform initializer with manually setting seed + """ + program = framework.Program() + program.random_seed = 123 + block = program.global_block() + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.UniformInitializer()) + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.UniformInitializer(seed=456)) + init_op = block.ops[1] + self.assertEqual(init_op.attr("seed"), 123) + init_op1 = block.ops[0] + self.assertEqual(init_op1.attr("seed"), 456) + def test_uniform_initializer(self): """Test uniform initializer with supplied attributes """ diff --git a/python/paddle/v2/fluid/tests/test_layers.py b/python/paddle/v2/fluid/tests/test_layers.py index 87dc6d1a6270e0f8425b56601d04049450c73380..9b88080158139f267e253c598e60a4d92a0eff68 100644 --- a/python/paddle/v2/fluid/tests/test_layers.py +++ b/python/paddle/v2/fluid/tests/test_layers.py @@ -1,183 +1,165 @@ +from __future__ import print_function import unittest import paddle.v2.fluid.layers as layers import paddle.v2.fluid.nets as nets -from paddle.v2.fluid.framework import Program +from paddle.v2.fluid.framework import Program, program_guard +from paddle.v2.fluid.param_attr import ParamAttr class TestBook(unittest.TestCase): def test_fit_a_line(self): program = Program() - x = layers.data( - name='x', shape=[13], dtype='float32', main_program=program) - y_predict = layers.fc(input=x, size=1, act=None, main_program=program) + with program_guard(program, startup_program=Program()): + x = layers.data(name='x', shape=[13], dtype='float32') + y_predict = layers.fc(input=x, size=1, act=None) + y = layers.data(name='y', shape=[1], dtype='float32') + cost = layers.square_error_cost(input=y_predict, label=y) + avg_cost = layers.mean(x=cost) + self.assertIsNotNone(avg_cost) + program.append_backward(avg_cost) - y = layers.data( - name='y', shape=[1], dtype='float32', main_program=program) - cost = layers.square_error_cost( - input=y_predict, label=y, 
main_program=program) - - avg_cost = layers.mean(x=cost, main_program=program) - self.assertIsNotNone(avg_cost) - program.append_backward(avg_cost) - - print str(program) + print(str(program)) def test_recognize_digits_mlp(self): program = Program() - - # Change g_program, so the rest layers use `g_program` - images = layers.data( - name='pixel', shape=[784], dtype='float32', main_program=program) - label = layers.data( - name='label', shape=[1], dtype='int32', main_program=program) - hidden1 = layers.fc(input=images, - size=128, - act='relu', - main_program=program) - hidden2 = layers.fc(input=hidden1, - size=64, - act='relu', - main_program=program) - predict = layers.fc(input=hidden2, - size=10, - act='softmax', - main_program=program) - cost = layers.cross_entropy( - input=predict, label=label, main_program=program) - avg_cost = layers.mean(x=cost, main_program=program) - self.assertIsNotNone(avg_cost) - - print str(program) + with program_guard(program, startup_program=Program()): + # Change g_program, so the rest layers use `g_program` + images = layers.data(name='pixel', shape=[784], dtype='float32') + label = layers.data(name='label', shape=[1], dtype='int32') + hidden1 = layers.fc(input=images, size=128, act='relu') + hidden2 = layers.fc(input=hidden1, size=64, act='relu') + predict = layers.fc(input=[hidden2, hidden1], + size=10, + act='softmax', + param_attr=["sftmax.w1", "sftmax.w2"]) + cost = layers.cross_entropy(input=predict, label=label) + avg_cost = layers.mean(x=cost) + self.assertIsNotNone(avg_cost) + + print(str(program)) def test_simple_conv2d(self): program = Program() - images = layers.data( - name='pixel', - shape=[3, 48, 48], - dtype='int32', - main_program=program) - layers.conv2d( - input=images, - num_filters=3, - filter_size=[4, 4], - main_program=program) - - print str(program) + with program_guard(program, startup_program=Program()): + images = layers.data(name='pixel', shape=[3, 48, 48], dtype='int32') + layers.conv2d(input=images, num_filters=3, filter_size=[4, 4]) - def test_recognize_digits_conv(self): + print(str(program)) + + def test_conv2d_transpose(self): program = Program() + with program_guard(program): + img = layers.data(name='pixel', shape=[3, 2, 2], dtype='float32') + layers.conv2d_transpose(input=img, num_filters=10, output_size=28) + print(str(program)) - images = layers.data( - name='pixel', - shape=[1, 28, 28], - dtype='float32', - main_program=program) - label = layers.data( - name='label', shape=[1], dtype='int32', main_program=program) - conv_pool_1 = nets.simple_img_conv_pool( - input=images, - filter_size=5, - num_filters=2, - pool_size=2, - pool_stride=2, - act="relu", - main_program=program) - conv_pool_2 = nets.simple_img_conv_pool( - input=conv_pool_1, - filter_size=5, - num_filters=4, - pool_size=2, - pool_stride=2, - act="relu", - main_program=program) - - predict = layers.fc(input=conv_pool_2, - size=10, - act="softmax", - main_program=program) - cost = layers.cross_entropy( - input=predict, label=label, main_program=program) - avg_cost = layers.mean(x=cost, main_program=program) - - program.append_backward(avg_cost) - - print str(program) + def test_recognize_digits_conv(self): + program = Program() + with program_guard(program, startup_program=Program()): + images = layers.data( + name='pixel', shape=[1, 28, 28], dtype='float32') + label = layers.data(name='label', shape=[1], dtype='int32') + conv_pool_1 = nets.simple_img_conv_pool( + input=images, + filter_size=5, + num_filters=2, + pool_size=2, + pool_stride=2, + act="relu") 
+ conv_pool_2 = nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=4, + pool_size=2, + pool_stride=2, + act="relu") + + predict = layers.fc(input=conv_pool_2, size=10, act="softmax") + cost = layers.cross_entropy(input=predict, label=label) + avg_cost = layers.mean(x=cost) + + program.append_backward(avg_cost) + + print(str(program)) def test_word_embedding(self): program = Program() - dict_size = 10000 - embed_size = 32 - first_word = layers.data( - name='firstw', shape=[1], dtype='int64', main_program=program) - second_word = layers.data( - name='secondw', shape=[1], dtype='int64', main_program=program) - third_word = layers.data( - name='thirdw', shape=[1], dtype='int64', main_program=program) - forth_word = layers.data( - name='forthw', shape=[1], dtype='int64', main_program=program) - next_word = layers.data( - name='nextw', shape=[1], dtype='int64', main_program=program) - - embed_first = layers.embedding( - input=first_word, - size=[dict_size, embed_size], - dtype='float32', - param_attr={'name': 'shared_w'}, - main_program=program) - embed_second = layers.embedding( - input=second_word, - size=[dict_size, embed_size], - dtype='float32', - param_attr={'name': 'shared_w'}, - main_program=program) - - embed_third = layers.embedding( - input=third_word, - size=[dict_size, embed_size], - dtype='float32', - param_attr={'name': 'shared_w'}, - main_program=program) - embed_forth = layers.embedding( - input=forth_word, - size=[dict_size, embed_size], - dtype='float32', - param_attr={'name': 'shared_w'}, - main_program=program) - - concat_embed = layers.concat( - input=[embed_first, embed_second, embed_third, embed_forth], - axis=1, - main_program=program) - - hidden1 = layers.fc(input=concat_embed, - size=256, - act='sigmoid', - main_program=program) - predict_word = layers.fc(input=hidden1, - size=dict_size, - act='softmax', - main_program=program) - cost = layers.cross_entropy( - input=predict_word, label=next_word, main_program=program) - avg_cost = layers.mean(x=cost, main_program=program) - self.assertIsNotNone(avg_cost) - - print str(program) + with program_guard(program, startup_program=Program()): + dict_size = 10000 + embed_size = 32 + first_word = layers.data(name='firstw', shape=[1], dtype='int64') + second_word = layers.data(name='secondw', shape=[1], dtype='int64') + third_word = layers.data(name='thirdw', shape=[1], dtype='int64') + forth_word = layers.data(name='forthw', shape=[1], dtype='int64') + next_word = layers.data(name='nextw', shape=[1], dtype='int64') + + embed_first = layers.embedding( + input=first_word, + size=[dict_size, embed_size], + dtype='float32', + param_attr='shared_w') + embed_second = layers.embedding( + input=second_word, + size=[dict_size, embed_size], + dtype='float32', + param_attr='shared_w') + + embed_third = layers.embedding( + input=third_word, + size=[dict_size, embed_size], + dtype='float32', + param_attr='shared_w') + embed_forth = layers.embedding( + input=forth_word, + size=[dict_size, embed_size], + dtype='float32', + param_attr='shared_w') + + concat_embed = layers.concat( + input=[embed_first, embed_second, embed_third, embed_forth], + axis=1) + + hidden1 = layers.fc(input=concat_embed, size=256, act='sigmoid') + predict_word = layers.fc(input=hidden1, + size=dict_size, + act='softmax') + cost = layers.cross_entropy(input=predict_word, label=next_word) + avg_cost = layers.mean(x=cost) + self.assertIsNotNone(avg_cost) + + print(str(program)) def test_linear_chain_crf(self): program = Program() - - # Change 
g_program, so the rest layers use `g_program` - images = layers.data( - name='pixel', shape=[784], dtype='float32', main_program=program) - label = layers.data( - name='label', shape=[1], dtype='int32', main_program=program) - hidden = layers.fc(input=images, size=128, main_program=program) - crf = layers.linear_chain_crf( - input=hidden, label=label, main_program=program) - - print str(program) + with program_guard(program, startup_program=Program()): + label_dict_len = 10 + images = layers.data(name='pixel', shape=[784], dtype='float32') + label = layers.data(name='label', shape=[1], dtype='int32') + hidden = layers.fc(input=images, size=128) + crf = layers.linear_chain_crf( + input=hidden, label=label, param_attr=ParamAttr(name="crfw")) + crf_decode = layers.crf_decoding( + input=hidden, param_attr=ParamAttr(name="crfw")) + layers.chunk_eval( + input=crf_decode, + label=label, + chunk_scheme="IOB", + num_chunk_types=(label_dict_len - 1) / 2) + self.assertNotEqual(crf, None) + self.assertNotEqual(crf_decode, None) + + print(str(program)) + + def test_sigmoid_cross_entropy(self): + program = Program() + with program_guard(program): + dat = layers.data(name='data', shape=[10], dtype='float32') + lbl = layers.data(name='label', shape=[10], dtype='float32') + self.assertIsNotNone( + layers.sigmoid_cross_entropy_with_logits( + x=dat, label=lbl)) + print(str(program)) if __name__ == '__main__': diff --git a/python/paddle/v2/fluid/tests/test_lod_rank_table.py b/python/paddle/v2/fluid/tests/test_lod_rank_table.py index bbc11930b9e804c2769cc590c298c6e90dc36ca6..30d619fe318517345195281b17f88e9916b6afb3 100644 --- a/python/paddle/v2/fluid/tests/test_lod_rank_table.py +++ b/python/paddle/v2/fluid/tests/test_lod_rank_table.py @@ -1,6 +1,5 @@ from paddle.v2.fluid.layers import lod_rank_table, data from paddle.v2.fluid.executor import Executor -from paddle.v2.fluid.framework import g_main_program import paddle.v2.fluid.core as core import numpy import unittest @@ -18,7 +17,7 @@ class TestLoDRankTable(unittest.TestCase): tensor = core.LoDTensor() tensor.set(numpy.random.random(size=(17, 100)), cpu) tensor.set_lod([[0, 1, 3], [0, 5, 6, 7], [0, 3, 4, 9, 10, 13, 16, 17]]) - exe.run(g_main_program, scope=scope, feed={'x': tensor}) + exe.run(scope=scope, feed={'x': tensor}) var = scope.find_var(rank_table.name) table = var.get_lod_rank_table() self.assertEqual([(0, 5), (1, 1), (2, 1)], table.items()) diff --git a/python/paddle/v2/fluid/tests/test_log_loss_op.py b/python/paddle/v2/fluid/tests/test_log_loss_op.py new file mode 100644 index 0000000000000000000000000000000000000000..2eeaa90758c57ef0d92a8ad7b0a4c1b1f2c38be3 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_log_loss_op.py @@ -0,0 +1,33 @@ +import unittest +import numpy as np +from op_test import OpTest + + +class TestLogLossOp(OpTest): + def setUp(self): + self.op_type = 'log_loss' + samples_num = 32 + + predicted = np.random.uniform(0.1, 1.0, + (samples_num, 1)).astype("float32") + labels = np.random.randint(0, 2, (samples_num, 1)).astype("float32") + epsilon = 1e-4 + self.inputs = { + 'Predicted': predicted, + 'Labels': labels, + } + + self.attrs = {'epsilon': epsilon} + loss = -labels * np.log(predicted + epsilon) - ( + 1 - labels) * np.log(1 - predicted + epsilon) + self.outputs = {'Loss': loss} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['Predicted'], 'Loss', max_relative_error=0.03) + + +if __name__ == '__main__': + unittest.main() diff --git 
a/python/paddle/v2/fluid/tests/test_lrn_op.py b/python/paddle/v2/fluid/tests/test_lrn_op.py index 7e34b3c91c16c440f12c51415c509400e1f315dc..9abb09e53a7af8eec69f9bd501c6883dd9df9930 100644 --- a/python/paddle/v2/fluid/tests/test_lrn_op.py +++ b/python/paddle/v2/fluid/tests/test_lrn_op.py @@ -23,7 +23,7 @@ class TestLRNOp(OpTest): start = -(self.n - 1) / 2 end = start + self.n - mid = np.empty((self.N, self.C, self.H, self.W), dtype=float) + mid = np.empty((self.N, self.C, self.H, self.W)).astype("float32") mid.fill(self.k) for m in range(0, self.N): for i in range(0, self.C): @@ -74,5 +74,4 @@ class TestLRNOp(OpTest): if __name__ == "__main__": - exit(0) # LRN grad implement wrong unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_nce.py b/python/paddle/v2/fluid/tests/test_nce.py new file mode 100644 index 0000000000000000000000000000000000000000..8aeba69769525935c26576ec50035ed50d2ce44f --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_nce.py @@ -0,0 +1,98 @@ +import unittest +import numpy as np +from op_test import OpTest + + +def nce(input, weight, bias, sample_weight, labels, num_classes, + num_sample_class): + samples = [] + sample_labels = [] + batch_size = input.shape[0] + num_true_class = labels.shape[1] + for i in range(batch_size): + w = 1 if sample_weight is None else sample_weight[i] + for label in labels[i]: + samples.append((i, label, True, w)) + sample_labels.append(label) + for num in range(num_sample_class): + samples.append((i, num, False, w)) + sample_labels.append(num) + # forward bias + sample_out = np.zeros(len(samples)).astype(np.float32) + if bias is not None: + for i in range(len(samples)): + sample_out[i] = bias[samples[i][1]] + # forward weight + for i in range(len(samples)): + sample_out[i] += np.dot(input[samples[i][0]], weight[samples[i][1]]) + + # forward activation + sample_out = 1.0 / (1.0 + np.exp(-sample_out)) + # forward cost + out = np.zeros(batch_size).astype(np.float32) + b = 1.0 / num_classes * num_sample_class + for i in range(len(samples)): + o = sample_out[i] + cost = -np.log(o / (o + b)) if samples[i][2] else -np.log(b / (o + b)) + out[samples[i][0]] += cost * samples[i][3] + return (out[:, np.newaxis], np.array(sample_out).reshape( + batch_size, num_sample_class + num_true_class), + np.array(sample_labels).reshape(batch_size, + num_sample_class + num_true_class)) + + +class TestNCE(OpTest): + def generate_data(self, dim, batch_size, num_classes, num_true_class, + num_neg_samples): + input = np.random.randn(batch_size, dim).astype(np.float32) + weight = np.random.randn(num_classes, dim).astype(np.float32) + bias = np.random.randn(num_classes).astype(np.float32) + sample_weight = np.random.randn(batch_size).astype(np.float32) + labels = np.random.randint(0, num_classes, (batch_size, num_true_class)) + self.attrs = { + 'num_total_classes': num_classes, + 'num_neg_samples': num_neg_samples, + 'custom_neg_classes': range(num_neg_samples) + } + self.inputs = { + 'Input': input, + 'Label': labels, + 'Weight': weight, + 'Bias': bias, + 'SampleWeight': sample_weight + } + + def set_data(self): + self.generate_data(5, 5, 4, 1, 2) + + def compute(self): + out = nce(self.inputs['Input'], self.inputs['Weight'], + self.inputs['Bias'], self.inputs['SampleWeight'], + self.inputs['Label'], self.attrs['num_total_classes'], + self.attrs['num_neg_samples']) + self.outputs = { + 'Cost': out[0], + 'SampleLogits': out[1], + 'SampleLabels': out[2] + } + + def setUp(self): + self.op_type = 'nce' + self.set_data() + self.compute() + + def 
test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad( + ["Input", "Weight", "Bias"], "Cost", max_relative_error=0.02) + + +class TestNCECase1(TestNCE): + def set_data(self): + self.generate_data(10, 20, 10, 2, 5) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_operator_desc.py b/python/paddle/v2/fluid/tests/test_operator_desc.py index e8362d2e9c6038c04c24dce35de8c53bfde78142..ce34d95ac8cb2644dee9c551cd8e85b33609919a 100644 --- a/python/paddle/v2/fluid/tests/test_operator_desc.py +++ b/python/paddle/v2/fluid/tests/test_operator_desc.py @@ -1,11 +1,15 @@ import unittest -from paddle.v2.fluid.framework import Variable, Program, g_main_program + import paddle.v2.fluid.core as core +from paddle.v2.fluid.framework import Program, default_startup_program + +main_program = default_startup_program() + class TestOperator(unittest.TestCase): def test_error_type(self): - block = g_main_program.create_block() + block = main_program.create_block() try: block.append_op() self.assertFail() diff --git a/python/paddle/v2/fluid/tests/test_parameter.py b/python/paddle/v2/fluid/tests/test_parameter.py index 13f6278ad8b7244e7980b32463f29d7a824b4572..694344acbbd3b7c80cb0ff48ada843f794061282 100644 --- a/python/paddle/v2/fluid/tests/test_parameter.py +++ b/python/paddle/v2/fluid/tests/test_parameter.py @@ -1,17 +1,19 @@ import unittest -from paddle.v2.fluid.framework import g_main_program +from paddle.v2.fluid.framework import default_main_program import paddle.v2.fluid.core as core from paddle.v2.fluid.executor import Executor import paddle.v2.fluid.io as io from paddle.v2.fluid.initializer import ConstantInitializer import numpy as np +main_program = default_main_program() + class TestParameter(unittest.TestCase): def test_param(self): shape = [784, 100] val = 1.0625 - b = g_main_program.global_block() + b = main_program.global_block() param = b.create_parameter( name='fc.w', shape=shape, @@ -23,9 +25,9 @@ class TestParameter(unittest.TestCase): self.assertEqual(core.DataType.FP32, param.dtype) self.assertEqual(0, param.block.idx) exe = Executor(core.CPUPlace()) - p = exe.run(g_main_program, fetch_list=[param])[0] + p = exe.run(main_program, fetch_list=[param])[0] self.assertTrue(np.allclose(p, np.ones(shape) * val)) - p = io.get_parameter_value_by_name('fc.w', exe, g_main_program) + p = io.get_parameter_value_by_name('fc.w', exe, main_program) self.assertTrue(np.allclose(np.array(p), np.ones(shape) * val)) diff --git a/python/paddle/v2/fluid/tests/test_profiler.py b/python/paddle/v2/fluid/tests/test_profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..395d0dc36a3d1d6fbfebb4cdf34395c4edee412d --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_profiler.py @@ -0,0 +1,28 @@ +import unittest +import numpy as np +import paddle.v2.fluid as fluid +import paddle.v2.fluid.profiler as profiler +import paddle.v2.fluid.layers as layers + + +class TestProfiler(unittest.TestCase): + def test_nvprof(self): + if not fluid.core.is_compile_gpu(): + return + epoc = 8 + dshape = [4, 3, 28, 28] + data = layers.data(name='data', shape=[3, 28, 28], dtype='float32') + conv = layers.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1]) + + place = fluid.GPUPlace(0) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + with profiler.cuda_profiler("cuda_profiler.txt", 'csv') as nvprof: + for i in range(epoc): + input = np.random.random(dshape).astype('float32') + 
exe.run(fluid.default_main_program(), feed={'data': input}) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_program.py b/python/paddle/v2/fluid/tests/test_program.py index 15653a1dbf5b1a66edd3f768bee5a36be1bb7a7a..1a9313c68aab165d85ae29051faeacb4927ac2c9 100644 --- a/python/paddle/v2/fluid/tests/test_program.py +++ b/python/paddle/v2/fluid/tests/test_program.py @@ -1,37 +1,38 @@ from __future__ import print_function import unittest -from paddle.v2.fluid.framework import Program -from paddle.v2.fluid.framework import g_main_program +from paddle.v2.fluid.framework import Program, default_main_program import paddle.v2.fluid.layers as layers +main_program = default_main_program() + class TestProgram(unittest.TestCase): def test_program(self): - b = g_main_program.current_block() + b = main_program.current_block() self.assertEqual(-1, b.parent_idx) self.assertEqual(0, b.idx) - b = g_main_program.create_block() + b = main_program.create_block() self.assertEqual(1, b.idx) self.assertEqual(0, b.parent_idx) - b = g_main_program.create_block() + b = main_program.create_block() self.assertEqual(2, b.idx) self.assertEqual(1, b.parent_idx) - g_main_program.rollback() + main_program.rollback() - b = g_main_program.current_block() + b = main_program.current_block() self.assertEqual(1, b.idx) self.assertEqual(0, b.parent_idx) - b = g_main_program.create_block() + b = main_program.create_block() self.assertEqual(3, b.idx) self.assertEqual(1, b.parent_idx) - g_main_program.rollback() - b = g_main_program.current_block() + main_program.rollback() + b = main_program.current_block() self.assertEqual(1, b.idx) self.assertEqual(0, b.parent_idx) diff --git a/python/paddle/v2/fluid/tests/test_recurrent_op.py b/python/paddle/v2/fluid/tests/test_recurrent_op.py index 84548847f76c6315da000e1b3d062deafe55a05e..694ff0d8dd794111aff51bb7d503a56b87514342 100644 --- a/python/paddle/v2/fluid/tests/test_recurrent_op.py +++ b/python/paddle/v2/fluid/tests/test_recurrent_op.py @@ -271,12 +271,12 @@ class RecurrentOpTest2(RecurrentOpTest1): temp_l = layers.fc(input=x_t, size=self.input_dim, - param_attr={'name': 'W'}, + param_attr='W', bias_attr=False, **self.p_info) temp_r = layers.fc(input=h_pre, size=self.input_dim, - param_attr={'name': 'U'}, + param_attr='U', bias_attr=False, **self.p_info) @@ -454,4 +454,6 @@ class RecurrentOpNoMemBootTest(RecurrentOpTest1): if __name__ == '__main__': + # FIXME(qijun) https://github.com/PaddlePaddle/Paddle/issues/6152 + exit(0) unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_registry.py b/python/paddle/v2/fluid/tests/test_registry.py new file mode 100644 index 0000000000000000000000000000000000000000..f8328f31cf8203f5ea8af2c14417879616ccab71 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_registry.py @@ -0,0 +1,22 @@ +import unittest +import warnings + +import paddle.v2.fluid as fluid +import paddle.v2.fluid.framework as framework +import paddle.v2.fluid.layers as layers +import paddle.v2.fluid.registry as registry + + +class TestRegistry(unittest.TestCase): + def test_registry_layer(self): + self.layer_type = "mean" + program = framework.Program() + + x = fluid.layers.data(name='X', shape=[10, 10], dtype='float32') + output = layers.mean(x) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + + X = np.random.random((10, 10)).astype("float32") + mean_out = exe.run(program, feed={"X": X}, fetch_list=[output]) + self.assertAlmostEqual(np.mean(X), mean_out) diff --git a/python/paddle/v2/fluid/tests/test_row_conv_op.py 
b/python/paddle/v2/fluid/tests/test_row_conv_op.py new file mode 100644 index 0000000000000000000000000000000000000000..1ed86e23ac28a575cdc3388e9da547918eb8a1be --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_row_conv_op.py @@ -0,0 +1,95 @@ +import unittest +import numpy as np +from op_test import OpTest + + +def row_conv_forward(x, lod, wt): + out = np.zeros_like(x) + seq_info = lod[0] + num_sequences = len(seq_info) - 1 + context_length = wt.shape[0] + + for i in range(num_sequences): # loop over number of sequences + start = seq_info[i] + end = seq_info[i + 1] + curinput = x[start:end, :] + curoutput = out[start:end, :] + + cur_timesteps = end - start + for j in range(cur_timesteps): # loop over different timesteps + for k in range(context_length): + + if j + k >= cur_timesteps: + continue + curoutput[j, :] += curinput[j + k, :] * wt[k, :] + + return out + + +class TestRowConvOp1(OpTest): + def setUp(self): + + self.op_type = "row_conv" + lod = [[0, 2, 5, 7]] + T = lod[0][-1] + D = 16 + context_length = 2 + + x = np.random.random((T, D)).astype("float32") + wt = np.random.random((context_length, D)).astype("float32") + self.inputs = {'X': (x, lod), 'Filter': wt} + + out = row_conv_forward(x, lod, wt) + self.outputs = {'Out': (out, lod)} + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Filter'], 'Out', max_relative_error=0.05) + + def test_check_grad_ignore_x(self): + self.check_grad( + ['Filter'], 'Out', max_relative_error=0.05, no_grad_set=set('X')) + + def test_check_grad_ignore_wt(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Filter')) + + +class TestRowConvOp2(OpTest): + def setUp(self): + + self.op_type = "row_conv" + lod = [[0, 20, 50, 100]] + T = lod[0][-1] + D = 35 + context_length = 35 + + x = np.random.random((T, D)).astype("float32") + wt = np.random.random((context_length, D)).astype("float32") + self.inputs = {'X': (x, lod), 'Filter': wt} + + out = row_conv_forward(x, lod, wt) + self.outputs = {'Out': (out, lod)} + + def test_check_output(self): + self.check_output() + + #max_relative_error is increased from 0.05 to 0.06 as for higher + #dimensional input, the dX on CPU for some values has max_rel_error + #slightly more than 0.05 + def test_check_grad_normal(self): + self.check_grad(['X', 'Filter'], 'Out', max_relative_error=0.06) + + def test_check_grad_ignore_x(self): + self.check_grad( + ['Filter'], 'Out', max_relative_error=0.06, no_grad_set=set('X')) + + def test_check_grad_ignore_wt(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.06, no_grad_set=set('Filter')) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_shrink_rnn_memory.py b/python/paddle/v2/fluid/tests/test_shrink_rnn_memory.py index 05f6a560644f18da6ff2e015911901cd73cc36c9..86db4c64b493d94cc675ed4bcee7e2925fef1977 100644 --- a/python/paddle/v2/fluid/tests/test_shrink_rnn_memory.py +++ b/python/paddle/v2/fluid/tests/test_shrink_rnn_memory.py @@ -3,9 +3,11 @@ import paddle.v2.fluid.core as core from paddle.v2.fluid.executor import Executor import paddle.v2.fluid.layers as layers from paddle.v2.fluid.backward import append_backward_ops -from paddle.v2.fluid.framework import g_main_program +from paddle.v2.fluid.framework import default_main_program import numpy +main_program = default_main_program() + class TestShrinkRNNMemory(unittest.TestCase): def test_shrink_rnn_memory(self): @@ -36,7 +38,7 @@ class 
TestShrinkRNNMemory(unittest.TestCase): append_backward_ops(loss=mem3_mean) x_grad = exe.run( feed={'x': tensor}, - fetch_list=[g_main_program.global_block().var('x@GRAD')])[0] + fetch_list=[main_program.global_block().var('x@GRAD')])[0] self.assertAlmostEqual(1.0, x_grad.sum(), delta=0.1) diff --git a/python/paddle/v2/fluid/tests/test_sigmoid_cross_entropy_with_logits_op.py b/python/paddle/v2/fluid/tests/test_sigmoid_cross_entropy_with_logits_op.py index e53856b38aa5ddd6061b350a66e9fe86bc23923c..c42f578f72cb121a24d6b852334cbd8a977f2730 100644 --- a/python/paddle/v2/fluid/tests/test_sigmoid_cross_entropy_with_logits_op.py +++ b/python/paddle/v2/fluid/tests/test_sigmoid_cross_entropy_with_logits_op.py @@ -2,11 +2,12 @@ import numpy as np from op_test import OpTest from scipy.special import logit from scipy.special import expit +import unittest class TestSigmoidCrossEntropyWithLogitsOp1(OpTest): - '''Test sigmoid_cross_entropy_with_logit_op with binary labels - ''' + """Test sigmoid_cross_entropy_with_logit_op with binary label + """ def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" @@ -16,16 +17,16 @@ class TestSigmoidCrossEntropyWithLogitsOp1(OpTest): 'X': logit( np.random.uniform(0, 1, (batch_size, num_classes)) .astype("float32")), - 'Labels': np.random.randint(0, 2, (batch_size, num_classes)) + 'Label': np.random.randint(0, 2, (batch_size, num_classes)) .astype("float32") } # Fw Pass is implemented as elementwise sigmoid followed by # elementwise logistic loss - # Labels * -log(sigmoid(X)) + (1 - labels) * -log(1 - sigmoid(X)) + # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X)) sigmoid_X = expit(self.inputs['X']) - term1 = self.inputs['Labels'] * np.log(sigmoid_X) - term2 = (1 - self.inputs['Labels']) * np.log(1 - sigmoid_X) + term1 = self.inputs['Label'] * np.log(sigmoid_X) + term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X) self.outputs = {'Out': -term1 - term2} def test_check_output(self): @@ -36,8 +37,8 @@ class TestSigmoidCrossEntropyWithLogitsOp1(OpTest): class TestSigmoidCrossEntropyWithLogitsOp2(OpTest): - '''Test sigmoid_cross_entropy_with_logit_op with probabalistic labels - ''' + """Test sigmoid_cross_entropy_with_logit_op with probabalistic label + """ def setUp(self): self.op_type = "sigmoid_cross_entropy_with_logits" @@ -47,16 +48,16 @@ class TestSigmoidCrossEntropyWithLogitsOp2(OpTest): 'X': logit( np.random.uniform(0, 1, (batch_size, num_classes)) .astype("float32")), - 'Labels': np.random.uniform(0, 1, (batch_size, num_classes)) + 'Label': np.random.uniform(0, 1, (batch_size, num_classes)) .astype("float32") } # Fw Pass is implemented as elementwise sigmoid followed by # elementwise logistic loss - # Labels * -log(sigmoid(X)) + (1 - labels) * -log(1 - sigmoid(X)) + # Label * -log(sigmoid(X)) + (1 - label) * -log(1 - sigmoid(X)) sigmoid_X = expit(self.inputs['X']) - term1 = self.inputs['Labels'] * np.log(sigmoid_X) - term2 = (1 - self.inputs['Labels']) * np.log(1 - sigmoid_X) + term1 = self.inputs['Label'] * np.log(sigmoid_X) + term2 = (1 - self.inputs['Label']) * np.log(1 - sigmoid_X) self.outputs = {'Out': -term1 - term2} def test_check_output(self): @@ -64,3 +65,7 @@ class TestSigmoidCrossEntropyWithLogitsOp2(OpTest): def test_check_grad(self): self.check_grad(['X'], 'Out') + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_unpool_op.py b/python/paddle/v2/fluid/tests/test_unpool_op.py new file mode 100644 index 
0000000000000000000000000000000000000000..e87f283042c081ed9f232d140ff8c303cd3d1858 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_unpool_op.py @@ -0,0 +1,83 @@ +import unittest +import numpy as np +from op_test import OpTest + + +def unpool2dmax_forward_naive(input, indices, ksize, strides, paddings): + s0, s1, s2, s3 = input.shape + out_hsize = (s2 - 1) * strides[0] - 2 * paddings[0] + ksize[0] + out_wsize = (s2 - 1) * strides[1] - 2 * paddings[1] + ksize[1] + out = np.zeros((s0, s1, out_hsize, out_wsize)) + for nidx in xrange(s0): + for cidx in xrange(s1): + for h in xrange(s2): + for w in xrange(s3): + index = indices[nidx, cidx, h, w] + hidx = (index - index % out_wsize) / out_wsize + widx = index % out_wsize + out[nidx, cidx, int(hidx), int(widx)] = \ + input[nidx, cidx, h, w] + + return out + + +class TestUnpoolOp(OpTest): + def setUp(self): + self.op_type = "unpool" + self.init_test_case() + pre_input = np.random.random(self.shape).astype("float32") + nsize, csize, hsize, wsize = pre_input.shape + hsize_out = (hsize - self.ksize[0] + 2 * self.paddings[0]) / \ + self.strides[0] + 1 + wsize_out = (wsize - self.ksize[1] + 2 * self.paddings[1]) / \ + self.strides[1] + 1 + input = np.zeros((nsize, csize, hsize_out, wsize_out)) + indices = np.zeros((nsize, csize, hsize_out, wsize_out)) + for i in xrange(hsize_out): + for j in xrange(wsize_out): + r_start = np.max((i * self.strides[0] - self.paddings[0], 0)) + r_end = np.min((i * self.strides[0] + self.ksize[0] - \ + self.paddings[0], hsize)) + c_start = np.max((j * self.strides[1] - self.paddings[1], 0)) + c_end = np.min((j * self.strides[1] + self.ksize[1] - \ + self.paddings[1], wsize)) + for nidx in xrange(nsize): + for cidx in xrange(csize): + x_masked = pre_input[nidx, cidx, r_start:r_end, \ + c_start:c_end] + input[nidx, cidx, i, j] = x_masked.max() + arg = x_masked.argmax() + indices[nidx, cidx, i, j] = \ + (r_start + arg / self.ksize[1]) * wsize + \ + c_start + arg % self.ksize[1] + output = self.unpool2d_forward_naive(input, indices, self.ksize, \ + self.strides, self.paddings).astype("float32") + self.inputs = { + 'X': input.astype('float32'), + 'Indices': indices.astype('int32') + } + self.attrs = { + 'strides': self.strides, + 'paddings': self.paddings, + 'ksize': self.ksize, + 'unpooling_type': self.unpooling_type, + } + self.outputs = {'Out': output.astype('float32')} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out') + + def init_test_case(self): + self.unpool2d_forward_naive = unpool2dmax_forward_naive + self.unpooling_type = "max" + self.shape = [6, 4, 5, 5] + self.ksize = [3, 3] + self.strides = [2, 2] + self.paddings = [0, 0] + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_variable.py b/python/paddle/v2/fluid/tests/test_variable.py index 92ffdceb6c84fb2669f8c1bb556c46fb1c03c411..f1e4c0ba21d5c4f10d2b5011bdb5abaebaec5431 100644 --- a/python/paddle/v2/fluid/tests/test_variable.py +++ b/python/paddle/v2/fluid/tests/test_variable.py @@ -1,5 +1,5 @@ import unittest -from paddle.v2.fluid.framework import g_main_program, Program, convert_np_dtype_to_dtype_ +from paddle.v2.fluid.framework import default_main_program, Program, convert_np_dtype_to_dtype_ import paddle.v2.fluid.core as core import numpy as np @@ -18,7 +18,7 @@ class TestVariable(unittest.TestCase): self.assertRaises(ValueError, lambda: convert("int8")) def test_var(self): - b = g_main_program.current_block() + b = 
default_main_program().current_block()
         w = b.create_var(
             dtype="float64", shape=[784, 100], lod_level=0, name="fc.w")
         self.assertNotEqual(str(w), "")
diff --git a/python/paddle/v2/reader/decorator.py b/python/paddle/v2/reader/decorator.py
index 45a4288751e37b99dd1005ec78f30a98044926ff..7e457f987d36e52e9e7c7727b4f996ad31c6bf08 100644
--- a/python/paddle/v2/reader/decorator.py
+++ b/python/paddle/v2/reader/decorator.py
@@ -14,13 +14,16 @@
 __all__ = [
     'map_readers', 'buffered', 'compose', 'chain', 'shuffle',
-    'ComposeNotAligned', 'firstn', 'xmap_readers'
+    'ComposeNotAligned', 'firstn', 'xmap_readers', 'pipe_reader'
 ]
+from threading import Thread
+import subprocess
+
+from Queue import Queue
 import itertools
 import random
-from Queue import Queue
-from threading import Thread
+import zlib
 def map_readers(func, *readers):
@@ -323,3 +326,101 @@ def xmap_readers(mapper, reader, process_num, buffer_size, order=False):
             yield sample
     return xreader
+
+
+def _buf2lines(buf, line_break="\n"):
+    # FIXME: line_break should be automatically configured.
+    lines = buf.split(line_break)
+    return lines[:-1], lines[-1]
+
+
+def pipe_reader(left_cmd,
+                parser,
+                bufsize=8192,
+                file_type="plain",
+                cut_lines=True,
+                line_break="\n"):
+    """
+    pipe_reader reads data as a stream from a command, takes its
+    stdout into a pipe buffer and redirects it to the parser to
+    parse, then yields data in your desired format.
+
+    You can use a standard linux command or call another program
+    to read data, e.g. from HDFS, Ceph, a URL, AWS S3, etc.:
+
+        cmd = "hadoop fs -cat /path/to/some/file"
+        cmd = "cat sample_file.tar.gz"
+        cmd = "curl http://someurl"
+        cmd = "python print_s3_bucket.py"
+
+    A sample parser:
+
+        def sample_parser(lines):
+            # parse each line as one sample data,
+            # return a list of samples as batches.
+            ret = []
+            for l in lines:
+                ret.append(l.split(" ")[1:5])
+            return ret
+
+    :param left_cmd: command to execute to get stdout from.
+    :type left_cmd: string
+    :param parser: parser function to parse lines of data.
+                   If cut_lines is True, parser will receive a list
+                   of lines.
+                   If cut_lines is False, parser will receive a
+                   raw buffer each time.
+                   parser should return a list of parsed values.
+    :type parser: callable
+    :param bufsize: the buffer size used for the stdout pipe.
+    :type bufsize: int
+    :param file_type: can be plain/gzip, stream buffer data type.
+    :type file_type: string
+    :param cut_lines: whether to pass lines instead of a raw buffer
+                      to the parser
+    :type cut_lines: bool
+    :param line_break: line break of the file, like \n or \r
+    :type line_break: string
+
+    :return: the reader generator.
+    :rtype: callable
+    """
+    if not isinstance(left_cmd, str):
+        raise TypeError("left_cmd must be a string")
+    if not callable(parser):
+        raise TypeError("parser must be a callable object")
+
+    process = subprocess.Popen(
+        left_cmd.split(" "), bufsize=bufsize, stdout=subprocess.PIPE)
+    # TODO(typhoonzero): add a thread to read stderr
+
+    # Always initializing the decompress object here is better than
+    # creating it in the loop.
+ dec = zlib.decompressobj( + 32 + zlib.MAX_WBITS) # offset 32 to skip the header + + def reader(): + remained = "" + while True: + buff = process.stdout.read(bufsize) + if buff: + if file_type == "gzip": + decomp_buff = dec.decompress(buff) + elif file_type == "plain": + decomp_buff = buff + else: + raise TypeError("file_type %s is not allowed" % file_type) + + if cut_lines: + lines, remained = _buf2lines(''.join( + [remained, decomp_buff]), line_break) + parsed_list = parser(lines) + for ret in parsed_list: + yield ret + else: + for ret in parser(decomp_buff): + yield ret + else: + break + + return reader diff --git a/python/setup.py.in b/python/setup.py.in index fe91df10daf303bb14d1e5f28817984d261e0880..9ccb4dc1762ac761212347fa7c7c94b223d75e24 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -1,8 +1,61 @@ from setuptools import setup, Distribution, Extension +import subprocess class BinaryDistribution(Distribution): def has_ext_modules(foo): return True +MAJOR = 0 +MINOR = 11 +PATCH = 0 +RC = 0 +ISTAGED = False + + + +def git_commit(): + try: + cmd = ['git', 'rev-parse', 'HEAD'] + git_commit = subprocess.Popen(cmd, stdout = subprocess.PIPE).communicate()[0].strip() + except: + git_commit = 'Unknown' + return git_commit + +def write_version_py(filename='paddle/version.py'): + cnt = ''' +# THIS FILE IS GENERATED FROM PADDLEPADDLE SETUP.PY +# +full_version = '%(major)d.%(minor)d.%(patch)d' +major = '%(major)d' +minor = '%(minor)d' +patch = '%(patch)d' +rc = '%(rc)d' +istaged = %(istaged)s +commit = '%(commit)s' + +def show(): + if istaged: + print 'full_version:', full_version + print 'major:', major + print 'minor:', minor + print 'patch:', patch + print 'rc:', rc + else: + print 'commit:', commit +''' + commit = git_commit() + with open(filename, 'w') as f: + f.write(cnt % { + 'major': MAJOR, + 'minor': MINOR, + 'patch': PATCH, + 'rc': RC, + 'version': '${PADDLE_VERSION}', + 'commit': commit, + 'istaged': ISTAGED}) + +write_version_py(filename='@PADDLE_SOURCE_DIR@/python/paddle/version.py') + + packages=['paddle', 'paddle.proto', 'paddle.trainer', @@ -21,7 +74,7 @@ with open('@PADDLE_SOURCE_DIR@/python/requirements.txt') as f: setup_requires = f.read().splitlines() if '${CMAKE_SYSTEM_PROCESSOR}' not in ['arm', 'armv7-a', 'aarch64']: - setup_requires+=["opencv-python"] + setup_requires+=['opencv-python'] # the prefix is sys.prefix which should always be usr paddle_bin_dir = 'opt/paddle/bin' @@ -36,7 +89,7 @@ paddle_rt_libs = ['${WARPCTC_LIBRARIES}'] if '${MKL_SHARED_LIBS}'!= '': paddle_rt_libs += '${MKL_SHARED_LIBS}'.split(';') -setup(name='paddlepaddle', +setup(name='${PACKAGE_NAME}', version='${PADDLE_VERSION}', description='Parallel Distributed Deep Learning', install_requires=setup_requires,