diff --git a/.travis.yml b/.travis.yml index 929c847bd36d64e79a199b2634ebf68c3225429b..fe4eb2d1570fb3dbb38128400e6f438b99236c0d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,7 +12,7 @@ services: os: - linux env: - - JOB=build_doc + - JOB=doc - JOB=check_style - JOB=build_android addons: @@ -36,21 +36,18 @@ addons: - ccache ssh_known_hosts: 13.229.163.131 before_install: - - if [[ "$JOB" == "check_style" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi - # Paddle is using protobuf 3.1 currently. Protobuf 3.2 breaks the compatibility. So we specify the python - # protobuf version. - sudo pip install -r $TRAVIS_BUILD_DIR/python/requirements.txt - - sudo pip install wheel sphinx==1.5.6 recommonmark sphinx-rtd-theme==0.1.9 virtualenv pre-commit LinkChecker + - sudo pip install wheel sphinx==1.5.6 recommonmark sphinx-rtd-theme==0.1.9 virtualenv pre-commit - | function timeout() { perl -e 'alarm shift; exec @ARGV' "$@"; } script: - | # 43min timeout - if [[ "$JOB" == "build_android" ]]; then timeout 2580 docker run -it --rm -v "$TRAVIS_BUILD_DIR:/paddle" paddlepaddle/paddle:latest-dev-android; - else timeout 2580 paddle/scripts/travis/${JOB}.sh; fi; - RESULT=$?; if [ $RESULT -eq 0 ] || [ $RESULT -eq 142 ]; then true; else exit 1; fi; + if [[ "$JOB" != "doc" ]]; then timeout 2580 paddle/scripts/paddle_docker_build.sh ${JOB}; else paddle/scripts/paddle_build.sh ${JOB}; fi; + if [ $? -eq 0 ] || [ $? -eq 142 ]; then true; else exit 1; fi; - | - if [[ "$JOB" != "build_doc" ]]; then exit 0; fi; + if [[ "$JOB" != "doc" ]]; then exit 0; fi; + # For document only if [[ "$TRAVIS_PULL_REQUEST" != "false" ]]; then exit 0; fi; if [[ "$TRAVIS_BRANCH" != "develop" && ! "$TRAVIS_BRANCH" =~ ^v[[:digit:]]+\.[[:digit:]]+(\.[[:digit:]]+)?(-\S*)?$ ]]; then exit 0; fi; export DEPLOY_DOCS_SH=https://raw.githubusercontent.com/PaddlePaddle/PaddlePaddle.org/master/scripts/deploy/deploy_docs.sh diff --git a/AUTHORS.md b/AUTHORS.md index 9c6821d9f8681c5907c2fc9938fdb62ba64b9a92..4ee05420982d13f686cf13e8957ce41dfcdd2cb8 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -2,12 +2,14 @@ |---|---| | abhinavarora | Abhinav Arora | | backyes | Yan-Fei Wang | +| baiyfbupt | Yi-Fan Bai | | beckett1124 | Bin Qi | -| JiayiFeng | Jia-Yi Feng | | chengxiaohua1105 | Xiao-Hua Cheng | | cxwangyi, yiwangbaidu, wangkuiyi | Yi Wang | | cxysteven | Xing-Yi Cheng | | dzhwinter | Zhi-Hong Dong | +| dragonwarrior | Long Wang | +| dyning | Yuning Du | | emailweixu | Wei Xu | | gangliao | Gang Liao | | gongweibao | Wei-Bao Gong | @@ -16,6 +18,9 @@ | hedaoyuan | Dao-Yuan He | | helinwang | He-Lin Wang | | jacquesqiao | Long-Fei Qiao | +| jczaja | Jacek Czaja | +| JiayiFeng | Jia-Yi Feng | +| kbinias | Krzysztof Binias | | kuke | Yi-Bing Liu | | lcy-seso | Ying Cao | | lipeng-unisound | Peng Li | @@ -24,15 +29,20 @@ | llxxxll | Yong-Feng Liu | | luotao01 | Tao Luo | | lzhao4ever | Liang Zhao | +| mozga-intel | Mateusz Ozga | | NHZlX | Zhao-Long Xing | +| Noplz | Yuan Gao | | pakchoi | Chuan-Jiang Song | +| panyx0718 | Xin Pan | | pengli09 | Peng Li | | pkuyym | Ya-Ming Yang | +| pzelazko-intel | Pawel Zelazko | | QiJune | Jun Qi | | qingqing01 | Qing-Qing Dang | | reyoung | Yang Yu | | Superjom | Chun-Wei Yan | | tianbingsz | Tian-Bing Xu | +| tpatejko | Tomasz Patejko | | typhoonzero | Yi Wu | | wanghaoshuang | Hao-Shuang Wang | | wangyang59 | Yang Wang | diff --git a/Dockerfile b/Dockerfile index c257dbfc2987323f8ed2a24dfffa8b3c15e09399..164fe84904947bfc3cf71132b5fba04744460b26 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 
+1,6 @@ # A image for building paddle binaries # Use cuda devel base image for both cpu and gpu environment - -# When you modify it, please be aware of cudnn-runtime version +# When you modify it, please be aware of cudnn-runtime version # and libcudnn.so.x in paddle/scripts/docker/build.sh FROM nvidia/cuda:8.0-cudnn7-devel-ubuntu16.04 MAINTAINER PaddlePaddle Authors @@ -24,7 +23,7 @@ ENV HOME /root COPY ./paddle/scripts/docker/root/ /root/ RUN apt-get update && \ - apt-get install -y \ + apt-get install -y --allow-downgrades \ git python-pip python-dev openssh-server bison \ libnccl2=2.1.2-1+cuda8.0 libnccl-dev=2.1.2-1+cuda8.0 \ wget unzip unrar tar xz-utils bzip2 gzip coreutils ntp \ @@ -33,7 +32,7 @@ RUN apt-get update && \ automake locales clang-format swig doxygen cmake \ liblapack-dev liblapacke-dev \ clang-3.8 llvm-3.8 libclang-3.8-dev \ - net-tools libtool && \ + net-tools libtool ccache && \ apt-get clean -y # Install Go and glide diff --git a/benchmark/fluid/machine_translation.py b/benchmark/fluid/machine_translation.py index d7a421c10979c3b9d6865a8c0b99a6410e0f46a8..adde5f21acd4e77d58a453d6868abeccfca4bb5a 100644 --- a/benchmark/fluid/machine_translation.py +++ b/benchmark/fluid/machine_translation.py @@ -21,7 +21,7 @@ import argparse import time import distutils.util -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import paddle.fluid.core as core import paddle.fluid.framework as framework diff --git a/benchmark/fluid/mnist.py b/benchmark/fluid/mnist.py index dc10ac2ec195acc9a5693718141ddb32417dfb71..1e2185dfac1072d1f1046f4616a9d53a8fc76061 100644 --- a/benchmark/fluid/mnist.py +++ b/benchmark/fluid/mnist.py @@ -20,7 +20,7 @@ import numpy as np import argparse import time -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import paddle.fluid.profiler as profiler diff --git a/benchmark/fluid/resnet.py b/benchmark/fluid/resnet.py index 1af5eaf6b46be47cb6b778cedcf53830c201ef39..831fa2c019fc2868cd85b1ca7b2c8c76a2f1628c 100644 --- a/benchmark/fluid/resnet.py +++ b/benchmark/fluid/resnet.py @@ -23,7 +23,7 @@ import time import cProfile, pstats, StringIO -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import paddle.fluid.core as core import paddle.fluid.profiler as profiler diff --git a/benchmark/fluid/stacked_dynamic_lstm.py b/benchmark/fluid/stacked_dynamic_lstm.py index 5fcbdd64af9dc196c9d5b2b82ce4213478ea1418..73bcc47b4d404af2c01d61ca3dfb11971bbcfe9c 100644 --- a/benchmark/fluid/stacked_dynamic_lstm.py +++ b/benchmark/fluid/stacked_dynamic_lstm.py @@ -23,10 +23,10 @@ import random import time import numpy -import paddle.v2 as paddle -import paddle.v2.dataset.imdb as imdb +import paddle +import paddle.dataset.imdb as imdb import paddle.fluid as fluid -from paddle.v2 import batch +import paddle.batch as batch import paddle.fluid.profiler as profiler diff --git a/benchmark/fluid/vgg.py b/benchmark/fluid/vgg.py index 9d990eff62ec368dc7033f55cc0862fa974a64e0..53e34e0cbd15914791c305db6797f826ebfae34e 100644 --- a/benchmark/fluid/vgg.py +++ b/benchmark/fluid/vgg.py @@ -17,7 +17,7 @@ from __future__ import print_function import sys import time import numpy as np -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import paddle.fluid.core as core import argparse diff --git a/cmake/cuda.cmake b/cmake/cuda.cmake index 7edc8637727e300539a46bc3941ace87c87903b8..b520c03a836a9e3f263ba050f151877ffe0d071d 100644 --- a/cmake/cuda.cmake +++ b/cmake/cuda.cmake @@ -172,6 +172,8 @@ set(CUDA_PROPAGATE_HOST_FLAGS 
OFF) list(APPEND CUDA_NVCC_FLAGS "-std=c++11") list(APPEND CUDA_NVCC_FLAGS "--use_fast_math") list(APPEND CUDA_NVCC_FLAGS "-Xcompiler -fPIC") +# in cuda9, suppress cuda warning on eigen +list(APPEND CUDA_NVCC_FLAGS "-w") # Set :expt-relaxed-constexpr to suppress Eigen warnings list(APPEND CUDA_NVCC_FLAGS "--expt-relaxed-constexpr") diff --git a/cmake/external/eigen.cmake b/cmake/external/eigen.cmake index 73d70c34dce8bedd9e62519c207e5be3dcf7dba3..edc93c2773f46ec9e0bf898557c55c93274e6a01 100644 --- a/cmake/external/eigen.cmake +++ b/cmake/external/eigen.cmake @@ -22,7 +22,9 @@ else() extern_eigen3 ${EXTERNAL_PROJECT_LOG_ARGS} GIT_REPOSITORY "https://github.com/RLovelett/eigen.git" - GIT_TAG 70661066beef694cadf6c304d0d07e0758825c10 + # eigen on cuda9.1 missing header of math_funtions.hpp + # https://stackoverflow.com/questions/43113508/math-functions-hpp-not-found-when-using-cuda-with-eigen + GIT_TAG 917060c364181f33a735dc023818d5a54f60e54c PREFIX ${EIGEN_SOURCE_DIR} UPDATE_COMMAND "" CONFIGURE_COMMAND "" diff --git a/cmake/external/warpctc.cmake b/cmake/external/warpctc.cmake index a631ad14b18310598f7eea3a51839d61a9e456ff..07e1137e16afc1e4e9ab9640e1ccaea8008a0cd2 100644 --- a/cmake/external/warpctc.cmake +++ b/cmake/external/warpctc.cmake @@ -38,8 +38,7 @@ ENDIF() ExternalProject_Add( extern_warpctc ${EXTERNAL_PROJECT_LOG_ARGS} - GIT_REPOSITORY "https://github.com/gangliao/warp-ctc.git" - GIT_TAG b63a0644654a3e0ed624c85a1767bc8193aead09 + GIT_REPOSITORY "https://github.com/dzhwinter/warp-ctc.git" PREFIX ${WARPCTC_SOURCES_DIR} UPDATE_COMMAND "" CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} diff --git a/doc/fluid/api/data/data_reader.rst b/doc/fluid/api/data/data_reader.rst index d7c896a6270b488ca4449e5211d0d0879eda6ac5..1a35d0bbc8f9d751f49c7e1fc26feb1bcb3ae7f0 100644 --- a/doc/fluid/api/data/data_reader.rst +++ b/doc/fluid/api/data/data_reader.rst @@ -56,11 +56,11 @@ DataFeeder Reader ====== -.. automodule:: paddle.v2.reader +.. automodule:: paddle.reader :members: :noindex: -.. automodule:: paddle.v2.reader.creator +.. automodule:: paddle.reader.creator :members: :noindex: diff --git a/doc/fluid/api/layers.rst b/doc/fluid/api/layers.rst index 3790f09c84563fe541bd8d0bc08e23b19d4287ca..ff3c9346a2cd777a5294d536911f39de9032fe52 100644 --- a/doc/fluid/api/layers.rst +++ b/doc/fluid/api/layers.rst @@ -479,6 +479,13 @@ label_smooth .. autofunction:: paddle.fluid.layers.label_smooth :noindex: +roi_pool +--------- + +.. autofunction:: paddle.fluid.layers.roi_pool + :noindex: + + ops === @@ -820,3 +827,5 @@ topk .. autofunction:: paddle.fluid.layers.topk :noindex: + + diff --git a/doc/fluid/design/algorithm/parameter_average.md b/doc/fluid/design/algorithm/parameter_average.md index 340bc302d57429a9bf10a9d23ed9b0cdc7a2a568..28ad6495d97515442eb8af2050158829814acd33 100644 --- a/doc/fluid/design/algorithm/parameter_average.md +++ b/doc/fluid/design/algorithm/parameter_average.md @@ -1,7 +1,7 @@ # Averaging Parameter in PaddlePaddle ## Why Averaging -In a large scale machine learning setup where the size of the training data is huge, it could take us a large number of iterations over the training data before we can achieve the optimal values of parameters of our model. Looking at the problem setup, it is desirable if we can obtain the optimal values of parameters by going through the data in as few passes as we can. 
+In a large scale machine learning setup where the size of the training data is huge, it could take us a large number of iterations over the training data before we can achieve the optimal values of parameters of our model. Looking at the problem setup, it is desirable to obtain the optimal values of parameters by going through the data in as few passes as possible. Polyak and Juditsky (1992) showed that the test performance of simple average of parameters obtained by Stochastic Gradient Descent (SGD) is as good as that of parameter values that are obtained by training the model over and over again, over the training dataset. @@ -16,16 +16,16 @@ We propose averaging for any optimizer similar to how ASGD performs it, as menti ### How to perform Parameter Averaging in PaddlePaddle Parameter Averaging in PaddlePaddle works in the following way during training : -1. It will take in an instance of a normal optimizer as an input, e.g. RMSPropOptimizer +1. It will take in an instance of an optimizer as an input, e.g. RMSPropOptimizer 2. The optimizer itself is responsible for updating the parameters. 3. The ParameterAverageOptimizer maintains a separate copy of the parameters for itself: - 1. In concept, the values of this copy are the average of the values of the parameters in the most recent N batches. - 2. However, saving all the N instances of the parameters in memory is not feasible. + 1. In theory, the values of this copy are the average of the values of the parameters in the most recent N batches. + 2. However, saving all N instances of the parameters in memory is not feasible. 3. Therefore, an approximation algorithm is used. Hence, overall we have have two copies of the parameters: one for the optimizer itself, and one for the ParameterAverageOptimizer. The former should be used in back propagation, while the latter should be used during testing and should be saved. -During the testing/ saving the model phase, we perform the following steps: +During the testing/saving the model phase, we perform the following steps: 1. Perform the delayed operations. 2. Save current values of the parameters to a temporary variable. 3. Replace the values of the parameters with the averaged values. diff --git a/doc/fluid/design/data_type/float16.md b/doc/fluid/design/data_type/float16.md index 1ea95ed6b5d6792171569b6ff76d09be92fcb13e..844d2aafcf257b85057e1ac200ed3d5cf0be2ff0 100644 --- a/doc/fluid/design/data_type/float16.md +++ b/doc/fluid/design/data_type/float16.md @@ -3,7 +3,7 @@ ## Why float16 Half precision (float16) is a binary floating-point format that occupies 16 bits in memory. float16 is half the size of traditional 32-bit single precision format (float) and has lower precision and smaller range. -When high precision computation is not required, using float16 data type could potentially +When high precision computation is not required (which is usually the case at least in the deep learning inference stage), using float16 data type could potentially - reduce storage space, memory bandwidth, and power usages; - increase the chance of data fitting into a smaller cache of lower latency; @@ -12,7 +12,7 @@ When high precision computation is not required, using float16 data type could p ## Survey of current float16 support A brief survey of float16 support on different compilers, hardwares, and libraries can be found below. 
Interested readers can refer to [link1](https://github.com/PaddlePaddle/Paddle/issues/4853) and [link2](https://github.com/Xreki/Xreki.github.io/blob/master/multi_data_types_in_dl_framework/ppt/float16_and_quantized_type.md) for more info. -The goal of float16 is to serve as a key for the executor to find and run the correct version of compute method specialized for float16 in operator kernel. It should be compatible with various natively supported float16 implementations including `__half` for cuda, `float16_t` for ARM, and `Eigen::half` for Eigen to make writing customized float16 kernels easier. +The goal of float16 is to serve as a key for the executor to find and run the correct version of compute method specialized for float16 in operator kernels. It should be compatible with various natively supported float16 implementations including `__half` for cuda, `float16_t` for ARM, and `Eigen::half` for Eigen to make writing customized float16 kernels easier. ### Compiler - nvcc supports `__half` data type after CUDA 7.5. @@ -95,11 +95,89 @@ float half_to_float(float16 h); ``` which provides one-to-one conversion between float32 and float16. These twos functions will do different conversion routines based on the current hardware. CUDA/ARM instrinsics will be used when the corresonding hardware is available. If the hardware or compiler level does not support float32 to float16 conversion, software emulation will be performed to do the conversion. -## To do -After float16 class is available, some of the future items are below: +## float16 inference +In Fluid, a neural network is represented as a protobuf message called [ProgramDesc](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/program.md), whose Python wrapper is a [Program](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#program). The basic structure of a program is some nested [blocks](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#block), where each block consists of some [variable](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#variable) definitions and a sequence of [operators](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#operator). An [executor](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/executor.md) will run a given program desc by executing the sequence of operators in the entrance block of the program one by one. -- Update pybind/tensor_py.h to bind c++ float16 with numpy float16. +### Operator level requirement +Each operator has many kernels for different data types, devices, and library types. The operator will select the appropriate kernel to run based on, among other things, the data type of the input variables. By default, every Fluid operator has a float data type kernel that takes float variables as input and generates float output. -- Modify `GetKernelType()` method in `framework/operator.h` to make it compatible with float16. +This means that if we provide float input to the first operator in a program, then each opeartor will use float kernel to compute float output and send it as input to the next operator to trigger the float kernel. Overall, the program will run in float mode and give us a final output of float data type. -- Create a type-casting operator that can convert the data type in tensor between float16 and other types. 
+The same principle applies if we want a program to run in float16 mode. We provide an input variable of float16 data type to the first operator, and then one by one, each operator in the program will run the float16 kernel (provided that each operator in this program has float16 kernels registered) until we finally obtain a float16 output variable. + +So the preliminary requirement for float16 inference is to add float16 kernels to the operators that are needed in a specific kind of program. For example, float16 inference on an image classification neural network like Vgg or Resnet typically requires the following operators to have float16 kernels: convolution, pooling, multiplication, addition, batch norm, dropout, relu, and softmax. Please refer to [new_op_en](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/dev/new_op_en.md) for details of how to add new kernels to an operator. + +### Variable level requirement +Operators including convolution and multiplication (used in fully-connected layers) take as input not only the variables generated by the preceding operators but also [parameter](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#parameter) variables, which contain the trained weights to apply to the input data. These weights are obtained in the Fluid training process and are by default of float data type. + +When these operators are running in float16 mode, the float16 kernel requires those parameter variables to contain weights of Fluid float16 data type. Thus, we need a convenient way to convert the original float weights to float16 weights. + +In Fluid, we use tensors to hold the actual data for a variable on the c++ end. [Pybind](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/pybind/tensor_py.h) is used to bind c++ tensors of a certain data type with the numpy array of the corresponding numpy data type on the Python end. Each common c++ built-in data type has a corresponding numpy data type of the same name. However, since there is no built-in float16 type in c++, we cannot directly bind the numpy float16 data type with the Fluid float16 class. Since both Fluid float16 and numpy float16 use uint16 as the internal data storage type, we use the c++ built-in type `uint16_t` and the corresponding numpy uint16 data type to bridge the gap via [Pybind](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/pybind/tensor_py.h). + +The following code demonstrates how to do the tensor conversion. +```Python +# var is the variable of float weights +# tensor is a numpy array of data copied from the tensor data in var +# fp16_var is the variable that will contain float16 weights converted from var +tensor = numpy.array(var.get_tensor()) +fp16_tensor = fp16_var.get_tensor() + +# After the original tensor data is converted to numpy float16 data type, +# view(numpy.uint16) is used so that the internal memory of the numpy array +# will be reinterpreted to be of uint16 data type, which is bound to +# Fluid float16 class via pybind with the help of uint16_t built-in c++ type +fp16_tensor.set(tensor.astype(numpy.float16).view(numpy.uint16), GPUPlace) +``` + +### Consistent API requirement +The basic inference in float16 mode requires users to feed input and obtain output both of float16 data type.
However, in this way, the inference APIs are not consistent between float16 mode and float mode, and users may find it confusing and difficult to use float16 inference since they need to do extra steps to provide float16 input data and convert float16 output data back to float. To have a consistent API for different inference modes, we need to transpile the program desc in some way so that we can run float16 inference by feeding and fetching variables of float data type. + +This problem can be solved by introducing a type-casting operator which takes an input variable of a certain data type, casts it to another specified data type, and puts the cast data into the output variable. Inserting cast operators where needed can make a program internally run in float16 mode. + +### float16 transpiler +With all the above requirements in mind, we designed a float16 inference transpiler that can transpile a float32 mode inference program desc into a float16 mode one. + +Given a float inference program and the corresponding variables of float32 weights in the [scope](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/scope.md), +this transpiler mainly does the following modifications: + +1. Insert cast operators at the beginning of the program so that the input float data will be converted to float16 data type before being fed to subsequent operators to invoke the float16 kernel. + +2. Insert cast operators at the end of the program so that the output float16 data will be converted back to float data type before users obtain the result. + +3. For each parameter variable of float weights, create in the scope a corresponding variable of float16 weights which are converted from the corresponding float weights, and add this new float16 variable to the program. + +4. Update the operator information in the program so that each relevant operator uses the newly created float16 variable instead of its float counterpart. + +Below is an example of usage: +```Python +# Get the float inference program +[float_inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + +# Prepare the float input data +tensor_img = numpy.random.rand(1, 3, 32, 32).astype(numpy.float32) + +# Running inference_program in float mode +float_results = exe.run(float_inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) + +# Use float16 transpiler to speed up +float16_inference_program = float_inference_program.clone() +t = fluid.InferenceTranspiler() +t.float16_transpile(float16_inference_program, GPUPlace) + +# Running +float16_results = exe.run(float16_inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) +``` + +As we can see from the example above, users can simply use the `float16_transpile` method provided by the inference transpiler class on an existing float inference program to run inference in float16 mode. + +### Speedup on GPU +Currently, Fluid inference in float16 mode is only supported on Nvidia GPU devices. There is no motivation to support float16 inference on non-ARM CPUs because float16 is not natively supported there and float16 calculation will only be slower than its float counterpart. + +Nvidia started to support its native float16 data type (which has the same internal memory representation as the Fluid float16 class) on CUDA 7.5.
Moreover, float16 speedups on common computational intensive tasks including GEMM (general matrix-matrix multiplication) and convolution are supported since cublas 7.5 and cuDNN 5.0. + +Recently, the introduction of [tensor core](https://devblogs.nvidia.com/programming-tensor-cores-cuda-9/) in volta architecture GPUs and the support of tensor core calculation in CUDA 9.0 and cuDNN 7.0 make float16 truly superior to float in certain deep learning applications. Please refer to this [benchmark report](https://github.com/kexinzhao/Paddle_benchmark/blob/master/float16_benchmark.md) for more details. diff --git a/doc/v2/api/data/data_reader.rst b/doc/v2/api/data/data_reader.rst index d7c896a6270b488ca4449e5211d0d0879eda6ac5..1a35d0bbc8f9d751f49c7e1fc26feb1bcb3ae7f0 100644 --- a/doc/v2/api/data/data_reader.rst +++ b/doc/v2/api/data/data_reader.rst @@ -56,11 +56,11 @@ DataFeeder Reader ====== -.. automodule:: paddle.v2.reader +.. automodule:: paddle.reader :members: :noindex: -.. automodule:: paddle.v2.reader.creator +.. automodule:: paddle.reader.creator :members: :noindex: diff --git a/doc/v2/api/data/dataset.rst b/doc/v2/api/data/dataset.rst index 02e41564b1e48c07da6ac071fc4b60089169e05a..e7c8be4452bf55e0967d750c2e624e8e316e9330 100644 --- a/doc/v2/api/data/dataset.rst +++ b/doc/v2/api/data/dataset.rst @@ -1,82 +1,82 @@ Dataset ======= -.. automodule:: paddle.v2.dataset +.. automodule:: paddle.dataset :members: :noindex: mnist +++++ -.. automodule:: paddle.v2.dataset.mnist +.. automodule:: paddle.dataset.mnist :members: :noindex: cifar +++++ -.. automodule:: paddle.v2.dataset.cifar +.. automodule:: paddle.dataset.cifar :members: :noindex: conll05 +++++++ -.. automodule:: paddle.v2.dataset.conll05 +.. automodule:: paddle.dataset.conll05 :members: get_dict,get_embedding,test :noindex: imdb ++++ -.. automodule:: paddle.v2.dataset.imdb +.. automodule:: paddle.dataset.imdb :members: :noindex: imikolov ++++++++ -.. automodule:: paddle.v2.dataset.imikolov +.. automodule:: paddle.dataset.imikolov :members: :noindex: movielens +++++++++ -.. automodule:: paddle.v2.dataset.movielens +.. automodule:: paddle.dataset.movielens :members: :noindex: -.. autoclass:: paddle.v2.dataset.movielens.MovieInfo +.. autoclass:: paddle.dataset.movielens.MovieInfo :noindex: - -.. autoclass:: paddle.v2.dataset.movielens.UserInfo + +.. autoclass:: paddle.dataset.movielens.UserInfo :noindex: sentiment +++++++++ -.. automodule:: paddle.v2.dataset.sentiment +.. automodule:: paddle.dataset.sentiment :members: :noindex: uci_housing +++++++++++ -.. automodule:: paddle.v2.dataset.uci_housing +.. automodule:: paddle.dataset.uci_housing :members: :noindex: wmt14 +++++ -.. automodule:: paddle.v2.dataset.wmt14 +.. automodule:: paddle.dataset.wmt14 :members: :noindex: wmt16 +++++ -.. automodule:: paddle.v2.dataset.wmt16 +.. automodule:: paddle.dataset.wmt16 :members: :noindex: diff --git a/paddle/cuda/include/hl_base.h b/paddle/cuda/include/hl_base.h index 6c4f09dacb47c431db2e2610a3e61390a82dcba0..b979aa7723ea77fcae98a5be41d9b9dc488a68c3 100644 --- a/paddle/cuda/include/hl_base.h +++ b/paddle/cuda/include/hl_base.h @@ -228,6 +228,21 @@ extern __thread cudaStream_t default_stream; << "CUDA error: " << hl_get_device_error_string((size_t)err); \ } +// __shfl has been deprecated as of CUDA 9.0. 
+#if CUDA_VERSION < 9000 +template +__forceinline__ __device__ T +__shfl_sync(unsigned, T val, int src_line, int width) { + return __shfl(val, src_line, width); +} + +#define CREATE_SHFL_MASK(mask, predicate) mask = 0u; +#else +#define FULL_WARP_MASK 0xFFFFFFFF +#define CREATE_SHFL_MASK(mask, predicate) \ + mask = __ballot_sync(FULL_WARP_MASK, (predicate)) +#endif + #endif /* __NVCC__ */ #endif /* HL_BASE_H_ */ diff --git a/paddle/cuda/src/hl_cuda_lstm.cu b/paddle/cuda/src/hl_cuda_lstm.cu index 21c0c26b6ef0420b1a719736a66eeb8114ed9680..e30fcddffdf99417a4b9b811a0b0cb0a12e79b99 100644 --- a/paddle/cuda/src/hl_cuda_lstm.cu +++ b/paddle/cuda/src/hl_cuda_lstm.cu @@ -341,12 +341,15 @@ void hl_lstm_parallel_forward(real *gateValue, } __device__ __forceinline__ void transpose_32x32(real a[], const int idx) { - int addr = idx % 32; + const int warp_size = 32; + int addr = idx % warp_size; + unsigned mask = 0u; + CREATE_SHFL_MASK(mask, addr < warp_size); #pragma unroll for (int k = 1; k < 32; k++) { - // rSrc[k] = __shfl(rSrc[k], (threadIdx.x + k) % 32, 32); - addr = __shfl(addr, (idx + 1) % 32, 32); - a[k] = __shfl(a[k], addr, 32); + // rSrc[k] = __shfl_sync(rSrc[k], (threadIdx.x + k) % 32, 32); + addr = __shfl_sync(mask, addr, (idx + 1) % 32, 32); + a[k] = __shfl_sync(mask, a[k], addr, 32); } #pragma unroll @@ -360,10 +363,11 @@ __device__ __forceinline__ void transpose_32x32(real a[], const int idx) { } addr = (32 - idx) % 32; + CREATE_SHFL_MASK(mask, idx % 32 < warp_size); #pragma unroll for (int k = 0; k < 32; k++) { - a[k] = __shfl(a[k], addr, 32); - addr = __shfl(addr, (idx + 31) % 32, 32); + a[k] = __shfl_sync(mask, a[k], addr, 32); + addr = __shfl_sync(mask, addr, (idx + 31) % 32, 32); } } diff --git a/paddle/cuda/src/hl_top_k.cu b/paddle/cuda/src/hl_top_k.cu index fea8712a773b1524022f4bba626cf5044edebef6..59ba552f560dab904d4983e0778ff57be9477c3e 100644 --- a/paddle/cuda/src/hl_top_k.cu +++ b/paddle/cuda/src/hl_top_k.cu @@ -244,13 +244,16 @@ __device__ __forceinline__ void blockReduce(Pair* shTopK, if (--beamSize == 0) break; __syncthreads(); + unsigned mask = 0u; + // CREATE_SHFL_MASK(mask, tid < len); + if (tid == maxId[0]) { if (beam < maxLength) { shTopK[tid] = topK[beam]; } } if (maxId[0] / 32 == warp) { - if (__shfl(beam, (maxId[0]) % 32, 32) == maxLength) break; + if (__shfl_sync(mask, beam, (maxId[0]) % 32, 32) == maxLength) break; } } } diff --git a/paddle/fluid/framework/details/broadcast_op_handle_test.cc b/paddle/fluid/framework/details/broadcast_op_handle_test.cc index 3f2dcde3e9597287d72046dd4f8b07faab1ede25..8f1b6d1615312fced0887f9ff14ae17877371b7e 100644 --- a/paddle/fluid/framework/details/broadcast_op_handle_test.cc +++ b/paddle/fluid/framework/details/broadcast_op_handle_test.cc @@ -139,7 +139,7 @@ struct TestBroadcastOpHandle { PADDLE_ENFORCE_EQ(out_tensor.lod(), lod, "lod is not equal."); f::Tensor result_tensor; - f::TensorCopy(out_tensor, cpu_place, *(ctxs_[j]), &result_tensor); + f::TensorCopySync(out_tensor, cpu_place, &result_tensor); float* ct = result_tensor.mutable_data(cpu_place); for (int64_t i = 0; i < f::product(kDims); ++i) { @@ -185,7 +185,7 @@ struct TestBroadcastOpHandle { } f::Tensor result_tensor; - f::TensorCopy(rt, cpu_place, *(ctxs_[j]), &result_tensor); + f::TensorCopySync(rt, cpu_place, &result_tensor); float* ct = result_tensor.data(); for (int64_t i = 0; i < f::product(kDims); ++i) { diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc index 
423449abff97dbf70d81314f852d9135e25f243f..1e8ca20b51d43554cf1898b41b31c27b90e6c642 100644 --- a/paddle/fluid/framework/details/fetch_op_handle.cc +++ b/paddle/fluid/framework/details/fetch_op_handle.cc @@ -66,8 +66,7 @@ void FetchOpHandle::RunImpl() { auto &t = var->Get(); if (platform::is_gpu_place(t.place())) { #ifdef PADDLE_WITH_CUDA - TensorCopy(t, cpu, *dev_ctxes_[t.place()], &tensors_[i], true); - dev_ctxes_.at(t.place())->Wait(); + TensorCopySync(t, cpu, &tensors_[i]); #endif } else { tensors_[i].ShareDataWith(t); diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.cc b/paddle/fluid/framework/details/multi_devices_graph_builder.cc index 3413467b149539bcff42d78a9a6fe315d6558bb4..daba9bf2dd861d6366723ed71eada66bc67c78f2 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_builder.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_builder.cc @@ -34,7 +34,7 @@ MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder( const std::vector &places, const std::string &loss_var_name, const std::unordered_set ¶ms, - const std::vector &local_scopes, bool skip_scale_loss, + const std::vector &local_scopes, bool use_default_grad_scale, platform::NCCLContextMap *nccl_ctxs) : loss_var_name_(loss_var_name), places_(places), @@ -45,7 +45,7 @@ MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder( const std::vector &places, const std::string &loss_var_name, const std::unordered_set ¶ms, - const std::vector &local_scopes, bool skip_scale_loss) + const std::vector &local_scopes, bool use_default_grad_scale) : loss_var_name_(loss_var_name), places_(places), local_scopes_(local_scopes) { @@ -53,28 +53,25 @@ MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder( for (auto &p : params) { grad_names_.insert(GradVarName(p)); } - skip_scale_loss_ = skip_scale_loss; + use_default_grad_scale_ = use_default_grad_scale; } void MultiDevSSAGraphBuilder::CreateOpHandleIOs(SSAGraph *result, const OpDesc &op, - const platform::Place &p, - const size_t &i) const { + size_t place_id) const { + auto p = places_[place_id]; auto *op_handle = result->ops_.back().get(); op_handle->SetDeviceContext(p, platform::DeviceContextPool::Instance().Get(p)); - auto var_names = op.InputArgumentNames(); - - for (auto &each_var_name : var_names) { - VarHandle *var = CreateOrGetLatestVarHandle(result, each_var_name, p, i); + for (auto &each_var_name : op.InputArgumentNames()) { + VarHandle *var = + CreateOrGetLatestVarHandle(result, each_var_name, p, place_id); op_handle->AddInput(var); } - var_names = op.OutputArgumentNames(); - - for (auto &each_var_name : var_names) { - CreateOpOutput(result, op_handle, each_var_name, p, i); + for (auto &each_var_name : op.OutputArgumentNames()) { + CreateOpOutput(result, op_handle, each_var_name, p, place_id); } } @@ -84,17 +81,18 @@ bool MultiDevSSAGraphBuilder::IsDistTrainOp(const OpDesc &op, return false; } - auto checker = [&](const std::vector opvars, - const std::vector sendvars) -> bool { - bool is_dist_train_op = false; + /** + * Check any of opvars contains `.block` and in sendvars + */ + auto checker = [](const std::vector &opvars, + const std::vector &sendvars) -> bool { for (auto &var : opvars) { if (var.find(".block") != std::string::npos && std::find(sendvars.begin(), sendvars.end(), var) != sendvars.end()) { - is_dist_train_op = true; - break; + return true; } } - return is_dist_train_op; + return false; }; if (op.Type() == "split") { @@ -117,13 +115,7 @@ std::unique_ptr MultiDevSSAGraphBuilder::Build( places_.size()); // Find "send" op first for split is in front of 
send. - OpDesc *send_op = nullptr; - for (auto *op : program.Block(0).AllOps()) { - if (op->Type() == "send") { - send_op = op; - break; - } - } + OpDesc *send_op = GetSendOpDesc(program); bool is_forwarding = true; for (auto *op : program.Block(0).AllOps()) { @@ -134,7 +126,8 @@ std::unique_ptr MultiDevSSAGraphBuilder::Build( } else if (IsDistTrainOp(*op, send_op)) { CreateComputationalOps(&result, *op, 1); } else if (IsScaleLossOp(*op)) { - if (!skip_scale_loss_) { + // user can customize loss@grad if not use_default_grad_scale_ + if (use_default_grad_scale_) { CreateScaleLossGradOp(&result); } is_forwarding = false; @@ -142,10 +135,7 @@ std::unique_ptr MultiDevSSAGraphBuilder::Build( CreateComputationalOps(&result, *op, places_.size()); if (!is_forwarding) { // Currently, we assume that once gradient is generated, it can be - // broadcast, and each gradient is only broadcast once. But there are no - // other cases, for example, we need to adjust the gradient according to - // the input when we get the gradient, which is not considered at - // present. + // broadcast, and each gradient is only broadcast once. for (auto &og : op->OutputArgumentNames()) { if (IsParameterGradientOnce(og, &og_has_been_broadcast)) { InsertNCCLAllReduceOp(&result, og); @@ -175,6 +165,16 @@ std::unique_ptr MultiDevSSAGraphBuilder::Build( return std::unique_ptr(graph); } +OpDesc *MultiDevSSAGraphBuilder::GetSendOpDesc( + const ProgramDesc &program) const { + for (auto *op : program.Block(0).AllOps()) { + if (op->Type() == "send") { + return op; + } + } + return nullptr; +} + void MultiDevSSAGraphBuilder::InsertNCCLAllReduceOp( SSAGraph *result, const std::string &og) const { #ifdef PADDLE_WITH_CUDA @@ -243,7 +243,7 @@ void MultiDevSSAGraphBuilder::CreateComputationalOps(SSAGraph *result, auto p = places_[scope_idx]; auto s = local_scopes_[scope_idx]; result->ops_.emplace_back(new ComputationOpHandle(op, s, p)); - CreateOpHandleIOs(result, op, p, scope_idx); + CreateOpHandleIOs(result, op, scope_idx); } } @@ -255,7 +255,7 @@ void MultiDevSSAGraphBuilder::CreateSendOp(SSAGraph *result, result->ops_.emplace_back(new SendOpHandle(op, s, p)); // Create inputs for output on original place and no ssa output // is created for send op. 
- CreateOpHandleIOs(result, op, p, 0); + CreateOpHandleIOs(result, op, 0); } bool MultiDevSSAGraphBuilder::IsScaleLossOp(const OpDesc &op) const { diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.h b/paddle/fluid/framework/details/multi_devices_graph_builder.h index dc3da70eda2abaa1a312c25aedf94fa7e427c78a..bad47458ef4cd1cd42e902341e8be66da5c210ed 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_builder.h +++ b/paddle/fluid/framework/details/multi_devices_graph_builder.h @@ -41,14 +41,14 @@ class MultiDevSSAGraphBuilder : public SSAGraphBuilder { const std::string &loss_var_name, const std::unordered_set ¶ms, const std::vector &local_scopes, - bool skip_scale_loss); + bool use_default_grad_scale); #endif std::unique_ptr Build(const ProgramDesc &program) const override; private: void CreateOpHandleIOs(SSAGraph *result, const OpDesc &op, - const platform::Place &p, const size_t &i) const; + size_t place_id) const; private: std::string loss_var_name_; @@ -59,12 +59,15 @@ class MultiDevSSAGraphBuilder : public SSAGraphBuilder { #ifdef PADDLE_WITH_CUDA platform::NCCLContextMap *nccl_ctxs_; #endif - bool skip_scale_loss_; + bool use_default_grad_scale_; bool IsScaleLossOp(const OpDesc &op) const; void CreateSendOp(SSAGraph *result, const OpDesc &op) const; + /** + * Is this operator as the end-point operator before/after send operator. + */ bool IsDistTrainOp(const OpDesc &op, OpDesc *send_op) const; void CreateComputationalOps(SSAGraph *result, const OpDesc &op, @@ -77,6 +80,12 @@ class MultiDevSSAGraphBuilder : public SSAGraphBuilder { std::unordered_set *og_has_been_broadcast) const; void InsertNCCLAllReduceOp(SSAGraph *result, const std::string &og) const; + + /** + * Get send op in the global block of program. + * nullptr if not found. 
+ */ + OpDesc *GetSendOpDesc(const ProgramDesc &program) const; }; } // namespace details } // namespace framework diff --git a/paddle/fluid/framework/details/reduce_op_handle_test.cc b/paddle/fluid/framework/details/reduce_op_handle_test.cc index c17aabee53680fba10eac289cf8f8bd5f7d419e8..ffdd7c14eb5097cc8285da090e4a72e1e3f43d86 100644 --- a/paddle/fluid/framework/details/reduce_op_handle_test.cc +++ b/paddle/fluid/framework/details/reduce_op_handle_test.cc @@ -194,7 +194,7 @@ struct TestReduceOpHandle { } f::Tensor result_tensor; - f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor); + f::TensorCopySync(rt, cpu_place, &result_tensor); float *ct = result_tensor.data(); for (int64_t j = 0; j < f::product(result_tensor.dims()); ++j) { @@ -239,7 +239,7 @@ struct TestReduceOpHandle { auto &rt = out_var->Get(); f::Tensor result_tensor; - f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor); + f::TensorCopySync(rt, cpu_place, &result_tensor); float *ct = result_tensor.data(); for (int64_t j = 0; j < f::product(result_tensor.dims()); ++j) { diff --git a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc index 7a65ee62c9bfc0dad2ebee3be21de825fa405d73..1cd3113030086104e7fc5c4ba3364a5ff027632b 100644 --- a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc +++ b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc @@ -46,6 +46,7 @@ void ScaleLossGradOpHandle::RunImpl() { ->stream(); memory::Copy(boost::get(place_), tmp, platform::CPUPlace(), &coeff_, sizeof(float), stream); + VLOG(1) << place_ << "RUN Scale loss grad op"; }); #endif } diff --git a/paddle/fluid/framework/details/ssa_graph.h b/paddle/fluid/framework/details/ssa_graph.h index 72684e7f97f1324d0efba960903cf9f2acb618a4..e996a00c162186e47e77d007503ac67caa9f8024 100644 --- a/paddle/fluid/framework/details/ssa_graph.h +++ b/paddle/fluid/framework/details/ssa_graph.h @@ -25,12 +25,22 @@ namespace paddle { namespace framework { namespace details { +// An SSA graph used by the parallel executor. struct SSAGraph { + // all variables on each device. + // The outside vector is the device vector. Each element of this vector is a + // map from variable name to variables. The variables that have the same name + // will have different versions. The offset in the + // `std::vector>` is the version of the variables. std::vector< std::unordered_map>>> vars_; + // aux variables to represent dependency. Useful to resolve data hazard. std::unordered_set> dep_vars_; + + // all operators. NOTE that even though we use a vector here, the operators are + // unordered.
std::vector> ops_; }; diff --git a/paddle/fluid/framework/details/ssa_graph_builder.h b/paddle/fluid/framework/details/ssa_graph_builder.h index be1f0460e45402806b18835f054a7195df1374cc..64e5d93081eb76c56898bbeb530e37364619fdbb 100644 --- a/paddle/fluid/framework/details/ssa_graph_builder.h +++ b/paddle/fluid/framework/details/ssa_graph_builder.h @@ -48,6 +48,8 @@ class SSAGraphBuilder { const platform::Place &place, size_t place_offset); + // Add an output variable (each_var_name, place, place_offset) to op_handle, + // which belongs to graph static void CreateOpOutput(SSAGraph *graph, OpHandleBase *op_handle, const std::string &each_var_name, const platform::Place &place, size_t place_offset); diff --git a/paddle/fluid/framework/init.cc b/paddle/fluid/framework/init.cc index b30f276b4b7c61fda1b40273ce6ccfa19738da41..85beae775b96c3b7e08a2795bcd0ec79b24faeb4 100644 --- a/paddle/fluid/framework/init.cc +++ b/paddle/fluid/framework/init.cc @@ -15,7 +15,6 @@ limitations under the License. */ #include #include #include -#include #include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/operator.h" @@ -31,6 +30,7 @@ std::once_flag p2p_init_flag; void InitGflags(std::vector argv) { std::call_once(gflags_init_flag, [&]() { + argv.insert(argv.begin(), "dummy"); int argc = argv.size(); char **arr = new char *[argv.size()]; std::string line; @@ -44,20 +44,23 @@ void InitGflags(std::vector argv) { }); } -void InitP2P(int count) { +void InitP2P(std::vector devices) { #ifdef PADDLE_WITH_CUDA std::call_once(p2p_init_flag, [&]() { + int count = devices.size(); for (int i = 0; i < count; ++i) { for (int j = 0; j < count; ++j) { - if (i == j) continue; + if (devices[i] == devices[j]) continue; int can_acess = -1; - PADDLE_ENFORCE(cudaDeviceCanAccessPeer(&can_acess, i, j), - "Failed to test P2P access."); + PADDLE_ENFORCE( + cudaDeviceCanAccessPeer(&can_acess, devices[i], devices[j]), + "Failed to test P2P access."); if (can_acess != 1) { - LOG(WARNING) << "Cannot enable P2P access from " << i << " to " << j; + LOG(WARNING) << "Cannot enable P2P access from " << devices[i] + << " to " << devices[j]; } else { - cudaSetDevice(i); - cudaDeviceEnablePeerAccess(j, 0); + cudaSetDevice(devices[i]); + cudaDeviceEnablePeerAccess(devices[j], 0); } } } @@ -67,11 +70,26 @@ void InitP2P(int count) { void InitDevices(bool init_p2p) { /*Init all available devices by default */ + std::vector devices; +#ifdef PADDLE_WITH_CUDA + try { + int count = platform::GetCUDADeviceCount(); + for (int i = 0; i < count; ++i) { + devices.push_back(i); + } + } catch (const std::exception &exp) { + LOG(WARNING) << "Compiled with WITH_GPU, but no GPU found in runtime."; + } +#else + LOG(WARNING) + << "'CUDA' is not supported, Please re-compile with WITH_GPU option"; +#endif + InitDevices(init_p2p, devices); +} +void InitDevices(bool init_p2p, const std::vector devices) { std::vector places; - places.emplace_back(platform::CPUPlace()); int count = 0; - #ifdef PADDLE_WITH_CUDA try { count = platform::GetCUDADeviceCount(); @@ -83,12 +101,17 @@ void InitDevices(bool init_p2p) { << "'CUDA' is not supported, Please re-compile with WITH_GPU option"; #endif - for (int i = 0; i < count; ++i) { - places.emplace_back(platform::CUDAPlace(i)); + for (size_t i = 0; i < devices.size(); ++i) { + if (devices[i] >= count || devices[i] < 0) { + LOG(WARNING) << "Invalid devices id."; + continue; + } + places.emplace_back(platform::CUDAPlace(devices[i])); } if (init_p2p) { - InitP2P(count); + InitP2P(devices); } + 
places.emplace_back(platform::CPUPlace()); platform::DeviceContextPool::Init(places); } diff --git a/paddle/fluid/framework/init.h b/paddle/fluid/framework/init.h index 1155ca36049dc66e7ee40e8eca87285d7a728299..0e30594672927253cc8083dcb88bb867d63ec729 100644 --- a/paddle/fluid/framework/init.h +++ b/paddle/fluid/framework/init.h @@ -28,5 +28,7 @@ void InitGLOG(const std::string &prog_name); void InitDevices(bool init_p2p); +void InitDevices(bool init_p2p, const std::vector devices); + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index de644e851999920251c762a75c050e8182e950c6..4712efeff68cf98a50b610acf78bb52d7519c520 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -58,7 +58,7 @@ ParallelExecutor::ParallelExecutor( const std::unordered_set &bcast_vars, const ProgramDesc &main_program, const std::string &loss_var_name, Scope *scope, const std::vector &local_scopes, bool allow_op_delay, - bool customize_scale_loss) + bool use_default_grad_scale) : member_(new ParallelExecutorPrivate(places)) { member_->global_scope_ = scope; @@ -93,11 +93,11 @@ ParallelExecutor::ParallelExecutor( #ifdef PADDLE_WITH_CUDA details::MultiDevSSAGraphBuilder builder( member_->places_, loss_var_name, params, member_->local_scopes_, - customize_scale_loss, member_->nccl_ctxs_.get()); + use_default_grad_scale, member_->nccl_ctxs_.get()); #else details::MultiDevSSAGraphBuilder builder(member_->places_, loss_var_name, params, member_->local_scopes_, - customize_scale_loss); + use_default_grad_scale); #endif auto graph = builder.Build(main_program); diff --git a/paddle/fluid/framework/parallel_executor.h b/paddle/fluid/framework/parallel_executor.h index 49da123d98181c3d3abcdd64d14c5583142eba58..ecd107d81f8f5bf5d8b899d0c07797114a7ab767 100644 --- a/paddle/fluid/framework/parallel_executor.h +++ b/paddle/fluid/framework/parallel_executor.h @@ -40,7 +40,7 @@ class ParallelExecutor { const ProgramDesc& main_program, const std::string& loss_var_name, Scope* scope, const std::vector& local_scopes, - bool allow_op_delay, bool customize_scale_loss); + bool allow_op_delay, bool use_default_grad_scale); ~ParallelExecutor(); diff --git a/paddle/fluid/framework/tensor_util.cc b/paddle/fluid/framework/tensor_util.cc index d2e60ab1dd16758a91d22ef6872edc5053ef88b3..e5bc74755f46449296a153e8b330968e6d9f1e1d 100644 --- a/paddle/fluid/framework/tensor_util.cc +++ b/paddle/fluid/framework/tensor_util.cc @@ -20,7 +20,7 @@ namespace paddle { namespace framework { void TensorCopy(const Tensor& src, const platform::Place& dst_place, - const platform::DeviceContext& ctx, Tensor* dst, bool sync) { + const platform::DeviceContext& ctx, Tensor* dst) { VLOG(3) << "TensorCopy " << src.dims() << " from " << src.place() << " to " << dst_place; src.check_memory_size(); @@ -48,9 +48,7 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place, auto ctx_gpu_place = boost::get(ctx_place); PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place); auto stream = - sync ? 
nullptr - : reinterpret_cast(ctx) - .stream(); + reinterpret_cast(ctx).stream(); memory::Copy(dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream); } else if (platform::is_cpu_place(src_place) && platform::is_gpu_place(dst_place)) { @@ -61,9 +59,7 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place, auto ctx_gpu_place = boost::get(ctx_place); PADDLE_ENFORCE_EQ(dst_gpu_place, ctx_gpu_place); auto stream = - sync ? nullptr - : reinterpret_cast(ctx) - .stream(); + reinterpret_cast(ctx).stream(); memory::Copy(dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, stream); } else if (platform::is_gpu_place(src_place) && platform::is_gpu_place(dst_place)) { @@ -72,9 +68,7 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place, auto ctx_place = ctx.GetPlace(); PADDLE_ENFORCE(platform::is_gpu_place(ctx_place)); auto stream = - sync ? nullptr - : reinterpret_cast(ctx) - .stream(); + reinterpret_cast(ctx).stream(); memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream); } #endif @@ -92,6 +86,41 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place, TensorCopy(src, dst_place, *dev_ctx, dst); } +void TensorCopySync(const Tensor& src, const platform::Place& dst_place, + Tensor* dst) { + VLOG(3) << "TensorCopySync " << src.dims() << " from " << src.place() + << " to " << dst_place; + src.check_memory_size(); + dst->Resize(src.dims()); + dst->set_layout(src.layout()); + auto src_place = src.place(); + auto src_ptr = src.data(); + auto dst_ptr = dst->mutable_data(dst_place, src.type()); + auto size = src.numel() * SizeOfType(src.type()); + if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) { + memory::Copy(boost::get(dst_place), dst_ptr, + boost::get(src_place), src_ptr, size); + } +#ifdef PADDLE_WITH_CUDA + else if (platform::is_gpu_place(src_place) && // NOLINT + platform::is_cpu_place(dst_place)) { + auto src_gpu_place = boost::get(src_place); + auto dst_cpu_place = boost::get(dst_place); + memory::Copy(dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr); + } else if (platform::is_cpu_place(src_place) && + platform::is_gpu_place(dst_place)) { + auto src_cpu_place = boost::get(src_place); + auto dst_gpu_place = boost::get(dst_place); + memory::Copy(dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, nullptr); + } else if (platform::is_gpu_place(src_place) && + platform::is_gpu_place(dst_place)) { + auto src_gpu_place = boost::get(src_place); + auto dst_gpu_place = boost::get(dst_place); + memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr); + } +#endif +} + template struct AnyDTypeVisitor { Predicate predicate_; diff --git a/paddle/fluid/framework/tensor_util.h b/paddle/fluid/framework/tensor_util.h index 3af68402dc56230171e858bf8f8f8c89c2bfe760..dca279b69382b80e055f661cefe84b81326704b5 100644 --- a/paddle/fluid/framework/tensor_util.h +++ b/paddle/fluid/framework/tensor_util.h @@ -24,10 +24,11 @@ namespace paddle { namespace framework { void TensorCopy(const Tensor& src, const platform::Place& dst_place, - const platform::DeviceContext& ctx, Tensor* dst, - bool sync = false); + const platform::DeviceContext& ctx, Tensor* dst); void TensorCopy(const Tensor& src, const platform::Place& dst_place, Tensor* dst); +void TensorCopySync(const Tensor& src, const platform::Place& dst_place, + Tensor* dst); template void TensorFromVector(const std::vector& src, diff --git a/paddle/fluid/inference/engine.h b/paddle/fluid/inference/engine.h index 
0633c052e4dc61d495b9acacb9a7ab6c941a267a..6b0ac92fa908427a89a6a5fa74dacc3b24abd1c3 100644 --- a/paddle/fluid/inference/engine.h +++ b/paddle/fluid/inference/engine.h @@ -46,7 +46,6 @@ class EngineBase { virtual void Execute(int batch_size) = 0; virtual ~EngineBase() {} - }; // class EngineBase } // namespace inference diff --git a/paddle/fluid/inference/io.cc b/paddle/fluid/inference/io.cc index 78d2f16746cf478c4424df929bd1f62b08f8a67c..65db7c7b5008dcb301e741ec17c3623715e10bb8 100644 --- a/paddle/fluid/inference/io.cc +++ b/paddle/fluid/inference/io.cc @@ -16,17 +16,29 @@ limitations under the License. */ #include #include +#include #include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/framework/feed_fetch_type.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/pybind/pybind.h" +DEFINE_string(devices, "", "The devices to be used which is joined by comma."); +DEFINE_bool(init_p2p, false, "Whether to init p2p."); + namespace paddle { namespace inference { -// Temporarily add this function for exposing framework::InitDevices() when -// linking the inference shared library. -void Init(bool init_p2p) { framework::InitDevices(init_p2p); } +void Init(const std::vector argv) { + framework::InitGflags(argv); + // init devices + std::vector devices; + std::string token; + std::istringstream tokenStream(FLAGS_devices); + while (std::getline(tokenStream, token, ',')) { + devices.push_back(std::stoi(token)); + } + framework::InitDevices(FLAGS_init_p2p, devices); +} void ReadBinaryFile(const std::string& filename, std::string* contents) { std::ifstream fin(filename, std::ios::in | std::ios::binary); diff --git a/paddle/fluid/inference/io.h b/paddle/fluid/inference/io.h index ba3e45099ae7c1626bf11d9527d4fa4c7f772fec..caf599b1a68783f155cd134c2a29e9ffa49a0895 100644 --- a/paddle/fluid/inference/io.h +++ b/paddle/fluid/inference/io.h @@ -25,7 +25,7 @@ limitations under the License. */ namespace paddle { namespace inference { -void Init(bool init_p2p); +void Init(const std::vector argv); void LoadPersistables(framework::Executor* executor, framework::Scope* scope, const framework::ProgramDesc& main_program, diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc index 276502e4999df3aecf246fcb9591c59ea9d3f02b..03a25f8e8b52b452cb621aeddf3563cc21a033df 100644 --- a/paddle/fluid/inference/tensorrt/engine.cc +++ b/paddle/fluid/inference/tensorrt/engine.cc @@ -17,6 +17,7 @@ limitations under the License. */ #include #include #include +#include #include "paddle/fluid/inference/tensorrt/helper.h" #include "paddle/fluid/platform/enforce.h" diff --git a/paddle/fluid/inference/tensorrt/engine.h b/paddle/fluid/inference/tensorrt/engine.h index ff853455b8bf0c9db36353f3cbe6be0ed4948fb3..82d8c3df4ece7e56a72b650f8ea58f3953af3b64 100644 --- a/paddle/fluid/inference/tensorrt/engine.h +++ b/paddle/fluid/inference/tensorrt/engine.h @@ -16,7 +16,9 @@ limitations under the License. */ #include #include +#include #include +#include #include "paddle/fluid/inference/engine.h" #include "paddle/fluid/inference/tensorrt/helper.h" @@ -56,9 +58,9 @@ class TensorRTEngine : public EngineBase { virtual ~TensorRTEngine(); // TODO(Superjomn) implement it later when graph segmentation is supported. 
- virtual void Build(const DescType& paddle_model) override; + void Build(const DescType& paddle_model) override; - virtual void Execute(int batch_size) override; + void Execute(int batch_size) override; // Initialize the inference network, so that TensorRT layers can add to this // network. diff --git a/paddle/fluid/inference/tensorrt/test_engine.cc b/paddle/fluid/inference/tensorrt/test_engine.cc index f3dbdf11f217ca35bc541b9bdb95bf0c06377fd3..c6e1c71cdc8eea48ab6197057d1d389f03c9cf54 100644 --- a/paddle/fluid/inference/tensorrt/test_engine.cc +++ b/paddle/fluid/inference/tensorrt/test_engine.cc @@ -12,13 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/inference/tensorrt/engine.h" - #include #include #include #include +#include "paddle/fluid/inference/tensorrt/engine.h" #include "paddle/fluid/platform/enforce.h" namespace paddle { @@ -65,7 +64,8 @@ TEST_F(TensorRTEngineTest, add_layer) { // fill in real data float x_v = 1234; - engine_->SetInputFromCPU("x", (void*)&x_v, 1 * sizeof(float)); + engine_->SetInputFromCPU("x", reinterpret_cast(&x_v), + 1 * sizeof(float)); LOG(INFO) << "to execute"; engine_->Execute(1); diff --git a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc index 1e6555bb02033a28dedd2a1d1962981dfcc97cc2..1a685b9e2ebcd7d4b5a057b506bccf6adcd9952c 100644 --- a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc +++ b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc @@ -62,5 +62,21 @@ TEST(inference, image_classification) { LOG(INFO) << output2.dims(); CheckError(output1, output2); + + // float16 inference requires cuda GPUs with >= 5.3 compute capability + if (paddle::platform::GetCUDAComputeCapability(0) >= 53) { + paddle::framework::LoDTensor output3; + std::vector cpu_fetchs3; + cpu_fetchs3.push_back(&output3); + + LOG(INFO) << "--- GPU Runs in float16 mode: ---"; + std::string fp16_dirname = dirname; + fp16_dirname.replace(fp16_dirname.find("book/"), + std::string("book/").size(), "book/float16_"); + TestInference( + fp16_dirname, cpu_feeds, cpu_fetchs3, FLAGS_repeat); + + CheckError(output2, output3); + } #endif } diff --git a/paddle/fluid/operators/accuracy_op.cu b/paddle/fluid/operators/accuracy_op.cu index 630a4a2df2ca8f6afe81be3c455d255a0693fcc3..23b48c6fdf427348879de07c671c65327d6436d7 100644 --- a/paddle/fluid/operators/accuracy_op.cu +++ b/paddle/fluid/operators/accuracy_op.cu @@ -15,7 +15,7 @@ limitations under the License. */ #include #include #include "paddle/fluid/operators/accuracy_op.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/gpu_info.h" namespace paddle { diff --git a/paddle/fluid/operators/adagrad_op.cu b/paddle/fluid/operators/adagrad_op.cu index e798101ca6a3a44de749a2d2219295bd8911dfac..b25268786d622bc7a94117849763833e528bef48 100644 --- a/paddle/fluid/operators/adagrad_op.cu +++ b/paddle/fluid/operators/adagrad_op.cu @@ -16,7 +16,7 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/adagrad_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/adam_op.h b/paddle/fluid/operators/adam_op.h index b332b6716369ca355454dc57fbd6cc2cbc71c658..f82ff47b52490c354f383515d430d14e24cbf6af 100644 --- a/paddle/fluid/operators/adam_op.h +++ b/paddle/fluid/operators/adam_op.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once #include // for sqrt in CPU and CUDA +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/detail/safe_ref.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" @@ -24,8 +25,14 @@ namespace operators { namespace scatter = paddle::operators::math::scatter; +struct GPUAdam; +struct CPUAdam; + +template +struct AdamFunctor; + template -struct AdamFunctor { +struct AdamFunctor { T beta1_; T beta2_; T epsilon_; @@ -71,6 +78,7 @@ struct AdamFunctor { // Calculation lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); + mom1 = beta1_ * mom1 + (1 - beta1_) * g; mom2 = beta2_ * mom2 + (1 - beta2_) * g * g; p -= lr * (mom1 / (sqrt(mom2) + epsilon_)); @@ -82,6 +90,71 @@ struct AdamFunctor { } }; +template +struct AdamFunctor { + T beta1_; + T beta2_; + T epsilon_; + + const T* beta1_pow_; + const T* beta2_pow_; + const T* moment1_; + T* moment1_out_; + const T* moment2_; + T* moment2_out_; + const T* lr_; + const T* grad_; + const T* param_; + T* param_out_; + + AdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow, + const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2, + T* mom2_out, const T* lr, const T* grad, const T* param, + T* param_out) + : beta1_(beta1), + beta2_(beta2), + epsilon_(epsilon), + beta1_pow_(beta1_pow), + beta2_pow_(beta2_pow), + moment1_(mom1), + moment1_out_(mom1_out), + moment2_(mom2), + moment2_out_(mom2_out), + lr_(lr), + grad_(grad), + param_(param), + param_out_(param_out) {} + + void operator()(size_t numel) const { + Eigen::Map> g{ + grad_, static_cast(numel)}; + Eigen::Map> mom1{ + moment1_, static_cast(numel)}; + Eigen::Map> mom2{ + moment2_, static_cast(numel)}; + Eigen::Map> param{ + param_, static_cast(numel)}; + + Eigen::Map> param_out{ + param_out_, static_cast(numel)}; + Eigen::Map> moment1_out{ + moment1_out_, static_cast(numel)}; + Eigen::Map> moment2_out{ + moment2_out_, static_cast(numel)}; + + T lr = *lr_; + T beta1_pow = *beta1_pow_; + T beta2_pow = *beta2_pow_; + + // Calculation + lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); + + moment1_out = beta1_ * mom1 + (1 - beta1_) * g; + moment2_out = beta2_ * mom2 + (1 - beta2_) * g * g; + param_out = param - lr * (moment1_out / (moment2_out.sqrt() + epsilon_)); + } +}; + template struct SparseAdamFunctor { T beta1_; @@ -134,6 +207,7 @@ struct SparseAdamFunctor { T p = param_[rows_[i] * row_numel_ + j]; lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); + mom1 = beta1_ * mom1 + (1 - beta1_) * g; mom2 = beta2_ * mom2 + (1 - beta2_) * g * g; p -= lr * (mom1 / (sqrt(mom2) + epsilon_)); @@ -177,19 +251,34 @@ class AdamOpKernel : public framework::OpKernel { if (grad_var->IsType()) { auto& grad = Ref(ctx.Input("Grad"), "Must set Grad"); - AdamFunctor functor( - beta1, beta2, epsilon, beta1_pow.template data(), - beta2_pow.template data(), mom1.template data(), - mom1_out.template mutable_data(ctx.GetPlace()), - mom2.template data(), - mom2_out.template 
mutable_data(ctx.GetPlace()), - lr.template data(), grad.template data(), - param.template data(), - param_out.template mutable_data(ctx.GetPlace())); - platform::ForRange for_range( - static_cast(ctx.device_context()), - param.numel()); - for_range(functor); + + if (platform::is_cpu_place(ctx.GetPlace())) { + AdamFunctor functor( + beta1, beta2, epsilon, beta1_pow.template data(), + beta2_pow.template data(), mom1.template data(), + mom1_out.template mutable_data(ctx.GetPlace()), + mom2.template data(), + mom2_out.template mutable_data(ctx.GetPlace()), + lr.template data(), grad.template data(), + param.template data(), + param_out.template mutable_data(ctx.GetPlace())); + functor(param.numel()); + } else if (platform::is_gpu_place(ctx.GetPlace())) { + AdamFunctor functor( + beta1, beta2, epsilon, beta1_pow.template data(), + beta2_pow.template data(), mom1.template data(), + mom1_out.template mutable_data(ctx.GetPlace()), + mom2.template data(), + mom2_out.template mutable_data(ctx.GetPlace()), + lr.template data(), grad.template data(), + param.template data(), + param_out.template mutable_data(ctx.GetPlace())); + + platform::ForRange for_range( + static_cast(ctx.device_context()), + param.numel()); + for_range(functor); + } } else if (grad_var->IsType()) { auto& grad = Ref(ctx.Input("Grad"), "Must set Grad"); diff --git a/paddle/fluid/operators/beam_search_op.cc b/paddle/fluid/operators/beam_search_op.cc index fdab4e92f47c7c8f241d93268a73dcb8c2eb2dc6..cff097cca13f3b92c7efe4b69259fdf7c75b3760 100644 --- a/paddle/fluid/operators/beam_search_op.cc +++ b/paddle/fluid/operators/beam_search_op.cc @@ -195,10 +195,9 @@ std::string ItemToString(const BeamSearch::Item &item) { return stream.str(); } -class BeamSearchProtoAndCheckerMaker - : public framework::OpProtoAndCheckerMaker { +class BeamSearchOpMaker : public framework::OpProtoAndCheckerMaker { public: - BeamSearchProtoAndCheckerMaker(OpProto *proto, OpAttrChecker *op_checker) + BeamSearchOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { // inputs and outputs stored in proto AddInput("pre_ids", "ids in previous step"); @@ -222,20 +221,32 @@ class BeamSearchProtoAndCheckerMaker } }; -class BeamSearchInferShape : public framework::InferShapeBase { +class BeamSearchOp : public framework::OperatorWithKernel { public: - void operator()(framework::InferShapeContext *context) const override { + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext *ctx) const override { for (const std::string &arg : std::vector({"pre_ids", "ids", "scores"})) { - PADDLE_ENFORCE(context->HasInput(arg), - "BeamSearch need input argument '%s'", arg); + PADDLE_ENFORCE(ctx->HasInput(arg), "BeamSearch need input argument '%s'", + arg); } for (const std::string &arg : std::vector({"selected_ids", "selected_scores"})) { - PADDLE_ENFORCE(context->HasOutput(arg), + PADDLE_ENFORCE(ctx->HasOutput(arg), "BeamSearch need output argument '%s'", arg); } } + + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + framework::OpKernelType kt = framework::OpKernelType( + framework::ToDataType( + ctx.Input("pre_ids")->type()), + platform::CPUPlace()); + return kt; + } }; class BeamSearchInferVarType : public framework::VarTypeInference { @@ -254,8 +265,13 @@ class BeamSearchInferVarType : public framework::VarTypeInference { } // namespace operators } // namespace paddle -REGISTER_OPERATOR(beam_search, 
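The kernel above now dispatches on the execution place: the CPUAdam functor is invoked once over the whole parameter buffer, while the GPUAdam functor is driven element by element through platform::ForRange. A minimal tag-dispatch sketch of that pattern, with placeholder tags and bodies standing in for the real functors:

#include <cstddef>
#include <cstdio>

struct CPUAdam {};
struct GPUAdam {};

// Primary template declared only; each device flavour gets a specialization.
template <typename T, typename Flavour>
struct UpdateFunctor;

template <typename T>
struct UpdateFunctor<T, CPUAdam> {
  // CPU flavour: one call processes the whole range (vectorised internally).
  void operator()(std::size_t numel) const {
    std::printf("CPU update over %zu elements\n", numel);
  }
};

template <typename T>
struct UpdateFunctor<T, GPUAdam> {
  // GPU flavour: invoked once per element by a ForRange-style driver.
  void operator()(std::size_t /*i*/) const { /* update element i */ }
};

int main() {
  bool on_cpu = true;  // stands in for platform::is_cpu_place(place)
  if (on_cpu) {
    UpdateFunctor<float, CPUAdam> f;
    f(1024);
  } else {
    UpdateFunctor<float, GPUAdam> f;
    for (std::size_t i = 0; i < 1024; ++i) f(i);  // ForRange stand-in
  }
  return 0;
}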
paddle::operators::BeamSearchOp, - paddle::operators::BeamSearchProtoAndCheckerMaker, - paddle::operators::BeamSearchInferShape, - paddle::operators::BeamSearchInferVarType, - paddle::framework::EmptyGradOpMaker); +namespace ops = paddle::operators; + +REGISTER_OPERATOR(beam_search, ops::BeamSearchOp, ops::BeamSearchOpMaker, + ops::BeamSearchInferVarType); +REGISTER_OP_CPU_KERNEL( + beam_search, + ops::BeamSearchOpKernel, + ops::BeamSearchOpKernel, + ops::BeamSearchOpKernel, + ops::BeamSearchOpKernel); diff --git a/paddle/fluid/operators/beam_search_op.h b/paddle/fluid/operators/beam_search_op.h index 0a481a85ce6fbb582b8c0e12710455aaaac72aa1..9b51db8a45186c2a90cf8b2eb7966d0aaea04028 100644 --- a/paddle/fluid/operators/beam_search_op.h +++ b/paddle/fluid/operators/beam_search_op.h @@ -192,49 +192,29 @@ std::ostream& operator<<(std::ostream& os, const BeamSearch::Item& item); std::string ItemToString(const BeamSearch::Item& item); -class BeamSearchOp : public framework::OperatorBase { +template +class BeamSearchOpKernel : public framework::OpKernel { public: - BeamSearchOp(const std::string& type, - const framework::VariableNameMap& inputs, - const framework::VariableNameMap& outputs, - const framework::AttributeMap& attrs) - : OperatorBase(type, inputs, outputs, attrs) {} - - BeamSearchOp(const BeamSearchOp& o) - : framework::OperatorBase( - static_cast(o)) { - PADDLE_THROW("Not Implemented"); - } - - private: - void RunImpl(const framework::Scope& scope, - const platform::Place& dev_place) const override { - auto ids_var = scope.FindVar(Input("ids")); - auto scores_var = scope.FindVar(Input("scores")); - auto pre_ids_var = scope.FindVar(Input("pre_ids")); + void Compute(const framework::ExecutionContext& context) const override { + auto* ids_var = context.Input("ids"); + auto* scores_var = context.Input("scores"); + auto* pre_ids_var = context.Input("pre_ids"); PADDLE_ENFORCE_NOT_NULL(ids_var); PADDLE_ENFORCE_NOT_NULL(scores_var); PADDLE_ENFORCE_NOT_NULL(pre_ids_var); - auto& ids = ids_var->Get(); - auto& scores = scores_var->Get(); - auto& pre_ids = pre_ids_var->Get(); - size_t level = Attr("level"); - size_t beam_size = Attr("beam_size"); - int end_id = Attr("end_id"); - BeamSearch alg(ids, scores, level, beam_size, end_id); - - auto selected_ids_var = scope.FindVar(Output("selected_ids")); - auto selected_scores_var = scope.FindVar(Output("selected_scores")); + size_t level = context.Attr("level"); + size_t beam_size = context.Attr("beam_size"); + int end_id = context.Attr("end_id"); + BeamSearch alg(*ids_var, *scores_var, level, beam_size, end_id); + auto selected_ids_var = + context.Output("selected_ids"); + auto selected_scores_var = + context.Output("selected_scores"); PADDLE_ENFORCE_NOT_NULL(selected_ids_var); PADDLE_ENFORCE_NOT_NULL(selected_scores_var); - auto& selected_ids_tensor = - *selected_ids_var->GetMutable(); - auto& selected_scores_tensor = - *selected_scores_var->GetMutable(); - alg(pre_ids, &selected_ids_tensor, &selected_scores_tensor); + alg(*pre_ids_var, selected_ids_var, selected_scores_var); } }; - } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/bilinear_interp_op.cu b/paddle/fluid/operators/bilinear_interp_op.cu index 82eb9e83bd84e6ec6881facbb2fac0aebce93d55..510190f1aaf02960284216a1bedd409011088499 100644 --- a/paddle/fluid/operators/bilinear_interp_op.cu +++ b/paddle/fluid/operators/bilinear_interp_op.cu @@ -10,7 +10,7 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/bilinear_interp_op.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/box_coder_op.cu b/paddle/fluid/operators/box_coder_op.cu index 0944e9c95d4a66cc4a51751a8c70cd7a3fefaf1a..708c7a5fa96c2f9ce6a2d913ca5f30126bb6192f 100644 --- a/paddle/fluid/operators/box_coder_op.cu +++ b/paddle/fluid/operators/box_coder_op.cu @@ -10,7 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/box_coder_op.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/concat_op.h b/paddle/fluid/operators/concat_op.h index 92c8ab6d9ff11ec6acd46a39877eb67d624748a9..1b1b8bf5ed959dd9c2ce8c9f5c905a75b81865fd 100644 --- a/paddle/fluid/operators/concat_op.h +++ b/paddle/fluid/operators/concat_op.h @@ -87,7 +87,7 @@ class ConcatGradKernel : public framework::OpKernel { auto& dev_ctx = ctx.template device_context(); paddle::operators::math::ConcatGradFunctor concat_grad_functor; - concat_grad_functor(dev_ctx, *in, static_cast(axis), outputs); + concat_grad_functor(dev_ctx, *in, static_cast(axis), &outputs); } } }; diff --git a/paddle/fluid/operators/concurrency/channel_util.cc b/paddle/fluid/operators/concurrency/channel_util.cc index 246c99489c45efec16babb1d3980606318236605..fba4abf1897bceea615222b2438700085ed8e551 100644 --- a/paddle/fluid/operators/concurrency/channel_util.cc +++ b/paddle/fluid/operators/concurrency/channel_util.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "channel_util.h" +#include "paddle/fluid/operators/concurrency/channel_util.h" #include "paddle/fluid/framework/var_type.h" namespace poc = paddle::operators::concurrency; diff --git a/paddle/fluid/operators/conv_cudnn_op.cu.cc b/paddle/fluid/operators/conv_cudnn_op.cu.cc index c70e3cc3c9198008d9eca5f462000aa67ff7e5ba..cf410c3ca16955620610634b99ee9111106ef99f 100644 --- a/paddle/fluid/operators/conv_cudnn_op.cu.cc +++ b/paddle/fluid/operators/conv_cudnn_op.cu.cc @@ -20,6 +20,11 @@ limitations under the License. */ #include "paddle/fluid/platform/cudnn_helper.h" #include "paddle/fluid/platform/float16.h" +DEFINE_bool(cudnn_algo_use_autotune, true, + "Whether allow using an autotuning algorithm for convolution " + "operator. The autotuning algorithm may be non-deterministic. If " + "false, the algorithm is deterministic."); + namespace paddle { namespace operators { @@ -267,17 +272,23 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { auto& dev_ctx = ctx.template device_context(); auto handle = dev_ctx.cudnn_handle(); if (input_grad) { - PADDLE_ENFORCE( - platform::dynload::cudnnGetConvolutionBackwardDataAlgorithm( - handle, cudnn_filter_desc, - // dyDesc: Handle to the previously initialized input differential - // tensor descriptor. - cudnn_output_grad_desc, cudnn_conv_desc, - // dxDesc: Handle to the previously initialized output tensor - // descriptor. 
- cudnn_input_desc, - CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT, - workspace_size_limit, &data_algo)); + if (FLAGS_cudnn_algo_use_autotune) { + PADDLE_ENFORCE( + platform::dynload::cudnnGetConvolutionBackwardDataAlgorithm( + handle, cudnn_filter_desc, + // dyDesc: Handle to the previously initialized input + // differential + // tensor descriptor. + cudnn_output_grad_desc, cudnn_conv_desc, + // dxDesc: Handle to the previously initialized output tensor + // descriptor. + cudnn_input_desc, + CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT, + workspace_size_limit, &data_algo)); + } else { + data_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; + } + PADDLE_ENFORCE( platform::dynload::cudnnGetConvolutionBackwardDataWorkspaceSize( handle, cudnn_filter_desc, cudnn_output_grad_desc, @@ -286,12 +297,16 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { } if (filter_grad) { - PADDLE_ENFORCE( - platform::dynload::cudnnGetConvolutionBackwardFilterAlgorithm( - handle, cudnn_input_desc, cudnn_output_grad_desc, cudnn_conv_desc, - cudnn_filter_desc, - CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT, - workspace_size_limit, &filter_algo)); + if (FLAGS_cudnn_algo_use_autotune) { + PADDLE_ENFORCE( + platform::dynload::cudnnGetConvolutionBackwardFilterAlgorithm( + handle, cudnn_input_desc, cudnn_output_grad_desc, + cudnn_conv_desc, cudnn_filter_desc, + CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT, + workspace_size_limit, &filter_algo)); + } else { + filter_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; + } PADDLE_ENFORCE( platform::dynload::cudnnGetConvolutionBackwardFilterWorkspaceSize( diff --git a/paddle/fluid/operators/conv_shift_op.cu b/paddle/fluid/operators/conv_shift_op.cu index 344bbade7055aa8e0aede61dd31dab246bddd169..314d33310588ed960eecaf1a0319ebf56d925c55 100644 --- a/paddle/fluid/operators/conv_shift_op.cu +++ b/paddle/fluid/operators/conv_shift_op.cu @@ -14,7 +14,7 @@ limitations under the License. 
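The conv_cudnn_op change above introduces a gflags switch, cudnn_algo_use_autotune, and pins a fixed backward algorithm when it is disabled so results stay deterministic. A small standalone sketch of such a flag-controlled choice; the enum values here are illustrative stand-ins rather than the cuDNN constants:

#include <cstdio>
#include <gflags/gflags.h>

// Same shape as the flag added in conv_cudnn_op.cu.cc: when false, skip the
// (possibly non-deterministic) autotuner and pin a fixed algorithm.
DEFINE_bool(use_autotune, true,
            "Whether to let the library pick the convolution algorithm.");

enum class ConvAlgo { kAutotuned, kFixedAlgo1 };  // illustrative stand-ins

ConvAlgo ChooseAlgo() {
  if (FLAGS_use_autotune) {
    // In the real kernel this is where cudnnGetConvolutionBackward*Algorithm
    // would be queried; here we only report the choice.
    return ConvAlgo::kAutotuned;
  }
  return ConvAlgo::kFixedAlgo1;  // deterministic fallback
}

int main(int argc, char* argv[]) {
  gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
  std::printf("autotune: %s\n",
              ChooseAlgo() == ConvAlgo::kAutotuned ? "yes" : "no (fixed algo 1)");
  return 0;
}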
*/ #include "paddle/fluid/operators/conv_shift_op.h" #include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc index 8cee46cbb2d6a1002864916e250fb7ab30f91430..95f4738b4ff50852d9591719133ca650533bf848 100644 --- a/paddle/fluid/operators/detail/grpc_server.cc +++ b/paddle/fluid/operators/detail/grpc_server.cc @@ -30,9 +30,13 @@ enum CallStatus { PROCESS = 0, FINISH }; class RequestBase { public: explicit RequestBase(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, + ::grpc::ServerCompletionQueue* cq, bool sync_mode, const platform::DeviceContext* dev_ctx) - : service_(service), cq_(cq), status_(PROCESS), dev_ctx_(dev_ctx) { + : service_(service), + cq_(cq), + sync_mode_(sync_mode), + status_(PROCESS), + dev_ctx_(dev_ctx) { PADDLE_ENFORCE(cq_); } virtual ~RequestBase() {} @@ -49,6 +53,7 @@ class RequestBase { ::grpc::ServerContext ctx_; GrpcService::AsyncService* service_; ::grpc::ServerCompletionQueue* cq_; + const bool sync_mode_; CallStatus status_; const platform::DeviceContext* dev_ctx_; }; @@ -56,11 +61,17 @@ class RequestBase { class RequestSend final : public RequestBase { public: explicit RequestSend(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, + ::grpc::ServerCompletionQueue* cq, bool sync_mode, framework::Scope* scope, ReceivedQueue* queue, const platform::DeviceContext* dev_ctx) - : RequestBase(service, cq, dev_ctx), queue_(queue), responder_(&ctx_) { - request_.reset(new VariableResponse(scope, dev_ctx_)); + : RequestBase(service, cq, sync_mode, dev_ctx), + queue_(queue), + responder_(&ctx_) { + if (sync_mode_) { + request_.reset(new VariableResponse(scope, dev_ctx_, false)); + } else { + request_.reset(new VariableResponse(scope, dev_ctx_, true)); + } int method_id = static_cast(detail::GrpcMethod::kSendVariable); service_->RequestAsyncUnary(method_id, &ctx_, request_.get(), &responder_, cq_, cq_, this); @@ -87,11 +98,11 @@ class RequestSend final : public RequestBase { class RequestGet final : public RequestBase { public: explicit RequestGet(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, + ::grpc::ServerCompletionQueue* cq, bool sync_mode, framework::Scope* scope, const platform::DeviceContext* dev_ctx, framework::BlockingQueue* queue) - : RequestBase(service, cq, dev_ctx), + : RequestBase(service, cq, sync_mode, dev_ctx), responder_(&ctx_), scope_(scope), queue_(queue) { @@ -134,19 +145,23 @@ class RequestGet final : public RequestBase { class RequestPrefetch final : public RequestBase { public: explicit RequestPrefetch(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, + ::grpc::ServerCompletionQueue* cq, bool sync_mode, framework::Scope* scope, const platform::DeviceContext* dev_ctx, framework::Executor* executor, framework::ProgramDesc* program, framework::ExecutorPrepareContext* prefetch_ctx) - : RequestBase(service, cq, dev_ctx), + : RequestBase(service, cq, sync_mode, dev_ctx), responder_(&ctx_), scope_(scope), executor_(executor), program_(program), prefetch_ctx_(prefetch_ctx) { - request_.reset(new VariableResponse(scope, dev_ctx_)); + if (sync_mode_) { + request_.reset(new VariableResponse(scope, dev_ctx_, false)); + } else { + request_.reset(new VariableResponse(scope, dev_ctx_, true)); + } int method_id = 
static_cast(detail::GrpcMethod::kPrefetchVariable); service_->RequestAsyncUnary(method_id, &ctx_, request_.get(), &responder_, cq_, cq_, this); @@ -181,7 +196,6 @@ class RequestPrefetch final : public RequestBase { framework::Executor* executor_; framework::ProgramDesc* program_; framework::ExecutorPrepareContext* prefetch_ctx_; - int blkid_; }; void AsyncGRPCServer::WaitClientGet(int count) { @@ -254,8 +268,8 @@ void AsyncGRPCServer::TryToRegisterNewSendOne() { VLOG(3) << "shutdown, do not TryToRegisterNewSendOne"; return; } - RequestSend* send = new RequestSend(&service_, cq_send_.get(), scope_, - &var_recv_queue_, dev_ctx_); + RequestSend* send = new RequestSend(&service_, cq_send_.get(), sync_mode_, + scope_, &var_recv_queue_, dev_ctx_); VLOG(4) << "Create RequestSend status:" << send->Status(); } @@ -265,8 +279,8 @@ void AsyncGRPCServer::TryToRegisterNewGetOne() { VLOG(3) << "shutdown, do not TryToRegisterNewGetOne"; return; } - RequestGet* get = new RequestGet(&service_, cq_get_.get(), scope_, dev_ctx_, - &var_get_queue_); + RequestGet* get = new RequestGet(&service_, cq_get_.get(), sync_mode_, scope_, + dev_ctx_, &var_get_queue_); VLOG(4) << "Create RequestGet status:" << get->Status(); } @@ -277,8 +291,8 @@ void AsyncGRPCServer::TryToRegisterNewPrefetchOne() { return; } RequestPrefetch* prefetch = - new RequestPrefetch(&service_, cq_prefetch_.get(), scope_, dev_ctx_, - executor_, program_, prefetch_ctx_); + new RequestPrefetch(&service_, cq_prefetch_.get(), sync_mode_, scope_, + dev_ctx_, executor_, program_, prefetch_ctx_); VLOG(4) << "Create RequestPrefetch status:" << prefetch->Status(); } @@ -301,9 +315,11 @@ void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq, VLOG(3) << "HandleRequest for " << cq_name << " while after Next"; PADDLE_ENFORCE(tag); - // FIXME(typhoonzero): de-couple the barriers with recv_op - if (!is_shut_down_ && cq_name == "cq_get") WaitCond(1); - if (!is_shut_down_ && cq_name == "cq_send") WaitCond(0); + if (sync_mode_) { + // FIXME(typhoonzero): de-couple the barriers with recv_op + if (!is_shut_down_ && cq_name == "cq_get") WaitCond(1); + if (!is_shut_down_ && cq_name == "cq_send") WaitCond(0); + } RequestBase* base = reinterpret_cast(tag); // reference: @@ -320,13 +336,13 @@ void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq, switch (base->Status()) { case PROCESS: { - VLOG(4) << cq_name << " status:" << base->Status(); + VLOG(4) << cq_name << " PROCESS status:" << base->Status(); TryToRegisterNewOne(); base->Process(); break; } case FINISH: { - VLOG(4) << cq_name << " status:" << base->Status(); + VLOG(4) << cq_name << " FINISH status:" << base->Status(); delete base; break; } diff --git a/paddle/fluid/operators/detail/grpc_server.h b/paddle/fluid/operators/detail/grpc_server.h index a15c93b7830265a2bb22334b5bb5a0f8ee2f28f4..99b87b8c6cb3e597778b88c395e4abf400d82c39 100644 --- a/paddle/fluid/operators/detail/grpc_server.h +++ b/paddle/fluid/operators/detail/grpc_server.h @@ -44,7 +44,8 @@ class RequestBase; class AsyncGRPCServer final { public: - explicit AsyncGRPCServer(const std::string &address) : address_(address) {} + explicit AsyncGRPCServer(const std::string &address, bool sync_mode) + : address_(address), sync_mode_(sync_mode) {} void RunSyncUpdate(); @@ -95,6 +96,7 @@ class AsyncGRPCServer final { std::unique_ptr<::grpc::Server> server_; std::string address_; + const bool sync_mode_; framework::Scope *scope_; const platform::DeviceContext *dev_ctx_; diff --git 
a/paddle/fluid/operators/detail/grpc_server_test.cc b/paddle/fluid/operators/detail/grpc_server_test.cc index c51933718f4ca78e87c77e007c485642000d247d..25b95d608d10d6e456d5f563ce9fbe35d812cb0f 100644 --- a/paddle/fluid/operators/detail/grpc_server_test.cc +++ b/paddle/fluid/operators/detail/grpc_server_test.cc @@ -89,7 +89,7 @@ void InitTensorsOnServer(framework::Scope* scope, platform::CPUPlace* place, } void StartServer(const std::string& endpoint) { - rpc_service_.reset(new detail::AsyncGRPCServer(endpoint)); + rpc_service_.reset(new detail::AsyncGRPCServer(endpoint, true)); framework::ProgramDesc program; framework::Scope scope; platform::CPUPlace place; diff --git a/paddle/fluid/operators/detail/variable_response.h b/paddle/fluid/operators/detail/variable_response.h index 3018a5c4af876828380ff4c1cbfdaafa8a2057e1..bf624da2a6c26472e47711b3c6409f78afba0a64 100644 --- a/paddle/fluid/operators/detail/variable_response.h +++ b/paddle/fluid/operators/detail/variable_response.h @@ -46,7 +46,9 @@ class VariableResponse { } virtual ~VariableResponse() { - if (create_scope_) scope_->DeleteScope(local_scope_); + if (create_scope_) { + scope_->DeleteScope(local_scope_); + } } // return: @@ -63,6 +65,8 @@ class VariableResponse { const framework::Scope& GetLocalScope() const { return *local_scope_; } + framework::Scope* GetMutableLocalScope() const { return local_scope_; } + inline std::string Varname() { return meta_.varname(); } inline std::string OutVarname() { return meta_.out_varname(); } diff --git a/paddle/fluid/operators/edit_distance_op.cu b/paddle/fluid/operators/edit_distance_op.cu index 913a9145420dae7c4f6a4df10c0330636b5796b0..c25b7d2f9ec32bcef44db239de43feefd855bfe5 100644 --- a/paddle/fluid/operators/edit_distance_op.cu +++ b/paddle/fluid/operators/edit_distance_op.cu @@ -16,7 +16,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/edit_distance_op.h" #include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/gpu_info.h" namespace paddle { diff --git a/paddle/fluid/operators/elementwise_op_function.h b/paddle/fluid/operators/elementwise_op_function.h index f0362ec606c994d69f31c7a2e1e9ad0d0108b621..953aedc85064ee803ab02afd427a5a6f22096f94 100644 --- a/paddle/fluid/operators/elementwise_op_function.h +++ b/paddle/fluid/operators/elementwise_op_function.h @@ -22,6 +22,7 @@ limitations under the License. */ #ifdef __NVCC__ #include #include +#include "paddle/fluid/platform/cuda_primitives.h" constexpr int ELEMWISE_MAX_BLOCK_DIM = 1024; #endif @@ -333,24 +334,12 @@ static void ElemwiseGradBroadcast1CPU(const T* x, const T* y, const T* out, } } } -#ifdef __NVCC__ -// __shfl_down has been deprecated as of CUDA 9.0. -#if CUDA_VERSION < 9000 -template -__forceinline__ __device__ T __shfl_down_sync(unsigned, T val, int delta) { - return __shfl_down(val, delta); -} -#define CREATE_SHFL_MASK(mask, predicate) mask = 0u; -#else -#define FULL_WARP_MASK 0xFFFFFFFF -#define CREATE_SHFL_MASK(mask, predicate) \ - mask = __ballot_sync(FULL_WARP_MASK, (predicate)) -#endif +#ifdef __NVCC__ template __device__ T reduceSum(T val, int tid, int len) { - // TODO(zcd): The warp size should be taken from the + // NOTE(zcd): The warp size should be taken from the // parameters of the GPU but not specified as 32 simply. 
// To make the reduceSum more efficiently, // I use Warp-Level Parallelism and assume the Warp size @@ -362,7 +351,7 @@ __device__ T reduceSum(T val, int tid, int len) { CREATE_SHFL_MASK(mask, tid < len); for (int offset = warpSize / 2; offset > 0; offset /= 2) - val += __shfl_down_sync(mask, val, offset); + val += platform::__shfl_down_sync(mask, val, offset); if (tid < warpSize) shm[tid] = 0; @@ -378,7 +367,7 @@ __device__ T reduceSum(T val, int tid, int len) { if (tid < warpSize) { val = shm[tid]; for (int offset = warpSize / 2; offset > 0; offset /= 2) - val += __shfl_down_sync(mask, val, offset); + val += platform::__shfl_down_sync(mask, val, offset); } return val; diff --git a/paddle/fluid/operators/fetch_op.cc b/paddle/fluid/operators/fetch_op.cc index 7c7f3e9059fbb1e3f2cca4f04edfff55c9452761..18deec58137676a0b2c8d559e49d0f7a840cd5ba 100644 --- a/paddle/fluid/operators/fetch_op.cc +++ b/paddle/fluid/operators/fetch_op.cc @@ -57,10 +57,7 @@ class FetchOp : public framework::OperatorBase { // FIXME(yuyang18): Should we assume the fetch operator always generate // CPU outputs? - auto &dev_ctx = *pool.Get(src_item.place()); - - TensorCopy(src_item, platform::CPUPlace(), dev_ctx, &dst_item); - dev_ctx.Wait(); + TensorCopySync(src_item, platform::CPUPlace(), &dst_item); dst_item.set_lod(src_item.lod()); VLOG(3) << "Fetch variable " << fetch_var_name << " to " << out_name; diff --git a/paddle/fluid/operators/gru_op.h b/paddle/fluid/operators/gru_op.h index 53f844a6607bd2e98c53b53c23422f6b48e2ced6..3b0d93e54b72910de1429ddf41eb6b0fe9646942 100644 --- a/paddle/fluid/operators/gru_op.h +++ b/paddle/fluid/operators/gru_op.h @@ -34,7 +34,7 @@ inline void ReorderInitState(const DeviceContext& ctx, framework::Tensor* dst, bool indexed_src) { math::CopyMatrixRowsFunctor row_shuffle; dst->mutable_data(src.dims(), ctx.GetPlace()); - row_shuffle(ctx, src, index_lod, *dst, indexed_src); + row_shuffle(ctx, src, index_lod, dst, indexed_src); } template @@ -61,7 +61,7 @@ class GRUKernel : public framework::OpKernel { bool is_reverse = context.Attr("is_reverse"); math::LoDTensor2BatchFunctor to_batch; auto& dev_ctx = context.template device_context(); - to_batch(dev_ctx, *input, *batch_gate, true, is_reverse); + to_batch(dev_ctx, *input, batch_gate, true, is_reverse); if (bias) { math::RowwiseAdd add_bias; @@ -113,7 +113,7 @@ class GRUKernel : public framework::OpKernel { math::Batch2LoDTensorFunctor to_seq; batch_hidden->set_lod(batch_gate->lod()); - to_seq(dev_ctx, *batch_hidden, *hidden); + to_seq(dev_ctx, *batch_hidden, hidden); } void Compute(const framework::ExecutionContext& context) const override { @@ -174,7 +174,7 @@ class GRUGradKernel : public framework::OpKernel { bool is_reverse = context.Attr("is_reverse"); batch_hidden_grad.set_lod(batch_hidden->lod()); - to_batch(dev_ctx, *hidden_grad, batch_hidden_grad, false, is_reverse); + to_batch(dev_ctx, *hidden_grad, &batch_hidden_grad, false, is_reverse); math::GRUMetaValue gru_value; gru_value.gate_weight = const_cast(weight_data); @@ -236,7 +236,7 @@ class GRUGradKernel : public framework::OpKernel { input_grad->mutable_data(context.GetPlace()); math::Batch2LoDTensorFunctor to_seq; batch_gate_grad.set_lod(batch_gate->lod()); - to_seq(dev_ctx, batch_gate_grad, *input_grad); + to_seq(dev_ctx, batch_gate_grad, input_grad); } if (bias_grad) { bias_grad->mutable_data(context.GetPlace()); diff --git a/paddle/fluid/operators/iou_similarity_op.h b/paddle/fluid/operators/iou_similarity_op.h index 
c76448c736847a536be2021a8a5fef23bef23a50..9f193ebc59b7be44b987db7d068c209ef7f5a8da 100644 --- a/paddle/fluid/operators/iou_similarity_op.h +++ b/paddle/fluid/operators/iou_similarity_op.h @@ -41,22 +41,24 @@ struct IOUSimilarityFunctor { IOUSimilarityFunctor(const T* x, const T* y, T* z, int cols) : x_(x), y_(y), z_(z), cols_(static_cast(cols)) {} - inline HOSTDEVICE void operator()(size_t row_id) const { + inline HOSTDEVICE void operator()(size_t tid) const { + size_t row_id = tid / cols_; + size_t col_id = tid % cols_; + T x_min1 = x_[row_id * 4]; T y_min1 = x_[row_id * 4 + 1]; T x_max1 = x_[row_id * 4 + 2]; T y_max1 = x_[row_id * 4 + 3]; - for (size_t i = 0; i < cols_; ++i) { - T x_min2 = y_[i * 4]; - T y_min2 = y_[i * 4 + 1]; - T x_max2 = y_[i * 4 + 2]; - T y_max2 = y_[i * 4 + 3]; - T sim = IOUSimilarity(x_min1, y_min1, x_max1, y_max1, x_min2, y_min2, - x_max2, y_max2); + T x_min2 = y_[col_id * 4]; + T y_min2 = y_[col_id * 4 + 1]; + T x_max2 = y_[col_id * 4 + 2]; + T y_max2 = y_[col_id * 4 + 3]; + + T sim = IOUSimilarity(x_min1, y_min1, x_max1, y_max1, x_min2, y_min2, + x_max2, y_max2); - z_[row_id * cols_ + i] = sim; - } + z_[row_id * cols_ + col_id] = sim; } const T* x_; const T* y_; @@ -81,7 +83,7 @@ class IOUSimilarityKernel : public framework::OpKernel { out->mutable_data(ctx.GetPlace()), y_n); platform::ForRange for_range( - static_cast(ctx.device_context()), x_n); + static_cast(ctx.device_context()), x_n * y_n); for_range(functor); } }; // namespace operators diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index af235fb6a029a71ee275bebfbbd75aaa0b7d546d..57cff680ab89f2df7e71af4056ee06cdf330bbab 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -27,6 +27,38 @@ void RunServer(std::shared_ptr service) { VLOG(4) << "RunServer thread end"; } +static void split(const std::string &str, char sep, + std::vector *pieces) { + pieces->clear(); + if (str.empty()) { + return; + } + size_t pos = 0; + size_t next = str.find(sep, pos); + while (next != std::string::npos) { + pieces->push_back(str.substr(pos, next - pos)); + pos = next + 1; + next = str.find(sep, pos); + } + if (!str.substr(pos).empty()) { + pieces->push_back(str.substr(pos)); + } +} + +static void AsyncExecuteBlock(framework::Executor *executor, + framework::ExecutorPrepareContext *prepared, + framework::Scope *scope) { + std::future future = framework::Async([&executor, &prepared, &scope]() { + try { + executor->RunPreparedContext(prepared, scope, false, false); + } catch (std::exception &e) { + LOG(ERROR) << "run sub program error " << e.what(); + } + }); + // TODO(qiao) maybe we can remove this + future.wait(); +} + static void ParallelExecuteBlocks( const std::vector ¶llel_blkids, framework::Executor *executor, const std::vector> @@ -169,15 +201,82 @@ void ListenAndServOp::RunSyncLoop(framework::Executor *executor, } // while(true) } +void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, + framework::ProgramDesc *program, + framework::Scope *recv_scope, + framework::BlockDesc *prefetch_block) const { + VLOG(3) << "RunAsyncLoop in"; + // grad name to block id + std::unordered_map grad_to_block_id; + std::unordered_map id_to_grad; + + auto grad_to_block_id_str = + Attr>("grad_to_block_id"); + for (auto &grad_and_id : grad_to_block_id_str) { + std::vector pieces; + split(grad_and_id, ':', &pieces); + VLOG(3) << "after split, grad = " << pieces[0] << ", id=" << pieces[1]; + 
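The iou_similarity_op hunk above flattens the nested box/prior loop into a single index, so ForRange can launch one work item per output element (for_range over x_n * y_n) instead of one per row. A tiny sketch of that index arithmetic, with the IOU formula itself omitted:

#include <cstdio>

int main() {
  const int rows = 3;  // number of boxes in X
  const int cols = 4;  // number of boxes in Y
  // One "thread id" per output element, as in for_range(x_n * y_n).
  for (int tid = 0; tid < rows * cols; ++tid) {
    int row_id = tid / cols;  // which X box
    int col_id = tid % cols;  // which Y box
    std::printf("tid %2d -> z[%d][%d]\n", tid, row_id, col_id);
  }
  return 0;
}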
PADDLE_ENFORCE_EQ(pieces.size(), 2); + PADDLE_ENFORCE_EQ(grad_to_block_id.count(pieces[0]), 0); + int block_id = std::stoi(pieces[1]); + grad_to_block_id[pieces[0]] = block_id; + id_to_grad[block_id] = pieces[0]; + } + size_t num_blocks = program->Size(); + PADDLE_ENFORCE_GE(num_blocks, 2, + "server program should have at least 2 blocks"); + + std::vector block_list; + for (size_t blkid = 1; blkid < num_blocks; ++blkid) { + block_list.push_back(blkid); + } + auto optimize_prepared = executor->Prepare(*program, block_list); + std::unordered_map> + grad_to_prepared_ctx; + for (size_t i = 0; i < block_list.size(); ++i) { + grad_to_prepared_ctx[id_to_grad[block_list[i]]] = optimize_prepared[i]; + } + + VLOG(3) << "RunAsyncLoop into while"; + bool exit_flag = false; + while (!exit_flag) { + const detail::ReceivedMessage v = rpc_service_->Get(); + auto recv_var_name = v.first; + if (recv_var_name == LISTEN_TERMINATE_MESSAGE) { + LOG(INFO) << "received terminate message and exit"; + exit_flag = true; + break; + } else { + VLOG(3) << "received grad: " << recv_var_name; + auto var = v.second->GetVar(); + if (var == nullptr) { + LOG(ERROR) << "Can not find server side var: " << recv_var_name; + PADDLE_THROW("Can not find server side var"); + } + AsyncExecuteBlock(executor, grad_to_prepared_ctx[recv_var_name].get(), + v.second->GetMutableLocalScope()); + } + + if (exit_flag) { + rpc_service_->ShutDown(); + break; + } + } // while(true) +} + void ListenAndServOp::RunImpl(const framework::Scope &scope, const platform::Place &dev_place) const { platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(dev_place); framework::Scope &recv_scope = scope.NewScope(); + bool sync_mode = Attr("sync_mode"); + PADDLE_ENFORCE(!rpc_service_); std::string endpoint = Attr("endpoint"); - rpc_service_.reset(new detail::AsyncGRPCServer(endpoint)); + + rpc_service_.reset(new detail::AsyncGRPCServer(endpoint, sync_mode)); auto *optimize_block = Attr(kOptimizeBlock); auto *prefetch_block = Attr(kPrefetchBlock); @@ -202,7 +301,11 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, sleep(5); // Write to a file of server selected port for python use. SavePort(rpc_service_); - RunSyncLoop(&executor, program, &recv_scope, prefetch_block); + if (sync_mode) { + RunSyncLoop(&executor, program, &recv_scope, prefetch_block); + } else { + RunAsyncLoop(&executor, program, &recv_scope, prefetch_block); + } } class ListenAndServOpMaker : public framework::OpProtoAndCheckerMaker { @@ -221,6 +324,12 @@ from send_op and send back variables to recv_op. 
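RunAsyncLoop builds grad_to_block_id and id_to_grad from attribute strings of the form "param1@GRAD.block0:1". A self-contained sketch of that parsing; for brevity it splits on the trailing ':' with rfind rather than the split() helper defined in the hunk, and the entries are made-up examples in the same format:

#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

int main() {
  // Same format as the grad_to_block_id attribute of listen_and_serv_op.
  std::vector<std::string> entries = {"w1@GRAD.block0:1", "w2@GRAD.block0:2"};
  std::unordered_map<std::string, int> grad_to_block_id;
  std::unordered_map<int, std::string> id_to_grad;

  for (const std::string& entry : entries) {
    std::size_t colon = entry.rfind(':');          // split on the trailing ':'
    std::string grad = entry.substr(0, colon);     // gradient variable name
    int block_id = std::stoi(entry.substr(colon + 1));
    grad_to_block_id[grad] = block_id;
    id_to_grad[block_id] = grad;
  }

  for (const auto& kv : grad_to_block_id) {
    std::cout << kv.first << " -> block " << kv.second << "\n";
  }
  return 0;
}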
"IP address to listen on.") .SetDefault("127.0.0.1:6164") .AddCustomChecker([](const std::string &ip) { return !ip.empty(); }); + AddAttr>( + "grad_to_block_id", + "['param1@GRAD.block0:1', 'param2@GRAD.blockn:2'] " + "a map from grad name to it's optimize block id") + .SetDefault({}); + AddAttr("sync_mode", "if works at sync_mode or not").SetDefault(true); AddAttr(kOptimizeBlock, "BlockID to run on server side."); AddAttr(kPrefetchBlock, diff --git a/paddle/fluid/operators/listen_and_serv_op.h b/paddle/fluid/operators/listen_and_serv_op.h index dfb7c77c8e36d9af79d8b1713d0c0c59c81b1ca6..3cc0f3047733bea94daa310cd39cb0a4f44bef85 100644 --- a/paddle/fluid/operators/listen_and_serv_op.h +++ b/paddle/fluid/operators/listen_and_serv_op.h @@ -46,6 +46,11 @@ class ListenAndServOp : public framework::OperatorBase { framework::Scope* recv_scope, framework::BlockDesc* prefetch_block) const; + void RunAsyncLoop(framework::Executor* executor, + framework::ProgramDesc* program, + framework::Scope* recv_scope, + framework::BlockDesc* prefetch_block) const; + void Stop() override; void RunImpl(const framework::Scope& scope, diff --git a/paddle/fluid/operators/lookup_table_op.cu b/paddle/fluid/operators/lookup_table_op.cu index 6d81fccd2059c511f71d403229e04587e553e93d..77722c50d39003d9342afb04a61ae3aaf6b21100 100644 --- a/paddle/fluid/operators/lookup_table_op.cu +++ b/paddle/fluid/operators/lookup_table_op.cu @@ -16,7 +16,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/lookup_table_op.h" #include "paddle/fluid/platform/assert.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lstm_op.h b/paddle/fluid/operators/lstm_op.h index a1ef0eb278dea7205cd8052bbe006b0ae4e3a466..0707aded8c9aa37d6be92373c274b59b7d6b34b6 100644 --- a/paddle/fluid/operators/lstm_op.h +++ b/paddle/fluid/operators/lstm_op.h @@ -33,7 +33,7 @@ inline void ReorderInitState(const DeviceContext& ctx, framework::Tensor* dst, bool indexed_src) { math::CopyMatrixRowsFunctor row_shuffle; dst->mutable_data(src.dims(), ctx.GetPlace()); - row_shuffle(ctx, src, index_lod, *dst, indexed_src); + row_shuffle(ctx, src, index_lod, dst, indexed_src); } template @@ -57,7 +57,7 @@ class LSTMKernel : public framework::OpKernel { bool is_reverse = ctx.Attr("is_reverse"); math::LoDTensor2BatchFunctor to_batch; auto& device_ctx = ctx.template device_context(); - to_batch(device_ctx, *input, *batch_gate, true, is_reverse); + to_batch(device_ctx, *input, batch_gate, true, is_reverse); auto in_dims = input->dims(); int frame_size = static_cast(in_dims[1] / 4); @@ -161,11 +161,11 @@ class LSTMKernel : public framework::OpKernel { math::Batch2LoDTensorFunctor to_seq; batch_hidden.set_lod(batch_gate->lod()); // restore the output hidden in LoDTensor from the batch hidden - to_seq(device_ctx, batch_hidden, *hidden_out); + to_seq(device_ctx, batch_hidden, hidden_out); batch_cell.set_lod(batch_gate->lod()); // restore the output cell state in LoDTensor from the batch cell - to_seq(device_ctx, batch_cell, *cell_out); + to_seq(device_ctx, batch_cell, cell_out); } }; @@ -257,7 +257,7 @@ class LSTMGradKernel : public framework::OpKernel { const framework::DDim& dims, framework::LoDTensor& dst) { dst.mutable_data(dims, ctx.GetPlace()); dst.set_lod(batch_gate->lod()); - to_batch(ctx, src, dst, false); + to_batch(ctx, src, &dst, false); }; LoDTensor batch_hidden, batch_hidden_g, 
batch_cell; @@ -351,7 +351,7 @@ class LSTMGradKernel : public framework::OpKernel { if (in_g) { /* backward data */ in_g->mutable_data(ctx.GetPlace()); - to_seq(device_ctx, batch_gate_g, *in_g); + to_seq(device_ctx, batch_gate_g, in_g); } if (bias && bias_g) { /* backward bias */ diff --git a/paddle/fluid/operators/lstmp_op.h b/paddle/fluid/operators/lstmp_op.h index 172db548960135fbc1841cf58b73894d4f74d838..628936a3105b95577bef080f05b0bd556b514918 100644 --- a/paddle/fluid/operators/lstmp_op.h +++ b/paddle/fluid/operators/lstmp_op.h @@ -40,7 +40,7 @@ inline void ReorderInitState(const DeviceContext& ctx, framework::Tensor* dst, bool indexed_src) { math::CopyMatrixRowsFunctor row_shuffle; dst->mutable_data(src.dims(), ctx.GetPlace()); - row_shuffle(ctx, src, index, *dst, indexed_src); + row_shuffle(ctx, src, index, dst, indexed_src); } template @@ -81,7 +81,7 @@ class LSTMPKernel : public framework::OpKernel { bool is_reverse = ctx.Attr("is_reverse"); math::LoDTensor2BatchFunctor to_batch; auto& device_ctx = ctx.template device_context(); - to_batch(device_ctx, *input, *batch_gate, true, is_reverse); + to_batch(device_ctx, *input, batch_gate, true, is_reverse); auto in_dims = input->dims(); int frame_size = static_cast(in_dims[1] / 4); @@ -208,11 +208,11 @@ class LSTMPKernel : public framework::OpKernel { math::Batch2LoDTensorFunctor to_seq; batch_proj.set_lod(batch_gate->lod()); // restore the output hidden in LoDTensor from the batch hidden - to_seq(device_ctx, batch_proj, *proj_out); + to_seq(device_ctx, batch_proj, proj_out); batch_cell.set_lod(batch_gate->lod()); // restore the output cell state in LoDTensor from the batch cell - to_seq(device_ctx, batch_cell, *cell_out); + to_seq(device_ctx, batch_cell, cell_out); } }; @@ -332,7 +332,7 @@ class LSTMPGradKernel : public framework::OpKernel { const framework::DDim& dims, framework::LoDTensor& dst) { dst.mutable_data(dims, ctx.GetPlace()); dst.set_lod(batch_gate->lod()); - to_batch(ctx, src, dst, false); + to_batch(ctx, src, &dst, false); }; LoDTensor batch_hidden_g, batch_proj, batch_proj_g, batch_cell; @@ -471,7 +471,7 @@ class LSTMPGradKernel : public framework::OpKernel { if (in_g) { /* backward data */ in_g->mutable_data(ctx.GetPlace()); - to_seq(device_ctx, batch_gate_g, *in_g); + to_seq(device_ctx, batch_gate_g, in_g); } if (bias && bias_g) { /* backward bias */ diff --git a/paddle/fluid/operators/math/concat.cc b/paddle/fluid/operators/math/concat.cc index bfce56f9fdcafa0800c9742b9fae41fd6a572b40..cc69212466b72f3fa82e8f5f58b4f3229dab28ec 100644 --- a/paddle/fluid/operators/math/concat.cc +++ b/paddle/fluid/operators/math/concat.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/math/concat.h" +#include namespace paddle { namespace operators { @@ -70,20 +71,20 @@ class ConcatGradFunctor { public: void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& input, const int axis, - std::vector& outputs) { + std::vector* outputs) { // TODO(zcd): Add input data validity checking - int num = outputs.size(); + int num = outputs->size(); int input_rows = 1; - auto dim_0 = outputs[0].dims(); + auto dim_0 = outputs->at(0).dims(); for (int i = 0; i < axis; ++i) { input_rows *= dim_0[i]; } int input_cols = 0; - std::vector output_cols(outputs.size()); + std::vector output_cols(outputs->size()); for (int i = 0; i < num; ++i) { - int t_cols = outputs[i].numel() / input_rows; + int t_cols = outputs->at(i).numel() / input_rows; input_cols += t_cols; output_cols[i] = t_cols; } @@ -95,7 +96,7 @@ class ConcatGradFunctor { int col_idx = 0; for (int j = 0; j < num; ++j) { int col_len = output_cols[j]; - T* dst_ptr = outputs[j].data() + k * col_len; + T* dst_ptr = outputs->at(j).data() + k * col_len; memory::Copy(cpu_place, dst_ptr, cpu_place, src_ptr + col_idx, sizeof(T) * col_len); col_idx += col_len; diff --git a/paddle/fluid/operators/math/concat.cu b/paddle/fluid/operators/math/concat.cu index c0786757b34195d47c3b1cadc938f0e9fcfd6038..4285d38dcd6a4124543cdd2246c82a8203f5a281 100644 --- a/paddle/fluid/operators/math/concat.cu +++ b/paddle/fluid/operators/math/concat.cu @@ -12,9 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include +#include #include "paddle/fluid/framework/mixed_vector.h" #include "paddle/fluid/operators/math/concat.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { @@ -202,16 +204,16 @@ class ConcatGradFunctor { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const int axis, - std::vector& outputs) { + std::vector* outputs) { // TODO(zcd): Add input data validity checking - int o_num = outputs.size(); + int o_num = outputs->size(); int out_row = 1; - auto dim_0 = outputs[0].dims(); + auto dim_0 = outputs->at(0).dims(); for (int i = 0; i < axis; ++i) { out_row *= dim_0[i]; } - int out_col = outputs[0].numel() / out_row; + int out_col = outputs->at(0).numel() / out_row; int in_col = 0, in_row = out_row; bool sameShape = true; @@ -221,13 +223,13 @@ class ConcatGradFunctor { outputs_cols[0] = 0; for (int i = 0; i < o_num; ++i) { - int t_col = outputs[i].numel() / out_row; + int t_col = outputs->at(i).numel() / out_row; if (sameShape) { if (t_col != out_col) sameShape = false; } in_col += t_col; outputs_cols[i + 1] = in_col; - outputs_ptr[i] = outputs[i].data(); + outputs_ptr[i] = outputs->at(i).data(); } T** dev_out_gpu_data = diff --git a/paddle/fluid/operators/math/concat.h b/paddle/fluid/operators/math/concat.h index c0e983e4aa7abbdd87649f5a3147d2a464993bce..041ce8bf8a2e9528a004c076ead4471a3837c1a6 100644 --- a/paddle/fluid/operators/math/concat.h +++ b/paddle/fluid/operators/math/concat.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #pragma once +#include #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/framework/tensor.h" @@ -56,7 +57,7 @@ template class ConcatGradFunctor { public: void operator()(const DeviceContext& context, const framework::Tensor& input, - const int axis, std::vector& outputs); + const int axis, std::vector* outputs); }; } // namespace math diff --git a/paddle/fluid/operators/math/concat_test.cc b/paddle/fluid/operators/math/concat_test.cc index 1741af8148bb90863f294ba4930006a58b5ddbf9..f0847aafae78f17eb28745bd224d45ec86497030 100644 --- a/paddle/fluid/operators/math/concat_test.cc +++ b/paddle/fluid/operators/math/concat_test.cc @@ -17,17 +17,14 @@ limitations under the License. */ #include #include "paddle/fluid/framework/tensor_util.h" -using namespace paddle::framework; -using namespace paddle::platform; - template void testConcat() { - Tensor input_a_cpu; - Tensor input_b_cpu; - Tensor out_cpu; - Tensor input_a; - Tensor input_b; - Tensor out; + paddle::framework::Tensor input_a_cpu; + paddle::framework::Tensor input_b_cpu; + paddle::framework::Tensor out_cpu; + paddle::framework::Tensor input_a; + paddle::framework::Tensor input_b; + paddle::framework::Tensor out; DeviceContext* context = new DeviceContext(Place()); // DeviceContext context(Place()); @@ -40,18 +37,18 @@ void testConcat() { * output: * out.shape: [5, 3, 4] */ - auto dim_a = make_ddim({2, 3, 4}); - auto dim_b = make_ddim({3, 3, 4}); - auto dim_out = make_ddim({5, 3, 4}); + auto dim_a = paddle::framework::make_ddim({2, 3, 4}); + auto dim_b = paddle::framework::make_ddim({3, 3, 4}); + auto dim_out = paddle::framework::make_ddim({5, 3, 4}); input_a.mutable_data(dim_a, Place()); input_b.mutable_data(dim_b, Place()); out.mutable_data(dim_out, Place()); if (paddle::platform::is_gpu_place(Place())) { - input_a_cpu.mutable_data(dim_a, CPUPlace()); - input_b_cpu.mutable_data(dim_b, CPUPlace()); - out_cpu.mutable_data(dim_out, CPUPlace()); + input_a_cpu.mutable_data(dim_a, paddle::platform::CPUPlace()); + input_b_cpu.mutable_data(dim_b, paddle::platform::CPUPlace()); + out_cpu.mutable_data(dim_out, paddle::platform::CPUPlace()); } int* a_ptr; @@ -72,11 +69,11 @@ void testConcat() { } if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(input_a_cpu, Place(), *context, &input_a); - TensorCopy(input_b_cpu, Place(), *context, &input_b); + paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a); + paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b); } - std::vector input; + std::vector input; input.push_back(input_a); input.push_back(input_b); @@ -89,7 +86,8 @@ void testConcat() { int* out_ptr; if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(out, CPUPlace(), *context, &out_cpu); + paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context, + &out_cpu); out_ptr = out_cpu.data(); } else { out_ptr = out.data(); @@ -115,9 +113,9 @@ void testConcat() { * output: * out.shape: [2, 7, 4] */ - dim_a = make_ddim({2, 3, 4}); - dim_b = make_ddim({2, 4, 4}); - dim_out = make_ddim({2, 7, 4}); + dim_a = paddle::framework::make_ddim({2, 3, 4}); + dim_b = paddle::framework::make_ddim({2, 4, 4}); + dim_out = paddle::framework::make_ddim({2, 7, 4}); input_a.Resize(dim_a); input_b.Resize(dim_b); @@ -144,8 +142,8 @@ void testConcat() { } if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(input_a_cpu, Place(), *context, &input_a); - TensorCopy(input_b_cpu, Place(), *context, &input_b); + paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a); + 
paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b); } input.clear(); @@ -159,7 +157,8 @@ void testConcat() { PADDLE_ENFORCE_EQ(input_b.dims(), dim_b); if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(out, CPUPlace(), *context, &out_cpu); + paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context, + &out_cpu); out_ptr = out_cpu.data(); } else { out_ptr = out.data(); @@ -187,9 +186,9 @@ void testConcat() { * output: * out.shape: [2, 3, 9] */ - dim_a = make_ddim({2, 3, 4}); - dim_b = make_ddim({2, 3, 5}); - dim_out = make_ddim({2, 3, 9}); + dim_a = paddle::framework::make_ddim({2, 3, 4}); + dim_b = paddle::framework::make_ddim({2, 3, 5}); + dim_out = paddle::framework::make_ddim({2, 3, 9}); input_a.Resize(dim_a); input_b.Resize(dim_b); @@ -216,8 +215,8 @@ void testConcat() { } if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(input_a_cpu, Place(), *context, &input_a); - TensorCopy(input_b_cpu, Place(), *context, &input_b); + paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a); + paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b); } input.clear(); @@ -231,7 +230,8 @@ void testConcat() { PADDLE_ENFORCE_EQ(input_b.dims(), dim_b); if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(out, CPUPlace(), *context, &out_cpu); + paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context, + &out_cpu); out_ptr = out_cpu.data(); } else { out_ptr = out.data(); @@ -261,9 +261,9 @@ void testConcat() { * output: * out.shape: [2, 6, 4] */ - dim_a = make_ddim({2, 3, 4}); - dim_b = make_ddim({2, 3, 4}); - dim_out = make_ddim({2, 6, 4}); + dim_a = paddle::framework::make_ddim({2, 3, 4}); + dim_b = paddle::framework::make_ddim({2, 3, 4}); + dim_out = paddle::framework::make_ddim({2, 6, 4}); input_a.Resize(dim_a); input_b.Resize(dim_b); @@ -290,8 +290,8 @@ void testConcat() { } if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(input_a_cpu, Place(), *context, &input_a); - TensorCopy(input_b_cpu, Place(), *context, &input_b); + paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a); + paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b); } input.clear(); @@ -305,7 +305,8 @@ void testConcat() { PADDLE_ENFORCE_EQ(input_b.dims(), dim_b); if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(out, CPUPlace(), *context, &out_cpu); + paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context, + &out_cpu); out_ptr = out_cpu.data(); } else { out_ptr = out.data(); diff --git a/paddle/fluid/operators/math/context_project.h b/paddle/fluid/operators/math/context_project.h index 4da94383af6c927a2db3337be6e82ca95a8cb036..027a019a284cac097eea50553e7d0dad5b09a218 100644 --- a/paddle/fluid/operators/math/context_project.h +++ b/paddle/fluid/operators/math/context_project.h @@ -14,6 +14,8 @@ limitations under the License. */ #pragma once +#include +#include #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/operators/math/im2col.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/math/cos_sim_functor.cu b/paddle/fluid/operators/math/cos_sim_functor.cu index 55c1e726335dfe010e39847ac90b84cc49955360..4e6ff5ee0a449b42762748ba1a103876beee01f2 100644 --- a/paddle/fluid/operators/math/cos_sim_functor.cu +++ b/paddle/fluid/operators/math/cos_sim_functor.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/math/cos_sim_functor.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/cross_entropy.cu b/paddle/fluid/operators/math/cross_entropy.cu index f4935c2813c9f84699f1182df6a9adb613190506..6d2ba2bd0d653ecf83f9e2abc1413ae551dc8bb7 100644 --- a/paddle/fluid/operators/math/cross_entropy.cu +++ b/paddle/fluid/operators/math/cross_entropy.cu @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/cross_entropy.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { @@ -31,11 +32,11 @@ __global__ void CrossEntropyKernel(T* Y, const T* X, const int64_t* label, template __device__ __forceinline__ T sum_single_warp(T val) { - val += __shfl_down(val, 16); - val += __shfl_down(val, 8); - val += __shfl_down(val, 4); - val += __shfl_down(val, 2); - val += __shfl_down(val, 1); + val += platform::__shfl_down_sync(0, val, 16); + val += platform::__shfl_down_sync(0, val, 8); + val += platform::__shfl_down_sync(0, val, 4); + val += platform::__shfl_down_sync(0, val, 2); + val += platform::__shfl_down_sync(0, val, 1); return val; } @@ -108,7 +109,9 @@ class CrossEntropyFunctor { if (softLabel) { const T* label_data = labels->data(); - int block = class_num > 512 ? 512 : pow(2, int(std::log2(class_num))); + int block = class_num > 512 + ? 512 + : pow(2, static_cast(std::log2(class_num))); SoftCrossEntropyKernel<<< batch_size, block, block * sizeof(T), diff --git a/paddle/fluid/operators/math/depthwise_conv.cu b/paddle/fluid/operators/math/depthwise_conv.cu index a5e6e4031bbaddc2d09c660d34a975b2a496bc63..027e2de48d229761f12f974dc73625c8ea1b3567 100644 --- a/paddle/fluid/operators/math/depthwise_conv.cu +++ b/paddle/fluid/operators/math/depthwise_conv.cu @@ -12,8 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include #include "paddle/fluid/operators/math/depthwise_conv.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/depthwise_conv.h b/paddle/fluid/operators/math/depthwise_conv.h index 081bda891d044041f6814964e7076e28e812039c..97aec401889a56d3fc9ac08e766d931bb3725b01 100644 --- a/paddle/fluid/operators/math/depthwise_conv.h +++ b/paddle/fluid/operators/math/depthwise_conv.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/hostdevice.h" diff --git a/paddle/fluid/operators/math/detail/activation_functions.h b/paddle/fluid/operators/math/detail/activation_functions.h index d205ebf210818b91c3cf6d4563fca1e56702bcf9..b127fbe8c8515e7fe57b07ea1d4291675ec4efca 100644 --- a/paddle/fluid/operators/math/detail/activation_functions.h +++ b/paddle/fluid/operators/math/detail/activation_functions.h @@ -14,6 +14,7 @@ limitations under the License. 
*/ #pragma once #include +#include #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/hostdevice.h" diff --git a/paddle/fluid/operators/math/detail/gru_cpu_kernel.h b/paddle/fluid/operators/math/detail/gru_cpu_kernel.h index 1e5ff8ef46db960ddf88ebf03041893b176c1950..26e6adafdfcd28dcc51109b9dec812f1c82c8a0e 100644 --- a/paddle/fluid/operators/math/detail/gru_cpu_kernel.h +++ b/paddle/fluid/operators/math/detail/gru_cpu_kernel.h @@ -89,14 +89,14 @@ void hl_avx_gru_forward_reset_output(OpResetOutput op_reset_output, __m256 r_value_reset_gate; __m256 r_value_reset_output; __m256 r_prev_out = _mm256_set1_ps(0.0f); - __m256 *update_gate = (__m256 *)gate_value; - __m256 *reset_gate = (__m256 *)(gate_value + frame_size); + __m256 *update_gate = reinterpret_cast<__m256 *>(gate_value); + __m256 *reset_gate = reinterpret_cast<__m256 *>(gate_value + frame_size); for (int i = 0; i < frame_size / 8; i++) { r_value_update_gate = update_gate[i]; r_value_reset_gate = reset_gate[i]; if (prev_output_value) { - r_prev_out = ((__m256 *)prev_output_value)[i]; + r_prev_out = (reinterpret_cast<__m256 *>(prev_output_value))[i]; } op_reset_output(r_value_update_gate, r_value_reset_gate, r_prev_out, @@ -104,7 +104,7 @@ void hl_avx_gru_forward_reset_output(OpResetOutput op_reset_output, update_gate[i] = r_value_update_gate; reset_gate[i] = r_value_reset_gate; - ((__m256 *)reset_output_value)[i] = r_value_reset_output; + (reinterpret_cast<__m256 *>(reset_output_value))[i] = r_value_reset_output; } #endif } @@ -119,21 +119,21 @@ void hl_avx_gru_forward_final_output(OpFinalOutput op_final_output, __m256 r_value_frame_state; __m256 r_prev_out = _mm256_set1_ps(0.0f); __m256 r_output; - __m256 *update_gate = (__m256 *)gate_value; - __m256 *frame_state = (__m256 *)(gate_value + frame_size * 2); + __m256 *update_gate = reinterpret_cast<__m256 *>(gate_value); + __m256 *frame_state = reinterpret_cast<__m256 *>(gate_value + frame_size * 2); for (int i = 0; i < frame_size / 8; i++) { r_value_update_gate = update_gate[i]; r_value_frame_state = frame_state[i]; if (prev_output_value) { - r_prev_out = ((__m256 *)prev_output_value)[i]; + r_prev_out = (reinterpret_cast<__m256 *>(prev_output_value))[i]; } op_final_output(r_value_update_gate, r_value_frame_state, r_prev_out, r_output, active_node); frame_state[i] = r_value_frame_state; - ((__m256 *)output_value)[i] = r_output; + (reinterpret_cast<__m256 *>(output_value))[i] = r_output; } #endif } @@ -284,20 +284,22 @@ void hl_avx_gru_backward_state_grad(OpStateGrad op_state_grad, T *gate_value, __m256 r_out_grad; __m256 r_prev_out_value = _mm256_set1_ps(0.0f); __m256 r_prev_out_grad = _mm256_set1_ps(0.0f); - __m256 *update_gate_value = (__m256 *)gate_value; - __m256 *update_gate_grad = (__m256 *)gate_grad; - __m256 *frame_state_value = (__m256 *)(gate_value + frame_size * 2); - __m256 *frame_state_grad = (__m256 *)(gate_grad + frame_size * 2); + __m256 *update_gate_value = reinterpret_cast<__m256 *>(gate_value); + __m256 *update_gate_grad = reinterpret_cast<__m256 *>(gate_grad); + __m256 *frame_state_value = + reinterpret_cast<__m256 *>(gate_value + frame_size * 2); + __m256 *frame_state_grad = + reinterpret_cast<__m256 *>(gate_grad + frame_size * 2); for (int i = 0; i < frame_size / 8; i++) { r_update_gate_value = update_gate_value[i]; r_frame_state_value = frame_state_value[i]; - r_out_grad = ((__m256 *)output_grad)[i]; + r_out_grad = (reinterpret_cast<__m256 *>(output_grad))[i]; if (prev_out_value) { - r_prev_out_value = ((__m256 *)prev_out_value)[i]; + 
r_prev_out_value = (reinterpret_cast<__m256 *>(prev_out_value))[i]; } if (prev_out_grad) { - r_prev_out_grad = ((__m256 *)prev_out_grad)[i]; + r_prev_out_grad = (reinterpret_cast<__m256 *>(prev_out_grad))[i]; } op_state_grad(r_update_gate_value, r_update_gate_grad, r_frame_state_value, @@ -307,7 +309,7 @@ void hl_avx_gru_backward_state_grad(OpStateGrad op_state_grad, T *gate_value, update_gate_grad[i] = r_update_gate_grad; frame_state_grad[i] = r_frame_state_grad; if (prev_out_grad) { - ((__m256 *)prev_out_grad)[i] = r_prev_out_grad; + (reinterpret_cast<__m256 *>(prev_out_grad))[i] = r_prev_out_grad; } } #endif @@ -327,10 +329,11 @@ void hl_avx_gru_backward_reset_grad(OpResetGrad op_reset_grad, T *gate_value, __m256 r_reset_output_grad = _mm256_set1_ps(0.0f); __m256 r_prev_out_value = _mm256_set1_ps(0.0f); __m256 r_prev_out_grad = _mm256_set1_ps(0.0f); - __m256 *update_gate_value = (__m256 *)gate_value; - __m256 *update_gate_grad = (__m256 *)gate_grad; - __m256 *reset_gate_value = (__m256 *)(gate_value + frame_size); - __m256 *reset_gate_grad = (__m256 *)(gate_grad + frame_size); + __m256 *update_gate_value = reinterpret_cast<__m256 *>(gate_value); + __m256 *update_gate_grad = reinterpret_cast<__m256 *>(gate_grad); + __m256 *reset_gate_value = + reinterpret_cast<__m256 *>(gate_value + frame_size); + __m256 *reset_gate_grad = reinterpret_cast<__m256 *>(gate_grad + frame_size); for (int i = 0; i < frame_size / 8; i++) { r_update_gate_value = update_gate_value[i]; @@ -338,13 +341,13 @@ void hl_avx_gru_backward_reset_grad(OpResetGrad op_reset_grad, T *gate_value, r_reset_gate_value = reset_gate_value[i]; if (prev_out_value && prev_out_grad) { - r_reset_output_grad = ((__m256 *)reset_output_grad)[i]; + r_reset_output_grad = (reinterpret_cast<__m256 *>(reset_output_grad))[i]; } if (prev_out_value) { - r_prev_out_value = ((__m256 *)prev_out_value)[i]; + r_prev_out_value = (reinterpret_cast<__m256 *>(prev_out_value))[i]; } if (prev_out_grad) { - r_prev_out_grad = ((__m256 *)prev_out_grad)[i]; + r_prev_out_grad = (reinterpret_cast<__m256 *>(prev_out_grad))[i]; } op_reset_grad(r_update_gate_value, r_update_gate_grad, r_reset_gate_value, @@ -354,7 +357,7 @@ void hl_avx_gru_backward_reset_grad(OpResetGrad op_reset_grad, T *gate_value, update_gate_grad[i] = r_update_gate_grad; reset_gate_grad[i] = r_reset_gate_grad; if (prev_out_grad) { - ((__m256 *)prev_out_grad)[i] = r_prev_out_grad; + (reinterpret_cast<__m256 *>(prev_out_grad))[i] = r_prev_out_grad; } } #endif diff --git a/paddle/fluid/operators/math/detail/gru_gpu_kernel.h b/paddle/fluid/operators/math/detail/gru_gpu_kernel.h index 657652562780ae9932a4394335bfa3c3b397bb80..da25a7d2137cfe5160e28c4e590dd5c43cd99ccf 100644 --- a/paddle/fluid/operators/math/detail/gru_gpu_kernel.h +++ b/paddle/fluid/operators/math/detail/gru_gpu_kernel.h @@ -16,7 +16,7 @@ limitations under the License. 
*/ #include #include "paddle/fluid/operators/math/detail/activation_functions.h" #include "paddle/fluid/operators/math/gru_compute.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/device_context.h" namespace paddle { diff --git a/paddle/fluid/operators/math/detail/lstm_cpu_kernel.h b/paddle/fluid/operators/math/detail/lstm_cpu_kernel.h index 6ad77830fd7a9809c4922878cc8ccdff1e8e0ef7..19f6b213aa3bc06f7f5750fa42745fd8755c51b9 100644 --- a/paddle/fluid/operators/math/detail/lstm_cpu_kernel.h +++ b/paddle/fluid/operators/math/detail/lstm_cpu_kernel.h @@ -164,10 +164,12 @@ void avx_lstm_forward_one_sequence(Op op, LstmMetaValue value, __m256 r_state_atv; __m256 r_out; - __m256 *value_in = (__m256 *)value.gate_value; - __m256 *value_ig = (__m256 *)(value.gate_value + frame_size); - __m256 *value_fg = (__m256 *)(value.gate_value + frame_size * 2); - __m256 *value_og = (__m256 *)(value.gate_value + frame_size * 3); + __m256 *value_in = reinterpret_cast<__m256 *>(value.gate_value); + __m256 *value_ig = reinterpret_cast<__m256 *>(value.gate_value + frame_size); + __m256 *value_fg = + reinterpret_cast<__m256 *>(value.gate_value + frame_size * 2); + __m256 *value_og = + reinterpret_cast<__m256 *>(value.gate_value + frame_size * 3); for (int i = 0; i < frame_size / 8; i++) { r_value_in = value_in[i]; @@ -175,13 +177,13 @@ void avx_lstm_forward_one_sequence(Op op, LstmMetaValue value, r_value_fg = value_fg[i]; r_value_og = value_og[i]; if (value.check_ig) { - r_checkI = ((__m256 *)value.check_ig)[i]; - r_checkF = ((__m256 *)value.check_fg)[i]; - r_checkO = ((__m256 *)value.check_og)[i]; + r_checkI = (reinterpret_cast<__m256 *>(value.check_ig))[i]; + r_checkF = (reinterpret_cast<__m256 *>(value.check_fg))[i]; + r_checkO = (reinterpret_cast<__m256 *>(value.check_og))[i]; } if (value.prev_state_value) { - r_prev_state = ((__m256 *)value.prev_state_value)[i]; + r_prev_state = (reinterpret_cast<__m256 *>(value.prev_state_value))[i]; } op(r_value_in, r_value_ig, r_value_fg, r_value_og, r_prev_state, r_state, @@ -192,9 +194,9 @@ void avx_lstm_forward_one_sequence(Op op, LstmMetaValue value, value_ig[i] = r_value_ig; value_fg[i] = r_value_fg; value_og[i] = r_value_og; - ((__m256 *)value.state_value)[i] = r_state; - ((__m256 *)value.state_active_value)[i] = r_state_atv; - ((__m256 *)value.output_value)[i] = r_out; + (reinterpret_cast<__m256 *>(value.state_value))[i] = r_state; + (reinterpret_cast<__m256 *>(value.state_active_value))[i] = r_state_atv; + (reinterpret_cast<__m256 *>(value.output_value))[i] = r_out; } #endif } @@ -227,14 +229,16 @@ void avx_lstm_backward_one_sequence(Op op, LstmMetaValue value, __m256 r_checkFGrad; __m256 r_checkOGrad; - __m256 *value_in = (__m256 *)value.gate_value; - __m256 *value_ig = (__m256 *)(value.gate_value + frame_size); - __m256 *value_fg = (__m256 *)(value.gate_value + frame_size * 2); - __m256 *value_og = (__m256 *)(value.gate_value + frame_size * 3); - __m256 *grad_in = (__m256 *)grad.gate_grad; - __m256 *grad_ig = (__m256 *)(grad.gate_grad + frame_size); - __m256 *grad_fg = (__m256 *)(grad.gate_grad + frame_size * 2); - __m256 *grad_og = (__m256 *)(grad.gate_grad + frame_size * 3); + __m256 *value_in = reinterpret_cast<__m256 *>(value.gate_value); + __m256 *value_ig = reinterpret_cast<__m256 *>(value.gate_value + frame_size); + __m256 *value_fg = + reinterpret_cast<__m256 *>(value.gate_value + frame_size * 2); + __m256 *value_og = + reinterpret_cast<__m256 *>(value.gate_value + 
frame_size * 3); + __m256 *grad_in = reinterpret_cast<__m256 *>(grad.gate_grad); + __m256 *grad_ig = reinterpret_cast<__m256 *>(grad.gate_grad + frame_size); + __m256 *grad_fg = reinterpret_cast<__m256 *>(grad.gate_grad + frame_size * 2); + __m256 *grad_og = reinterpret_cast<__m256 *>(grad.gate_grad + frame_size * 3); for (int i = 0; i < frame_size / 8; i++) { r_value_in = value_in[i]; @@ -242,16 +246,16 @@ void avx_lstm_backward_one_sequence(Op op, LstmMetaValue value, r_value_fg = value_fg[i]; r_value_og = value_og[i]; if (value.check_ig) { - r_checkI = ((__m256 *)value.check_ig)[i]; - r_checkF = ((__m256 *)value.check_fg)[i]; - r_checkO = ((__m256 *)value.check_og)[i]; + r_checkI = (reinterpret_cast<__m256 *>(value.check_ig))[i]; + r_checkF = (reinterpret_cast<__m256 *>(value.check_fg))[i]; + r_checkO = (reinterpret_cast<__m256 *>(value.check_og))[i]; } - r_state = ((__m256 *)value.state_value)[i]; - r_state_atv = ((__m256 *)value.state_active_value)[i]; - r_output_grad = ((__m256 *)grad.output_grad)[i]; - r_state_grad = ((__m256 *)grad.state_grad)[i]; + r_state = (reinterpret_cast<__m256 *>(value.state_value))[i]; + r_state_atv = (reinterpret_cast<__m256 *>(value.state_active_value))[i]; + r_output_grad = (reinterpret_cast<__m256 *>(grad.output_grad))[i]; + r_state_grad = (reinterpret_cast<__m256 *>(grad.state_grad))[i]; if (value.prev_state_value) { - r_prev_state = ((__m256 *)value.prev_state_value)[i]; + r_prev_state = (reinterpret_cast<__m256 *>(value.prev_state_value))[i]; } op(r_value_in, r_value_ig, r_value_fg, r_value_og, r_grad_in, r_grad_ig, @@ -264,15 +268,18 @@ void avx_lstm_backward_one_sequence(Op op, LstmMetaValue value, grad_ig[i] = r_grad_ig; grad_fg[i] = r_grad_fg; grad_og[i] = r_grad_og; - ((__m256 *)grad.state_grad)[i] = r_state_grad; + (reinterpret_cast<__m256 *>(grad.state_grad))[i] = r_state_grad; if (grad.prev_state_grad) - ((__m256 *)grad.prev_state_grad)[i] = r_prev_state_grad; + (reinterpret_cast<__m256 *>(grad.prev_state_grad))[i] = r_prev_state_grad; if (value.prev_state_value) { - if (grad.check_ig_grad) ((__m256 *)grad.check_ig_grad)[i] += r_checkIGrad; - if (grad.check_fg_grad) ((__m256 *)grad.check_fg_grad)[i] += r_checkFGrad; + if (grad.check_ig_grad) + (reinterpret_cast<__m256 *>(grad.check_ig_grad))[i] += r_checkIGrad; + if (grad.check_fg_grad) + (reinterpret_cast<__m256 *>(grad.check_fg_grad))[i] += r_checkFGrad; } - if (grad.check_og_grad) ((__m256 *)grad.check_og_grad)[i] += r_checkOGrad; + if (grad.check_og_grad) + (reinterpret_cast<__m256 *>(grad.check_og_grad))[i] += r_checkOGrad; } #endif } diff --git a/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h b/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h index ee7b16da4187e0f7f7839eff5c8753d2eb4f9c6d..d29c780dcfb1f1a3cbab25256238769d3a5ccd93 100644 --- a/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h +++ b/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h @@ -13,13 +13,13 @@ See the License for the specific language governing permissions and limitations under the License. 
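Editor's note on the gru_cpu_kernel.h and lstm_cpu_kernel.h hunks above: they only swap C-style `(__m256 *)` casts for `reinterpret_cast<__m256 *>`, a cpplint-driven cleanup with no behavioural change. A minimal standalone sketch of the idiom (the function name and buffer are made up for illustration; it assumes the float buffer is 32-byte aligned and a multiple of 8 elements, as the AVX kernels above assume for their gate buffers):

#include <immintrin.h>  // AVX intrinsics; compile with -mavx

// Reinterpret a float array as packed 8-wide __m256 lanes and scale it.
// reinterpret_cast makes the type pun explicit and, unlike a C-style cast,
// will not silently cast away const.
void ScaleByTwo(float* data, int n) {
  __m256* vec = reinterpret_cast<__m256*>(data);  // same address, vector view
  const __m256 two = _mm256_set1_ps(2.0f);
  for (int i = 0; i < n / 8; ++i) {
    vec[i] = _mm256_mul_ps(vec[i], two);
  }
}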
*/ #pragma once +#include + #include "paddle/fluid/operators/math/detail/activation_functions.h" #include "paddle/fluid/operators/math/lstm_compute.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/device_context.h" -#include - namespace paddle { namespace operators { namespace math { diff --git a/paddle/fluid/operators/math/im2col.cc b/paddle/fluid/operators/math/im2col.cc index 123e10586f60f65d6c31a7c1f837bf32610d4ea5..336d6febc2ce3a55e82ed613bbc1081101f822f0 100644 --- a/paddle/fluid/operators/math/im2col.cc +++ b/paddle/fluid/operators/math/im2col.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/im2col.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/im2col.cu b/paddle/fluid/operators/math/im2col.cu index f41c78140fb601cd27a4e9997a736f46e903efc7..eecb233d22cea06da016b2671fd606b70eddf5a5 100644 --- a/paddle/fluid/operators/math/im2col.cu +++ b/paddle/fluid/operators/math/im2col.cu @@ -12,8 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include +#include #include "paddle/fluid/operators/math/im2col.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/im2col.h b/paddle/fluid/operators/math/im2col.h index 451ec9d53498caa6d53a28dbfc764775d3ed3ecf..26d94e0f2e6163eb7452cf1fbea5966b4344ace1 100644 --- a/paddle/fluid/operators/math/im2col.h +++ b/paddle/fluid/operators/math/im2col.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/platform/device_context.h" diff --git a/paddle/fluid/operators/math/im2col_test.cc b/paddle/fluid/operators/math/im2col_test.cc index b3978536bca7dc276bf5417bed36761a2d496536..8e3f0f286823c383bb0c44d0e7887040ec9b20a0 100644 --- a/paddle/fluid/operators/math/im2col_test.cc +++ b/paddle/fluid/operators/math/im2col_test.cc @@ -14,6 +14,7 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/math/im2col.h" #include +#include template void testIm2col() { @@ -62,7 +63,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - TensorCopy(input_tmp, *place, *context, &input); + TensorCopySync(input_tmp, *place, &input); } output_cfo.mutable_data( {1, filter_size, filter_size, output_height, output_width}, *place); @@ -87,7 +88,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { out_cfo_ptr = output_cfo.data(); } else { - TensorCopy(output_cfo, paddle::platform::CPUPlace(), *context, &output_tmp); + TensorCopySync(output_cfo, paddle::platform::CPUPlace(), &output_tmp); out_cfo_ptr = output_tmp.data(); } for (int i = 0; i < 6; ++i) { @@ -98,7 +99,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { out_ocf_ptr = output_ocf.data(); } else { - TensorCopy(output_ocf, paddle::platform::CPUPlace(), *context, &output_tmp); + TensorCopySync(output_ocf, paddle::platform::CPUPlace(), &output_tmp); out_ocf_ptr = output_tmp.data(); } @@ -119,7 +120,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - TensorCopy(input_tmp, *place, *context, &input); + TensorCopySync(input_tmp, *place, &input); } col2im(*context, output_cfo, dilation, stride, padding, &input); @@ -128,7 +129,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { in_ptr = input.data(); } else { - TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp); + TensorCopySync(input, paddle::platform::CPUPlace(), &input_tmp); in_ptr = input_tmp.data(); } for (int i = 0; i < 6; ++i) { @@ -140,7 +141,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - TensorCopy(input_tmp, *place, *context, &input); + TensorCopySync(input_tmp, *place, &input); } col2im_ocf(*context, output_ocf, dilation, stride, padding, &input); @@ -148,7 +149,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { in_ptr = input.data(); } else { - TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp); + TensorCopySync(input, paddle::platform::CPUPlace(), &input_tmp); in_ptr = input_tmp.data(); } for (int i = 0; i < 6; ++i) { diff --git a/paddle/fluid/operators/math/math_function_impl.h b/paddle/fluid/operators/math/math_function_impl.h index f9d4e4532428e3387ae39621d155434f05eb89d3..b9bd49d77d935e985705f78402ffe1ea90f24cb3 100644 --- a/paddle/fluid/operators/math/math_function_impl.h +++ b/paddle/fluid/operators/math/math_function_impl.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #pragma once +#include #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/math/math_function_test.cu b/paddle/fluid/operators/math/math_function_test.cu index 8982d9d066165a9da0461288685baa0c60e5f114..b84bb9974930a1619edeb85de4281b19da22fbbd 100644 --- a/paddle/fluid/operators/math/math_function_test.cu +++ b/paddle/fluid/operators/math/math_function_test.cu @@ -23,32 +23,29 @@ void fill_fp16_data(paddle::platform::float16* in_ptr, size_t size, } TEST(math_function, notrans_mul_trans_fp32) { - using namespace paddle::framework; - using namespace paddle::platform; + paddle::framework::Tensor input1; + paddle::framework::Tensor input1_gpu; + paddle::framework::Tensor input2_gpu; + paddle::framework::Tensor out_gpu; + paddle::framework::Tensor out; - Tensor input1; - Tensor input1_gpu; - Tensor input2_gpu; - Tensor out_gpu; - Tensor out; - - CPUPlace cpu_place; - CUDAPlace gpu_place(0); - CUDADeviceContext context(gpu_place); + paddle::platform::CPUPlace cpu_place; + paddle::platform::CUDAPlace gpu_place(0); + paddle::platform::CUDADeviceContext context(gpu_place); float* input1_ptr = input1.mutable_data({2, 3}, cpu_place); float arr[6] = {0, 1, 2, 3, 4, 5}; memcpy(input1_ptr, arr, 6 * sizeof(float)); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input1, gpu_place, context, &input2_gpu); + paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu); + paddle::framework::TensorCopySync(input1, gpu_place, &input2_gpu); out_gpu.mutable_data({2, 2}, gpu_place); - paddle::operators::math::matmul( + paddle::operators::math::matmul( context, input1_gpu, false, input2_gpu, true, 1, &out_gpu, 0); - TensorCopy(out_gpu, cpu_place, context, &out); + paddle::framework::TensorCopySync(out_gpu, cpu_place, &out); float* out_ptr = out.data(); context.Wait(); @@ -59,39 +56,38 @@ TEST(math_function, notrans_mul_trans_fp32) { } TEST(math_function, notrans_mul_trans_fp16) { - using namespace paddle::framework; - using namespace paddle::platform; - - Tensor input1; - Tensor input1_gpu; - Tensor input2_gpu; - Tensor out_gpu; - Tensor out; + paddle::framework::Tensor input1; + paddle::framework::Tensor input1_gpu; + paddle::framework::Tensor input2_gpu; + paddle::framework::Tensor out_gpu; + paddle::framework::Tensor out; - CPUPlace cpu_place; - CUDAPlace gpu_place(0); - CUDADeviceContext context(gpu_place); + paddle::platform::CPUPlace cpu_place; + paddle::platform::CUDAPlace gpu_place(0); + paddle::platform::CUDADeviceContext context(gpu_place); // fp16 GEMM in cublas requires GPU compute capability >= 53 if (context.GetComputeCapability() < 53) { return; } - float16* input1_ptr = input1.mutable_data({2, 3}, cpu_place); + paddle::platform::float16* input1_ptr = + input1.mutable_data({2, 3}, cpu_place); fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5}); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input1, gpu_place, context, &input2_gpu); + paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu); + paddle::framework::TensorCopySync(input1, gpu_place, &input2_gpu); - out_gpu.mutable_data({2, 2}, gpu_place); + out_gpu.mutable_data({2, 2}, gpu_place); - paddle::operators::math::matmul( - context, input1_gpu, false, input2_gpu, true, float16(1), &out_gpu, - float16(0)); + paddle::operators::math::matmul( + context, input1_gpu, false, input2_gpu, true, + paddle::platform::float16(1), &out_gpu, paddle::platform::float16(0)); - TensorCopy(out_gpu, cpu_place, 
context, &out); + paddle::framework::TensorCopySync(out_gpu, cpu_place, &out); - float16* out_ptr = out.data(); + paddle::platform::float16* out_ptr = out.data(); context.Wait(); EXPECT_EQ(static_cast(out_ptr[0]), 5); EXPECT_EQ(static_cast(out_ptr[1]), 14); @@ -100,32 +96,29 @@ TEST(math_function, notrans_mul_trans_fp16) { } TEST(math_function, trans_mul_notrans_fp32) { - using namespace paddle::framework; - using namespace paddle::platform; + paddle::framework::Tensor input1; + paddle::framework::Tensor input1_gpu; + paddle::framework::Tensor input2_gpu; + paddle::framework::Tensor out_gpu; + paddle::framework::Tensor out; - Tensor input1; - Tensor input1_gpu; - Tensor input2_gpu; - Tensor out_gpu; - Tensor out; - - CPUPlace cpu_place; - CUDAPlace gpu_place(0); - CUDADeviceContext context(gpu_place); + paddle::platform::CPUPlace cpu_place; + paddle::platform::CUDAPlace gpu_place(0); + paddle::platform::CUDADeviceContext context(gpu_place); float* input1_ptr = input1.mutable_data({2, 3}, cpu_place); float arr[6] = {0, 1, 2, 3, 4, 5}; memcpy(input1_ptr, arr, 6 * sizeof(float)); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input1, gpu_place, context, &input2_gpu); + paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu); + paddle::framework::TensorCopySync(input1, gpu_place, &input2_gpu); out_gpu.mutable_data({3, 3}, gpu_place); paddle::operators::math::matmul( context, input1_gpu, true, input2_gpu, false, 1, &out_gpu, 0); - TensorCopy(out_gpu, cpu_place, context, &out); + paddle::framework::TensorCopySync(out_gpu, cpu_place, &out); float* out_ptr = out.data(); context.Wait(); @@ -141,39 +134,38 @@ TEST(math_function, trans_mul_notrans_fp32) { } TEST(math_function, trans_mul_notrans_fp16) { - using namespace paddle::framework; - using namespace paddle::platform; - - Tensor input1; - Tensor input1_gpu; - Tensor input2_gpu; - Tensor out_gpu; - Tensor out; + paddle::framework::Tensor input1; + paddle::framework::Tensor input1_gpu; + paddle::framework::Tensor input2_gpu; + paddle::framework::Tensor out_gpu; + paddle::framework::Tensor out; - CPUPlace cpu_place; - CUDAPlace gpu_place(0); - CUDADeviceContext context(gpu_place); + paddle::platform::CPUPlace cpu_place; + paddle::platform::CUDAPlace gpu_place(0); + paddle::platform::CUDADeviceContext context(gpu_place); // fp16 GEMM in cublas requires GPU compute capability >= 53 if (context.GetComputeCapability() < 53) { return; } - float16* input1_ptr = input1.mutable_data({2, 3}, cpu_place); + paddle::platform::float16* input1_ptr = + input1.mutable_data({2, 3}, cpu_place); fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5}); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input1, gpu_place, context, &input2_gpu); + paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu); + paddle::framework::TensorCopySync(input1, gpu_place, &input2_gpu); - out_gpu.mutable_data({3, 3}, gpu_place); + out_gpu.mutable_data({3, 3}, gpu_place); - paddle::operators::math::matmul( - context, input1_gpu, true, input2_gpu, false, float16(1), &out_gpu, - float16(0)); + paddle::operators::math::matmul( + context, input1_gpu, true, input2_gpu, false, + paddle::platform::float16(1), &out_gpu, paddle::platform::float16(0)); - TensorCopy(out_gpu, cpu_place, context, &out); + paddle::framework::TensorCopySync(out_gpu, cpu_place, &out); - float16* out_ptr = out.data(); + paddle::platform::float16* out_ptr = out.data(); context.Wait(); EXPECT_EQ(static_cast(out_ptr[0]), 9); 
EXPECT_EQ(static_cast(out_ptr[1]), 12); @@ -187,19 +179,16 @@ TEST(math_function, trans_mul_notrans_fp16) { } TEST(math_function, gemm_notrans_cublas_fp32) { - using namespace paddle::framework; - using namespace paddle::platform; + paddle::framework::Tensor input1; + paddle::framework::Tensor input2; + paddle::framework::Tensor input3; + paddle::framework::Tensor input1_gpu; + paddle::framework::Tensor input2_gpu; + paddle::framework::Tensor input3_gpu; - Tensor input1; - Tensor input2; - Tensor input3; - Tensor input1_gpu; - Tensor input2_gpu; - Tensor input3_gpu; - - CPUPlace cpu_place; - CUDAPlace gpu_place(0); - CUDADeviceContext context(gpu_place); + paddle::platform::CPUPlace cpu_place; + paddle::platform::CUDAPlace gpu_place(0); + paddle::platform::CUDADeviceContext context(gpu_place); int m = 2; int n = 3; @@ -214,9 +203,9 @@ TEST(math_function, gemm_notrans_cublas_fp32) { float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7}; memcpy(input3_ptr, arr3, 8 * sizeof(float)); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input2, gpu_place, context, &input2_gpu); - TensorCopy(input3, gpu_place, context, &input3_gpu); + paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu); + paddle::framework::TensorCopySync(input2, gpu_place, &input2_gpu); + paddle::framework::TensorCopySync(input3, gpu_place, &input3_gpu); float* a = input1_gpu.data(); float* b = input2_gpu.data(); float* c = input3_gpu.mutable_data(gpu_place); @@ -224,7 +213,7 @@ TEST(math_function, gemm_notrans_cublas_fp32) { paddle::operators::math::gemm( context, false, false, m, n, k, 1, a, 3, b + 1, 4, 1, c + 1, 4); - TensorCopy(input3_gpu, cpu_place, context, &input3); + paddle::framework::TensorCopySync(input3_gpu, cpu_place, &input3); // numpy code: // a = np.arange(6).reshape(2, 3) @@ -244,19 +233,16 @@ TEST(math_function, gemm_notrans_cublas_fp32) { } TEST(math_function, gemm_notrans_cublas_fp16) { - using namespace paddle::framework; - using namespace paddle::platform; - - Tensor input1; - Tensor input2; - Tensor input3; - Tensor input1_gpu; - Tensor input2_gpu; - Tensor input3_gpu; + paddle::framework::Tensor input1; + paddle::framework::Tensor input2; + paddle::framework::Tensor input3; + paddle::framework::Tensor input1_gpu; + paddle::framework::Tensor input2_gpu; + paddle::framework::Tensor input3_gpu; - CPUPlace cpu_place; - CUDAPlace gpu_place(0); - CUDADeviceContext context(gpu_place); + paddle::platform::CPUPlace cpu_place; + paddle::platform::CUDAPlace gpu_place(0); + paddle::platform::CUDADeviceContext context(gpu_place); // fp16 GEMM in cublas requires GPU compute capability >= 53 if (context.GetComputeCapability() < 53) { @@ -266,26 +252,31 @@ TEST(math_function, gemm_notrans_cublas_fp16) { int m = 2; int n = 3; int k = 3; - float16* input1_ptr = input1.mutable_data({2, 3}, cpu_place); + paddle::platform::float16* input1_ptr = + input1.mutable_data({2, 3}, cpu_place); fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5}); - float16* input2_ptr = input2.mutable_data({3, 4}, cpu_place); + paddle::platform::float16* input2_ptr = + input2.mutable_data({3, 4}, cpu_place); fill_fp16_data(input2_ptr, input2.numel(), {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}); - float16* input3_ptr = input3.mutable_data({2, 4}, cpu_place); + paddle::platform::float16* input3_ptr = + input3.mutable_data({2, 4}, cpu_place); fill_fp16_data(input3_ptr, input3.numel(), {0, 1, 2, 3, 4, 5, 6, 7}); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input2, gpu_place, context, &input2_gpu); - 
TensorCopy(input3, gpu_place, context, &input3_gpu); - float16* a = input1_gpu.data(); - float16* b = input2_gpu.data(); - float16* c = input3_gpu.mutable_data(gpu_place); + paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu); + paddle::framework::TensorCopySync(input2, gpu_place, &input2_gpu); + paddle::framework::TensorCopySync(input3, gpu_place, &input3_gpu); + paddle::platform::float16* a = input1_gpu.data(); + paddle::platform::float16* b = input2_gpu.data(); + paddle::platform::float16* c = + input3_gpu.mutable_data(gpu_place); - paddle::operators::math::gemm( - context, false, false, m, n, k, float16(1), a, 3, b + 1, 4, float16(1), - c + 1, 4); + paddle::operators::math::gemm( + context, false, false, m, n, k, paddle::platform::float16(1), a, 3, b + 1, + 4, paddle::platform::float16(1), c + 1, 4); - TensorCopy(input3_gpu, cpu_place, context, &input3); + paddle::framework::TensorCopySync(input3_gpu, cpu_place, &input3); // numpy code: // a = np.arange(6).reshape(2, 3) @@ -305,19 +296,16 @@ TEST(math_function, gemm_notrans_cublas_fp16) { } TEST(math_function, gemm_trans_cublas_fp32) { - using namespace paddle::framework; - using namespace paddle::platform; - - Tensor input1; - Tensor input2; - Tensor input3; - Tensor input1_gpu; - Tensor input2_gpu; - Tensor input3_gpu; + paddle::framework::Tensor input1; + paddle::framework::Tensor input2; + paddle::framework::Tensor input3; + paddle::framework::Tensor input1_gpu; + paddle::framework::Tensor input2_gpu; + paddle::framework::Tensor input3_gpu; - CPUPlace cpu_place; - CUDAPlace gpu_place(0); - CUDADeviceContext context(gpu_place); + paddle::platform::CPUPlace cpu_place; + paddle::platform::CUDAPlace gpu_place(0); + paddle::platform::CUDADeviceContext context(gpu_place); int m = 2; int n = 3; @@ -332,9 +320,9 @@ TEST(math_function, gemm_trans_cublas_fp32) { float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7}; memcpy(input3_ptr, arr3, 8 * sizeof(float)); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input2, gpu_place, context, &input2_gpu); - TensorCopy(input3, gpu_place, context, &input3_gpu); + paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu); + paddle::framework::TensorCopySync(input2, gpu_place, &input2_gpu); + paddle::framework::TensorCopySync(input3, gpu_place, &input3_gpu); float* a = input1_gpu.data(); float* b = input2_gpu.data(); float* c = input3_gpu.mutable_data(gpu_place); @@ -342,7 +330,7 @@ TEST(math_function, gemm_trans_cublas_fp32) { paddle::operators::math::gemm( context, false, true, m, n, k, 1, a, 3, b + 3, 3, 1, c + 1, 4); - TensorCopy(input3_gpu, cpu_place, context, &input3); + paddle::framework::TensorCopySync(input3_gpu, cpu_place, &input3); context.Wait(); EXPECT_EQ(input3_ptr[0], 0); @@ -356,19 +344,16 @@ TEST(math_function, gemm_trans_cublas_fp32) { } TEST(math_function, gemm_trans_cublas_fp16) { - using namespace paddle::framework; - using namespace paddle::platform; + paddle::framework::Tensor input1; + paddle::framework::Tensor input2; + paddle::framework::Tensor input3; + paddle::framework::Tensor input1_gpu; + paddle::framework::Tensor input2_gpu; + paddle::framework::Tensor input3_gpu; - Tensor input1; - Tensor input2; - Tensor input3; - Tensor input1_gpu; - Tensor input2_gpu; - Tensor input3_gpu; - - CPUPlace cpu_place; - CUDAPlace gpu_place(0); - CUDADeviceContext context(gpu_place); + paddle::platform::CPUPlace cpu_place; + paddle::platform::CUDAPlace gpu_place(0); + paddle::platform::CUDADeviceContext context(gpu_place); // fp16 GEMM in cublas requires GPU 
compute capability >= 53 if (context.GetComputeCapability() < 53) { @@ -378,26 +363,31 @@ TEST(math_function, gemm_trans_cublas_fp16) { int m = 2; int n = 3; int k = 3; - float16* input1_ptr = input1.mutable_data({2, 3}, cpu_place); + paddle::platform::float16* input1_ptr = + input1.mutable_data({2, 3}, cpu_place); fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5}); - float16* input2_ptr = input2.mutable_data({4, 3}, cpu_place); + paddle::platform::float16* input2_ptr = + input2.mutable_data({4, 3}, cpu_place); fill_fp16_data(input2_ptr, input2.numel(), {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11}); - float16* input3_ptr = input3.mutable_data({2, 4}, cpu_place); + paddle::platform::float16* input3_ptr = + input3.mutable_data({2, 4}, cpu_place); fill_fp16_data(input3_ptr, input3.numel(), {0, 1, 2, 3, 4, 5, 6, 7}); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input2, gpu_place, context, &input2_gpu); - TensorCopy(input3, gpu_place, context, &input3_gpu); - float16* a = input1_gpu.data(); - float16* b = input2_gpu.data(); - float16* c = input3_gpu.mutable_data(gpu_place); + paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu); + paddle::framework::TensorCopySync(input2, gpu_place, &input2_gpu); + paddle::framework::TensorCopySync(input3, gpu_place, &input3_gpu); + paddle::platform::float16* a = input1_gpu.data(); + paddle::platform::float16* b = input2_gpu.data(); + paddle::platform::float16* c = + input3_gpu.mutable_data(gpu_place); - paddle::operators::math::gemm( - context, false, true, m, n, k, float16(1), a, 3, b + 3, 3, float16(1), - c + 1, 4); + paddle::operators::math::gemm( + context, false, true, m, n, k, paddle::platform::float16(1), a, 3, b + 3, + 3, paddle::platform::float16(1), c + 1, 4); - TensorCopy(input3_gpu, cpu_place, context, &input3); + paddle::framework::TensorCopySync(input3_gpu, cpu_place, &input3); context.Wait(); EXPECT_EQ(static_cast(input3_ptr[0]), 0); @@ -412,24 +402,21 @@ TEST(math_function, gemm_trans_cublas_fp16) { template void GemvTest(int m, int n, bool trans) { - using namespace paddle::framework; - using namespace paddle::platform; - - Tensor mat_a; - Tensor vec_b; - Tensor vec_c; + paddle::framework::Tensor mat_a; + paddle::framework::Tensor vec_b; + paddle::framework::Tensor vec_c; - CPUPlace cpu_place; - CUDAPlace gpu_place(0); - CUDADeviceContext context(gpu_place); + paddle::platform::CPUPlace cpu_place; + paddle::platform::CUDAPlace gpu_place(0); + paddle::platform::CUDADeviceContext context(gpu_place); T* data_a = mat_a.mutable_data({m, n}, cpu_place); T* data_b = vec_b.mutable_data({trans ? m : n}, cpu_place); T* data_c = vec_c.mutable_data({trans ? 
n : m}, cpu_place); - Tensor g_mat_a; - Tensor g_vec_b; - Tensor g_vec_c; + paddle::framework::Tensor g_mat_a; + paddle::framework::Tensor g_vec_b; + paddle::framework::Tensor g_vec_c; T* g_data_a = g_mat_a.mutable_data(mat_a.dims(), gpu_place); T* g_data_b = g_vec_b.mutable_data(vec_b.dims(), gpu_place); T* g_data_c = g_vec_c.mutable_data(vec_c.dims(), gpu_place); @@ -441,14 +428,14 @@ void GemvTest(int m, int n, bool trans) { data_b[i] = static_cast(i); } - TensorCopy(mat_a, gpu_place, context, &g_mat_a); - TensorCopy(vec_b, gpu_place, context, &g_vec_b); + paddle::framework::TensorCopySync(mat_a, gpu_place, &g_mat_a); + paddle::framework::TensorCopySync(vec_b, gpu_place, &g_vec_b); - paddle::operators::math::gemv( + paddle::operators::math::gemv( context, trans, static_cast(m), static_cast(n), 1., g_data_a, g_data_b, 0., g_data_c); - TensorCopy(g_vec_c, cpu_place, context, &vec_c); + paddle::framework::TensorCopySync(g_vec_c, cpu_place, &vec_c); if (!trans) { for (int i = 0; i < m; ++i) { diff --git a/paddle/fluid/operators/math/matmul.h b/paddle/fluid/operators/math/matmul.h index 6e2d35cd0f3581c742197a66842696a8e3a936b1..0006c5062f3639da589eea44d47917d879933615 100644 --- a/paddle/fluid/operators/math/matmul.h +++ b/paddle/fluid/operators/math/matmul.h @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include +#include #include "paddle/fluid/operators/math/math_function.h" namespace paddle { diff --git a/paddle/fluid/operators/math/maxouting.cu b/paddle/fluid/operators/math/maxouting.cu index 1e1a6a221c71c9d9cb9fda468360cb502c5ea52f..d9a23299a4d5750fc8c7fe3e5d1f8cd94bcb9cae 100644 --- a/paddle/fluid/operators/math/maxouting.cu +++ b/paddle/fluid/operators/math/maxouting.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/maxouting.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/pooling.cu b/paddle/fluid/operators/math/pooling.cu index 274263c69c535249fceee11075c5948b1fc34358..267f8c409df301f9b1a8c68f337473198cf827f4 100644 --- a/paddle/fluid/operators/math/pooling.cu +++ b/paddle/fluid/operators/math/pooling.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/pooling.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/sampler.cc b/paddle/fluid/operators/math/sampler.cc index 3ec6538d7fb0a902f468aa848c3064bd0260fabb..3066dc0ba284611af89c4927f45089a570ab88bc 100644 --- a/paddle/fluid/operators/math/sampler.cc +++ b/paddle/fluid/operators/math/sampler.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
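Editor's note on the test changes above (im2col_test.cc and math_function_test.cu): they replace `TensorCopy(src, place, context, &dst)` with `TensorCopySync(src, place, &dst)`. The asynchronous form enqueues the copy on the device context's stream, so a test that reads the destination buffer immediately afterwards races with the copy unless it first waits on that stream; the synchronous variant removes the hazard. A rough plain-CUDA analogy of the pitfall (not Paddle's API; pinned host memory is used so the async copies really are asynchronous):

#include <cuda_runtime.h>
#include <cstdio>

int main() {
  const int n = 4;
  float *h_in = nullptr, *h_out = nullptr, *d_buf = nullptr;
  cudaMallocHost(&h_in, n * sizeof(float));   // pinned host memory
  cudaMallocHost(&h_out, n * sizeof(float));
  cudaMalloc(&d_buf, n * sizeof(float));
  for (int i = 0; i < n; ++i) h_in[i] = static_cast<float>(i);

  cudaStream_t stream;
  cudaStreamCreate(&stream);
  cudaMemcpyAsync(d_buf, h_in, n * sizeof(float), cudaMemcpyHostToDevice, stream);
  cudaMemcpyAsync(h_out, d_buf, n * sizeof(float), cudaMemcpyDeviceToHost, stream);
  // Reading h_out before this point would race with the in-flight copies;
  // the explicit wait plays the role of TensorCopySync / context.Wait().
  cudaStreamSynchronize(stream);

  for (int i = 0; i < n; ++i) printf("%g ", h_out[i]);
  printf("\n");

  cudaStreamDestroy(stream);
  cudaFree(d_buf);
  cudaFreeHost(h_in);
  cudaFreeHost(h_out);
  return 0;
}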
*/ -#include "sampler.h" +#include "paddle/fluid/operators/math/sampler.h" namespace paddle { namespace random { diff --git a/paddle/fluid/operators/math/sampler.h b/paddle/fluid/operators/math/sampler.h index 9d6a6c28c4304019d0347a30be605e1374c169ee..b82691f269c5d0f267ca98c78646efe9b26f0b34 100644 --- a/paddle/fluid/operators/math/sampler.h +++ b/paddle/fluid/operators/math/sampler.h @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include #include -typedef long int64; namespace paddle { namespace operators { namespace math { @@ -27,25 +27,25 @@ namespace math { */ class Sampler { public: - explicit Sampler(int64 range) : range_(range) { + explicit Sampler(int64_t range) : range_(range) { PADDLE_ENFORCE_GT(range, 0); std::random_device r; seed_ = r(); } - explicit Sampler(int64 range, unsigned int seed) + explicit Sampler(int64_t range, unsigned int seed) : range_(range), seed_(seed) { PADDLE_ENFORCE_GT(range, 0); } virtual ~Sampler(); // Sample a single value - virtual int64 Sample() const = 0; + virtual int64_t Sample() const = 0; // The probability that a single call to Sample() returns the given value. - virtual float Probability(int64 value) const = 0; + virtual float Probability(int64_t value) const = 0; - int64 range() { return range_; }; + int64 range() { return range_; } protected: - const int64 range_; + const int64_t range_; unsigned int seed_; }; @@ -56,15 +56,15 @@ class Sampler { */ class UniformSampler : public Sampler { public: - explicit UniformSampler(int64 range); + explicit UniformSampler(int64_t range); - explicit UniformSampler(int64 range, unsigned int seed); + explicit UniformSampler(int64_t range, unsigned int seed); ~UniformSampler() override {} int64 Sample() const override; - float Probability(int64 value) const override; + float Probability(int64_t value) const override; private: const float inv_range_; @@ -79,15 +79,15 @@ class UniformSampler : public Sampler { */ class LogUniformSampler : public Sampler { public: - explicit LogUniformSampler(int64 range); + explicit LogUniformSampler(int64_t range); - explicit LogUniformSampler(int64 range, unsigned int seed); + explicit LogUniformSampler(int64_t range, unsigned int seed); ~LogUniformSampler() override {} int64 Sample() const override; - float Probability(int64 value) const override; + float Probability(int64_t value) const override; private: const float log_range_; @@ -95,6 +95,6 @@ class LogUniformSampler : public Sampler { std::shared_ptr> dist_; }; -} // math +} // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/math/selected_rows_functor.cc b/paddle/fluid/operators/math/selected_rows_functor.cc index 5da3d15277cff8b413a116819b17f50061632b5d..a830dc5250a6aea7e622da4046b512d0c7c5d6f9 100644 --- a/paddle/fluid/operators/math/selected_rows_functor.cc +++ b/paddle/fluid/operators/math/selected_rows_functor.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include +#include #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" diff --git a/paddle/fluid/operators/math/selected_rows_functor.cu b/paddle/fluid/operators/math/selected_rows_functor.cu index 5d78fd9d213556204d56087128dc84fe6a91e97d..a92762c7fea865fad2c7784736cce93a8af21892 100644 --- a/paddle/fluid/operators/math/selected_rows_functor.cu +++ b/paddle/fluid/operators/math/selected_rows_functor.cu @@ -13,10 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. */ #include +#include #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/selected_rows_functor_test.cc b/paddle/fluid/operators/math/selected_rows_functor_test.cc index 679b6568ad09ce3a42a7ad4222310711e707d9a8..70bed820ee58885861fa8c5535c931f258625572 100644 --- a/paddle/fluid/operators/math/selected_rows_functor_test.cc +++ b/paddle/fluid/operators/math/selected_rows_functor_test.cc @@ -13,41 +13,50 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/selected_rows_functor.h" +#include #include "gtest/gtest.h" #include "paddle/fluid/operators/math/math_function.h" TEST(selected_rows_functor, cpu_add) { - using namespace paddle::framework; - using namespace paddle::platform; - using namespace paddle::operators::math; - - CPUPlace cpu_place; - CPUDeviceContext ctx(cpu_place); - SetConstant functor; + paddle::platform::CPUPlace cpu_place; + paddle::platform::CPUDeviceContext ctx(cpu_place); + paddle::operators::math::SetConstant + functor; int64_t height = 10; int64_t row_numel = 10; std::vector rows1{0, 4, 7}; - std::unique_ptr selected_rows1{new SelectedRows(rows1, height)}; + std::unique_ptr selected_rows1{ + new paddle::framework::SelectedRows(rows1, height)}; auto* in1_value = selected_rows1->mutable_value(); in1_value->mutable_data( - make_ddim({static_cast(rows1.size()), row_numel}), cpu_place); + paddle::framework::make_ddim( + {static_cast(rows1.size()), row_numel}), + cpu_place); functor(ctx, in1_value, 1.0); std::vector rows2{0, 5, 7, 9}; - std::unique_ptr selected_rows2{new SelectedRows(rows2, height)}; + std::unique_ptr selected_rows2{ + new paddle::framework::SelectedRows(rows2, height)}; auto* in2_value = selected_rows2->mutable_value(); in2_value->mutable_data( - make_ddim({static_cast(rows2.size()), row_numel}), cpu_place); + paddle::framework::make_ddim( + {static_cast(rows2.size()), row_numel}), + cpu_place); functor(ctx, in2_value, 2.0); - std::unique_ptr output{new SelectedRows()}; + std::unique_ptr output{ + new paddle::framework::SelectedRows()}; auto* out_value = output->mutable_value(); // simplely concat two SelectedRows - out_value->mutable_data(make_ddim({7, 10}), cpu_place); + out_value->mutable_data(paddle::framework::make_ddim({7, 10}), + cpu_place); - SelectedRowsAdd add_functor; + paddle::operators::math::SelectedRowsAdd + add_functor; add_functor(ctx, *selected_rows1, *selected_rows2, output.get()); auto out_height = output->height(); @@ -78,14 +87,20 @@ TEST(selected_rows_functor, cpu_add) { EXPECT_EQ(out_data[5 * row_numel + 7], 2.0); EXPECT_EQ(out_data[6 * row_numel + 9], 2.0); - std::unique_ptr tensor1{new Tensor()}; - 
tensor1->mutable_data(make_ddim({height, row_numel}), cpu_place); + std::unique_ptr tensor1{ + new paddle::framework::Tensor()}; + tensor1->mutable_data( + paddle::framework::make_ddim({height, row_numel}), cpu_place); functor(ctx, tensor1.get(), 3.0); - std::unique_ptr tensor2{new Tensor()}; - tensor2->mutable_data(make_ddim({height, row_numel}), cpu_place); + std::unique_ptr tensor2{ + new paddle::framework::Tensor()}; + tensor2->mutable_data( + paddle::framework::make_ddim({height, row_numel}), cpu_place); - SelectedRowsAddTensor add_tensor_functor; + paddle::operators::math::SelectedRowsAddTensor< + paddle::platform::CPUDeviceContext, float> + add_tensor_functor; add_tensor_functor(ctx, *output, *tensor1, tensor2.get()); auto* tensor2_data = tensor2->data(); @@ -106,38 +121,46 @@ TEST(selected_rows_functor, cpu_add) { } TEST(selected_rows_functor, cpu_add_to) { - using namespace paddle::framework; - using namespace paddle::platform; - using namespace paddle::operators::math; - - CPUPlace cpu_place; - CPUDeviceContext ctx(cpu_place); - SetConstant functor; + paddle::platform::CPUPlace cpu_place; + paddle::platform::CPUDeviceContext ctx(cpu_place); + paddle::operators::math::SetConstant + functor; int64_t height = 10; int64_t row_numel = 10; std::vector rows1{0, 4, 7}; - std::unique_ptr selected_rows1{new SelectedRows(rows1, height)}; + std::unique_ptr selected_rows1{ + new paddle::framework::SelectedRows(rows1, height)}; auto* in1_value = selected_rows1->mutable_value(); in1_value->mutable_data( - make_ddim({static_cast(rows1.size()), row_numel}), cpu_place); + paddle::framework::make_ddim( + {static_cast(rows1.size()), row_numel}), + cpu_place); functor(ctx, in1_value, 1.0); std::vector rows2{0, 5, 7, 9}; - std::unique_ptr selected_rows2{new SelectedRows(rows2, height)}; + std::unique_ptr selected_rows2{ + new paddle::framework::SelectedRows(rows2, height)}; auto* in2_value = selected_rows2->mutable_value(); in2_value->mutable_data( - make_ddim({static_cast(rows2.size()), row_numel}), cpu_place); + paddle::framework::make_ddim( + {static_cast(rows2.size()), row_numel}), + cpu_place); functor(ctx, in2_value, 2.0); - std::unique_ptr output{new SelectedRows()}; + std::unique_ptr output{ + new paddle::framework::SelectedRows()}; output->set_height(height); auto* out_value = output->mutable_value(); // simplely concat two SelectedRows - out_value->mutable_data(make_ddim({7, 10}), cpu_place); + out_value->mutable_data(paddle::framework::make_ddim({7, 10}), + cpu_place); - SelectedRowsAddTo add_to_functor; + paddle::operators::math::SelectedRowsAddTo + add_to_functor; add_to_functor(ctx, *selected_rows1, 0, output.get()); add_to_functor(ctx, *selected_rows2, in1_value->numel(), output.get()); @@ -169,11 +192,15 @@ TEST(selected_rows_functor, cpu_add_to) { EXPECT_EQ(out_data[5 * row_numel + 7], 2.0); EXPECT_EQ(out_data[6 * row_numel + 9], 2.0); - std::unique_ptr tensor1{new Tensor()}; - tensor1->mutable_data(make_ddim({height, row_numel}), cpu_place); + std::unique_ptr tensor1{ + new paddle::framework::Tensor()}; + tensor1->mutable_data( + paddle::framework::make_ddim({height, row_numel}), cpu_place); functor(ctx, tensor1.get(), 3.0); - SelectedRowsAddToTensor add_to_tensor_functor; + paddle::operators::math::SelectedRowsAddToTensor< + paddle::platform::CPUDeviceContext, float> + add_to_tensor_functor; add_to_tensor_functor(ctx, *output, tensor1.get()); auto* tensor1_data = tensor1->data(); diff --git a/paddle/fluid/operators/math/selected_rows_functor_test.cu 
b/paddle/fluid/operators/math/selected_rows_functor_test.cu index 942d9b13fc1a8f578da779351be9ba9de7fcce33..e89b27855bdeba3a5189feff94eb063ddfb9b9b8 100644 --- a/paddle/fluid/operators/math/selected_rows_functor_test.cu +++ b/paddle/fluid/operators/math/selected_rows_functor_test.cu @@ -12,43 +12,52 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include #include "gtest/gtest.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" TEST(selected_rows_functor, gpu_add) { - using namespace paddle::framework; - using namespace paddle::platform; - using namespace paddle::operators::math; - - CUDAPlace gpu_place(0); - CPUPlace cpu_place; - CUDADeviceContext ctx(gpu_place); - SetConstant functor; + paddle::platform::CUDAPlace gpu_place(0); + paddle::platform::CPUPlace cpu_place; + paddle::platform::CUDADeviceContext ctx(gpu_place); + paddle::operators::math::SetConstant + functor; int64_t height = 10; int64_t row_numel = 10; std::vector rows1{0, 4, 7}; - std::unique_ptr selected_rows1{new SelectedRows(rows1, height)}; + std::unique_ptr selected_rows1{ + new paddle::framework::SelectedRows(rows1, height)}; auto* in1_value = selected_rows1->mutable_value(); in1_value->mutable_data( - make_ddim({static_cast(rows1.size()), row_numel}), gpu_place); + paddle::framework::make_ddim( + {static_cast(rows1.size()), row_numel}), + gpu_place); functor(ctx, in1_value, 1.0); std::vector rows2{0, 5, 7, 9}; - std::unique_ptr selected_rows2{new SelectedRows(rows2, height)}; + std::unique_ptr selected_rows2{ + new paddle::framework::SelectedRows(rows2, height)}; auto* in2_value = selected_rows2->mutable_value(); in2_value->mutable_data( - make_ddim({static_cast(rows2.size()), row_numel}), gpu_place); + paddle::framework::make_ddim( + {static_cast(rows2.size()), row_numel}), + gpu_place); functor(ctx, in2_value, 2.0); - std::unique_ptr output{new SelectedRows()}; + std::unique_ptr output{ + new paddle::framework::SelectedRows()}; auto* out_value = output->mutable_value(); - // simplely concat two SelectedRows - out_value->mutable_data(make_ddim({7, 10}), gpu_place); + // simply concat two SelectedRows + out_value->mutable_data(paddle::framework::make_ddim({7, 10}), + gpu_place); - SelectedRowsAdd add_functor; + paddle::operators::math::SelectedRowsAdd + add_functor; add_functor(ctx, *selected_rows1, *selected_rows2, output.get()); auto out_height = output->height(); @@ -66,8 +75,8 @@ TEST(selected_rows_functor, gpu_add) { EXPECT_EQ(out_rows[5], 7); EXPECT_EQ(out_rows[6], 9); - Tensor out_cpu; - TensorCopy(*out_value, cpu_place, ctx, &out_cpu); + paddle::framework::Tensor out_cpu; + paddle::framework::TensorCopy(*out_value, cpu_place, ctx, &out_cpu); ctx.Wait(); auto* out_cpu_data = out_cpu.data(); @@ -83,18 +92,24 @@ TEST(selected_rows_functor, gpu_add) { EXPECT_EQ(out_cpu_data[5 * row_numel + 7], 2.0); EXPECT_EQ(out_cpu_data[6 * row_numel + 9], 2.0); - std::unique_ptr tensor1{new Tensor()}; - tensor1->mutable_data(make_ddim({height, row_numel}), gpu_place); + std::unique_ptr tensor1{ + new paddle::framework::Tensor()}; + tensor1->mutable_data( + paddle::framework::make_ddim({height, row_numel}), gpu_place); functor(ctx, tensor1.get(), 3.0); - std::unique_ptr tensor2{new Tensor()}; - tensor2->mutable_data(make_ddim({height, row_numel}), gpu_place); + std::unique_ptr tensor2{ + new paddle::framework::Tensor()}; + 
tensor2->mutable_data( + paddle::framework::make_ddim({height, row_numel}), gpu_place); - SelectedRowsAddTensor add_tensor_functor; + paddle::operators::math::SelectedRowsAddTensor< + paddle::platform::CUDADeviceContext, float> + add_tensor_functor; add_tensor_functor(ctx, *output, *tensor1, tensor2.get()); - Tensor tensor2_cpu; - TensorCopy(*tensor2, cpu_place, ctx, &tensor2_cpu); + paddle::framework::Tensor tensor2_cpu; + paddle::framework::TensorCopy(*tensor2, cpu_place, ctx, &tensor2_cpu); ctx.Wait(); auto* tensor2_cpu_data = tensor2_cpu.data(); @@ -115,39 +130,47 @@ TEST(selected_rows_functor, gpu_add) { } TEST(selected_rows_functor, gpu_add_to) { - using namespace paddle::framework; - using namespace paddle::platform; - using namespace paddle::operators::math; - - CUDAPlace gpu_place(0); - CPUPlace cpu_place; - CUDADeviceContext ctx(gpu_place); - SetConstant functor; + paddle::platform::CUDAPlace gpu_place(0); + paddle::platform::CPUPlace cpu_place; + paddle::platform::CUDADeviceContext ctx(gpu_place); + paddle::operators::math::SetConstant + functor; int64_t height = 10; int64_t row_numel = 10; std::vector rows1{0, 4, 7}; - std::unique_ptr selected_rows1{new SelectedRows(rows1, height)}; + std::unique_ptr selected_rows1{ + new paddle::framework::SelectedRows(rows1, height)}; auto* in1_value = selected_rows1->mutable_value(); in1_value->mutable_data( - make_ddim({static_cast(rows1.size()), row_numel}), gpu_place); + paddle::framework::make_ddim( + {static_cast(rows1.size()), row_numel}), + gpu_place); functor(ctx, in1_value, 1.0); std::vector rows2{0, 5, 7, 9}; - std::unique_ptr selected_rows2{new SelectedRows(rows2, height)}; + std::unique_ptr selected_rows2{ + new paddle::framework::SelectedRows(rows2, height)}; auto* in2_value = selected_rows2->mutable_value(); in2_value->mutable_data( - make_ddim({static_cast(rows2.size()), row_numel}), gpu_place); + paddle::framework::make_ddim( + {static_cast(rows2.size()), row_numel}), + gpu_place); functor(ctx, in2_value, 2.0); - std::unique_ptr output{new SelectedRows()}; + std::unique_ptr output{ + new paddle::framework::SelectedRows()}; output->set_height(height); auto* out_value = output->mutable_value(); - // simplely concat two SelectedRows - out_value->mutable_data(make_ddim({7, 10}), gpu_place); + // simply concat two SelectedRows + out_value->mutable_data(paddle::framework::make_ddim({7, 10}), + gpu_place); - SelectedRowsAddTo add_to_functor; + paddle::operators::math::SelectedRowsAddTo< + paddle::platform::CUDADeviceContext, float> + add_to_functor; add_to_functor(ctx, *selected_rows1, 0, output.get()); add_to_functor(ctx, *selected_rows2, in1_value->numel(), output.get()); @@ -166,8 +189,8 @@ TEST(selected_rows_functor, gpu_add_to) { EXPECT_EQ(out_rows[5], 7); EXPECT_EQ(out_rows[6], 9); - Tensor out_cpu; - TensorCopy(*out_value, cpu_place, ctx, &out_cpu); + paddle::framework::Tensor out_cpu; + paddle::framework::TensorCopy(*out_value, cpu_place, ctx, &out_cpu); ctx.Wait(); auto* out_cpu_data = out_cpu.data(); @@ -183,15 +206,19 @@ TEST(selected_rows_functor, gpu_add_to) { EXPECT_EQ(out_cpu_data[5 * row_numel + 7], 2.0); EXPECT_EQ(out_cpu_data[6 * row_numel + 9], 2.0); - std::unique_ptr tensor1{new Tensor()}; - tensor1->mutable_data(make_ddim({height, row_numel}), gpu_place); + std::unique_ptr tensor1{ + new paddle::framework::Tensor()}; + tensor1->mutable_data( + paddle::framework::make_ddim({height, row_numel}), gpu_place); functor(ctx, tensor1.get(), 3.0); - SelectedRowsAddToTensor add_to_tensor_functor; + 
paddle::operators::math::SelectedRowsAddToTensor< + paddle::platform::CUDADeviceContext, float> + add_to_tensor_functor; add_to_tensor_functor(ctx, *output, tensor1.get()); - Tensor tensor1_cpu; - TensorCopy(*tensor1, cpu_place, ctx, &tensor1_cpu); + paddle::framework::Tensor tensor1_cpu; + paddle::framework::TensorCopy(*tensor1, cpu_place, ctx, &tensor1_cpu); ctx.Wait(); auto* tensor1_cpu_data = tensor1_cpu.data(); diff --git a/paddle/fluid/operators/math/sequence2batch.cc b/paddle/fluid/operators/math/sequence2batch.cc index 8899abff360ea867872d3433722cdb37ef358500..b546b8728217ed6013247555dcd5d7180ddeae74 100644 --- a/paddle/fluid/operators/math/sequence2batch.cc +++ b/paddle/fluid/operators/math/sequence2batch.cc @@ -23,11 +23,11 @@ class CopyMatrixRowsFunctor { public: void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& src, - framework::Vector index_lod, framework::Tensor& dst, + framework::Vector index_lod, framework::Tensor* dst, bool is_src_index) { size_t* index = index_lod.data(); auto src_dims = src.dims(); - auto dst_dims = dst.dims(); + auto dst_dims = dst->dims(); PADDLE_ENFORCE_EQ(src_dims.size(), 2UL, "The src must be matrix with rank 2."); PADDLE_ENFORCE_EQ(dst_dims.size(), 2UL, @@ -37,7 +37,7 @@ class CopyMatrixRowsFunctor { auto height = dst_dims[0]; auto width = dst_dims[1]; auto* src_data = src.data(); - auto* dst_data = dst.data(); + auto* dst_data = dst->data(); for (int i = 0; i < height; ++i) { if (is_src_index) { memcpy(dst_data + i * width, src_data + index[i] * width, diff --git a/paddle/fluid/operators/math/sequence2batch.cu b/paddle/fluid/operators/math/sequence2batch.cu index 3185f10d4180437ab5a3f78df8583613edd9ed43..be73adfc0cbe37ed8831b5ad34e66bc95e342e9d 100644 --- a/paddle/fluid/operators/math/sequence2batch.cu +++ b/paddle/fluid/operators/math/sequence2batch.cu @@ -43,10 +43,10 @@ class CopyMatrixRowsFunctor { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& src, - framework::Vector index_lod, framework::Tensor& dst, + framework::Vector index_lod, framework::Tensor* dst, bool is_src_index) { auto src_dims = src.dims(); - auto dst_dims = dst.dims(); + auto dst_dims = dst->dims(); PADDLE_ENFORCE_EQ(src_dims.size(), 2, "The src must be matrix with rank 2."); PADDLE_ENFORCE_EQ(dst_dims.size(), 2, @@ -56,7 +56,7 @@ class CopyMatrixRowsFunctor { auto height = dst_dims[0]; auto width = dst_dims[1]; auto* src_data = src.data(); - auto* dst_data = dst.data(); + auto* dst_data = dst->data(); dim3 threads(128, 8); dim3 grid(8, 1); diff --git a/paddle/fluid/operators/math/sequence2batch.h b/paddle/fluid/operators/math/sequence2batch.h index e78aafd37d1dda91a035f3ed850537e80f188cb2..0abda999a52bcbb94e6503692bd11aff26e849ba 100644 --- a/paddle/fluid/operators/math/sequence2batch.h +++ b/paddle/fluid/operators/math/sequence2batch.h @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/tensor.h" @@ -35,7 +37,7 @@ class CopyMatrixRowsFunctor { // copy the input src to the indexed rows of output dst. // The indexed rows are based on the input index. 
void operator()(const DeviceContext& context, const framework::Tensor& src, - framework::Vector index_lod, framework::Tensor& dst, + framework::Vector index_lod, framework::Tensor* dst, bool is_src_index); }; @@ -58,10 +60,10 @@ class LoDTensor2BatchFunctor { public: void operator()(const DeviceContext& context, const framework::LoDTensor& lod_tensor, - framework::LoDTensor& batch, bool is_cal_batch_lod, + framework::LoDTensor* batch, bool is_cal_batch_lod, bool is_reverse = false) const { if (!is_cal_batch_lod) { - auto lods = batch.lod(); + auto lods = batch->lod(); PADDLE_ENFORCE_GT(lods.size(), 2UL); PADDLE_ENFORCE_EQ(lods[1].size(), static_cast(lod_tensor.dims()[0])); @@ -141,7 +143,7 @@ class LoDTensor2BatchFunctor { for (size_t i = 0; i < seq_info.size(); ++i) { seq_order[i] = seq_info[i].seq_idx; } - batch.set_lod(batch_lods); + batch->set_lod(batch_lods); CopyMatrixRowsFunctor to_batch; to_batch(context, lod_tensor, batch_lods[1], batch, true); @@ -153,11 +155,11 @@ class Batch2LoDTensorFunctor { public: void operator()(const DeviceContext& context, const framework::LoDTensor& batch, - framework::LoDTensor& lod_tensor) const { + framework::LoDTensor* lod_tensor) const { auto in_lod = batch.lod(); PADDLE_ENFORCE_GT(in_lod.size(), 2UL); PADDLE_ENFORCE_EQ(in_lod[1].size(), - static_cast(lod_tensor.dims()[0])); + static_cast(lod_tensor->dims()[0])); CopyMatrixRowsFunctor to_seq; to_seq(context, batch, in_lod[1], lod_tensor, false); } diff --git a/paddle/fluid/operators/math/sequence_padding_test.cc b/paddle/fluid/operators/math/sequence_padding_test.cc index bece46e75374cc38512d77c9d6b2fc6584e39db2..e3d62144856eb30a64007b54c16719cc028c70f2 100644 --- a/paddle/fluid/operators/math/sequence_padding_test.cc +++ b/paddle/fluid/operators/math/sequence_padding_test.cc @@ -14,6 +14,7 @@ limitations under the License. */ #include "paddle/fluid/operators/math/sequence_padding.h" #include +#include template void TestSequencePadding(const paddle::framework::LoD& lod, @@ -75,7 +76,7 @@ void TestSequencePadding(const paddle::framework::LoD& lod, delete place; delete context; -}; +} TEST(Seq2BatchPadding, CPU) { paddle::framework::LoD lod1; diff --git a/paddle/fluid/operators/math/sequence_pooling.cc b/paddle/fluid/operators/math/sequence_pooling.cc index 5ae42ab973c81d3794fbbbe088e37ab02168c8dc..f25d3d3f1ee1f89d46b8e7c88ca68048f5203544 100644 --- a/paddle/fluid/operators/math/sequence_pooling.cc +++ b/paddle/fluid/operators/math/sequence_pooling.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/sequence_pooling.h" +#include #include "paddle/fluid/operators/math/math_function.h" namespace paddle { diff --git a/paddle/fluid/operators/math/sequence_pooling.cu b/paddle/fluid/operators/math/sequence_pooling.cu index 1935364da37e9a9881651455d2da4ecef1b1e266..97c2e69fe5327956fc2828781fe3a37b88cc1b99 100644 --- a/paddle/fluid/operators/math/sequence_pooling.cu +++ b/paddle/fluid/operators/math/sequence_pooling.cu @@ -12,9 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#include #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/sequence_pooling.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/sequence_pooling.h b/paddle/fluid/operators/math/sequence_pooling.h index 38e780222955644c14e5bbbf16dee720c7758f5c..8dcbee65d0b63a137e5f422ec8667cc950641b4a 100644 --- a/paddle/fluid/operators/math/sequence_pooling.h +++ b/paddle/fluid/operators/math/sequence_pooling.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/platform/device_context.h" diff --git a/paddle/fluid/operators/math/sequence_scale.cc b/paddle/fluid/operators/math/sequence_scale.cc index 2c46d4183b5ccd6db909e4142797f97a626c43d5..ee5b22ca855b4fa26e9626aadb84fa9b93b72952 100644 --- a/paddle/fluid/operators/math/sequence_scale.cc +++ b/paddle/fluid/operators/math/sequence_scale.cc @@ -21,15 +21,15 @@ namespace math { template class ScaleLoDTensorFunctor { public: - void operator()(const platform::CPUDeviceContext& context, - framework::LoDTensor& seq, const T* scales) { + void operator()(const platform::CPUDeviceContext& context, const T* scales, + framework::LoDTensor* seq) { const size_t level = 0; - auto lod = seq.lod(); + auto lod = seq->lod(); const size_t num_seq = lod[level].size() - 1; - size_t seq_width = seq.dims()[1]; + size_t seq_width = seq->dims()[1]; framework::LoD abs_offset_lod = framework::ToAbsOffset(lod); - T* seq_data = seq.mutable_data(context.GetPlace()); + T* seq_data = seq->mutable_data(context.GetPlace()); for (size_t i = 0; i < num_seq; ++i) { for (size_t j = lod[level][i] * seq_width; j < lod[level][i + 1] * seq_width; ++j) { diff --git a/paddle/fluid/operators/math/sequence_scale.cu b/paddle/fluid/operators/math/sequence_scale.cu index 74085153c62354771f6126b58746229b5564f2d0..079338c1d3dac6a9403c5871f3face9f1f8e77d2 100644 --- a/paddle/fluid/operators/math/sequence_scale.cu +++ b/paddle/fluid/operators/math/sequence_scale.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/math/sequence_scale.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { @@ -35,14 +35,14 @@ __global__ void SequenceScaleKernel(T* seq, size_t* lod, const T* scales, template class ScaleLoDTensorFunctor { public: - void operator()(const platform::CUDADeviceContext& context, - framework::LoDTensor& seq, const T* scales) { + void operator()(const platform::CUDADeviceContext& context, const T* scales, + framework::LoDTensor* seq) { const size_t level = 0; - auto lod = seq.lod(); + auto lod = seq->lod(); const size_t num_seq = lod[level].size() - 1; - const size_t seq_width = seq.numel() / seq.dims()[0]; + const size_t seq_width = seq->numel() / seq->dims()[0]; framework::LoD abs_offset_lod = framework::ToAbsOffset(lod); - T* seq_data = seq.mutable_data(context.GetPlace()); + T* seq_data = seq->mutable_data(context.GetPlace()); SequenceScaleKernel<<< num_seq, PADDLE_CUDA_NUM_THREADS, 0, context.stream()>>>( diff --git a/paddle/fluid/operators/math/sequence_scale.h b/paddle/fluid/operators/math/sequence_scale.h index 6cdcbe21cbf82881679d90de470f342f75b3e2f3..202243985c125cd518a27477eb370bf1a325fe16 100644 --- a/paddle/fluid/operators/math/sequence_scale.h +++ b/paddle/fluid/operators/math/sequence_scale.h @@ -46,8 +46,8 @@ namespace math { template class ScaleLoDTensorFunctor { public: - void operator()(const DeviceContext& context, framework::LoDTensor& seq, - const T* scales); + void operator()(const DeviceContext& context, const T* scales, + framework::LoDTensor* seq); }; } // namespace math diff --git a/paddle/fluid/operators/math/unpooling.cu b/paddle/fluid/operators/math/unpooling.cu index 367f343d51712d38edbb7eb50b41433258cf8c9d..c467ae8427d8f461b332eed8075631ed7e47b96e 100644 --- a/paddle/fluid/operators/math/unpooling.cu +++ b/paddle/fluid/operators/math/unpooling.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/unpooling.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/vol2col.cc b/paddle/fluid/operators/math/vol2col.cc index 09e9f85cca349be1dc46a3f3a0b2d919485d6fa1..e92adc09ba01b032aba8eba94bcb4ba96524c641 100644 --- a/paddle/fluid/operators/math/vol2col.cc +++ b/paddle/fluid/operators/math/vol2col.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/vol2col.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/vol2col.cu b/paddle/fluid/operators/math/vol2col.cu index 619730d394d075d05a016e421fd75c4549015216..28e1a752e34cf0171785a0341d8f0d8d3712fc7b 100644 --- a/paddle/fluid/operators/math/vol2col.cu +++ b/paddle/fluid/operators/math/vol2col.cu @@ -12,8 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#include +#include #include "paddle/fluid/operators/math/vol2col.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/vol2col.h b/paddle/fluid/operators/math/vol2col.h index dbc2ed7a6939cfb5dca61082decb8960e5f9ebda..5f59de8f02a52209a3901ca03680eb2d0dbc2658 100644 --- a/paddle/fluid/operators/math/vol2col.h +++ b/paddle/fluid/operators/math/vol2col.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/platform/device_context.h" diff --git a/paddle/fluid/operators/math/vol2col_test.cc b/paddle/fluid/operators/math/vol2col_test.cc index eb91f862e39d9f158200f69fd48e7245dde47171..aa979c4f10907e604758c3e2cfb776cb994c9ceb 100644 --- a/paddle/fluid/operators/math/vol2col_test.cc +++ b/paddle/fluid/operators/math/vol2col_test.cc @@ -15,6 +15,7 @@ limitations under the License. */ #include "paddle/fluid/operators/math/vol2col.h" #include #include +#include template void testVol2col() { @@ -71,7 +72,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - paddle::framework::TensorCopy(input_tmp, *place, *context, &input); + paddle::framework::TensorCopySync(input_tmp, *place, &input); } output.mutable_data({1, filter_size, filter_size, filter_size, output_depth, output_height, output_width}, @@ -85,7 +86,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { out_cfo_ptr = output.data(); } else { - TensorCopy(output, paddle::platform::CPUPlace(), *context, &output_tmp); + TensorCopySync(output, paddle::platform::CPUPlace(), &output_tmp); out_cfo_ptr = output_tmp.data(); } @@ -99,7 +100,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - TensorCopy(input_tmp, *place, *context, &input); + TensorCopySync(input_tmp, *place, &input); } paddle::operators::math::Col2VolFunctor col2vol; @@ -109,7 +110,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { in_ptr = input.data(); } else { - TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp); + TensorCopySync(input, paddle::platform::CPUPlace(), &input_tmp); in_ptr = input_tmp.data(); } diff --git a/paddle/fluid/operators/nccl/nccl_gpu_common.h b/paddle/fluid/operators/nccl/nccl_gpu_common.h index 113f93e346681e568524f9fb6a0ab9a56de8569e..558ff4cc09603eebbcd95a234ff1aa63ada7fbb2 100644 --- a/paddle/fluid/operators/nccl/nccl_gpu_common.h +++ b/paddle/fluid/operators/nccl/nccl_gpu_common.h @@ -15,9 +15,9 @@ limitations under the License. 
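The vol2col test changes above replace TensorCopy(...) plus an explicit wait with TensorCopySync(...), which only returns once the copy has finished, so the destination buffer can be read immediately. A small sketch of the pattern, assuming a tensor that lives on a CUDA place:

    paddle::framework::Tensor gpu_out, cpu_out;
    // ... some kernel has written gpu_out on the device ...
    paddle::framework::TensorCopySync(gpu_out, paddle::platform::CPUPlace(), &cpu_out);
    const float* host_ptr = cpu_out.data<float>();  // safe to dereference: the copy is complete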
*/ #pragma once #include -#include +#include // NOLINT #include -#include +#include // NOLINT #include #include #include diff --git a/paddle/fluid/operators/nccl_op_test.cu.cc b/paddle/fluid/operators/nccl_op_test.cu.cc index 20b8a5c98ab16ac8121cb2fd01deb8ecc1966d44..ef54d79fdf2becde98c68044d14bd4347773b975 100644 --- a/paddle/fluid/operators/nccl_op_test.cu.cc +++ b/paddle/fluid/operators/nccl_op_test.cu.cc @@ -228,10 +228,8 @@ TEST_F(NCCLTester, ncclReduceOp) { result_tensor->Resize(kDims); auto *ct = result_tensor->mutable_data(cpu_place); - paddle::memory::Copy( - cpu_place, ct, p::CUDAPlace(gpu_list_[kRoot]), rt, - recv_tensor.numel() * sizeof(float), - static_cast(dev_ctxs_[kRoot])->stream()); + paddle::memory::Copy(cpu_place, ct, p::CUDAPlace(gpu_list_[kRoot]), rt, + recv_tensor.numel() * sizeof(float), nullptr); for (int64_t j = 0; j < f::product(kDims); ++j) { ASSERT_NEAR(ct[j], expected_result, 1e-5); diff --git a/paddle/fluid/operators/one_hot_op.cu b/paddle/fluid/operators/one_hot_op.cu index 240ac895e2c8391322411d347384f4834995eb7c..625065692c1f32c89d9e566d00051e237ac9a3af 100644 --- a/paddle/fluid/operators/one_hot_op.cu +++ b/paddle/fluid/operators/one_hot_op.cu @@ -13,7 +13,7 @@ // limitations under the License. #include "paddle/fluid/operators/one_hot_op.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/gpu_info.h" namespace paddle { diff --git a/paddle/fluid/operators/reader/CMakeLists.txt b/paddle/fluid/operators/reader/CMakeLists.txt index 845528860f91d0b479bb3c4dbbe05e32c68dc16f..3106978eb0149b14849dfd1aaad8bbe76791f2f6 100644 --- a/paddle/fluid/operators/reader/CMakeLists.txt +++ b/paddle/fluid/operators/reader/CMakeLists.txt @@ -23,5 +23,7 @@ reader_library(create_recordio_file_reader_op SRCS create_recordio_file_reader_o reader_library(create_double_buffer_reader_op SRCS create_double_buffer_reader_op.cc) reader_library(create_multi_pass_reader_op SRCS create_multi_pass_reader_op.cc) reader_library(create_threaded_reader_op SRCS create_threaded_reader_op.cc) + +cc_test(reader_blocking_queue_test SRCS reader_blocking_queue_test.cc) # Export local libraries to parent set(READER_LIBRARY ${LOCAL_READER_LIBS} PARENT_SCOPE) diff --git a/paddle/fluid/operators/reader/blocking_queue.h b/paddle/fluid/operators/reader/blocking_queue.h new file mode 100644 index 0000000000000000000000000000000000000000..71684b14176edc8f71efbefa9a7decffc8f3011e --- /dev/null +++ b/paddle/fluid/operators/reader/blocking_queue.h @@ -0,0 +1,112 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include // NOLINT +#include + +#include "paddle/fluid/platform/enforce.h" + +namespace paddle { +namespace operators { +namespace reader { + +template +class BlockingQueue { + // BlockingQueue is for buffered reading and is supposed to use only the + // reader package. 
It is true that we could and should have been using + // framework::Channel, but it currently has a deadlock bug. BlockingQueue + // is a workaround and a simplified version of framework::Channel as it + // doesn't support GPU and it implements a buffered blocking queue. + public: + explicit BlockingQueue(size_t capacity) + : capacity_(capacity), closed_(false) { + PADDLE_ENFORCE_GT( + capacity_, 0, + "The capacity of a reader::BlockingQueue must be greater than 0."); + } + + bool Send(const T& elem) { + std::unique_lock lock(mutex_); + send_cv_.wait(lock, [&] { return queue_.size() < capacity_ || closed_; }); + if (closed_) { + VLOG(5) + << "WARNING: Sending an element to a closed reader::BlockingQueue."; + return false; + } + PADDLE_ENFORCE_LT(queue_.size(), capacity_); + queue_.push_back(elem); + receive_cv_.notify_one(); + return true; + } + + bool Send(T&& elem) { + std::unique_lock lock(mutex_); + send_cv_.wait(lock, [&] { return queue_.size() < capacity_ || closed_; }); + if (closed_) { + VLOG(5) + << "WARNING: Sending an element to a closed reader::BlockingQueue."; + return false; + } + PADDLE_ENFORCE_LT(queue_.size(), capacity_); + queue_.emplace_back(std::move(elem)); + receive_cv_.notify_one(); + return true; + } + + bool Receive(T* elem) { + std::unique_lock lock(mutex_); + receive_cv_.wait(lock, [&] { return !queue_.empty() || closed_; }); + if (!queue_.empty()) { + PADDLE_ENFORCE_NOT_NULL(elem); + *elem = queue_.front(); + queue_.pop_front(); + send_cv_.notify_one(); + return true; + } else { + PADDLE_ENFORCE(closed_); + return false; + } + } + + void Close() { + std::lock_guard lock(mutex_); + closed_ = true; + send_cv_.notify_all(); + receive_cv_.notify_all(); + } + + bool IsClosed() { + std::lock_guard lock(mutex_); + return closed_; + } + + size_t Cap() { + std::lock_guard lock(mutex_); + return capacity_; + } + + private: + size_t capacity_; + bool closed_; + std::deque queue_; + + std::mutex mutex_; + std::condition_variable receive_cv_; + std::condition_variable send_cv_; +}; +} // namespace reader +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc index 4372f23fc1dbd85e43b04a9d644977392316c2e9..e5efac461512a9a1869318d6547233589ca45a77 100644 --- a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc +++ b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc @@ -14,7 +14,7 @@ #include // NOLINT -#include "paddle/fluid/framework/channel.h" +#include "paddle/fluid/operators/reader/blocking_queue.h" #include "paddle/fluid/operators/reader/reader_op_registry.h" namespace paddle { @@ -23,13 +23,13 @@ namespace reader { // 'Double buffer' means we shall maintain two batches of input data at the same // time. So the kCacheSize should be at least 2. -static constexpr size_t kCacheSize = 2; +static constexpr size_t kCacheSize = 3; // There will be two batches out of the channel during training: // 1. the one waiting to be sent to the channel // 2. the one just received from the channel, which is also being used by // subsequent operators.
// So the channel size should be kChacheSize - 2 -static constexpr size_t kChannelSize = 0; // kCacheSize - 2 +static constexpr size_t kChannelSize = 1; // kCacheSize - 2 class DoubleBufferReader : public framework::DecoratedReader { public: @@ -55,10 +55,8 @@ class DoubleBufferReader : public framework::DecoratedReader { ~DoubleBufferReader() { EndPrefetcher(); } private: - bool HasNext() const; - void StartPrefetcher() { - channel_ = framework::MakeChannel(kChannelSize); + channel_ = new reader::BlockingQueue(kChannelSize); prefetcher_ = std::thread([this] { PrefetchThreadFunc(); }); } @@ -74,7 +72,7 @@ class DoubleBufferReader : public framework::DecoratedReader { void PrefetchThreadFunc(); std::thread prefetcher_; - framework::Channel* channel_; + reader::BlockingQueue* channel_; platform::Place place_; std::vector> cpu_tensor_cache_; std::vector> gpu_tensor_cache_; @@ -139,17 +137,16 @@ class CreateDoubleBufferReaderOpMaker : public DecoratedReaderMakerBase { }; void DoubleBufferReader::ReadNext(std::vector* out) { - out->clear(); - if (HasNext()) { - size_t cached_tensor_id; - channel_->Receive(&cached_tensor_id); + size_t cached_tensor_id; + if (channel_->Receive(&cached_tensor_id)) { if (platform::is_gpu_place(place_)) { *out = gpu_tensor_cache_[cached_tensor_id]; - ctxs_[cached_tensor_id]->Wait(); } else { // CPU place *out = cpu_tensor_cache_[cached_tensor_id]; } + } else { + out->clear(); } } @@ -159,12 +156,6 @@ void DoubleBufferReader::ReInit() { StartPrefetcher(); } -bool DoubleBufferReader::HasNext() const { - while (!channel_->IsClosed() && !channel_->CanReceive()) { - } - return channel_->CanReceive(); -} - void DoubleBufferReader::PrefetchThreadFunc() { VLOG(5) << "A new prefetch thread starts."; size_t cached_tensor_id = 0; @@ -177,18 +168,14 @@ void DoubleBufferReader::PrefetchThreadFunc() { } if (platform::is_gpu_place(place_)) { auto& gpu_batch = gpu_tensor_cache_[cached_tensor_id]; - auto* gpu_ctx = ctxs_[cached_tensor_id].get(); gpu_batch.resize(cpu_batch.size()); for (size_t i = 0; i < cpu_batch.size(); ++i) { - framework::TensorCopy(cpu_batch[i], place_, *gpu_ctx, &gpu_batch[i], - true); + // TODO(fengjiayi): Use asynchronous TensorCopy instead + framework::TensorCopySync(cpu_batch[i], place_, &gpu_batch[i]); gpu_batch[i].set_lod(cpu_batch[i].lod()); } } - try { - size_t tmp = cached_tensor_id; - channel_->Send(&tmp); - } catch (paddle::platform::EnforceNotMet e) { + if (!channel_->Send(cached_tensor_id)) { VLOG(5) << "WARNING: The double buffer channel has been closed. 
The " "prefetch thread will terminate."; break; diff --git a/paddle/fluid/operators/reader/open_files_op.cc b/paddle/fluid/operators/reader/open_files_op.cc index 779dc8a6a0deb7792e0540071e3a2588102fa708..91ad7d56583446ee4686e74187de166f387125df 100644 --- a/paddle/fluid/operators/reader/open_files_op.cc +++ b/paddle/fluid/operators/reader/open_files_op.cc @@ -14,7 +14,7 @@ #include // NOLINT -#include "paddle/fluid/framework/channel.h" +#include "paddle/fluid/operators/reader/blocking_queue.h" #include "paddle/fluid/operators/reader/reader_op_registry.h" namespace paddle { @@ -37,7 +37,6 @@ class MultiFileReader : public framework::ReaderBase { ~MultiFileReader() { EndScheduler(); } private: - bool HasNext(); void StartNewScheduler(); void EndScheduler(); void ScheduleThreadFunc(); @@ -48,15 +47,14 @@ class MultiFileReader : public framework::ReaderBase { std::thread scheduler_; std::vector prefetchers_; size_t buffer_size_; - framework::Channel* waiting_file_idx_; - framework::Channel* available_thread_idx_; - framework::Channel>* buffer_; + reader::BlockingQueue* waiting_file_idx_; + reader::BlockingQueue* available_thread_idx_; + reader::BlockingQueue>* buffer_; }; void MultiFileReader::ReadNext(std::vector* out) { - out->clear(); - if (HasNext()) { - buffer_->Receive(out); + if (!buffer_->Receive(out)) { + out->clear(); } } @@ -65,25 +63,19 @@ void MultiFileReader::ReInit() { StartNewScheduler(); } -bool MultiFileReader::HasNext() { - while (!buffer_->IsClosed() && !buffer_->CanReceive()) { - } - return buffer_->CanReceive(); -} - void MultiFileReader::StartNewScheduler() { size_t thread_num = prefetchers_.size(); - waiting_file_idx_ = framework::MakeChannel(file_names_.size()); - available_thread_idx_ = framework::MakeChannel(thread_num); - buffer_ = - framework::MakeChannel>(buffer_size_); + waiting_file_idx_ = new reader::BlockingQueue(file_names_.size()); + available_thread_idx_ = new reader::BlockingQueue(thread_num); + buffer_ = new reader::BlockingQueue>( + buffer_size_); for (size_t i = 0; i < file_names_.size(); ++i) { - waiting_file_idx_->Send(&i); + waiting_file_idx_->Send(i); } waiting_file_idx_->Close(); for (size_t i = 0; i < thread_num; ++i) { - available_thread_idx_->Send(&i); + available_thread_idx_->Send(i); } scheduler_ = std::thread([this] { ScheduleThreadFunc(); }); @@ -149,7 +141,7 @@ void MultiFileReader::PrefetchThreadFunc(std::string file_name, break; } try { - buffer_->Send(&ins); + buffer_->Send(std::move(ins)); } catch (paddle::platform::EnforceNotMet e) { VLOG(5) << "WARNING: The buffer channel has been closed. The prefetch " "thread of file '" @@ -158,9 +150,7 @@ void MultiFileReader::PrefetchThreadFunc(std::string file_name, } } - try { - available_thread_idx_->Send(&thread_idx); - } catch (paddle::platform::EnforceNotMet e) { + if (!available_thread_idx_->Send(thread_idx)) { VLOG(5) << "WARNING: The available_thread_idx_ channel has been closed. " "Fail to send thread_idx."; } diff --git a/paddle/fluid/operators/reader/reader_blocking_queue_test.cc b/paddle/fluid/operators/reader/reader_blocking_queue_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..7d1b381d56c8cdc1e79e594b18c1a1ed59ab5284 --- /dev/null +++ b/paddle/fluid/operators/reader/reader_blocking_queue_test.cc @@ -0,0 +1,219 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include // NOLINT +#include +#include // NOLINT +#include +#include "gtest/gtest.h" + +#include "paddle/fluid/operators/reader/blocking_queue.h" + +using paddle::operators::reader::BlockingQueue; + +TEST(BlockingQueue, CapacityTest) { + size_t cap = 10; + BlockingQueue q(cap); + EXPECT_EQ(q.Cap(), cap); +} + +void FirstInFirstOut(size_t queue_cap, size_t elem_num, size_t send_time_gap, + size_t receive_time_gap) { + BlockingQueue q(queue_cap); + std::thread sender([&]() { + for (size_t i = 0; i < elem_num; ++i) { + std::this_thread::sleep_for(std::chrono::milliseconds(send_time_gap)); + EXPECT_TRUE(q.Send(i)); + } + q.Close(); + }); + size_t count = 0; + while (true) { + std::this_thread::sleep_for(std::chrono::milliseconds(receive_time_gap)); + size_t elem; + if (!q.Receive(&elem)) { + break; + } + EXPECT_EQ(elem, count++); + } + sender.join(); + EXPECT_EQ(count, elem_num); + EXPECT_TRUE(q.IsClosed()); +} + +TEST(BlockingQueue, FirstInFirstOutTest) { + FirstInFirstOut(2, 5, 2, 50); + FirstInFirstOut(2, 5, 50, 2); + FirstInFirstOut(10, 3, 50, 2); + FirstInFirstOut(10, 3, 2, 50); +} + +TEST(BlockingQueue, SenderBlockingTest) { + const size_t queue_cap = 2; + BlockingQueue q(queue_cap); + size_t send_count = 0; + std::thread sender([&]() { + for (size_t i = 0; i < 5; ++i) { + if (!q.Send(i)) { + break; + } + ++send_count; + } + }); + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + q.Close(); + sender.join(); + EXPECT_EQ(send_count, queue_cap); + std::vector res; + while (true) { + size_t elem; + if (!q.Receive(&elem)) { + break; + } + res.push_back(elem); + } + EXPECT_EQ(res.size(), queue_cap); + for (size_t i = 0; i < res.size(); ++i) { + EXPECT_EQ(res[i], i); + } +} + +TEST(BlockingQueue, ReceiverBlockingTest) { + const size_t queue_cap = 5; + BlockingQueue q(queue_cap); + std::vector receive_res; + std::thread receiver([&]() { + size_t elem; + while (true) { + if (!q.Receive(&elem)) { + break; + } + receive_res.push_back(elem); + } + }); + std::vector to_send{2, 1, 7}; + for (auto e : to_send) { + q.Send(e); + } + q.Close(); + receiver.join(); + EXPECT_EQ(receive_res.size(), to_send.size()); + for (size_t i = 0; i < to_send.size(); ++i) { + EXPECT_EQ(receive_res[i], to_send[i]); + } +} + +void CheckIsUnorderedSame(const std::vector>& v1, + const std::vector>& v2) { + std::set s1; + std::set s2; + for (auto vec : v1) { + for (size_t elem : vec) { + s1.insert(elem); + } + } + for (auto vec : v2) { + for (size_t elem : vec) { + s2.insert(elem); + } + } + EXPECT_EQ(s1.size(), s2.size()); + auto it1 = s1.begin(); + auto it2 = s2.begin(); + while (it1 != s1.end()) { + EXPECT_EQ(*it1, *it2); + ++it1; + ++it2; + } +} + +void MultiSenderMultiReceiver(const size_t queue_cap, + const std::vector>& to_send, + size_t receiver_num, size_t send_time_gap, + size_t receive_time_gap) { + BlockingQueue q(queue_cap); + size_t sender_num = to_send.size(); + std::vector senders; + for (size_t s_idx = 0; s_idx < sender_num; ++s_idx) { + senders.emplace_back(std::thread([&, s_idx] { + for (size_t elem : to_send[s_idx]) { + 
std::this_thread::sleep_for(std::chrono::milliseconds(send_time_gap)); + EXPECT_TRUE(q.Send(elem)); + } + })); + } + std::vector receivers; + std::mutex mu; + std::vector> res; + for (size_t r_idx = 0; r_idx < receiver_num; ++r_idx) { + receivers.emplace_back(std::thread([&] { + std::vector receiver_res; + while (true) { + std::this_thread::sleep_for( + std::chrono::milliseconds(receive_time_gap)); + size_t elem; + if (!q.Receive(&elem)) { + break; + } + receiver_res.push_back(elem); + } + std::lock_guard lock(mu); + res.push_back(receiver_res); + })); + } + for (auto& t : senders) { + t.join(); + } + q.Close(); + for (auto& t : receivers) { + t.join(); + } + CheckIsUnorderedSame(to_send, res); +} + +TEST(BlockingQueue, MultiSenderMultiReaderTest) { + std::vector> to_send_1{{2, 3, 4}, {9}, {0, 7, 15, 6}}; + MultiSenderMultiReceiver(2, to_send_1, 2, 0, 0); + MultiSenderMultiReceiver(10, to_send_1, 2, 0, 0); + MultiSenderMultiReceiver(2, to_send_1, 20, 0, 0); + MultiSenderMultiReceiver(2, to_send_1, 2, 50, 0); + MultiSenderMultiReceiver(2, to_send_1, 2, 0, 50); + + std::vector> to_send_2{ + {2, 3, 4}, {}, {0, 7, 15, 6, 9, 32}}; + MultiSenderMultiReceiver(2, to_send_2, 3, 0, 0); + MultiSenderMultiReceiver(20, to_send_2, 3, 0, 0); + MultiSenderMultiReceiver(2, to_send_2, 30, 0, 0); + MultiSenderMultiReceiver(2, to_send_2, 3, 50, 0); + MultiSenderMultiReceiver(2, to_send_2, 3, 0, 50); +} + +struct MyClass { + MyClass() : val_(0) {} + explicit MyClass(int val) : val_(val) {} + MyClass(const MyClass& b) { val_ = b.val_; } + MyClass(MyClass&& b) { val_ = b.val_; } + void operator=(const MyClass& b) { val_ = b.val_; } + + int val_; +}; + +TEST(BlockingQueue, MyClassTest) { + BlockingQueue q(2); + MyClass a(200); + q.Send(std::move(a)); + MyClass b; + q.Receive(&b); + EXPECT_EQ(a.val_, b.val_); +} diff --git a/paddle/fluid/operators/reader/reader_op_registry.cc b/paddle/fluid/operators/reader/reader_op_registry.cc index fc8dc747ff0c2286f4516d8350f75d9887361924..3ff4536819b128d9c593b97f4942a0292a3b6b36 100644 --- a/paddle/fluid/operators/reader/reader_op_registry.cc +++ b/paddle/fluid/operators/reader/reader_op_registry.cc @@ -12,7 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
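The reader::BlockingQueue introduced above replaces framework::Channel in the double-buffer and multi-file readers: Send blocks while the queue is full, Receive blocks while it is empty, and both return false once Close() has been called (Receive only after the queue has drained), which is what lets the busy-wait HasNext() loops be deleted. A minimal producer/consumer sketch of these semantics (standalone, not part of the patch):

    paddle::operators::reader::BlockingQueue<size_t> q(2);  // capacity 2
    std::thread producer([&q] {
      for (size_t i = 0; i < 5; ++i) q.Send(i);  // blocks whenever 2 elements are already buffered
      q.Close();                                 // wakes any blocked sender or receiver
    });
    size_t v;
    while (q.Receive(&v)) {  // returns false once the queue is closed and empty
      // consume v in FIFO order
    }
    producer.join();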
-#include "reader_op_registry.h" +#include "paddle/fluid/operators/reader/reader_op_registry.h" +#include +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/reader/reader_op_registry.h b/paddle/fluid/operators/reader/reader_op_registry.h index 929d32ad8b367865e33530f8517343c513ee9878..ec25f55ef5c3bb691b1213328b996c080656bb7b 100644 --- a/paddle/fluid/operators/reader/reader_op_registry.h +++ b/paddle/fluid/operators/reader/reader_op_registry.h @@ -14,6 +14,8 @@ #pragma once +#include +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/reader.h" diff --git a/paddle/fluid/operators/reshape_op.h b/paddle/fluid/operators/reshape_op.h index 8320c257c9ab15efec29eabe99eca5b6f74c9e31..ccd7063fe69e0f21b4d2a821bb70902b39c9b9de 100644 --- a/paddle/fluid/operators/reshape_op.h +++ b/paddle/fluid/operators/reshape_op.h @@ -93,8 +93,14 @@ class ReshapeOp : public framework::OperatorWithKernel { if (unk_dim_idx != -1) { output_shape[unk_dim_idx] = -in_size / capacity; - PADDLE_ENFORCE_EQ(output_shape[unk_dim_idx] * capacity, -in_size, - "Invalid shape is given."); + // in_size < 0 and is un-determinate in compile time, skip the check, + // for example, in_dims = [-1, 8, 1, 1], shape = [-1, 3, 8], + // capacity = -24, in_size = -8, output_shape[0] = 0 + // the following check will fail. + if (in_size > 0) { + PADDLE_ENFORCE_EQ(output_shape[unk_dim_idx] * capacity, -in_size, + "Invalid shape is given."); + } } else { PADDLE_ENFORCE_EQ(capacity, in_size, "Invalid shape is given."); } @@ -124,10 +130,8 @@ class ReshapeKernel : public framework::OpKernel { auto *shape_data = shape_tensor->data(); framework::Tensor cpu_shape_tensor; if (platform::is_gpu_place(ctx.GetPlace())) { - TensorCopy(*shape_tensor, platform::CPUPlace(), ctx.device_context(), - &cpu_shape_tensor); + TensorCopySync(*shape_tensor, platform::CPUPlace(), &cpu_shape_tensor); shape_data = cpu_shape_tensor.data(); - ctx.device_context().Wait(); } auto shape = std::vector(shape_data, shape_data + shape_tensor->numel()); @@ -146,9 +150,7 @@ class ReshapeKernel : public framework::OpKernel { out->Resize(out_dims); if (!inplace) { out->mutable_data(ctx.GetPlace()); - framework::TensorCopy(*in, ctx.GetPlace(), ctx.device_context(), out); - ctx.device_context().Wait(); - // TensorCopy will resize to in_dims. 
+ framework::TensorCopySync(*in, ctx.GetPlace(), out); out->Resize(out_dims); } else { out->ShareDataWith(*in); diff --git a/paddle/fluid/operators/roi_pool_op.cc b/paddle/fluid/operators/roi_pool_op.cc index 224ec93d28ec75c52848d7c8400e684df0d69209..397e49ef20ac45515a852f466d693f358ef5461b 100644 --- a/paddle/fluid/operators/roi_pool_op.cc +++ b/paddle/fluid/operators/roi_pool_op.cc @@ -18,8 +18,7 @@ namespace paddle { namespace operators { using Tensor = framework::Tensor; - -static constexpr int kROISize = 5; +using LoDTensor = framework::LoDTensor; class ROIPoolOp : public framework::OperatorWithKernel { public: @@ -40,11 +39,11 @@ class ROIPoolOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(input_dims.size() == 4, "The format of input tensor is NCHW."); PADDLE_ENFORCE(rois_dims.size() == 2, - "ROIs should be a 2-D tensor of shape (num_rois, 5)" - "given as [[batch_id, x1, y1, x2, y2], …]."); + "ROIs should be a 2-D LoDTensor of shape (num_rois, 4)" + "given as [[x1, y1, x2, y2], …]."); PADDLE_ENFORCE(rois_dims[1] == kROISize, - "ROIs should be a 2-D tensor of shape (num_rois, 5)" - "given as [[batch_id, x1, y1, x2, y2], …]."); + "ROIs should be a 2-D LoDTensor of shape (num_rois, 4)" + "given as [[x1, y1, x2, y2], …]."); int pooled_height = ctx->Attrs().Get("pooled_height"); int pooled_width = ctx->Attrs().Get("pooled_width"); @@ -109,10 +108,10 @@ class ROIPoolOpMaker : public framework::OpProtoAndCheckerMaker { "H is the height of the feature, and " "W is the width of the feature."); AddInput("ROIs", - "(Tensor), " + "(LoDTensor), " "ROIs (Regions of Interest) to pool over. " - "should be a 2-D tensor of shape (num_rois, 5)" - "given as [[batch_id, x1, y1, x2, y2], …]. " + "should be a 2-D LoDTensor of shape (num_rois, 4)" + "given as [[x1, y1, x2, y2], …]. " "Where batch_id is the id of the data, " "(x1, y1) is the top left coordinates, and " "(x2, y2) is the bottom right coordinates."); diff --git a/paddle/fluid/operators/roi_pool_op.cu b/paddle/fluid/operators/roi_pool_op.cu index 1931629d1340758edb6664a5e3ffdba126b33717..f905d690f984a20622c5fbcbcc813d888dfb19d9 100644 --- a/paddle/fluid/operators/roi_pool_op.cu +++ b/paddle/fluid/operators/roi_pool_op.cu @@ -13,16 +13,16 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/roi_pool_op.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; +using LoDTensor = framework::LoDTensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; -static constexpr int kROISize = 5; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, @@ -30,13 +30,11 @@ static inline int NumBlocks(const int N) { } template -__global__ void GPUROIPoolForward(const int nthreads, const T* input_data, - const int64_t* input_rois, - const float spatial_scale, const int channels, - const int height, const int width, - const int pooled_height, - const int pooled_width, T* output_data, - int64_t* argmax_data) { +__global__ void GPUROIPoolForward( + const int nthreads, const T* input_data, const int64_t* input_rois, + const float spatial_scale, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + int* roi_batch_id_data, T* output_data, int64_t* argmax_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t i = index; i < nthreads; i += offset) { @@ -46,11 +44,11 @@ __global__ void GPUROIPoolForward(const int nthreads, const T* input_data, int n = index / pooled_width / pooled_height / channels; const int64_t* offset_input_rois = input_rois + n * kROISize; - int roi_batch_ind = offset_input_rois[0]; - int roi_start_w = round(offset_input_rois[1] * spatial_scale); - int roi_start_h = round(offset_input_rois[2] * spatial_scale); - int roi_end_w = round(offset_input_rois[3] * spatial_scale); - int roi_end_h = round(offset_input_rois[4] * spatial_scale); + int roi_batch_ind = roi_batch_id_data[n]; + int roi_start_w = round(offset_input_rois[0] * spatial_scale); + int roi_start_h = round(offset_input_rois[1] * spatial_scale); + int roi_end_w = round(offset_input_rois[2] * spatial_scale); + int roi_end_h = round(offset_input_rois[3] * spatial_scale); int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); @@ -93,7 +91,8 @@ __global__ void GPUROIPoolBackward( const int nthreads, const int64_t* input_rois, const T* output_grad, const int64_t* argmax_data, const int num_rois, const float spatial_scale, const int channels, const int height, const int width, - const int pooled_height, const int pooled_width, T* input_grad) { + const int pooled_height, const int pooled_width, int* roi_batch_id_data, + T* input_grad) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (int i = index; i < nthreads; i += offset) { @@ -102,8 +101,7 @@ __global__ void GPUROIPoolBackward( int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; - const int64_t* offset_input_rois = input_rois + n * kROISize; - int roi_batch_ind = offset_input_rois[0]; + int roi_batch_ind = roi_batch_id_data[n]; int input_offset = (roi_batch_ind * channels + c) * height * width; int output_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_output_grad = output_grad + output_offset; @@ -124,7 +122,7 @@ class GPUROIPoolOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input("X"); - auto* rois = ctx.Input("ROIs"); + auto* rois = ctx.Input("ROIs"); 
auto* out = ctx.Output("Out"); auto* argmax = ctx.Output("Argmax"); @@ -133,23 +131,46 @@ class GPUROIPoolOpKernel : public framework::OpKernel { auto spatial_scale = ctx.Attr("spatial_scale"); auto in_dims = in->dims(); + int batch_size = in_dims[0]; auto in_stride = framework::stride(in_dims); int channels = in_dims[1]; int height = in_dims[2]; int width = in_dims[3]; - size_t rois_num = rois->dims()[0]; + int rois_num = rois->dims()[0]; if (rois_num == 0) return; int output_size = out->numel(); int blocks = NumBlocks(output_size); int threads = kNumCUDAThreads; + framework::Tensor roi_batch_id_list; + roi_batch_id_list.Resize({rois_num}); + int* roi_batch_id_data = + roi_batch_id_list.mutable_data(platform::CPUPlace()); + auto rois_lod = rois->lod().back(); + int rois_batch_size = rois_lod.size() - 1; + PADDLE_ENFORCE_EQ( + rois_batch_size, batch_size, + "The rois_batch_size and imgs batch_size must be the same."); + int rois_num_with_lod = rois_lod[rois_batch_size]; + PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod, + "The rois_num from input and lod must be the same."); + for (int n = 0; n < rois_batch_size; ++n) { + for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { + roi_batch_id_data[i] = n; + } + } + + framework::Tensor roi_batch_id_list_gpu; + framework::TensorCopy(roi_batch_id_list, ctx.GetPlace(), + ctx.device_context(), &roi_batch_id_list_gpu); + GPUROIPoolForward< T><<>>( output_size, in->data(), rois->data(), spatial_scale, channels, height, width, pooled_height, pooled_width, - out->mutable_data(ctx.GetPlace()), + roi_batch_id_list_gpu.data(), out->mutable_data(ctx.GetPlace()), argmax->mutable_data(ctx.GetPlace())); } }; @@ -159,7 +180,7 @@ class GPUROIPoolGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input("X"); - auto* rois = ctx.Input("ROIs"); + auto* rois = ctx.Input("ROIs"); auto* argmax = ctx.Input("Argmax"); auto* out_grad = ctx.Input(framework::GradVarName("Out")); @@ -169,12 +190,27 @@ class GPUROIPoolGradOpKernel : public framework::OpKernel { auto pooled_width = ctx.Attr("pooled_width"); auto spatial_scale = ctx.Attr("spatial_scale"); - size_t rois_num = rois->dims()[0]; + int rois_num = rois->dims()[0]; int channels = in->dims()[1]; int height = in->dims()[2]; int width = in->dims()[3]; if (x_grad) { + framework::Tensor roi_batch_id_list; + roi_batch_id_list.Resize({rois_num}); + int* roi_batch_id_data = + roi_batch_id_list.mutable_data(platform::CPUPlace()); + auto rois_lod = rois->lod().back(); + int rois_batch_size = rois_lod.size() - 1; + for (int n = 0; n < rois_batch_size; ++n) { + for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { + roi_batch_id_data[i] = n; + } + } + framework::Tensor roi_batch_id_list_gpu; + framework::TensorCopy(roi_batch_id_list, ctx.GetPlace(), + ctx.device_context(), &roi_batch_id_list_gpu); + x_grad->mutable_data(ctx.GetPlace()); math::SetConstant set_zero; set_zero(ctx.cuda_device_context(), x_grad, static_cast(0)); @@ -189,6 +225,7 @@ class GPUROIPoolGradOpKernel : public framework::OpKernel { output_grad_size, rois->data(), out_grad->data(), argmax->data(), rois_num, spatial_scale, channels, height, width, pooled_height, pooled_width, + roi_batch_id_list_gpu.data(), x_grad->mutable_data(ctx.GetPlace())); } } diff --git a/paddle/fluid/operators/roi_pool_op.h b/paddle/fluid/operators/roi_pool_op.h index 54e07490319cf1da749bd33449a7b51efd6c3d65..c4f739b2c6b2d62ebebcc15fd627ebad040e7b3f 100644 --- 
a/paddle/fluid/operators/roi_pool_op.h +++ b/paddle/fluid/operators/roi_pool_op.h @@ -21,12 +21,14 @@ limitations under the License. */ namespace paddle { namespace operators { +static constexpr int kROISize = 4; + template class CPUROIPoolOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input("X"); - auto* rois = ctx.Input("ROIs"); + auto* rois = ctx.Input("ROIs"); auto* out = ctx.Output("Out"); auto* argmax = ctx.Output("Argmax"); @@ -47,24 +49,36 @@ class CPUROIPoolOpKernel : public framework::OpKernel { auto out_stride = framework::stride(out->dims()); const T* input_data = in->data(); - const int64_t* rois_data = rois->data(); - T* output_data = out->mutable_data(ctx.GetPlace()); - int64_t* argmax_data = argmax->mutable_data(ctx.GetPlace()); - for (int n = 0; n < rois_num; ++n) { - int roi_batch_id = rois_data[0]; - PADDLE_ENFORCE_GE(roi_batch_id, 0); - PADDLE_ENFORCE_LT(roi_batch_id, batch_size); - rois_data += roi_stride[0]; + framework::Tensor roi_batch_id_list; + roi_batch_id_list.Resize({rois_num}); + int* roi_batch_id_data = + roi_batch_id_list.mutable_data(ctx.GetPlace()); + + auto rois_lod = rois->lod().back(); + int rois_batch_size = rois_lod.size() - 1; + PADDLE_ENFORCE_EQ( + rois_batch_size, batch_size, + "The rois_batch_size and imgs batch_size must be the same."); + int rois_num_with_lod = rois_lod[rois_batch_size]; + PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod, + "The rois_num from input and lod must be the same."); + for (int n = 0; n < rois_batch_size; ++n) { + for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { + roi_batch_id_data[i] = n; + } } - rois_data = rois->data(); + T* output_data = out->mutable_data(ctx.GetPlace()); + int64_t* argmax_data = argmax->mutable_data(ctx.GetPlace()); + + const int64_t* rois_data = rois->data(); for (int n = 0; n < rois_num; ++n) { - int roi_batch_id = rois_data[0]; - int roi_start_w = round(rois_data[1] * spatial_scale); - int roi_start_h = round(rois_data[2] * spatial_scale); - int roi_end_w = round(rois_data[3] * spatial_scale); - int roi_end_h = round(rois_data[4] * spatial_scale); + int roi_batch_id = roi_batch_id_data[n]; + int roi_start_w = round(rois_data[0] * spatial_scale); + int roi_start_h = round(rois_data[1] * spatial_scale); + int roi_end_w = round(rois_data[2] * spatial_scale); + int roi_end_h = round(rois_data[3] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_height = std::max(roi_end_h - roi_start_h + 1, 1); @@ -133,7 +147,7 @@ class CPUROIPoolGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input("X"); - auto* rois = ctx.Input("ROIs"); + auto* rois = ctx.Input("ROIs"); auto* argmax = ctx.Input("Argmax"); auto* out_grad = ctx.Input(framework::GradVarName("Out")); @@ -143,6 +157,20 @@ class CPUROIPoolGradOpKernel : public framework::OpKernel { auto pooled_width = ctx.Attr("pooled_width"); if (in_grad) { + int rois_num = rois->dims()[0]; + framework::Tensor roi_batch_id_list; + roi_batch_id_list.Resize({rois_num}); + int* roi_batch_id_data = + roi_batch_id_list.mutable_data(ctx.GetPlace()); + + auto rois_lod = rois->lod().back(); + int rois_batch_size = rois_lod.size() - 1; + for (int n = 0; n < rois_batch_size; ++n) { + for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { + roi_batch_id_data[i] = n; + } + } + const int64_t* rois_data = rois->data(); const T* out_grad_data = out_grad->data(); const int64_t* argmax_data = 
argmax->data(); @@ -156,11 +184,10 @@ class CPUROIPoolGradOpKernel : public framework::OpKernel { auto roi_stride = framework::stride(rois->dims()); auto out_stride = framework::stride(out_grad->dims()); - int rois_num = rois->dims()[0]; int channels = in->dims()[1]; for (int n = 0; n < rois_num; ++n) { - int roi_batch_idx = rois_data[0]; + int roi_batch_idx = roi_batch_id_data[n]; T* batch_grad_data = in_grad_data + roi_batch_idx * in_stride[0]; for (int c = 0; c < channels; ++c) { for (int ph = 0; ph < pooled_height; ++ph) { diff --git a/paddle/fluid/operators/row_conv_op.cu b/paddle/fluid/operators/row_conv_op.cu index 67083455a7579a4bbb6d9598a77b68a8375cf815..dd8e62aca47a3b34a3788a43cc0043a887af818f 100644 --- a/paddle/fluid/operators/row_conv_op.cu +++ b/paddle/fluid/operators/row_conv_op.cu @@ -14,7 +14,7 @@ limitations under the License. */ #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/row_conv_op.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { @@ -220,7 +220,7 @@ __global__ void RowConvGradFilterImproved(const T *in, const T *dout, for (int offset = 16; offset > 0; offset = offset / 2) { // blockDim.x is 32. - val += __shfl_down(val, offset); + val += platform::__shfl_down_sync(0, val, offset); } __syncthreads(); @@ -276,7 +276,7 @@ __global__ void RowConvGradFilter(const T *in, const T *dout, int num_sequence, for (int offset = 16; offset > 0; offset = offset / 2) { // blockDim.x is 32. - val += __shfl_down(val, offset); + val += platform::__shfl_down_sync(0, val, offset); } __syncthreads(); diff --git a/paddle/fluid/operators/save_load_op_test.cc b/paddle/fluid/operators/save_load_op_test.cc index a7ba1e0ae1d22a22cf2943c9aaf0c394ef4ae326..74385ee47543e3f4887081c2225212996d3df3f1 100644 --- a/paddle/fluid/operators/save_load_op_test.cc +++ b/paddle/fluid/operators/save_load_op_test.cc @@ -14,6 +14,7 @@ limitations under the License. */ #include "gtest/gtest.h" #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/float16.h" USE_NO_KERNEL_OP(save); USE_NO_KERNEL_OP(load); @@ -61,3 +62,35 @@ TEST(SaveLoadOp, CPU) { } } } + +TEST(SaveLoadFP16Op, CPU) { + paddle::framework::Scope scope; + paddle::platform::CPUPlace place; + + auto var = scope.Var("test_var"); + auto tensor = var->GetMutable(); + tensor->Resize({3, 10}); + + float* expect = tensor->mutable_data(place); + for (int64_t i = 0; i < tensor->numel(); ++i) { + expect[i] = static_cast(paddle::platform::float16(i)); + } + + paddle::framework::AttributeMap attrs; + attrs.insert({"file_path", std::string("tensor.save")}); + attrs.insert({"save_as_fp16", true}); + + auto save_op = paddle::framework::OpRegistry::CreateOp( + "save", {{"X", {"test_var"}}}, {}, attrs); + save_op->Run(scope, place); + + auto load_var = scope.Var("out_var"); + auto target = load_var->GetMutable(); + auto load_op = paddle::framework::OpRegistry::CreateOp( + "load", {}, {{"Out", {"out_var"}}}, attrs); + load_op->Run(scope, place); + paddle::platform::float16* actual = target->data(); + for (int64_t i = 0; i < tensor->numel(); ++i) { + EXPECT_EQ(expect[i], static_cast(actual[i])); + } +} diff --git a/paddle/fluid/operators/save_op.cc b/paddle/fluid/operators/save_op.cc index 4a715c4baab2da7b7af86ada22ee88a16b05a814..f45d07ed90d52d204e9a3a5c2efe6df6b27ebfe6 100644 --- a/paddle/fluid/operators/save_op.cc +++ b/paddle/fluid/operators/save_op.cc @@ -18,6 +18,7 @@ limitations under the License. 
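The row_conv kernels above now go through platform::__shfl_down_sync, whose compatibility shim is added to cuda_primitives.h later in this patch: on CUDA < 9.0 it forwards to the old __shfl_down intrinsic and ignores the mask argument, while on CUDA >= 9.0 the native __shfl_down_sync is expected to be called with a mask (e.g. one built by CREATE_SHFL_MASK). An illustrative warp-sum device function in the same style as the row_conv usage (not part of the patch):

    __device__ float WarpReduceSum(float val) {
      for (int offset = 16; offset > 0; offset /= 2) {
        // mask 0 is ignored by the pre-9.0 shim; pass a real mask on CUDA 9+
        val += paddle::platform::__shfl_down_sync(0, val, offset);
      }
      return val;  // lane 0 ends up holding the sum over the 32 lanes of the warp
    }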
*/ #include #include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/data_type_transform.h" #include "paddle/fluid/framework/framework.pb.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" @@ -68,6 +69,7 @@ class SaveOp : public framework::OperatorBase { const platform::Place &place) const override { auto filename = Attr("file_path"); auto overwrite = Attr("overwrite"); + auto save_as_fp16 = Attr("save_as_fp16"); if (FileExists(filename) && !overwrite) { PADDLE_THROW("%s is existed, cannot save to it when overwrite=false", @@ -96,7 +98,18 @@ class SaveOp : public framework::OperatorBase { platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(place); - framework::SerializeToStream(fout, tensor, dev_ctx); + auto in_dtype = framework::ToDataType(tensor.type()); + auto out_dtype = save_as_fp16 ? framework::proto::VarType::FP16 : in_dtype; + + if (in_dtype != out_dtype) { + auto in_kernel_type = framework::OpKernelType(in_dtype, place); + auto out_kernel_type = framework::OpKernelType(out_dtype, place); + framework::LoDTensor out; + framework::TransDataType(in_kernel_type, out_kernel_type, tensor, &out); + framework::SerializeToStream(fout, out, dev_ctx); + } else { + framework::SerializeToStream(fout, tensor, dev_ctx); + } } }; @@ -114,6 +127,12 @@ This operator will serialize and write a tensor variable to file on disk. "(boolean, default true)" "Overwrite the output file if exist") .SetDefault(true); + AddAttr("save_as_fp16", + "(boolean, default false)" + "If true, the tensor will be converted to float16 data " + "type and then saved. Otherwise, the tensor will be " + "directly saved without data type conversion.") + .SetDefault(false); AddAttr("file_path", "(string)" "The \"file_path\" where the variable will be saved.") diff --git a/paddle/fluid/operators/send_op.cc b/paddle/fluid/operators/send_op.cc index 82ff087d0a7a4b482aef842e618f593b17dca171..e4386b640a298cd216bb60104653f20c4a96e7dc 100644 --- a/paddle/fluid/operators/send_op.cc +++ b/paddle/fluid/operators/send_op.cc @@ -41,6 +41,8 @@ class SendOp : public framework::OperatorBase { std::vector endpoints = Attr>("endpoints"); + bool sync_mode = Attr("sync_mode"); + platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto& ctx = *pool.Get(place); @@ -64,11 +66,13 @@ class SendOp : public framework::OperatorBase { } PADDLE_ENFORCE(rpc_client->Wait()); - for (auto& ep : endpoints) { - VLOG(3) << "batch barrier, ep: " << ep; - rpc_client->AsyncSendBatchBarrier(ep); + if (sync_mode) { + for (auto& ep : endpoints) { + VLOG(3) << "batch barrier, ep: " << ep; + rpc_client->AsyncSendBatchBarrier(ep); + } + PADDLE_ENFORCE(rpc_client->Wait()); } - PADDLE_ENFORCE(rpc_client->Wait()); if (outs.size() > 0) { for (size_t i = 0; i < outs.size(); i++) { @@ -112,6 +116,7 @@ This operator will send tensor to recv_op at the parameter server. 
"Server endpoints in the order of input " "variables for mapping") .SetDefault({}); + AddAttr("sync_mode", "work in sync_mode or not").SetDefault(true); } }; diff --git a/paddle/fluid/operators/send_recv_op_test.cc b/paddle/fluid/operators/send_recv_op_test.cc index 81350fee38df058d1b63eb5a8cd0b770e0626ae4..d2e1f3cb2ff9c8254cd4815a0f8750966a6e161c 100644 --- a/paddle/fluid/operators/send_recv_op_test.cc +++ b/paddle/fluid/operators/send_recv_op_test.cc @@ -137,6 +137,8 @@ void StartServerNet(bool is_sparse) { attrs.insert({"GradList", std::vector({"x1"})}); attrs.insert({"OptimizeBlock", optimize_block}); attrs.insert({"PrefetchBlock", prefetch_block}); + attrs.insert({"grad_to_block_id", std::vector({""})}); + attrs.insert({"sync_mode", true}); listen_and_serv_op = f::OpRegistry::CreateOp("listen_and_serv", {{"X", {"x1"}}}, {}, attrs); listen_and_serv_op->Run(scope, place); diff --git a/paddle/fluid/operators/sequence_erase_op.cu b/paddle/fluid/operators/sequence_erase_op.cu index fc9b91c351defb92246e0966b9993fd1e288aaac..3a58e47f1132cd1ac85584b2470e8c6cddcfb28a 100644 --- a/paddle/fluid/operators/sequence_erase_op.cu +++ b/paddle/fluid/operators/sequence_erase_op.cu @@ -15,7 +15,7 @@ limitations under the License. */ #include #include #include "paddle/fluid/operators/sequence_erase_op.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_expand_op.cu b/paddle/fluid/operators/sequence_expand_op.cu index c00765e5d59af068e5682b39ebace5f3d7a62250..550677b22694085059e914678a5361d914b455bc 100644 --- a/paddle/fluid/operators/sequence_expand_op.cu +++ b/paddle/fluid/operators/sequence_expand_op.cu @@ -14,7 +14,7 @@ limitations under the License. */ #include #include "paddle/fluid/operators/sequence_expand_op.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sgd_op.cu b/paddle/fluid/operators/sgd_op.cu index 9d211541c0bf729393b8190edb18e101d5e07d1a..4722be7a666d3e8f3c25c9499f88ddda835f60e3 100644 --- a/paddle/fluid/operators/sgd_op.cu +++ b/paddle/fluid/operators/sgd_op.cu @@ -14,7 +14,7 @@ limitations under the License. 
*/ #define EIGEN_USE_GPU #include "paddle/fluid/operators/sgd_op.h" -#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/warpctc_op.h b/paddle/fluid/operators/warpctc_op.h index afbfe69973830bde93ec0af8d1c844580a786663..85131d002595f7681e4bec4135e28fe49cf842fb 100644 --- a/paddle/fluid/operators/warpctc_op.h +++ b/paddle/fluid/operators/warpctc_op.h @@ -222,8 +222,8 @@ class WarpCTCGradKernel : public framework::OpKernel { const T* loss_grad_data = loss_grad->data(); math::ScaleLoDTensorFunctor()( - ctx.template device_context(), *logits_grad, - loss_grad_data); + ctx.template device_context(), loss_grad_data, + logits_grad); } }; diff --git a/paddle/fluid/platform/cuda_helper.h b/paddle/fluid/platform/cuda_primitives.h similarity index 85% rename from paddle/fluid/platform/cuda_helper.h rename to paddle/fluid/platform/cuda_primitives.h index 8758af0804ae08fec6fa64d7387f197f046ce20e..866ff30a8be7be124a72a8dc7e70ef4140ee716a 100644 --- a/paddle/fluid/platform/cuda_helper.h +++ b/paddle/fluid/platform/cuda_primitives.h @@ -66,5 +66,18 @@ CUDA_ATOMIC_WRAPPER(Add, double) { } #endif +// __shfl_down has been deprecated as of CUDA 9.0. +#if CUDA_VERSION < 9000 +template +__forceinline__ __device__ T __shfl_down_sync(unsigned, T val, int delta) { + return __shfl_down(val, delta); +} +#define CREATE_SHFL_MASK(mask, predicate) mask = 0u; +#else +#define FULL_WARP_MASK 0xFFFFFFFF +#define CREATE_SHFL_MASK(mask, predicate) \ + mask = __ballot_sync(FULL_WARP_MASK, (predicate)) +#endif + } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/dynload/cublas.h b/paddle/fluid/platform/dynload/cublas.h index 1ab55d6b9bf8fdbd14c9c2bd978e3e99dba3e73e..81acaff87d3c2025cf0d6185a1590b018bfbd83c 100644 --- a/paddle/fluid/platform/dynload/cublas.h +++ b/paddle/fluid/platform/dynload/cublas.h @@ -14,10 +14,12 @@ #pragma once +#include #include #include #include #include // NOLINT +#include #include "paddle/fluid/platform/dynload/dynamic_loader.h" namespace paddle { @@ -37,14 +39,14 @@ extern void *cublas_dso_handle; #ifdef PADDLE_USE_DSO #define DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP(__name) \ struct DynLoad__##__name { \ + using FUNC_TYPE = decltype(&::__name); \ template \ inline cublasStatus_t operator()(Args... args) { \ - typedef cublasStatus_t (*cublasFunc)(Args...); \ std::call_once(cublas_dso_flag, []() { \ cublas_dso_handle = paddle::platform::dynload::GetCublasDsoHandle(); \ }); \ void *p_##__name = dlsym(cublas_dso_handle, #__name); \ - return reinterpret_cast(p_##__name)(args...); \ + return reinterpret_cast(p_##__name)(args...); \ } \ }; \ extern DynLoad__##__name __name @@ -71,8 +73,8 @@ extern void *cublas_dso_handle; __macro(cublasDgemm_v2); \ __macro(cublasHgemm); \ __macro(cublasSgemmEx); \ - __macro(cublasSgeam_v2); \ - __macro(cublasDgeam_v2); \ + __macro(cublasSgeam); \ + __macro(cublasDgeam); \ __macro(cublasCreate_v2); \ __macro(cublasDestroy_v2); \ __macro(cublasSetStream_v2); \ diff --git a/paddle/fluid/platform/dynload/cudnn.h b/paddle/fluid/platform/dynload/cudnn.h index 24475b62ca2825c45ff7edb39328dece3b822b25..34d83e395694f55eafca74d63ebf363169ab30e8 100644 --- a/paddle/fluid/platform/dynload/cudnn.h +++ b/paddle/fluid/platform/dynload/cudnn.h @@ -34,7 +34,7 @@ extern void EnforceCUDNNLoaded(const char* fn_name); struct DynLoad__##__name { \ template \ auto operator()(Args... 
args) -> decltype(__name(args...)) { \ - using cudnn_func = decltype(__name(args...)) (*)(Args...); \ + using cudnn_func = decltype(&::__name); \ std::call_once(cudnn_dso_flag, []() { \ cudnn_dso_handle = paddle::platform::dynload::GetCUDNNDsoHandle(); \ }); \ diff --git a/paddle/fluid/platform/dynload/cupti.h b/paddle/fluid/platform/dynload/cupti.h index d0d676b9d8ac462900b48246bec43166d04ef97b..e64de7c20fc9d145e51cfc4528e321b3c4ec86c8 100644 --- a/paddle/fluid/platform/dynload/cupti.h +++ b/paddle/fluid/platform/dynload/cupti.h @@ -41,7 +41,7 @@ extern void *cupti_dso_handle; struct DynLoad__##__name { \ template \ inline CUptiResult CUPTIAPI operator()(Args... args) { \ - typedef CUptiResult CUPTIAPI (*cuptiFunc)(Args...); \ + using cuptiFunc = decltype(&::__name); \ std::call_once(cupti_dso_flag, []() { \ cupti_dso_handle = paddle::platform::dynload::GetCUPTIDsoHandle(); \ }); \ diff --git a/paddle/fluid/platform/dynload/curand.h b/paddle/fluid/platform/dynload/curand.h index 4697fb6cd96770127206bdabeea77e43eb09d1f5..46ad4379d5f9572d415ef1d747077217ae29391e 100644 --- a/paddle/fluid/platform/dynload/curand.h +++ b/paddle/fluid/platform/dynload/curand.h @@ -30,7 +30,7 @@ extern void *curand_dso_handle; struct DynLoad__##__name { \ template \ curandStatus_t operator()(Args... args) { \ - typedef curandStatus_t (*curandFunc)(Args...); \ + using curandFunc = decltype(&::__name); \ std::call_once(curand_dso_flag, []() { \ curand_dso_handle = paddle::platform::dynload::GetCurandDsoHandle(); \ }); \ diff --git a/paddle/fluid/platform/dynload/nccl.h b/paddle/fluid/platform/dynload/nccl.h index c5a10a78a4f432b431680c089f255fea777277cb..37902ae20c5d9d64486232bbd468375c4a50a615 100644 --- a/paddle/fluid/platform/dynload/nccl.h +++ b/paddle/fluid/platform/dynload/nccl.h @@ -33,7 +33,7 @@ extern void* nccl_dso_handle; struct DynLoad__##__name { \ template \ auto operator()(Args... args) -> decltype(__name(args...)) { \ - using nccl_func = decltype(__name(args...)) (*)(Args...); \ + using nccl_func = decltype(&::__name); \ std::call_once(nccl_dso_flag, []() { \ nccl_dso_handle = paddle::platform::dynload::GetNCCLDsoHandle(); \ }); \ diff --git a/paddle/fluid/platform/dynload/warpctc.h b/paddle/fluid/platform/dynload/warpctc.h index 7fa468370463a51c486b80317f401612930bc72e..7c70649d21c547beb824576d4a8ecf6219a9bddf 100644 --- a/paddle/fluid/platform/dynload/warpctc.h +++ b/paddle/fluid/platform/dynload/warpctc.h @@ -36,7 +36,7 @@ extern void* warpctc_dso_handle; struct DynLoad__##__name { \ template \ auto operator()(Args... args) -> decltype(__name(args...)) { \ - using warpctcFunc = decltype(__name(args...)) (*)(Args...); \ + using warpctcFunc = decltype(&::__name); \ std::call_once(warpctc_dso_flag, []() { \ warpctc_dso_handle = paddle::platform::dynload::GetWarpCTCDsoHandle(); \ }); \ diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index b20b514fcdd0b41fefa0933bc2d22645e7d4b6d6..c925686f8382da1758fb7cdc048253290ef69513 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -502,11 +502,11 @@ All parameter, weight, gradient are variables in Paddle. 
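The dynload wrappers above (cublas, cudnn, cupti, curand, nccl, warpctc) stop deducing the function-pointer type from the call arguments and instead use decltype(&::__name), the exact type of the function declared in the vendor header. Implicit argument conversions then happen at the call site just as for a direct call, rather than reinterpreting the dlsym'd symbol through a possibly mismatched signature. A small standalone illustration of the idea, using a made-up C function rather than a real CUDA entry point:

    extern "C" int scaled_add(int a, long b) { return a + static_cast<int>(b); }  // stand-in for a vendor symbol

    using scaled_add_t = decltype(&::scaled_add);      // int (*)(int, long), regardless of how it is called
    void* sym = reinterpret_cast<void*>(&scaled_add);  // in the wrappers this pointer comes from dlsym()
    int r = reinterpret_cast<scaled_add_t>(sym)(1, 2); // 1 and 2 convert to int/long before the call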
const std::unordered_set &bcast_vars, const ProgramDesc &main_program, const std::string &loss_var_name, Scope *scope, std::vector &local_scopes, - bool allow_op_delay, bool customize_loss_grad) { - new (&self) ParallelExecutor(num_threads, use_event, places, - params, bcast_vars, main_program, - loss_var_name, scope, local_scopes, - allow_op_delay, customize_loss_grad); + bool allow_op_delay, bool use_default_grad_scale) { + new (&self) ParallelExecutor( + num_threads, use_event, places, params, bcast_vars, + main_program, loss_var_name, scope, local_scopes, + allow_op_delay, use_default_grad_scale); }) .def("bcast_params", &ParallelExecutor::BCastParamsToGPUs) // NOTE: even we return a vec* to Python use reference policy. diff --git a/paddle/fluid/pybind/tensor_py.h b/paddle/fluid/pybind/tensor_py.h index 159d1d5f4e70033fabf93514bd63b38f83675bff..dcd711a33ff3a35fdd51d11f54a3343a0bb491c9 100644 --- a/paddle/fluid/pybind/tensor_py.h +++ b/paddle/fluid/pybind/tensor_py.h @@ -63,15 +63,9 @@ struct CastToPyBufferImpl { auto *dst_ptr = static_cast(dst_tensor.mutable_data( tensor.dims(), platform::CPUPlace())); - platform::DeviceContextPool &pool = - platform::DeviceContextPool::Instance(); - auto dev_ctx = static_cast( - pool.Get(tensor.place())); - - paddle::platform::GpuMemcpyAsync( - dst_ptr, src_ptr, sizeof(CUR_TYPE) * tensor.numel(), - cudaMemcpyDeviceToHost, dev_ctx->stream()); - dev_ctx->Wait(); + paddle::platform::GpuMemcpySync(dst_ptr, src_ptr, + sizeof(CUR_TYPE) * tensor.numel(), + cudaMemcpyDeviceToHost); #else PADDLE_THROW("'CUDAPlace' is not supported in CPU only device."); #endif @@ -184,17 +178,8 @@ void PyCUDATensorSetFromArray( self->Resize(framework::make_ddim(dims)); auto *dst = self->mutable_data(place); - - platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); - auto dev_ctx = - static_cast(pool.Get(place)); - paddle::platform::GpuMemcpyAsync(dst, array.data(), sizeof(T) * array.size(), - cudaMemcpyHostToDevice, dev_ctx->stream()); - // NOTE: For safety, here wait the copy complete. - // It because the CPU array.data() could be destroyed after this method. - // If we make this method async, it could be copied data from a memory buffer - // that has been freed. - dev_ctx->Wait(); + paddle::platform::GpuMemcpySync(dst, array.data(), sizeof(T) * array.size(), + cudaMemcpyHostToDevice); } template <> @@ -214,18 +199,9 @@ void PyCUDATensorSetFromArray( self->Resize(framework::make_ddim(dims)); auto *dst = self->mutable_data(place); - - platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); - auto dev_ctx = - static_cast(pool.Get(place)); - paddle::platform::GpuMemcpyAsync(dst, array.data(), - sizeof(uint16_t) * array.size(), - cudaMemcpyHostToDevice, dev_ctx->stream()); - // NOTE: For safety, here wait the copy complete. - // It because the CPU array.data() could be destroyed after this method. - // If we make this method async, it could be copied data from a memory buffer - // that has been freed. 
- dev_ctx->Wait(); + paddle::platform::GpuMemcpySync(dst, array.data(), + sizeof(uint16_t) * array.size(), + cudaMemcpyHostToDevice); } template diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index 94628270228b9e7fd32405bdcb5e11c163ba4791..7e00bd38487902227c3b4521db20cdbe314059be 100755 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -155,7 +155,7 @@ EOF function gen_dockerfile() { # Set BASE_IMAGE according to env variables if [[ ${WITH_GPU} == "ON" ]]; then - BASE_IMAGE="nvidia/cuda:8.0-cudnn7-runtime-ubuntu16.04" + BASE_IMAGE="nvidia/cuda:8.0-cudnn7-devel-ubuntu16.04" else BASE_IMAGE="ubuntu:16.04" fi diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index 654c8272a18e5adb01e75be94985a80502ba2c8d..53455fd86041f87d4050eba89f37c3435fed1ae1 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -208,8 +208,8 @@ EOF --platform=android-$ANDROID_API \ --install-dir=$ANDROID_STANDALONE_TOOLCHAIN - BUILD_ROOT=${PADDLE_ROOT}/build - DEST_ROOT={PADDLE_ROOT}/install + BUILD_ROOT=${PADDLE_ROOT}/build_android + DEST_ROOT=${PADDLE_ROOT}/install_android mkdir -p $BUILD_ROOT cd $BUILD_ROOT @@ -349,13 +349,18 @@ function gen_docs() { ======================================== EOF cmake .. \ + -DCMAKE_BUILD_TYPE=Release \ -DWITH_DOC=ON \ -DWITH_GPU=OFF \ - -DWITH_AVX=${WITH_AVX:-ON} \ - -DWITH_SWIG_PY=ON \ + -DWITH_MKL=OFF \ -DWITH_STYLE_CHECK=OFF make -j `nproc` paddle_docs paddle_apis + + # check websites for broken links + linkchecker doc/v2/en/html/index.html + linkchecker doc/v2/cn/html/index.html + linkchecker doc/v2/api/en/html/index.html } function gen_html() { diff --git a/paddle/scripts/paddle_docker_build.sh b/paddle/scripts/paddle_docker_build.sh index 53df94980fdd3c9fdaa4cf077880a8f7737bbd8a..311eb576014a741c67c0c6e62740b954a4a02462 100755 --- a/paddle/scripts/paddle_docker_build.sh +++ b/paddle/scripts/paddle_docker_build.sh @@ -28,11 +28,16 @@ function start_build_docker() { docker rm -f "${CONTAINER_ID}" 1>/dev/null fi + apt_mirror='s#http://archive.ubuntu.com/ubuntu#mirror://mirrors.ubuntu.com/mirrors.txt#g' DOCKER_ENV=$(cat < - -#include -#include +#include // NOLINT +#include // NOLINT #include "paddle/utils/CustomStackTrace.h" #include "paddle/utils/Locks.h" @@ -39,14 +37,10 @@ void testNormalImpl( threads.reserve(FLAGS_test_thread_num); for (int32_t i = 0; i < FLAGS_test_thread_num; ++i) { - threads.emplace_back(new std::thread([&tracer, - &countDown, - &layerSize, - &startBarrier, - &doneBarrier, - &callback] { - callback(tracer, countDown, layerSize, startBarrier, doneBarrier); - })); + threads.emplace_back( + new std::thread([&tracer, &startBarrier, &doneBarrier, &callback] { + callback(tracer, countDown, layerSize, startBarrier, doneBarrier); + })); } size_t cntDown = countDown; while (cntDown-- > 0) { diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py index e2502990d5b78eb0db7bdfd0c8ef9fb6688016df..1e6482e3c18a6fe8289ca301c684cb9834cacb21 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/fluid/__init__.py @@ -20,6 +20,16 @@ from framework import * import executor from executor import * +import trainer +from trainer import Trainer +from trainer import Event + +import inferencer +from inferencer import Inferencer + +import params +from params import Params + import io import evaluator import initializer @@ -47,7 +57,8 @@ from parallel_executor import ParallelExecutor Tensor = LoDTensor -__all__ = framework.__all__ + 
executor.__all__ + concurrency.__all__ + [ +__all__ = framework.__all__ + executor.__all__ + concurrency.__all__ +\ + trainer.__all__ + inferencer.__all__ + params.__all__ + [ 'io', 'initializer', 'layers', @@ -111,7 +122,9 @@ def __bootstrap__(): 'eager_delete_scope' ] if core.is_compiled_with_cuda(): - read_env_flags += ['fraction_of_gpu_memory_to_use'] + read_env_flags += [ + 'fraction_of_gpu_memory_to_use', 'cudnn_algo_use_autotune' + ] core.init_gflags([sys.argv[0]] + ["--tryfromenv=" + ",".join(read_env_flags)]) core.init_glog(sys.argv[0]) diff --git a/python/paddle/fluid/distribute_transpiler.py b/python/paddle/fluid/distribute_transpiler.py index d07e0f696e79cfb98efc09a9f40d7961678b6af4..44542749f895ce52ded95302ef8ea38891715d97 100644 --- a/python/paddle/fluid/distribute_transpiler.py +++ b/python/paddle/fluid/distribute_transpiler.py @@ -143,7 +143,8 @@ class DistributeTranspiler: program=None, pservers="127.0.0.1:6174", trainers=1, - split_method=splitter.round_robin): + split_method=splitter.round_robin, + sync_mode=True): """ Transpile the program to distributed data-parallelism programs. The main_program will be transformed to use a remote parameter server @@ -184,6 +185,9 @@ class DistributeTranspiler: :param split_method: A function to determin how to split variables to different servers equally. :type split_method: function + :type sync_mode: boolean default True + :param sync_mode: if sync_mode is set True, it means that dist transpiler + will transpile the program into sync_mode pserver and trainer program. """ assert (callable(split_method)) if program is None: @@ -191,6 +195,7 @@ class DistributeTranspiler: self.origin_program = program self.trainer_num = trainers self.optimize_ops = optimize_ops + self.sync_mode = sync_mode # TODO(typhoonzero): currently trainer_id is fetched from cluster system # like Kubernetes, we should port this to use etcd later when developing # fluid distributed training with fault-tolerance. @@ -295,8 +300,11 @@ class DistributeTranspiler: inputs={"X": send_inputs}, outputs={"Out": send_outputs, "RPCClient": rpc_client_var}, - attrs={"endpoints": pserver_endpoints, - "epmap": eplist}) + attrs={ + "endpoints": pserver_endpoints, + "epmap": eplist, + "sync_mode": self.sync_mode + }) # step4: Concat the parameters splits together after recv. 
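As a usage sketch for the new `sync_mode` switch documented above: the flag is passed straight through `transpile()` and ends up as an attribute on the send op and on the pserver program built in the code that follows. The leading arguments of `transpile()` are not visible in this hunk, so the call below assumes the optimizer's `minimize()` outputs and a trainer id are passed first; endpoints and values are placeholders, not part of the patch.

import paddle.fluid as fluid
from paddle.fluid.distribute_transpiler import DistributeTranspiler

# Assumes a single-machine fluid program has already been built and avg_cost defined.
optimize_ops, params_grads = fluid.optimizer.SGD(learning_rate=0.01).minimize(avg_cost)

t = DistributeTranspiler()
t.transpile(
    optimize_ops,                 # assumed leading arguments, see note above
    params_grads,
    trainer_id=0,
    pservers="127.0.0.1:6174,127.0.0.1:6175",
    trainers=2,
    sync_mode=False)              # new flag: transpile into async pserver/trainer programs
pserver_prog = t.get_pserver_program("127.0.0.1:6174")
trainer_prog = t.get_trainer_program()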
for varname, splited_var in param_var_mapping.iteritems(): if len(splited_var) <= 1: @@ -356,7 +364,7 @@ class DistributeTranspiler: type=v.type, dtype=v.dtype, shape=v.shape) - if self.trainer_num > 1: + if self.sync_mode and self.trainer_num > 1: for trainer_id in xrange(self.trainer_num): var = pserver_program.global_block().create_var( name="%s.trainer_%d" % (orig_var_name, trainer_id), @@ -402,13 +410,13 @@ class DistributeTranspiler: for op in self.optimize_ops: if op.type == "scale": for in_name in op.input_arg_names: - if in_name.startswith("beta1_pow_acc") or\ - in_name.startswith("beta2_pow_acc"): + if in_name.startswith("beta1_pow_acc") or \ + in_name.startswith("beta2_pow_acc"): global_ops.append(op) - def __append_optimize_op__(op, block): + def __append_optimize_op__(op, block, grad_to_block_id): if self._is_opt_op(op): - self._append_pserver_ops(block, op, endpoint, + self._append_pserver_ops(block, op, endpoint, grad_to_block_id, default_main_program()) else: self._append_pserver_non_opt_ops(block, op) @@ -422,21 +430,22 @@ class DistributeTranspiler: self._append_pserver_non_opt_ops(lr_decay_block, op) # append op to the current block + grad_to_block_id = [] pre_block_idx = pserver_program.num_blocks - 1 for idx, opt_op in enumerate(opt_op_on_pserver): per_opt_block = pserver_program.create_block(pre_block_idx) for _, op in enumerate(self.optimize_ops): # optimizer is connected to itself if ufind.is_connected(op, opt_op) and op not in global_ops: - __append_optimize_op__(op, per_opt_block) + __append_optimize_op__(op, per_opt_block, grad_to_block_id) # append global ops - opt_state_block = None if global_ops: opt_state_block = pserver_program.create_block( pserver_program.num_blocks - 1) for glb_op in global_ops: - __append_optimize_op__(glb_op, opt_state_block) + __append_optimize_op__(glb_op, opt_state_block, + grad_to_block_id) # NOT USED: single block version: # @@ -472,7 +481,9 @@ class DistributeTranspiler: "OptimizeBlock": pserver_program.block(1), "endpoint": endpoint, "Fanin": self.trainer_num, - "PrefetchBlock": prefetch_block + "PrefetchBlock": prefetch_block, + "sync_mode": self.sync_mode, + "grad_to_block_id": grad_to_block_id }) pserver_program.sync_with_cpp() @@ -683,17 +694,6 @@ class DistributeTranspiler: self.table_name)], persistable=False) - # create grad vars in pserver program - table_grad_var = self.table_param_grad[1] - table_grad_list = [ - pserver_program.global_block().create_var( - name="%s.trainer_%d.pserver_%d" % - (table_grad_var.name, index, pserver_index), - type=table_grad_var.type, - shape=table_grad_var.shape, - dtype=table_grad_var.dtype) for index in range(self.trainer_num) - ] - # create table optimize block in pserver program table_opt_op = [ op for op in self.optimize_ops @@ -703,11 +703,24 @@ class DistributeTranspiler: # only support sgd now assert table_opt_op.type == "sgd" - # append sum op for table_grad_list - table_opt_block.append_op( - type="sum", - inputs={"X": table_grad_list}, - outputs={"Out": [grad_var]}) + if self.sync_mode: + # create grad vars in pserver program + table_grad_var = self.table_param_grad[1] + table_grad_list = [ + pserver_program.global_block().create_var( + name="%s.trainer_%d.pserver_%d" % + (table_grad_var.name, index, pserver_index), + type=table_grad_var.type, + shape=table_grad_var.shape, + dtype=table_grad_var.dtype) + for index in range(self.trainer_num) + ] + + # append sum op for table_grad_list + table_opt_block.append_op( + type="sum", + inputs={"X": table_grad_list}, + outputs={"Out": 
[grad_var]}) lr_var = pserver_program.global_block().vars[table_opt_op.input( "LearningRate")[0]] @@ -746,7 +759,7 @@ class DistributeTranspiler: for varname, splited in block_map.iteritems(): orig_var = program.global_block().var(varname) if len(splited) == 1: - if add_trainer_suffix: + if self.sync_mode and add_trainer_suffix: new_var_name = "%s.trainer_%d" % \ (orig_var.name, self.trainer_id) program.global_block().rename_var(varname, new_var_name) @@ -770,7 +783,7 @@ class DistributeTranspiler: if len(orig_shape) >= 2: splited_shape.extend(orig_shape[1:]) new_var_name = "" - if add_trainer_suffix: + if self.sync_mode and add_trainer_suffix: new_var_name = "%s.block%d.trainer_%d" % \ (varname, i, self.trainer_id) else: @@ -879,7 +892,7 @@ class DistributeTranspiler: return orig_var_name def _append_pserver_ops(self, optimize_block, opt_op, endpoint, - origin_program): + grad_to_block_id, origin_program): program = optimize_block.program pserver_block = program.global_block() new_inputs = dict() @@ -900,7 +913,9 @@ class DistributeTranspiler: return merged_var = \ pserver_block.vars[self._orig_varname(grad_block.name)] - if self.trainer_num > 1: + grad_to_block_id.append(merged_var.name + ":" + str( + optimize_block.idx)) + if self.sync_mode and self.trainer_num > 1: vars2merge = [] for i in xrange(self.trainer_num): per_trainer_name = "%s.trainer_%d" % \ @@ -918,6 +933,7 @@ class DistributeTranspiler: inputs={"X": merged_var}, outputs={"Out": merged_var}, attrs={"scale": 1.0 / float(self.trainer_num)}) + new_inputs[key] = merged_var elif key == "Param": # param is already created on global program diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 340882ea9e7b0e2a0c52749c771308c6b860ed07..2cdf010926b415665986214100256d6eea086d83 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -658,10 +658,10 @@ class Operator(object): class Block(object): def __init__(self, program, idx): self.desc = program.desc.block(idx) - self.vars = dict() # var_name --> var + self.vars = collections.OrderedDict() # var_name --> var self.ops = list() # operator list self.program = program - self.removed_vars = dict() + self.removed_vars = collections.OrderedDict() def __str__(self): return self.to_string(True) @@ -1070,16 +1070,25 @@ class Program(object): for t in targets: if not isinstance(t, Operator): if isinstance(t, Variable): - if t.op is None: - global_block = self.global_block() - for op in global_block.ops: - if t.name in op.output_arg_names: - t.op = op - break + # After transpiler processing, the op that output this + # variable maybe has been changed, so t.op is not reliable + # and we need to find the current op that generate this + # variable here. 
+ t.op = None + global_block = self.global_block() + for idx, op in enumerate(global_block.ops): + if t.name in op.output_arg_names: + t.op = op + break + t = t.op + if t is None: + raise ValueError( + "The target variable must have an " + "associated operator that generates it.") else: - raise ValueError(("All targets of prune() can only be " - "Variable or Operator.")) + raise ValueError("All targets of prune() can only be " + "Variable or Operator.") targets_idx.append([t.block.idx, t.idx]) res = Program() diff --git a/python/paddle/fluid/inference_transpiler.py b/python/paddle/fluid/inference_transpiler.py index 39b01610f96018e1775405a30147e77006cecc16..f4ad717b9e72e281940fab0cfd06296306b587fc 100644 --- a/python/paddle/fluid/inference_transpiler.py +++ b/python/paddle/fluid/inference_transpiler.py @@ -121,7 +121,60 @@ class InferenceTranspiler: # And a better solution will be considered later. program = program.clone() + def float16_transpile(self, program, place, scope=None): + ''' + Transpile the program desc and cast the weights to float16 data type to + enable float16 inference. + + Since the operator in a program desc will automatically choose the + right compute kernel to run based on the data type of the input tensor. + We actually don't need to change the program desc to run in float16 mode. + + However, in this way, users who are used to feeding and fetching tensors + of float32 data type when running typical inference may find it confusing + and difficult to run inference in float16 mode as they need to convert + input data to float16 dtype and then convert the results back to float32 + dtype to match the rest of code. + + So this function appends cast ops to the program desc where necessary so + that users are able to run inference in float16 mode while providing input + tensor (feed_holder) of float data type and obtaining output tensor + (fetch_holder) of float data type. + + Moreover, it is desired that when we have the scope and program desc to run + inference in float32 mode, we can use a single API to do the necessary + modification and then user can run float16 inference on the fly. To make + this happen, this function also create new parameters in the scope to have the + converted float16 weights and change the operators in program desc to use + these new parameters. + + :param program: program to transpile + :type program: Program + :param place: inference place + :type place: Place + :param scope: inference scope + :type scope: Scope + ''' + if scope is None: + scope = global_scope() + + self.scope = scope + self.place = place + self.block = program.block(0) + self.input_map = {} # store the input names should be adjusted + + self._modify_feed_fetch() + self._convert_param_to_float16() + self._adjust_input(skip=True) + self._remove_unused_var() + + # TODO(luotao): use clone() method to flush the program.desc in force, + # since some large program.desc will not be flushed immediately. + # And a better solution will be considered later. + program = program.clone() + # ====================== private transpiler functions ===================== + def _insert_bias_op(self, index, current_op, bn_op): ''' Construct elementwise_add operator for adding bias @@ -216,9 +269,27 @@ class InferenceTranspiler: # collect the renamed input self.input_map[bn_op.output("Y")[0]] = bias_op.output("Out")[0] - def _adjust_input(self): + def _adjust_input(self, skip=False): + ''' + Change the input variable name in operators. 
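A brief usage sketch for the new `float16_transpile` pass described above, mirroring the call added to test_image_classification.py later in this patch. The saved-model directory, input shape, and variable names here are placeholders; only the transpiler call itself comes from the patch.

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.inference_transpiler import InferenceTranspiler

place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
# "save_dirname" is a placeholder for a directory produced by save_inference_model.
[inference_program, feed_target_names,
 fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

t = InferenceTranspiler()
fp16_program = inference_program.clone()
t.float16_transpile(fp16_program, place)   # cast weights and insert cast ops in place

tensor_img = np.random.rand(1, 3, 32, 32).astype("float32")  # made-up float32 input
fp16_results = exe.run(fp16_program,
                       feed={feed_target_names[0]: tensor_img},
                       fetch_list=fetch_targets)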
+ + When we are in the process of modifying a program desc, we usually + replace some variables with some other variables, where we create + a dictionary input_map to record the one-to-one correspondence + between each old variable and the new one. + + After that, this function will search all the operators that use the + old variables and change the info in op to use the new variables. There + maybe some exceptions to this rule when we are using the float16 transpiler + and insert cast ops to cast float32 variable to float16 one. After we + insert the cast op to cast var_1 to var_1_fp16, we don't want to change + the input of cast op to var_1_fp16 after using this function. + ''' + skip_ops = {"cast"} for i in range(len(self.block.ops)): current_op = self.block.ops[i] + if skip and current_op.type in skip_ops: + continue for input_arg in current_op.input_arg_names: if input_arg in self.input_map: current_op.rename_input(input_arg, @@ -238,3 +309,138 @@ class InferenceTranspiler: for var in self.block.vars.keys(): if var not in args: self.block.remove_var(var) + + def _modify_feed_fetch(self): + ''' + Modify feed fetch op/vars for float16 inference. + + For each feed op: + feed_op->feed_target_var + + Change it to: + feed_op->feed_target_var->cast_op(from other dtype to float16)->tmp_var + + For each fetch op: + fetch_target_var->fetch_op + + Change it to: + tmp_var->cast_op(from float16 to other dtype)->fetch_target_var->fetch_op + + :return: None + ''' + + def find_op(var): + # It is possible that var.op is not up to date after some + # modifications to program desc. Here we force to make it up to date. + var.op = None + for op in self.block.ops: + if var.name in op.output_arg_names: + var.op = op + break + + if var.op is None: + raise ValueError("The target variable must have an " + "associated operator that generates it.") + + i = 0 + while i < len(self.block.ops): + cur_op = self.block.ops[i] + if cur_op.type == "feed": + var_name = cur_op.output("Out")[0] + tmp_var_name = var_name + ".fp16" + var = self.block.vars[var_name] + tmp_var = self.block.create_var( + name=tmp_var_name.encode('ascii'), + type=var.type, + dtype=core.VarDesc.VarType.FP16, + shape=var.shape, + persistable=var.persistable) + self.block.insert_op( + i + 1, + type="cast", + inputs={"X": var}, + outputs={"Out": tmp_var}, + attrs={ + 'in_dtype': int(var.dtype), + 'out_dtype': int(tmp_var.dtype) + }) + self.input_map[var_name] = tmp_var_name + i = i + 1 + elif cur_op.type == "fetch": + var_name = cur_op.input("X")[0] + tmp_var_name = var_name + ".fp16" + var = self.block.vars[var_name] + tmp_var = self.block.create_var( + name=tmp_var_name.encode('ascii'), + type=var.type, + dtype=core.VarDesc.VarType.FP16, + shape=var.shape, + persistable=var.persistable) + find_op(var) + var.op.rename_output(var_name, tmp_var_name) + self.block.insert_op( + i, + type="cast", + inputs={"X": tmp_var}, + outputs={"Out": var}, + attrs={ + 'in_dtype': int(tmp_var.dtype), + 'out_dtype': int(var.dtype) + }) + i = i + 1 + i = i + 1 + + def _convert_param_to_float16(self): + def _get_no_fp16_conversion_var_names(): + ''' + Get the set of input variable names that shouldn't be converted to float16. + + When we want to run inference in float16 mode, most parameters need to be + firstly converted to float16. However, there are some parameters that + shouldn't be converted to float16 because the corresponding operator + requires float32 parameters even in float16 mode (when the input data is + of float16 data type). 
Currently, the only operator that has this exclusion + is the batch norm op. + + :return: set of input variable names + :type var_names: set + ''' + op_names = {'batch_norm'} + var_names = [] + for op in self.block.ops: + if op.type in op_names: + var_names += op.input_arg_names + return set(var_names) + + def _should_be_converted(var): + return var.persistable and \ + var.name not in self.no_conversion_vars and \ + var.type != core.VarDesc.VarType.FEED_MINIBATCH and \ + var.type != core.VarDesc.VarType.FETCH_LIST + + self.no_conversion_vars = _get_no_fp16_conversion_var_names() + conversion_var_list = filter(_should_be_converted, + self.block.vars.values()) + for var in conversion_var_list: + fp16_var_name = var.name + ".fp16" + fp16_var = self.block.create_parameter( + name=fp16_var_name.encode('ascii'), + type=var.type, + dtype=core.VarDesc.VarType.FP16, + shape=var.shape) + + # cast the data in the tensor of the original var to float16 + # data type and store it in the tensor of the new float16 var + self.scope.var(fp16_var_name) + fp16_tensor = self.scope.find_var(fp16_var_name).get_tensor() + tensor = np.array(self.scope.find_var(var.name).get_tensor()) + # After the old tensor data is converted to np.float16, view(np.uint16) + # is used so that the internal memory of the numpy array will be + # reinterpreted to be of np.uint16 data type, which is binded to fluid + # float16 data type via the help of pybind in tensor_py.h. + fp16_tensor.set( + tensor.astype(np.float16).view(np.uint16), self.place) + + # old var will be replaced by the fp16 var in program desc + self.input_map[var.name] = fp16_var_name + self.block.remove_var(var.name) diff --git a/python/paddle/fluid/inferencer.py b/python/paddle/fluid/inferencer.py new file mode 100644 index 0000000000000000000000000000000000000000..3ea50bf196d00152e6579623c981ecbfb57b8e3b --- /dev/null +++ b/python/paddle/fluid/inferencer.py @@ -0,0 +1,31 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__all__ = ['Inferencer', ] + + +class Inferencer(object): + def __init__(self, network_func, params, place=None): + # 1. we need to generate a framework.Program by calling + # network_func. Reference: fluid.program_guard in test_word2vec.py + + # 2. move the default_main_program to self.program. + + # 3. run the default_startup program. 
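Returning to the parameter conversion above: the float16 weights are written through a `view(np.uint16)` because fluid exposes its FP16 tensor type to numpy as uint16, so after the real `astype` conversion the bytes only need to be relabeled, not converted again. A small, Paddle-independent numpy illustration of that distinction:

import numpy as np

fp32 = np.array([1.0, 0.5, -2.0], dtype=np.float32)
fp16 = fp32.astype(np.float16)   # real conversion: values are rounded to half precision
bits = fp16.view(np.uint16)      # reinterpretation: same bytes, different dtype label
assert bits.tobytes() == fp16.tobytes()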
+ self.params = params + self.place = place + + def infer(self, inputs): + # run self.program + pass diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py index f7f1ca2598a3e679b24fa8d62c52e4f4de788fe2..08b8a878b6490bc989620085f3f9c06c7032d882 100644 --- a/python/paddle/fluid/io.py +++ b/python/paddle/fluid/io.py @@ -336,7 +336,7 @@ def save_inference_model(dirname, if main_program is None: main_program = default_main_program() - copy_program = main_program + copy_program = main_program.clone() if not os.path.isdir(dirname): os.makedirs(dirname) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 9a0c328033cdfdae39da050fc482abba17032dd9..7f16bf2a0c430213b2f52dafe8fa948b9e350f96 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -79,6 +79,7 @@ __all__ = [ 'lrn', 'pad', 'label_smooth', + 'roi_pool', ] @@ -3759,3 +3760,53 @@ def label_smooth(label, outputs={"Out": smooth_label}, attrs={"epsilon": float(epsilon)}) return smooth_label + + +def roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0): + """ + Region of interest pooling (also known as RoI pooling) is to perform + is to perform max pooling on inputs of nonuniform sizes to obtain + fixed-size feature maps (e.g. 7*7). + The operator has three steps: + 1. Dividing each region proposal into equal-sized sections with + the pooled_width and pooled_height + 2. Finding the largest value in each section + 3. Copying these max values to the output buffer + + Args: + input (Variable): The input for ROI pooling. + rois (Variable): ROIs (Regions of Interest) to pool over. It should + be a 2-D one level LoTensor of shape [num_rois, 4]. + The layout is [x1, y1, x2, y2], where (x1, y1) + is the top left coordinates, and (x2, y2) is the + bottom right coordinates. The num_rois is the + total number of ROIs in this batch data. + pooled_height (integer): The pooled output height. Default: 1 + pooled_width (integer): The pooled output width. Default: 1 + spatial_scale (float): Multiplicative spatial scale factor. To + translate ROI coords from their input scale + to the scale used when pooling. Default: 1.0 + + Returns: + pool_out (Variable): The output is a 4-D tensor of the shape + (num_rois, channels, pooled_h, pooled_w). 
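For reference, a `roi_pool` call consistent with the signature above, mirroring the unit test added later in this patch; keyword arguments are used throughout, since positional arguments may not follow keyword arguments in Python. The input shapes and the spatial scale are placeholders.

import paddle.fluid as fluid

x = fluid.layers.data(name="x", shape=[256, 30, 30], dtype="float32")
rois = fluid.layers.data(name="rois", shape=[4], dtype="float32", lod_level=1)
pool_out = fluid.layers.roi_pool(
    input=x,
    rois=rois,
    pooled_height=7,
    pooled_width=7,
    spatial_scale=0.6)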
+ + Examples: + pool_out = fluid.layers.roi_pool(input=x, rois=rois, 7, 7, 1.0) + """ + helper = LayerHelper('roi_pool', **locals()) + dtype = helper.input_dtype() + pool_out = helper.create_tmp_variable(dtype) + argmaxes = helper.create_tmp_variable(dtype='int32') + helper.append_op( + type="roi_pool", + inputs={"X": input, + "ROIs": rois}, + outputs={"Out": pool_out, + "Argmax": argmaxes}, + attrs={ + "pooled_height": pooled_height, + "pooled_width": pooled_width, + "spatial_scale": spatial_scale + }) + return pool_out diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index da066c34bdeba1f1b76f8d1cafd9244b2f7708fa..4be0dc6a6bfeed3ac254f5c363d3560973c031b4 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -193,10 +193,7 @@ def assign(input, output): helper = LayerHelper('assign', **locals()) if isinstance(input, Variable): helper.append_op( - type='scale', - inputs={'X': [input]}, - outputs={'Out': [output]}, - attrs={'scale': 1.0}) + type='assign', inputs={'X': [input]}, outputs={'Out': [output]}) elif isinstance(input, numpy.ndarray): dtype = convert_np_dtype_to_dtype_(input.dtype) if dtype == VarDesc.VarType.FP32: diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py index 4adbb2ea99b58c78c5c08c7ac8a556ca1de1615e..f4128dcbe935ea5caf63b8c0b377f9bed36d6705 100644 --- a/python/paddle/fluid/parallel_executor.py +++ b/python/paddle/fluid/parallel_executor.py @@ -30,7 +30,7 @@ class ParallelExecutor(object): num_threads=None, allow_op_delay=False, share_vars_from=None, - customize_loss_grad=False): + use_default_grad_scale=True): """ ParallelExecutor can run program in parallel. @@ -46,6 +46,11 @@ class ParallelExecutor(object): improve performance in some cases, defalut False. share_vars_from(ParallelExecutor, default None): If provied, it will share variables from the specified ParallelExecutor. + use_default_grad_scale(bool, default True): If set True, a default + scale value equal to `1./device_count` would be multiplied to + gradients of each device and scaled gradients would be + aggregated. Otherwise, a customized scale value should be fed + to the network. Returns: A ParallelExecutor object. @@ -124,7 +129,7 @@ class ParallelExecutor(object): scope, local_scopes, allow_op_delay, - customize_loss_grad) + use_default_grad_scale) self.scope = scope def run(self, fetch_list, feed=None, feed_dict=None): diff --git a/python/paddle/fluid/params.py b/python/paddle/fluid/params.py new file mode 100644 index 0000000000000000000000000000000000000000..a5d257e53a2958acd1b8f6ef29d0f9f531b36678 --- /dev/null +++ b/python/paddle/fluid/params.py @@ -0,0 +1,39 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . 
import core + +__all__ = ['Params', ] + + +class Params(object): + def __init__(self, path=None): + self.scope = core.Scope() + + if path: + self._load(path) + + def _load(self, path): + # reference: load_persistables in io.py + pass + + def save(self, path): + # reference: save_persistables in io.py + pass + + def add_params(self, scope): + # take the keys from the scope, + # if not already exists in self.scope, + # add the key and value into self.scope. + pass diff --git a/python/paddle/fluid/tests/book/image_classification/notest_image_classification_resnet.py b/python/paddle/fluid/tests/book/image_classification/notest_image_classification_resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..5cbfdef91a64ae7c58d060edfb7b9f3bc8160f2b --- /dev/null +++ b/python/paddle/fluid/tests/book/image_classification/notest_image_classification_resnet.py @@ -0,0 +1,145 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import paddle +import paddle.fluid as fluid +import numpy + + +def resnet_cifar10(input, depth=32): + def conv_bn_layer(input, + ch_out, + filter_size, + stride, + padding, + act='relu', + bias_attr=False): + tmp = fluid.layers.conv2d( + input=input, + filter_size=filter_size, + num_filters=ch_out, + stride=stride, + padding=padding, + act=None, + bias_attr=bias_attr) + return fluid.layers.batch_norm(input=tmp, act=act) + + def shortcut(input, ch_in, ch_out, stride): + if ch_in != ch_out: + return conv_bn_layer(input, ch_out, 1, stride, 0, None) + else: + return input + + def basicblock(input, ch_in, ch_out, stride): + tmp = conv_bn_layer(input, ch_out, 3, stride, 1) + tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None, bias_attr=True) + short = shortcut(input, ch_in, ch_out, stride) + return fluid.layers.elementwise_add(x=tmp, y=short, act='relu') + + def layer_warp(block_func, input, ch_in, ch_out, count, stride): + tmp = block_func(input, ch_in, ch_out, stride) + for i in range(1, count): + tmp = block_func(tmp, ch_out, ch_out, 1) + return tmp + + assert (depth - 2) % 6 == 0 + n = (depth - 2) / 6 + conv1 = conv_bn_layer( + input=input, ch_out=16, filter_size=3, stride=1, padding=1) + res1 = layer_warp(basicblock, conv1, 16, 16, n, 1) + res2 = layer_warp(basicblock, res1, 16, 32, n, 2) + res3 = layer_warp(basicblock, res2, 32, 64, n, 2) + pool = fluid.layers.pool2d( + input=res3, pool_size=8, pool_type='avg', pool_stride=1) + return pool + + +def inference_network(): + classdim = 10 + data_shape = [3, 32, 32] + images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32') + net = resnet_cifar10(images, 32) + predict = fluid.layers.fc(input=net, size=classdim, act='softmax') + return predict + + +def train_network(): + predict = inference_network() + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(cost) + accuracy = 
fluid.layers.accuracy(input=predict, label=label) + return avg_cost, accuracy + + +def train(use_cuda, save_path): + BATCH_SIZE = 128 + EPOCH_NUM = 1 + + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.cifar.train10(), buf_size=128 * 10), + batch_size=BATCH_SIZE) + + test_reader = paddle.batch( + paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE) + + def event_handler(event): + if isinstance(event, fluid.EndIteration): + if (event.batch_id % 10) == 0: + avg_cost, accuracy = trainer.test(reader=test_reader) + + print('BatchID {1:04}, Loss {2:2.2}, Acc {3:2.2}'.format( + event.batch_id + 1, avg_cost, accuracy)) + + if accuracy > 0.01: # Low threshold for speeding up CI + trainer.params.save(save_path) + return + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + trainer = fluid.Trainer( + train_network, + optimizer=fluid.optimizer.Adam(learning_rate=0.001), + place=place, + event_handler=event_handler) + trainer.train(train_reader, EPOCH_NUM, event_handler=event_handler) + + +def infer(use_cuda, save_path): + params = fluid.Params(save_path) + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + inferencer = fluid.Inferencer(inference_network, params, place=place) + + # The input's dimension of conv should be 4-D or 5-D. + # Use normilized image pixels as input data, which should be in the range + # [0, 1.0]. + tensor_img = numpy.random.rand(1, 3, 32, 32).astype("float32") + results = inferencer.infer({'pixel': tensor_img}) + + print("infer results: ", results) + + +def main(use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + save_path = "image_classification_resnet.inference.model" + train(use_cuda, save_path) + infer(use_cuda, save_path) + + +if __name__ == '__main__': + for use_cuda in (False, True): + main(use_cuda=use_cuda) diff --git a/python/paddle/fluid/tests/book/image_classification/notest_image_classification_vgg.py b/python/paddle/fluid/tests/book/image_classification/notest_image_classification_vgg.py new file mode 100644 index 0000000000000000000000000000000000000000..8a6a5ff61a913ad6cbc609f8376afcbc621d60e2 --- /dev/null +++ b/python/paddle/fluid/tests/book/image_classification/notest_image_classification_vgg.py @@ -0,0 +1,124 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function + +import paddle +import paddle.fluid as fluid +import numpy + + +def vgg16_bn_drop(input): + def conv_block(input, num_filter, groups, dropouts): + return fluid.nets.img_conv_group( + input=input, + pool_size=2, + pool_stride=2, + conv_num_filter=[num_filter] * groups, + conv_filter_size=3, + conv_act='relu', + conv_with_batchnorm=True, + conv_batchnorm_drop_rate=dropouts, + pool_type='max') + + conv1 = conv_block(input, 64, 2, [0.3, 0]) + conv2 = conv_block(conv1, 128, 2, [0.4, 0]) + conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0]) + conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0]) + conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0]) + + drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5) + fc1 = fluid.layers.fc(input=drop, size=4096, act=None) + bn = fluid.layers.batch_norm(input=fc1, act='relu') + drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5) + fc2 = fluid.layers.fc(input=drop2, size=4096, act=None) + return fc2 + + +def inference_network(): + classdim = 10 + data_shape = [3, 32, 32] + images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32') + net = vgg16_bn_drop(images) + predict = fluid.layers.fc(input=net, size=classdim, act='softmax') + return predict + + +def train_network(): + predict = inference_network() + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(cost) + accuracy = fluid.layers.accuracy(input=predict, label=label) + return avg_cost, accuracy + + +def train(use_cuda, save_path): + BATCH_SIZE = 128 + EPOCH_NUM = 1 + + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.cifar.train10(), buf_size=128 * 10), + batch_size=BATCH_SIZE) + + test_reader = paddle.batch( + paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE) + + def event_handler(event): + if isinstance(event, fluid.EndIteration): + if (event.batch_id % 10) == 0: + avg_cost, accuracy = trainer.test(reader=test_reader) + + print('BatchID {1:04}, Loss {2:2.2}, Acc {3:2.2}'.format( + event.batch_id + 1, avg_cost, accuracy)) + + if accuracy > 0.01: # Low threshold for speeding up CI + trainer.params.save(save_path) + return + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + trainer = fluid.Trainer( + train_network, + optimizer=fluid.optimizer.Adam(learning_rate=0.001), + place=place, + event_handler=event_handler) + trainer.train(train_reader, EPOCH_NUM, event_handler=event_handler) + + +def infer(use_cuda, save_path): + params = fluid.Params(save_path) + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + inferencer = fluid.Inferencer(inference_network, params, place=place) + + # The input's dimension of conv should be 4-D or 5-D. + # Use normilized image pixels as input data, which should be in the range + # [0, 1.0]. 
+ tensor_img = numpy.random.rand(1, 3, 32, 32).astype("float32") + results = inferencer.infer({'pixel': tensor_img}) + + print("infer results: ", results) + + +def main(use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + save_path = "image_classification_vgg.inference.model" + train(use_cuda, save_path) + infer(use_cuda, save_path) + + +if __name__ == '__main__': + for use_cuda in (False, True): + main(use_cuda=use_cuda) diff --git a/python/paddle/fluid/tests/book/test_image_classification.py b/python/paddle/fluid/tests/book/test_image_classification.py index db96c82ce2d8376b029e9dcc54ffab669f1def9a..09f994c37020c4612223e4168be4cf535157f60b 100644 --- a/python/paddle/fluid/tests/book/test_image_classification.py +++ b/python/paddle/fluid/tests/book/test_image_classification.py @@ -252,6 +252,26 @@ def infer(use_cuda, save_dirname=None): fetch_targets, exe, inference_transpiler_program) + if use_cuda and fluid.core.is_float16_supported(place): + # Use float16_transpiler to speedup + fp16_transpiler_program = inference_transpiler_program.clone() + t.float16_transpile(fp16_transpiler_program, place) + + fp16_results = exe.run(fp16_transpiler_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) + + assert len(results[0]) == len(fp16_results[0]) + for i in range(len(results[0])): + np.testing.assert_almost_equal( + results[0][i], fp16_results[0][i], decimal=2) + + print("float16 infer results: ", fp16_results[0]) + + fluid.io.save_inference_model("float16_" + save_dirname, + feed_target_names, fetch_targets, exe, + fp16_transpiler_program) + def main(net_type, use_cuda, is_local=True): if use_cuda and not fluid.core.is_compiled_with_cuda(): diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py index 6afb6fa6e753d3d6478313c840b158c3895b3efb..a0e78a460703778b46191b50c75e92bfbcaec411 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py @@ -275,10 +275,7 @@ class TestFP16BatchNormOpInference(TestBatchNormOpInference): class TestBatchNormOpTraining(unittest.TestCase): def __assert_close(self, tensor, np_array, msg, atol=1e-4): - if not np.allclose(np.array(tensor), np_array, atol=atol): - import pdb - pdb.set_trace() - self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg) + np.allclose(np.array(tensor), np_array, atol=atol) def test_forward_backward(self): def test_with_place(place, data_layout, shape): diff --git a/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py b/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py index e33436b63c0e0b41f5e4a5bc6190517d7c648277..8f62ac20a5c13257a1519128292e2abc4962bf84 100644 --- a/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py +++ b/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py @@ -14,6 +14,7 @@ import unittest import numpy as np +import numpy.random as random import sys import math from op_test import OpTest @@ -25,14 +26,27 @@ class TestIOUSimilarityOp(OpTest): def setUp(self): self.op_type = "iou_similarity" - self.boxes1 = np.array( - [[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]).astype('float32') - self.boxes2 = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], - [0.0, 0.0, 20.0, 20.0]]).astype('float32') - self.output = np.array( - [[2.0 / 16.0, 0, 6.0 / 400.0], - [1.0 / 16.0, 0.0, 5.0 / 400.0]]).astype('float32') - + self.boxes1 = random.rand(2, 4).astype('float32') 
+ self.boxes2 = random.rand(3, 4).astype('float32') + self.output = random.rand(2, 3).astype('float32') + for row in range(self.boxes1.shape[0]): + for col in range(self.boxes2.shape[0]): + xmin1, ymin1, xmax1, ymax1 = self.boxes1[row] + xmin2, ymin2, xmax2, ymax2 = self.boxes2[col] + area1 = (ymax1 - ymin1) * (xmax1 - xmin1) + area2 = (ymax2 - ymin2) * (xmax2 - xmin2) + inter_xmax = min(xmax1, xmax2) + inter_ymax = min(ymax1, ymax2) + inter_xmin = max(xmin1, xmin2) + inter_ymin = max(ymin1, ymin2) + inter_height = inter_ymax - inter_ymin + inter_width = inter_xmax - inter_xmin + inter_height = max(inter_height, 0) + inter_width = max(inter_width, 0) + inter_area = inter_width * inter_height + union_area = area1 + area2 - inter_area + sim_score = inter_area / union_area + self.output[row, col] = sim_score self.inputs = {'X': self.boxes1, 'Y': self.boxes2} self.outputs = {'Out': self.output} diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 17d6afdee161426e5da398ffa2ec148a027c905e..c5414abf0fee6b686dccf7c97e9c6d5408ecf62a 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -359,6 +359,16 @@ class TestBook(unittest.TestCase): self.assertIsNotNone(indices) print(str(program)) + def test_roi_pool(self): + program = Program() + with program_guard(program): + x = layers.data(name="x", shape=[256, 30, 30], dtype="float32") + rois = layers.data( + name="rois", shape=[4], dtype="float32", lod_level=1) + output = layers.roi_pool(x, rois, 7, 7, 0.6) + self.assertIsNotNone(output) + print(str(program)) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_roi_pool_op.py b/python/paddle/fluid/tests/unittests/test_roi_pool_op.py index e556d51b021217063e23190e40bc0e8f9fdc816c..3d754aff3a73e7168e2123483b26e5e3a3585a4e 100644 --- a/python/paddle/fluid/tests/unittests/test_roi_pool_op.py +++ b/python/paddle/fluid/tests/unittests/test_roi_pool_op.py @@ -25,7 +25,7 @@ class TestROIPoolOp(OpTest): self.make_rois() self.calc_roi_pool() - self.inputs = {'X': self.x, 'ROIs': self.rois} + self.inputs = {'X': self.x, 'ROIs': (self.rois[:, 1:5], self.rois_lod)} self.attrs = { 'spatial_scale': self.spatial_scale, @@ -36,7 +36,7 @@ class TestROIPoolOp(OpTest): self.outputs = {'Out': self.outs, 'Argmax': self.argmaxes} def init_test_case(self): - self.batch_size = 5 + self.batch_size = 3 self.channels = 3 self.height = 6 self.width = 4 @@ -47,7 +47,6 @@ class TestROIPoolOp(OpTest): self.spatial_scale = 1.0 / 4.0 self.pooled_height = 2 self.pooled_width = 2 - self.rois_num = 2 self.x = np.random.random(self.x_dim).astype('float32') @@ -106,20 +105,24 @@ class TestROIPoolOp(OpTest): def make_rois(self): rois = [] - batch_ids = np.random.randint(0, self.batch_size, size=self.rois_num) - for i in range(self.rois_num): - x1 = np.random.random_integers( - 0, self.width / self.spatial_scale - self.pooled_width) - y1 = np.random.random_integers( - 0, self.height / self.spatial_scale - self.pooled_height) - - x2 = np.random.random_integers(x1 + self.pooled_width, - self.width / self.spatial_scale) - y2 = np.random.random_integers(y1 + self.pooled_height, - self.height / self.spatial_scale) - - roi = [batch_ids[i], x1, y1, x2, y2] - rois.append(roi) + self.rois_lod = [[]] + for bno in range(self.batch_size): + self.rois_lod[0].append(len(rois)) + for i in range(bno + 1): + x1 = np.random.random_integers( + 0, self.width / self.spatial_scale - 
self.pooled_width) + y1 = np.random.random_integers( + 0, self.height / self.spatial_scale - self.pooled_height) + + x2 = np.random.random_integers(x1 + self.pooled_width, + self.width / self.spatial_scale) + y2 = np.random.random_integers(y1 + self.pooled_height, + self.height / self.spatial_scale) + + roi = [bno, x1, y1, x2, y2] + rois.append(roi) + self.rois_lod[0].append(len(rois)) + self.rois_num = len(rois) self.rois = np.array(rois).astype("int64") def setUp(self): diff --git a/python/paddle/fluid/trainer.py b/python/paddle/fluid/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..aeda67650205a473486b05fdd5b4364753fb18ca --- /dev/null +++ b/python/paddle/fluid/trainer.py @@ -0,0 +1,55 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__all__ = [ + 'Event', + 'Trainer', +] + + +class Event(object): + BEGIN_EPOCH = 0 + END_EPOCH = 1 + BEGIN_STEP = 2 + END_STEP = 3 + + def __init__(self): + self.step = 0 + self.epoch = 0 + self.type = Event.BEGIN_EPOCH + + +class Trainer(object): + def __init__(self, network_func, optimizer, params=None, place=None): + # 1. we need to generate a framework.Program by calling + # network_func. Reference: fluid.program_guard in + # test_word2vec.py + + # 2. move the default_main_program to self.program and run the + # default_startup program on an empty core.Scope() + + # 3. call self.params.add_vars with the initialized scope, it + # will add the new vars of the initialized scope into + # self.params. 
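The `Event` and `Trainer` classes above are still skeletons at this point. As a rough sketch of the event-handler pattern they imply (the dispatch logic below is an assumption; only the field names and constants come from the class itself):

from paddle.fluid.trainer import Event

def event_handler(event):
    # event.type marks epoch/step boundaries; event.step and event.epoch are counters.
    if event.type == Event.END_STEP and event.step % 100 == 0:
        print("epoch %d, step %d" % (event.epoch, event.step))
    elif event.type == Event.END_EPOCH:
        print("finished epoch %d" % event.epoch)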
+ self.network_func = network_func + self.optimizer = optimizer + self.params = params + self.place = place + # TODO(helin): support distributed training + + def train(self, reader, num_epochs, event_handler): + pass + + def test(self, reader): + pass diff --git a/python/requirements.txt b/python/requirements.txt index daf3f368b92408408897e33223118fe3647aa6de..ea827e9d5a0dcf8eb2ede1f6eaa88c777a138816 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -8,3 +8,4 @@ scipy>=0.19.0 Pillow nltk>=3.2.2 graphviz +LinkChecker diff --git a/tools/aws_benchmarking/server/cluster_master.py b/tools/aws_benchmarking/server/cluster_master.py index 7952e61159ec31a4be5394b50f30cbc20f9b414e..1333a942bf013a8182585b56e5843803c56945b1 100644 --- a/tools/aws_benchmarking/server/cluster_master.py +++ b/tools/aws_benchmarking/server/cluster_master.py @@ -640,6 +640,7 @@ def start_server(args): elif request_path == "/cleanup": self._set_headers() logging.info("Received request to cleanup cluster") + args.no_clean_up = False cleanup(args.task_name) self.wfile.write("cleanup in progress") diff --git a/tools/manylinux1/build_scripts/install_nccl2.sh b/tools/manylinux1/build_scripts/install_nccl2.sh index 7efc1fe8651a02684c92fa4b924a4f4416503710..282c5c290da14bd3c04346ab01fdb48423c23f88 100644 --- a/tools/manylinux1/build_scripts/install_nccl2.sh +++ b/tools/manylinux1/build_scripts/install_nccl2.sh @@ -1,11 +1,18 @@ #!/bin/bash -DEB="nccl-repo-ubuntu1604-2.1.4-ga-cuda8.0_1-1_amd64.deb" +VERSION=$(nvcc --version | grep release | grep -oEi "release ([0-9]+)\.([0-9])"| sed "s/release //") +if [ "$VERSION" == "9.0" ]; then + DEB="nccl-repo-ubuntu1604-2.1.15-ga-cuda9.0_1-1_amd64.deb" + URL="http://nccl2-deb.gz.bcebos.com/nccl-repo-ubuntu1604-2.1.15-ga-cuda9.0_1-1_amd64.deb" +else + DEB="nccl-repo-ubuntu1604-2.1.15-ga-cuda8.0_1-1_amd64.deb" + URL="http://nccl2-deb.gz.bcebos.com/nccl-repo-ubuntu1604-2.1.15-ga-cuda8.0_1-1_amd64.deb" +fi + DIR="/nccl2" mkdir -p $DIR # we cached the nccl2 deb package in BOS, so we can download it with wget # install nccl2: http://docs.nvidia.com/deeplearning/sdk/nccl-install-guide/index.html#down -wget -O $DIR/$DEB \ - "http://nccl2-deb.gz.bcebos.com/nccl-repo-ubuntu1604-2.1.4-ga-cuda8.0_1-1_amd64.deb?responseContentDisposition=attachment" +wget -O $DIR/$DEB $URL cd $DIR && ar x $DEB && tar xf data.tar.xz DEBS=$(find ./var/ -name "*.deb")