Commit 9945265f authored by Luo Tao

Merge branch 'develop' into tr_convert_init

@@ -12,7 +12,7 @@ services:
os:
- linux
env:
- - JOB=build_doc
- JOB=doc
- JOB=check_style
- JOB=build_android
addons:
@@ -36,21 +36,18 @@ addons:
- ccache
ssh_known_hosts: 13.229.163.131
before_install:
- - if [[ "$JOB" == "check_style" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi
- # Paddle is using protobuf 3.1 currently. Protobuf 3.2 breaks the compatibility. So we specify the python
- # protobuf version.
- sudo pip install -r $TRAVIS_BUILD_DIR/python/requirements.txt
- - sudo pip install wheel sphinx==1.5.6 recommonmark sphinx-rtd-theme==0.1.9 virtualenv pre-commit LinkChecker
- sudo pip install wheel sphinx==1.5.6 recommonmark sphinx-rtd-theme==0.1.9 virtualenv pre-commit
- |
function timeout() { perl -e 'alarm shift; exec @ARGV' "$@"; }
script:
- |
# 43min timeout
- if [[ "$JOB" == "build_android" ]]; then timeout 2580 docker run -it --rm -v "$TRAVIS_BUILD_DIR:/paddle" paddlepaddle/paddle:latest-dev-android;
- else timeout 2580 paddle/scripts/travis/${JOB}.sh; fi;
- RESULT=$?; if [ $RESULT -eq 0 ] || [ $RESULT -eq 142 ]; then true; else exit 1; fi;
if [[ "$JOB" != "doc" ]]; then timeout 2580 paddle/scripts/paddle_docker_build.sh ${JOB}; else paddle/scripts/paddle_build.sh ${JOB}; fi;
if [ $? -eq 0 ] || [ $? -eq 142 ]; then true; else exit 1; fi;
- |
- if [[ "$JOB" != "build_doc" ]]; then exit 0; fi;
if [[ "$JOB" != "doc" ]]; then exit 0; fi;
- # For document only
if [[ "$TRAVIS_PULL_REQUEST" != "false" ]]; then exit 0; fi;
if [[ "$TRAVIS_BRANCH" != "develop" && ! "$TRAVIS_BRANCH" =~ ^v[[:digit:]]+\.[[:digit:]]+(\.[[:digit:]]+)?(-\S*)?$ ]]; then exit 0; fi;
export DEPLOY_DOCS_SH=https://raw.githubusercontent.com/PaddlePaddle/PaddlePaddle.org/master/scripts/deploy/deploy_docs.sh
......
@@ -2,12 +2,14 @@
|---|---|
| abhinavarora | Abhinav Arora |
| backyes | Yan-Fei Wang |
| baiyfbupt | Yi-Fan Bai |
| beckett1124 | Bin Qi |
- | JiayiFeng | Jia-Yi Feng |
| chengxiaohua1105 | Xiao-Hua Cheng |
| cxwangyi, yiwangbaidu, wangkuiyi | Yi Wang |
| cxysteven | Xing-Yi Cheng |
| dzhwinter | Zhi-Hong Dong |
| dragonwarrior | Long Wang |
| dyning | Yuning Du |
| emailweixu | Wei Xu |
| gangliao | Gang Liao |
| gongweibao | Wei-Bao Gong |
@@ -16,6 +18,9 @@
| hedaoyuan | Dao-Yuan He |
| helinwang | He-Lin Wang |
| jacquesqiao | Long-Fei Qiao |
| jczaja | Jacek Czaja |
| JiayiFeng | Jia-Yi Feng |
| kbinias | Krzysztof Binias |
| kuke | Yi-Bing Liu |
| lcy-seso | Ying Cao |
| lipeng-unisound | Peng Li |
@@ -24,15 +29,20 @@
| llxxxll | Yong-Feng Liu |
| luotao01 | Tao Luo |
| lzhao4ever | Liang Zhao |
| mozga-intel | Mateusz Ozga |
| NHZlX | Zhao-Long Xing |
| Noplz | Yuan Gao |
| pakchoi | Chuan-Jiang Song |
| panyx0718 | Xin Pan |
| pengli09 | Peng Li |
| pkuyym | Ya-Ming Yang |
| pzelazko-intel | Pawel Zelazko |
| QiJune | Jun Qi |
| qingqing01 | Qing-Qing Dang |
| reyoung | Yang Yu |
| Superjom | Chun-Wei Yan |
| tianbingsz | Tian-Bing Xu |
| tpatejko | Tomasz Patejko |
| typhoonzero | Yi Wu |
| wanghaoshuang | Hao-Shuang Wang |
| wangyang59 | Yang Wang |
......
# A image for building paddle binaries
# Use cuda devel base image for both cpu and gpu environment
# When you modify it, please be aware of cudnn-runtime version
# and libcudnn.so.x in paddle/scripts/docker/build.sh
FROM nvidia/cuda:8.0-cudnn7-devel-ubuntu16.04
@@ -24,7 +23,7 @@ ENV HOME /root
COPY ./paddle/scripts/docker/root/ /root/
RUN apt-get update && \
- apt-get install -y \
apt-get install -y --allow-downgrades \
git python-pip python-dev openssh-server bison \
libnccl2=2.1.2-1+cuda8.0 libnccl-dev=2.1.2-1+cuda8.0 \
wget unzip unrar tar xz-utils bzip2 gzip coreutils ntp \
@@ -33,7 +32,7 @@ RUN apt-get update && \
automake locales clang-format swig doxygen cmake \
liblapack-dev liblapacke-dev \
clang-3.8 llvm-3.8 libclang-3.8-dev \
- net-tools libtool && \
net-tools libtool ccache && \
apt-get clean -y
# Install Go and glide
......
@@ -21,7 +21,7 @@ import argparse
import time
import distutils.util
- import paddle.v2 as paddle
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
......
@@ -20,7 +20,7 @@ import numpy as np
import argparse
import time
- import paddle.v2 as paddle
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
......
@@ -23,7 +23,7 @@ import time
import cProfile, pstats, StringIO
- import paddle.v2 as paddle
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.profiler as profiler
......
@@ -23,10 +23,10 @@ import random
import time
import numpy
- import paddle.v2 as paddle
import paddle
- import paddle.v2.dataset.imdb as imdb
import paddle.dataset.imdb as imdb
import paddle.fluid as fluid
- from paddle.v2 import batch
import paddle.batch as batch
import paddle.fluid.profiler as profiler
......
@@ -17,7 +17,7 @@ from __future__ import print_function
import sys
import time
import numpy as np
- import paddle.v2 as paddle
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import argparse
......
@@ -172,6 +172,8 @@ set(CUDA_PROPAGATE_HOST_FLAGS OFF)
list(APPEND CUDA_NVCC_FLAGS "-std=c++11")
list(APPEND CUDA_NVCC_FLAGS "--use_fast_math")
list(APPEND CUDA_NVCC_FLAGS "-Xcompiler -fPIC")
# in cuda9, suppress cuda warning on eigen
list(APPEND CUDA_NVCC_FLAGS "-w")
# Set :expt-relaxed-constexpr to suppress Eigen warnings
list(APPEND CUDA_NVCC_FLAGS "--expt-relaxed-constexpr")
......
@@ -22,7 +22,9 @@ else()
extern_eigen3
${EXTERNAL_PROJECT_LOG_ARGS}
GIT_REPOSITORY "https://github.com/RLovelett/eigen.git"
- GIT_TAG 70661066beef694cadf6c304d0d07e0758825c10
# eigen on cuda9.1 missing header of math_funtions.hpp
# https://stackoverflow.com/questions/43113508/math-functions-hpp-not-found-when-using-cuda-with-eigen
GIT_TAG 917060c364181f33a735dc023818d5a54f60e54c
PREFIX ${EIGEN_SOURCE_DIR}
UPDATE_COMMAND ""
CONFIGURE_COMMAND ""
......
@@ -38,8 +38,7 @@ ENDIF()
ExternalProject_Add(
extern_warpctc
${EXTERNAL_PROJECT_LOG_ARGS}
- GIT_REPOSITORY "https://github.com/gangliao/warp-ctc.git"
GIT_REPOSITORY "https://github.com/dzhwinter/warp-ctc.git"
- GIT_TAG b63a0644654a3e0ed624c85a1767bc8193aead09
PREFIX ${WARPCTC_SOURCES_DIR}
UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
......
@@ -56,11 +56,11 @@ DataFeeder
Reader
======
- .. automodule:: paddle.v2.reader
.. automodule:: paddle.reader
:members:
:noindex:
- .. automodule:: paddle.v2.reader.creator
.. automodule:: paddle.reader.creator
:members:
:noindex:
......
@@ -479,6 +479,13 @@ label_smooth
.. autofunction:: paddle.fluid.layers.label_smooth
:noindex:
roi_pool
---------
.. autofunction:: paddle.fluid.layers.roi_pool
:noindex:
ops
===
@@ -820,3 +827,5 @@ topk
.. autofunction:: paddle.fluid.layers.topk
:noindex:
# Averaging Parameter in PaddlePaddle
## Why Averaging
In a large scale machine learning setup where the size of the training data is huge, it could take us a large number of iterations over the training data before we can achieve the optimal values of parameters of our model. Looking at the problem setup, it is desirable to obtain the optimal values of parameters by going through the data in as few passes as possible.
Polyak and Juditsky (1992) showed that the test performance of simple average of parameters obtained by Stochastic Gradient Descent (SGD) is as good as that of parameter values that are obtained by training the model over and over again, over the training dataset.
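For reference, the Polyak-Ruppert average referred to above is simply the running mean of the SGD iterates (the notation below is added here for clarity and is not part of the original design doc):

$$\bar{\theta}_T = \frac{1}{T} \sum_{t=1}^{T} \theta_t$$

where $\theta_t$ is the parameter vector after the $t$-th update and $\bar{\theta}_T$ is the averaged copy used at test time.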
@@ -16,16 +16,16 @@ We propose averaging for any optimizer similar to how ASGD performs it, as menti
### How to perform Parameter Averaging in PaddlePaddle
Parameter Averaging in PaddlePaddle works in the following way during training:
1. It will take in an instance of an optimizer as an input, e.g. RMSPropOptimizer
2. The optimizer itself is responsible for updating the parameters.
3. The ParameterAverageOptimizer maintains a separate copy of the parameters for itself:
1. In theory, the values of this copy are the average of the values of the parameters in the most recent N batches.
2. However, saving all N instances of the parameters in memory is not feasible.
3. Therefore, an approximation algorithm is used (a toy sketch of the windowed-average idea appears after the steps below).
Hence, overall we have two copies of the parameters: one for the optimizer itself, and one for the ParameterAverageOptimizer. The former should be used in back propagation, while the latter should be used during testing and should be saved.
During the testing/saving the model phase, we perform the following steps:
1. Perform the delayed operations.
2. Save current values of the parameters to a temporary variable.
3. Replace the values of the parameters with the averaged values.
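As a rough illustration of the windowed average described in step 3 above, here is a standalone NumPy sketch with made-up names; it is not the actual ParameterAverageOptimizer implementation:

```python
import numpy as np

class WindowedParamAverager:
    """Keep a running average of the last `window` parameter snapshots."""

    def __init__(self, window):
        self.window = window
        self.history = []      # recent parameter snapshots
        self.average = None    # averaged copy used for testing/saving

    def update(self, params):
        # Called after every optimizer step with the current parameters.
        self.history.append(np.copy(params))
        if len(self.history) > self.window:
            self.history.pop(0)            # drop the oldest snapshot
        self.average = np.mean(self.history, axis=0)

averager = WindowedParamAverager(window=3)
for step in range(5):
    params = np.full(2, float(step))       # stand-in for real training updates
    averager.update(params)
print(averager.average)                    # average over the last 3 "batches"
```

Note that this naive version keeps all N snapshots in memory, which is exactly what the design doc says is infeasible at scale; the real optimizer replaces it with an approximation.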
......
@@ -3,7 +3,7 @@
## Why float16
Half precision (float16) is a binary floating-point format that occupies 16 bits in memory. float16 is half the size of traditional 32-bit single precision format (float) and has lower precision and smaller range.
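For context, IEEE 754 half precision packs a sign bit, 5 exponent bits, and 10 mantissa bits into those 16 bits; a normal float16 value decodes as (formula added here for reference, not part of the original doc)

$$x = (-1)^{s} \times 2^{\,e-15} \times \left(1 + \frac{m}{1024}\right)$$

which gives a largest finite value of 65504 and roughly three decimal digits of precision.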
When high precision computation is not required (which is usually the case at least in the deep learning inference stage), using float16 data type could potentially
- reduce storage space, memory bandwidth, and power usages;
- increase the chance of data fitting into a smaller cache of lower latency;
@@ -12,7 +12,7 @@ When high precision computation is not required, using float16 data type could p
## Survey of current float16 support
A brief survey of float16 support on different compilers, hardwares, and libraries can be found below. Interested readers can refer to [link1](https://github.com/PaddlePaddle/Paddle/issues/4853) and [link2](https://github.com/Xreki/Xreki.github.io/blob/master/multi_data_types_in_dl_framework/ppt/float16_and_quantized_type.md) for more info.
The goal of float16 is to serve as a key for the executor to find and run the correct version of compute method specialized for float16 in operator kernels. It should be compatible with various natively supported float16 implementations including `__half` for cuda, `float16_t` for ARM, and `Eigen::half` for Eigen to make writing customized float16 kernels easier.
### Compiler
- nvcc supports `__half` data type after CUDA 7.5.
@@ -95,11 +95,89 @@ float half_to_float(float16 h);
```
which provides one-to-one conversion between float32 and float16. These two functions will do different conversion routines based on the current hardware. CUDA/ARM intrinsics will be used when the corresponding hardware is available. If the hardware or compiler level does not support float32 to float16 conversion, software emulation will be performed to do the conversion.
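A quick way to see what such a conversion pair does in practice is NumPy, whose `float16` type follows the same IEEE half-precision format; the snippet below only illustrates the rounding behaviour and is not the Fluid implementation:

```python
import numpy as np

x = np.float32(0.1)         # not exactly representable in binary
h = np.float16(x)           # "float_to_half": round to the nearest half-precision value
y = np.float32(h)           # "half_to_float": widen back to single precision

print(h)                    # the half-precision approximation of 0.1
print(y - x)                # small rounding error introduced by the round trip
print(np.float16(70000.0))  # overflows the float16 range and becomes inf
```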
- ## To do
- After float16 class is available, some of the future items are below:
- - Update pybind/tensor_py.h to bind c++ float16 with numpy float16.
- - Modify `GetKernelType()` method in `framework/operator.h` to make it compatible with float16.
- - Create a type-casting operator that can convert the data type in tensor between float16 and other types.
## float16 inference
In Fluid, a neural network is represented as a protobuf message called [ProgramDesc](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/program.md), whose Python wrapper is a [Program](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#program). The basic structure of a program is some nested [blocks](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#block), where each block consists of some [variable](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#variable) definitions and a sequence of [operators](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#operator). An [executor](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/executor.md) will run a given program desc by executing the sequence of operators in the entrance block of the program one by one.
### Operator level requirement
Each operator has many kernels for different data types, devices, and library types. The operator will select the appropriate kernel to run based on, among other things, the data type of the input variables. By default, every Fluid operator has a float data type kernel that takes float variables as input and generates float output.
This means that if we provide float input to the first operator in a program, then each operator will use the float kernel to compute float output and send it as input to the next operator to trigger the float kernel. Overall, the program will run in float mode and give us a final output of float data type.
The same principle applies if we want a program to run in float16 mode. We provide an input variable of float16 data type to the first operator, and then, one by one, each operator in the program will run the float16 kernel (provided that each operator in this program has float16 kernels registered) until we finally obtain a float16 output variable.
So the preliminary requirement for float16 inference is to add float16 kernels to the operators needed in a specific kind of program. For example, float16 inference on an image classification neural network like Vgg or Resnet typically requires the following operators to have float16 kernels: convolution, pooling, multiplication, addition, batch norm, dropout, relu, and softmax. Please refer to [new_op_en](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/dev/new_op_en.md) for details of how to add new kernels to an operator.
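The kernel-selection idea above can be pictured with a toy dispatcher: one operator, several kernels keyed by the input dtype. This is a pure-Python sketch and is unrelated to the real OpKernel registry:

```python
import numpy as np

# Hypothetical per-dtype kernel table for a single "mul" operator.
mul_kernels = {
    np.float32: lambda a, b: a @ b,                       # default float kernel
    np.float16: lambda a, b: (a @ b).astype(np.float16),  # float16 kernel, if registered
}

def run_mul(a, b):
    # Select the kernel from the dtype of the inputs, as the executor would.
    kernel = mul_kernels.get(a.dtype.type)
    if kernel is None:
        raise TypeError("no kernel registered for dtype %s" % a.dtype)
    return kernel(a, b)

a16 = np.ones((2, 2), dtype=np.float16)
print(run_mul(a16, a16).dtype)   # float16 in, float16 out
```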
### Variable level requirement
Operators including convolution and multiplication (used in fully-connected layers) take as input not only the variables generated by the preceding operators but also [parameter](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#parameter) variables, which contain the trained weights to apply to the input data. These weights are obtained in the Fluid training process and are by default of float data type.
When these operators are running in float16 mode, the float16 kernel requires those parameter variables to contain weights of Fluid float16 data type. Thus, we need a convenient way to convert the original float weights to float16 weights.
In Fluid, we use tensor to hold actual data for a variable on the c++ end. [Pybind](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/pybind/tensor_py.h) is used to bind c++ tensors of a certain data type with numpy arrays of the corresponding numpy data type on the Python end. Each common c++ built-in data type has a corresponding numpy data type of the same name. However, since there is no built-in float16 type in c++, we cannot directly bind numpy float16 data type with the Fluid float16 class. Since both Fluid float16 and numpy float16 use uint16 as the internal data storage type, we use c++ built-in type `uint16_t` and the corresponding numpy uint16 data type to bridge the gap via [Pybind](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/pybind/tensor_py.h).
The following code demonstrates how to do the tensor conversion.
```Python
# var is the variable of float weights
# tensor is a numpy array of data copied from the tensor data in var
# fp16_var is the variable that will contain float16 weights converted from var
tensor = numpy.array(var.get_tensor())
fp16_tensor = fp16_var.get_tensor()
# After the original tensor data is converted to numpy float16 data type,
# view(numpy.uint16) is used so that the internal memory of the numpy array
# will be reinterpreted to be of uint16 data type, which is bound to
# Fluid float16 class via pybind with the help of uint16_t built-in c++ type
fp16_tensor.set(tensor.astype(numpy.float16).view(numpy.uint16), GPUPlace)
```
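The `view(numpy.uint16)` trick can be checked in isolation: reinterpreting the bytes does not change them, so the float16 values survive a round trip through uint16. The standalone NumPy snippet below is added for illustration only:

```python
import numpy as np

w = np.array([0.5, -2.0, 3.14159], dtype=np.float32)

w16 = w.astype(np.float16)        # convert the weights to half precision
raw = w16.view(np.uint16)         # reinterpret the same 16-bit patterns as uint16
back = raw.view(np.float16)       # reinterpret again: identical float16 values

print(raw)                        # e.g. [14336 49152 ...], the raw bit patterns
print(np.array_equal(w16, back))  # True: view() never touches the underlying bytes
```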
### Consistent API requirement
The basic inference in float16 mode requires users to feed input and obtain output both of float16 data type. However, in this way, the inference APIs are not consistent between float16 mode and float mode, and users may find it confusing and difficult to use float16 inference since they need to do extra steps to provide float16 input data and convert float16 output data back to float. To have a consistent API for different inference modes, we need to transpile the program desc in some way so that we can run float16 inference by feeding and fetching variables of float data type.
This problem can be solved by introducing a type-casting operator which takes an input variable of a certain data type, casts it to another specified data type, and puts the cast data into the output variable. Inserting cast operators where needed can make a program run internally in float16 mode.
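To make the "insert cast operators where needed" step concrete, here is a toy version on a program represented as a plain list of op descriptions. Names such as `cast` and the dict keys are illustrative only; the real transpiler works on the C++ ProgramDesc:

```python
def insert_casts(ops, feed_names, fetch_names):
    """Wrap a float16 op list with float<->float16 cast ops (toy sketch)."""
    head = [{"type": "cast", "in": name, "out": name + "@fp16",
             "from": "float32", "to": "float16"} for name in feed_names]
    tail = [{"type": "cast", "in": name, "out": name + "@fp32",
             "from": "float16", "to": "float32"} for name in fetch_names]
    return head + ops + tail

program = [{"type": "mul", "in": "image@fp16", "out": "fc_out"}]
for op in insert_casts(program, ["image"], ["fc_out"]):
    print(op["type"], op.get("from", ""), "->", op.get("to", ""))
```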
### float16 transpiler
With all the above requirements in mind, we designed a float16 inference transpiler that can transpile a float32 mode inference program desc into a float16 mode one.
Given a float inference program and the corresponding variables of float32 weights in the [scope](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/scope.md),
this transpiler mainly does the following modifications:
1. Insert cast operators at the beginning of the program so that the input float data will be converted to float16 data type before feeding to subsequent operators to invoke the float16 kernel.
2. Insert cast operators at the end of the program so that the output float16 data will be converted back to float data type before users obtain the result.
3. For each parameter variable of float weights, create in the scope a corresponding variable of float16 weights which are converted from the corresponding float weights and add this new float16 variable to the program.
4. Update the operator information in the program so that each relevant operator uses the newly created float16 variable instead of its float counterpart.
Below is an example of usage:
```Python
# Get the float inference program
[float_inference_program, feed_target_names,
fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
# Prepare the float input data
tensor_img = numpy.random.rand(1, 3, 32, 32).astype(numpy.float32)
# Running inference_program in float mode
float_results = exe.run(float_inference_program,
feed={feed_target_names[0]: tensor_img},
fetch_list=fetch_targets)
# Use the float16 transpiler to speed up inference
float16_inference_program = float_inference_program.clone()
t = fluid.InferenceTranspiler()
t.float16_transpile(float16_inference_program, GPUPlace)
# Running
float16_results = exe.run(float16_inference_program,
feed={feed_target_names[0]: tensor_img},
fetch_list=fetch_targets)
```
As we can see from the example above, users can simply use the `float16_transpile` method provided by the inference transpiler class on an existing float inference program to run inference in float16 mode.
### Speedup on GPU
Currently, Fluid inference in float16 mode is only supported on Nvidia GPU device. There is no motivation to support float16 inference on non-ARM CPUs because float16 is not natively supported there and float16 calculation will only be slower than its float counterpart.
Nvidia started to support its native float16 data type (which has the same internal memory representation as Fluid float16 class) on CUDA 7.5. Moreover, float16 speedups on common computational intensive tasks including GEMM (general matrix-matrix multiplication) and convolution are supported since cublas 7.5 and cuDNN 5.0.
Recently, the introduction of [tensor core](https://devblogs.nvidia.com/programming-tensor-cores-cuda-9/) in volta architecture GPUs and the support of tensor core calculation in CUDA 9.0 and cuDNN 7.0 make float16 truly superior to float in certain deep learning applications. Please refer to this [benchmark report](https://github.com/kexinzhao/Paddle_benchmark/blob/master/float16_benchmark.md) for more details.
@@ -56,11 +56,11 @@ DataFeeder
Reader
======
- .. automodule:: paddle.v2.reader
.. automodule:: paddle.reader
:members:
:noindex:
- .. automodule:: paddle.v2.reader.creator
.. automodule:: paddle.reader.creator
:members:
:noindex:
......
Dataset
=======
- .. automodule:: paddle.v2.dataset
.. automodule:: paddle.dataset
:members:
:noindex:
mnist
+++++
- .. automodule:: paddle.v2.dataset.mnist
.. automodule:: paddle.dataset.mnist
:members:
:noindex:
cifar
+++++
- .. automodule:: paddle.v2.dataset.cifar
.. automodule:: paddle.dataset.cifar
:members:
:noindex:
conll05
+++++++
- .. automodule:: paddle.v2.dataset.conll05
.. automodule:: paddle.dataset.conll05
:members: get_dict,get_embedding,test
:noindex:
imdb
++++
- .. automodule:: paddle.v2.dataset.imdb
.. automodule:: paddle.dataset.imdb
:members:
:noindex:
imikolov
++++++++
- .. automodule:: paddle.v2.dataset.imikolov
.. automodule:: paddle.dataset.imikolov
:members:
:noindex:
movielens
+++++++++
- .. automodule:: paddle.v2.dataset.movielens
.. automodule:: paddle.dataset.movielens
:members:
:noindex:
- .. autoclass:: paddle.v2.dataset.movielens.MovieInfo
.. autoclass:: paddle.dataset.movielens.MovieInfo
:noindex:
- .. autoclass:: paddle.v2.dataset.movielens.UserInfo
.. autoclass:: paddle.dataset.movielens.UserInfo
:noindex:
sentiment
+++++++++
- .. automodule:: paddle.v2.dataset.sentiment
.. automodule:: paddle.dataset.sentiment
:members:
:noindex:
uci_housing
+++++++++++
- .. automodule:: paddle.v2.dataset.uci_housing
.. automodule:: paddle.dataset.uci_housing
:members:
:noindex:
wmt14
+++++
- .. automodule:: paddle.v2.dataset.wmt14
.. automodule:: paddle.dataset.wmt14
:members:
:noindex:
wmt16
+++++
- .. automodule:: paddle.v2.dataset.wmt16
.. automodule:: paddle.dataset.wmt16
:members:
:noindex:
@@ -228,6 +228,21 @@ extern __thread cudaStream_t default_stream;
<< "CUDA error: " << hl_get_device_error_string((size_t)err); \
}
// __shfl has been deprecated as of CUDA 9.0.
#if CUDA_VERSION < 9000
template <typename T>
__forceinline__ __device__ T
__shfl_sync(unsigned, T val, int src_line, int width) {
return __shfl(val, src_line, width);
}
#define CREATE_SHFL_MASK(mask, predicate) mask = 0u;
#else
#define FULL_WARP_MASK 0xFFFFFFFF
#define CREATE_SHFL_MASK(mask, predicate) \
mask = __ballot_sync(FULL_WARP_MASK, (predicate))
#endif
#endif /* __NVCC__ */
#endif /* HL_BASE_H_ */
@@ -341,12 +341,15 @@ void hl_lstm_parallel_forward(real *gateValue,
}
__device__ __forceinline__ void transpose_32x32(real a[], const int idx) {
- int addr = idx % 32;
const int warp_size = 32;
int addr = idx % warp_size;
unsigned mask = 0u;
CREATE_SHFL_MASK(mask, addr < warp_size);
#pragma unroll
for (int k = 1; k < 32; k++) {
- // rSrc[k] = __shfl(rSrc[k], (threadIdx.x + k) % 32, 32);
// rSrc[k] = __shfl_sync(rSrc[k], (threadIdx.x + k) % 32, 32);
- addr = __shfl(addr, (idx + 1) % 32, 32);
addr = __shfl_sync(mask, addr, (idx + 1) % 32, 32);
- a[k] = __shfl(a[k], addr, 32);
a[k] = __shfl_sync(mask, a[k], addr, 32);
}
#pragma unroll
@@ -360,10 +363,11 @@ __device__ __forceinline__ void transpose_32x32(real a[], const int idx) {
}
addr = (32 - idx) % 32;
CREATE_SHFL_MASK(mask, idx % 32 < warp_size);
#pragma unroll
for (int k = 0; k < 32; k++) {
- a[k] = __shfl(a[k], addr, 32);
a[k] = __shfl_sync(mask, a[k], addr, 32);
- addr = __shfl(addr, (idx + 31) % 32, 32);
addr = __shfl_sync(mask, addr, (idx + 31) % 32, 32);
}
}
......
@@ -244,13 +244,16 @@ __device__ __forceinline__ void blockReduce(Pair* shTopK,
if (--beamSize == 0) break;
__syncthreads();
unsigned mask = 0u;
// CREATE_SHFL_MASK(mask, tid < len);
if (tid == maxId[0]) {
if (beam < maxLength) {
shTopK[tid] = topK[beam];
}
}
if (maxId[0] / 32 == warp) {
- if (__shfl(beam, (maxId[0]) % 32, 32) == maxLength) break;
if (__shfl_sync(mask, beam, (maxId[0]) % 32, 32) == maxLength) break;
}
}
}
......
@@ -139,7 +139,7 @@ struct TestBroadcastOpHandle {
PADDLE_ENFORCE_EQ(out_tensor.lod(), lod, "lod is not equal.");
f::Tensor result_tensor;
- f::TensorCopy(out_tensor, cpu_place, *(ctxs_[j]), &result_tensor);
f::TensorCopySync(out_tensor, cpu_place, &result_tensor);
float* ct = result_tensor.mutable_data<float>(cpu_place);
for (int64_t i = 0; i < f::product(kDims); ++i) {
@@ -185,7 +185,7 @@ struct TestBroadcastOpHandle {
}
f::Tensor result_tensor;
- f::TensorCopy(rt, cpu_place, *(ctxs_[j]), &result_tensor);
f::TensorCopySync(rt, cpu_place, &result_tensor);
float* ct = result_tensor.data<float>();
for (int64_t i = 0; i < f::product(kDims); ++i) {
......
@@ -66,8 +66,7 @@ void FetchOpHandle::RunImpl() {
auto &t = var->Get<framework::LoDTensor>();
if (platform::is_gpu_place(t.place())) {
#ifdef PADDLE_WITH_CUDA
- TensorCopy(t, cpu, *dev_ctxes_[t.place()], &tensors_[i], true);
- dev_ctxes_.at(t.place())->Wait();
TensorCopySync(t, cpu, &tensors_[i]);
#endif
} else {
tensors_[i].ShareDataWith(t);
......
@@ -34,7 +34,7 @@ MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder(
const std::vector<platform::Place> &places,
const std::string &loss_var_name,
const std::unordered_set<std::string> &params,
- const std::vector<Scope *> &local_scopes, bool skip_scale_loss,
const std::vector<Scope *> &local_scopes, bool use_default_grad_scale,
platform::NCCLContextMap *nccl_ctxs)
: loss_var_name_(loss_var_name),
places_(places),
@@ -45,7 +45,7 @@ MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder(
const std::vector<platform::Place> &places,
const std::string &loss_var_name,
const std::unordered_set<std::string> &params,
- const std::vector<Scope *> &local_scopes, bool skip_scale_loss)
const std::vector<Scope *> &local_scopes, bool use_default_grad_scale)
: loss_var_name_(loss_var_name),
places_(places),
local_scopes_(local_scopes) {
@@ -53,28 +53,25 @@ MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder(
for (auto &p : params) {
grad_names_.insert(GradVarName(p));
}
- skip_scale_loss_ = skip_scale_loss;
use_default_grad_scale_ = use_default_grad_scale;
}
void MultiDevSSAGraphBuilder::CreateOpHandleIOs(SSAGraph *result,
const OpDesc &op,
- const platform::Place &p,
- const size_t &i) const {
size_t place_id) const {
auto p = places_[place_id];
auto *op_handle = result->ops_.back().get();
op_handle->SetDeviceContext(p,
platform::DeviceContextPool::Instance().Get(p));
- auto var_names = op.InputArgumentNames();
- for (auto &each_var_name : var_names) {
- VarHandle *var = CreateOrGetLatestVarHandle(result, each_var_name, p, i);
for (auto &each_var_name : op.InputArgumentNames()) {
VarHandle *var =
CreateOrGetLatestVarHandle(result, each_var_name, p, place_id);
op_handle->AddInput(var);
}
- var_names = op.OutputArgumentNames();
- for (auto &each_var_name : var_names) {
- CreateOpOutput(result, op_handle, each_var_name, p, i);
for (auto &each_var_name : op.OutputArgumentNames()) {
CreateOpOutput(result, op_handle, each_var_name, p, place_id);
}
}
@@ -84,17 +81,18 @@ bool MultiDevSSAGraphBuilder::IsDistTrainOp(const OpDesc &op,
return false;
}
- auto checker = [&](const std::vector<std::string> opvars,
-                    const std::vector<std::string> sendvars) -> bool {
- bool is_dist_train_op = false;
/**
 * Check whether any of opvars contains `.block` and is in sendvars.
 */
auto checker = [](const std::vector<std::string> &opvars,
const std::vector<std::string> &sendvars) -> bool {
for (auto &var : opvars) {
if (var.find(".block") != std::string::npos &&
std::find(sendvars.begin(), sendvars.end(), var) != sendvars.end()) {
- is_dist_train_op = true;
- break;
return true;
}
}
- return is_dist_train_op;
return false;
};
if (op.Type() == "split") {
@@ -117,13 +115,7 @@ std::unique_ptr<SSAGraph> MultiDevSSAGraphBuilder::Build(
places_.size());
// Find "send" op first for split is in front of send.
- OpDesc *send_op = nullptr;
- for (auto *op : program.Block(0).AllOps()) {
-   if (op->Type() == "send") {
-     send_op = op;
-     break;
-   }
- }
OpDesc *send_op = GetSendOpDesc(program);
bool is_forwarding = true;
for (auto *op : program.Block(0).AllOps()) {
@@ -134,7 +126,8 @@ std::unique_ptr<SSAGraph> MultiDevSSAGraphBuilder::Build(
} else if (IsDistTrainOp(*op, send_op)) {
CreateComputationalOps(&result, *op, 1);
} else if (IsScaleLossOp(*op)) {
- if (!skip_scale_loss_) {
// user can customize loss@grad if not use_default_grad_scale_
if (use_default_grad_scale_) {
CreateScaleLossGradOp(&result);
}
is_forwarding = false;
@@ -142,10 +135,7 @@ std::unique_ptr<SSAGraph> MultiDevSSAGraphBuilder::Build(
CreateComputationalOps(&result, *op, places_.size());
if (!is_forwarding) {
// Currently, we assume that once gradient is generated, it can be
- // broadcast, and each gradient is only broadcast once. But there are no
- // other cases, for example, we need to adjust the gradient according to
- // the input when we get the gradient, which is not considered at
- // present.
// broadcast, and each gradient is only broadcast once.
for (auto &og : op->OutputArgumentNames()) {
if (IsParameterGradientOnce(og, &og_has_been_broadcast)) {
InsertNCCLAllReduceOp(&result, og);
@@ -175,6 +165,16 @@ std::unique_ptr<SSAGraph> MultiDevSSAGraphBuilder::Build(
return std::unique_ptr<SSAGraph>(graph);
}
OpDesc *MultiDevSSAGraphBuilder::GetSendOpDesc(
const ProgramDesc &program) const {
for (auto *op : program.Block(0).AllOps()) {
if (op->Type() == "send") {
return op;
}
}
return nullptr;
}
void MultiDevSSAGraphBuilder::InsertNCCLAllReduceOp(
SSAGraph *result, const std::string &og) const {
#ifdef PADDLE_WITH_CUDA
@@ -243,7 +243,7 @@ void MultiDevSSAGraphBuilder::CreateComputationalOps(SSAGraph *result,
auto p = places_[scope_idx];
auto s = local_scopes_[scope_idx];
result->ops_.emplace_back(new ComputationOpHandle(op, s, p));
- CreateOpHandleIOs(result, op, p, scope_idx);
CreateOpHandleIOs(result, op, scope_idx);
}
}
@@ -255,7 +255,7 @@ void MultiDevSSAGraphBuilder::CreateSendOp(SSAGraph *result,
result->ops_.emplace_back(new SendOpHandle(op, s, p));
// Create inputs for output on original place and no ssa output
// is created for send op.
- CreateOpHandleIOs(result, op, p, 0);
CreateOpHandleIOs(result, op, 0);
}
bool MultiDevSSAGraphBuilder::IsScaleLossOp(const OpDesc &op) const {
......
@@ -41,14 +41,14 @@ class MultiDevSSAGraphBuilder : public SSAGraphBuilder {
const std::string &loss_var_name,
const std::unordered_set<std::string> &params,
const std::vector<Scope *> &local_scopes,
- bool skip_scale_loss);
bool use_default_grad_scale);
#endif
std::unique_ptr<SSAGraph> Build(const ProgramDesc &program) const override;
private:
void CreateOpHandleIOs(SSAGraph *result, const OpDesc &op,
- const platform::Place &p, const size_t &i) const;
size_t place_id) const;
private:
std::string loss_var_name_;
@@ -59,12 +59,15 @@ class MultiDevSSAGraphBuilder : public SSAGraphBuilder {
#ifdef PADDLE_WITH_CUDA
platform::NCCLContextMap *nccl_ctxs_;
#endif
- bool skip_scale_loss_;
bool use_default_grad_scale_;
bool IsScaleLossOp(const OpDesc &op) const;
void CreateSendOp(SSAGraph *result, const OpDesc &op) const;
/**
 * Is this operator the end-point operator before/after the send operator?
 */
bool IsDistTrainOp(const OpDesc &op, OpDesc *send_op) const;
void CreateComputationalOps(SSAGraph *result, const OpDesc &op,
@@ -77,6 +80,12 @@ class MultiDevSSAGraphBuilder : public SSAGraphBuilder {
std::unordered_set<std::string> *og_has_been_broadcast) const;
void InsertNCCLAllReduceOp(SSAGraph *result, const std::string &og) const;
/**
 * Get the send op in the global block of the program.
 * Returns nullptr if not found.
 */
OpDesc *GetSendOpDesc(const ProgramDesc &program) const;
};
} // namespace details
} // namespace framework
......
@@ -194,7 +194,7 @@ struct TestReduceOpHandle {
}
f::Tensor result_tensor;
- f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor);
f::TensorCopySync(rt, cpu_place, &result_tensor);
float *ct = result_tensor.data<float>();
for (int64_t j = 0; j < f::product(result_tensor.dims()); ++j) {
@@ -239,7 +239,7 @@ struct TestReduceOpHandle {
auto &rt = out_var->Get<f::LoDTensor>();
f::Tensor result_tensor;
- f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor);
f::TensorCopySync(rt, cpu_place, &result_tensor);
float *ct = result_tensor.data<float>();
for (int64_t j = 0; j < f::product(result_tensor.dims()); ++j) {
......
@@ -46,6 +46,7 @@ void ScaleLossGradOpHandle::RunImpl() {
->stream();
memory::Copy(boost::get<platform::CUDAPlace>(place_), tmp,
platform::CPUPlace(), &coeff_, sizeof(float), stream);
VLOG(1) << place_ << "RUN Scale loss grad op";
});
#endif
}
......
@@ -25,12 +25,22 @@ namespace paddle {
namespace framework {
namespace details {
// An SSA graph used by the parallel executor.
struct SSAGraph {
// All variables in each device.
// The outside vector is the device vector. Each element of this vector is a
// map from variable name to variables. The variables that share the same name
// will have a different version. The offset in the
// `std::vector<std::unique_ptr<VarHandle>>` is the version of the variables.
std::vector<
std::unordered_map<std::string, std::vector<std::unique_ptr<VarHandle>>>>
vars_;
// aux variables to represent dependency. Useful to resolve data hazard.
std::unordered_set<std::unique_ptr<VarHandleBase>> dep_vars_;
// All operators. NOTE that even though we use a vector here, the operators
// are unordered.
std::vector<std::unique_ptr<OpHandleBase>> ops_;
};
......
@@ -48,6 +48,8 @@ class SSAGraphBuilder {
const platform::Place &place,
size_t place_offset);
// Add an output variable (each_var_name, place, place_offset) to op_handle,
// which belongs to graph
static void CreateOpOutput(SSAGraph *graph, OpHandleBase *op_handle,
const std::string &each_var_name,
const platform::Place &place, size_t place_offset);
......
@@ -15,7 +15,6 @@ limitations under the License. */
#include <algorithm>
#include <stdexcept>
#include <string>
- #include <vector>
#include "paddle/fluid/framework/init.h"
#include "paddle/fluid/framework/operator.h"
@@ -31,6 +30,7 @@ std::once_flag p2p_init_flag;
void InitGflags(std::vector<std::string> argv) {
std::call_once(gflags_init_flag, [&]() {
argv.insert(argv.begin(), "dummy");
int argc = argv.size();
char **arr = new char *[argv.size()];
std::string line;
@@ -44,20 +44,23 @@ void InitGflags(std::vector<std::string> argv) {
});
}
- void InitP2P(int count) {
void InitP2P(std::vector<int> devices) {
#ifdef PADDLE_WITH_CUDA
std::call_once(p2p_init_flag, [&]() {
int count = devices.size();
for (int i = 0; i < count; ++i) {
for (int j = 0; j < count; ++j) {
- if (i == j) continue;
if (devices[i] == devices[j]) continue;
int can_acess = -1;
- PADDLE_ENFORCE(cudaDeviceCanAccessPeer(&can_acess, i, j),
PADDLE_ENFORCE(
cudaDeviceCanAccessPeer(&can_acess, devices[i], devices[j]),
"Failed to test P2P access.");
if (can_acess != 1) {
- LOG(WARNING) << "Cannot enable P2P access from " << i << " to " << j;
LOG(WARNING) << "Cannot enable P2P access from " << devices[i]
<< " to " << devices[j];
} else {
- cudaSetDevice(i);
- cudaDeviceEnablePeerAccess(j, 0);
cudaSetDevice(devices[i]);
cudaDeviceEnablePeerAccess(devices[j], 0);
}
}
}
@@ -67,11 +70,26 @@ void InitP2P(int count) {
void InitDevices(bool init_p2p) {
/*Init all available devices by default */
std::vector<int> devices;
#ifdef PADDLE_WITH_CUDA
try {
int count = platform::GetCUDADeviceCount();
for (int i = 0; i < count; ++i) {
devices.push_back(i);
}
} catch (const std::exception &exp) {
LOG(WARNING) << "Compiled with WITH_GPU, but no GPU found in runtime.";
}
#else
LOG(WARNING)
<< "'CUDA' is not supported, Please re-compile with WITH_GPU option";
#endif
InitDevices(init_p2p, devices);
}
void InitDevices(bool init_p2p, const std::vector<int> devices) {
std::vector<platform::Place> places;
- places.emplace_back(platform::CPUPlace());
int count = 0;
#ifdef PADDLE_WITH_CUDA
try {
count = platform::GetCUDADeviceCount();
@@ -83,12 +101,17 @@ void InitDevices(bool init_p2p) {
<< "'CUDA' is not supported, Please re-compile with WITH_GPU option";
#endif
- for (int i = 0; i < count; ++i) {
-   places.emplace_back(platform::CUDAPlace(i));
for (size_t i = 0; i < devices.size(); ++i) {
if (devices[i] >= count || devices[i] < 0) {
LOG(WARNING) << "Invalid devices id.";
continue;
}
places.emplace_back(platform::CUDAPlace(devices[i]));
}
if (init_p2p) {
- InitP2P(count);
InitP2P(devices);
}
places.emplace_back(platform::CPUPlace());
platform::DeviceContextPool::Init(places);
}
......
@@ -28,5 +28,7 @@ void InitGLOG(const std::string &prog_name);
void InitDevices(bool init_p2p);
void InitDevices(bool init_p2p, const std::vector<int> devices);
} // namespace framework
} // namespace paddle
@@ -58,7 +58,7 @@ ParallelExecutor::ParallelExecutor(
const std::unordered_set<std::string> &bcast_vars,
const ProgramDesc &main_program, const std::string &loss_var_name,
Scope *scope, const std::vector<Scope *> &local_scopes, bool allow_op_delay,
- bool customize_scale_loss)
bool use_default_grad_scale)
: member_(new ParallelExecutorPrivate(places)) {
member_->global_scope_ = scope;
@@ -93,11 +93,11 @@ ParallelExecutor::ParallelExecutor(
#ifdef PADDLE_WITH_CUDA
details::MultiDevSSAGraphBuilder builder(
member_->places_, loss_var_name, params, member_->local_scopes_,
- customize_scale_loss, member_->nccl_ctxs_.get());
use_default_grad_scale, member_->nccl_ctxs_.get());
#else
details::MultiDevSSAGraphBuilder builder(member_->places_, loss_var_name,
params, member_->local_scopes_,
- customize_scale_loss);
use_default_grad_scale);
#endif
auto graph = builder.Build(main_program);
......
@@ -40,7 +40,7 @@ class ParallelExecutor {
const ProgramDesc& main_program,
const std::string& loss_var_name, Scope* scope,
const std::vector<Scope*>& local_scopes,
- bool allow_op_delay, bool customize_scale_loss);
bool allow_op_delay, bool use_default_grad_scale);
~ParallelExecutor();
......
@@ -20,7 +20,7 @@ namespace paddle {
namespace framework {
void TensorCopy(const Tensor& src, const platform::Place& dst_place,
- const platform::DeviceContext& ctx, Tensor* dst, bool sync) {
const platform::DeviceContext& ctx, Tensor* dst) {
VLOG(3) << "TensorCopy " << src.dims() << " from " << src.place() << " to "
<< dst_place;
src.check_memory_size();
@@ -48,9 +48,7 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place,
auto ctx_gpu_place = boost::get<platform::CUDAPlace>(ctx_place);
PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place);
auto stream =
- sync ? nullptr
-      : reinterpret_cast<const platform::CUDADeviceContext&>(ctx)
-            .stream();
reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
memory::Copy(dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
} else if (platform::is_cpu_place(src_place) &&
platform::is_gpu_place(dst_place)) {
@@ -61,9 +59,7 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place,
auto ctx_gpu_place = boost::get<platform::CUDAPlace>(ctx_place);
PADDLE_ENFORCE_EQ(dst_gpu_place, ctx_gpu_place);
auto stream =
- sync ? nullptr
-      : reinterpret_cast<const platform::CUDADeviceContext&>(ctx)
-            .stream();
reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
memory::Copy(dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, stream);
} else if (platform::is_gpu_place(src_place) &&
platform::is_gpu_place(dst_place)) {
@@ -72,9 +68,7 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place,
auto ctx_place = ctx.GetPlace();
PADDLE_ENFORCE(platform::is_gpu_place(ctx_place));
auto stream =
- sync ? nullptr
-      : reinterpret_cast<const platform::CUDADeviceContext&>(ctx)
-            .stream();
reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream();
memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream);
}
#endif
@@ -92,6 +86,41 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place,
TensorCopy(src, dst_place, *dev_ctx, dst);
}
void TensorCopySync(const Tensor& src, const platform::Place& dst_place,
Tensor* dst) {
VLOG(3) << "TensorCopySync " << src.dims() << " from " << src.place()
<< " to " << dst_place;
src.check_memory_size();
dst->Resize(src.dims());
dst->set_layout(src.layout());
auto src_place = src.place();
auto src_ptr = src.data<void>();
auto dst_ptr = dst->mutable_data(dst_place, src.type());
auto size = src.numel() * SizeOfType(src.type());
if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) {
memory::Copy(boost::get<platform::CPUPlace>(dst_place), dst_ptr,
boost::get<platform::CPUPlace>(src_place), src_ptr, size);
}
#ifdef PADDLE_WITH_CUDA
else if (platform::is_gpu_place(src_place) && // NOLINT
platform::is_cpu_place(dst_place)) {
auto src_gpu_place = boost::get<platform::CUDAPlace>(src_place);
auto dst_cpu_place = boost::get<platform::CPUPlace>(dst_place);
memory::Copy(dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr);
} else if (platform::is_cpu_place(src_place) &&
platform::is_gpu_place(dst_place)) {
auto src_cpu_place = boost::get<platform::CPUPlace>(src_place);
auto dst_gpu_place = boost::get<platform::CUDAPlace>(dst_place);
memory::Copy(dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, nullptr);
} else if (platform::is_gpu_place(src_place) &&
platform::is_gpu_place(dst_place)) {
auto src_gpu_place = boost::get<platform::CUDAPlace>(src_place);
auto dst_gpu_place = boost::get<platform::CUDAPlace>(dst_place);
memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr);
}
#endif
}
template <typename Predicate, typename DevCtx> template <typename Predicate, typename DevCtx>
struct AnyDTypeVisitor { struct AnyDTypeVisitor {
Predicate predicate_; Predicate predicate_;
......
...@@ -24,10 +24,11 @@ namespace paddle { ...@@ -24,10 +24,11 @@ namespace paddle {
namespace framework { namespace framework {
void TensorCopy(const Tensor& src, const platform::Place& dst_place, void TensorCopy(const Tensor& src, const platform::Place& dst_place,
const platform::DeviceContext& ctx, Tensor* dst, const platform::DeviceContext& ctx, Tensor* dst);
bool sync = false);
void TensorCopy(const Tensor& src, const platform::Place& dst_place, void TensorCopy(const Tensor& src, const platform::Place& dst_place,
Tensor* dst); Tensor* dst);
void TensorCopySync(const Tensor& src, const platform::Place& dst_place,
Tensor* dst);
template <typename T> template <typename T>
void TensorFromVector(const std::vector<T>& src, void TensorFromVector(const std::vector<T>& src,
......
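A minimal usage sketch for the new synchronous copy, assuming the declarations above live in paddle/fluid/framework/tensor_util.h; the helper name FetchToHost is illustrative. TensorCopySync hands a nullptr stream to memory::Copy and so blocks until the data is on the destination place, while TensorCopy only enqueues the copy on the device context's stream.

// Sketch only: fetch a GPU tensor to the host and read it immediately.
// Assumes `gpu_tensor` already holds float data on a CUDA place.
#include "paddle/fluid/framework/tensor_util.h"

void FetchToHost(const paddle::framework::Tensor& gpu_tensor) {
  paddle::framework::Tensor cpu_tensor;
  // Blocking copy: cpu_tensor is ready as soon as this call returns.
  paddle::framework::TensorCopySync(gpu_tensor, paddle::platform::CPUPlace(),
                                    &cpu_tensor);
  const float* host_data = cpu_tensor.data<float>();
  (void)host_data;  // ... consume host_data ...
}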
...@@ -46,7 +46,6 @@ class EngineBase { ...@@ -46,7 +46,6 @@ class EngineBase {
virtual void Execute(int batch_size) = 0; virtual void Execute(int batch_size) = 0;
virtual ~EngineBase() {} virtual ~EngineBase() {}
}; // class EngineBase }; // class EngineBase
} // namespace inference } // namespace inference
......
...@@ -16,17 +16,29 @@ limitations under the License. */ ...@@ -16,17 +16,29 @@ limitations under the License. */
#include <algorithm> #include <algorithm>
#include <fstream> #include <fstream>
#include <vector>
#include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/feed_fetch_type.h" #include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/pybind/pybind.h" #include "paddle/fluid/pybind/pybind.h"
DEFINE_string(devices, "", "The devices to be used, joined by commas.");
DEFINE_bool(init_p2p, false, "Whether to init p2p.");
namespace paddle { namespace paddle {
namespace inference { namespace inference {
// Temporarily add this function for exposing framework::InitDevices() when void Init(const std::vector<std::string> argv) {
// linking the inference shared library. framework::InitGflags(argv);
void Init(bool init_p2p) { framework::InitDevices(init_p2p); } // init devices
std::vector<int> devices;
std::string token;
std::istringstream tokenStream(FLAGS_devices);
while (std::getline(tokenStream, token, ',')) {
devices.push_back(std::stoi(token));
}
framework::InitDevices(FLAGS_init_p2p, devices);
}
void ReadBinaryFile(const std::string& filename, std::string* contents) { void ReadBinaryFile(const std::string& filename, std::string* contents) {
std::ifstream fin(filename, std::ios::in | std::ios::binary); std::ifstream fin(filename, std::ios::in | std::ios::binary);
......
...@@ -25,7 +25,7 @@ limitations under the License. */ ...@@ -25,7 +25,7 @@ limitations under the License. */
namespace paddle { namespace paddle {
namespace inference { namespace inference {
void Init(bool init_p2p); void Init(const std::vector<std::string> argv);
void LoadPersistables(framework::Executor* executor, framework::Scope* scope, void LoadPersistables(framework::Executor* executor, framework::Scope* scope,
const framework::ProgramDesc& main_program, const framework::ProgramDesc& main_program,
......
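A hedged sketch of calling the new Init overload, assuming the declaration sits in paddle/fluid/inference/io.h; the binary name in argv[0] is a placeholder. --devices and --init_p2p are the gflags defined above, so "--devices=0,2" selects GPUs 0 and 2 and skips P2P initialization.

#include <string>
#include <vector>
#include "paddle/fluid/inference/io.h"

int main() {
  std::vector<std::string> argv = {"inference_demo", "--devices=0,2",
                                   "--init_p2p=false"};
  // Parses the gflags, then calls framework::InitDevices(false, {0, 2}).
  paddle::inference::Init(argv);
  // ... load the inference program and run it as before ...
  return 0;
}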
...@@ -17,6 +17,7 @@ limitations under the License. */ ...@@ -17,6 +17,7 @@ limitations under the License. */
#include <NvInfer.h> #include <NvInfer.h>
#include <cuda.h> #include <cuda.h>
#include <glog/logging.h> #include <glog/logging.h>
#include <string>
#include "paddle/fluid/inference/tensorrt/helper.h" #include "paddle/fluid/inference/tensorrt/helper.h"
#include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/enforce.h"
......
...@@ -16,7 +16,9 @@ limitations under the License. */ ...@@ -16,7 +16,9 @@ limitations under the License. */
#include <NvInfer.h> #include <NvInfer.h>
#include <memory> #include <memory>
#include <string>
#include <unordered_map> #include <unordered_map>
#include <vector>
#include "paddle/fluid/inference/engine.h" #include "paddle/fluid/inference/engine.h"
#include "paddle/fluid/inference/tensorrt/helper.h" #include "paddle/fluid/inference/tensorrt/helper.h"
...@@ -56,9 +58,9 @@ class TensorRTEngine : public EngineBase { ...@@ -56,9 +58,9 @@ class TensorRTEngine : public EngineBase {
virtual ~TensorRTEngine(); virtual ~TensorRTEngine();
// TODO(Superjomn) implement it later when graph segmentation is supported. // TODO(Superjomn) implement it later when graph segmentation is supported.
virtual void Build(const DescType& paddle_model) override; void Build(const DescType& paddle_model) override;
virtual void Execute(int batch_size) override; void Execute(int batch_size) override;
// Initialize the inference network, so that TensorRT layers can add to this // Initialize the inference network, so that TensorRT layers can add to this
// network. // network.
......
...@@ -12,13 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,13 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/inference/tensorrt/engine.h"
#include <cuda.h> #include <cuda.h>
#include <cuda_runtime_api.h> #include <cuda_runtime_api.h>
#include <glog/logging.h> #include <glog/logging.h>
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include "paddle/fluid/inference/tensorrt/engine.h"
#include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/enforce.h"
namespace paddle { namespace paddle {
...@@ -65,7 +64,8 @@ TEST_F(TensorRTEngineTest, add_layer) { ...@@ -65,7 +64,8 @@ TEST_F(TensorRTEngineTest, add_layer) {
// fill in real data // fill in real data
float x_v = 1234; float x_v = 1234;
engine_->SetInputFromCPU("x", (void*)&x_v, 1 * sizeof(float)); engine_->SetInputFromCPU("x", reinterpret_cast<void*>(&x_v),
1 * sizeof(float));
LOG(INFO) << "to execute"; LOG(INFO) << "to execute";
engine_->Execute(1); engine_->Execute(1);
......
...@@ -62,5 +62,21 @@ TEST(inference, image_classification) { ...@@ -62,5 +62,21 @@ TEST(inference, image_classification) {
LOG(INFO) << output2.dims(); LOG(INFO) << output2.dims();
CheckError<float>(output1, output2); CheckError<float>(output1, output2);
// float16 inference requires CUDA GPUs with compute capability >= 5.3
if (paddle::platform::GetCUDAComputeCapability(0) >= 53) {
paddle::framework::LoDTensor output3;
std::vector<paddle::framework::LoDTensor*> cpu_fetchs3;
cpu_fetchs3.push_back(&output3);
LOG(INFO) << "--- GPU Runs in float16 mode: ---";
std::string fp16_dirname = dirname;
fp16_dirname.replace(fp16_dirname.find("book/"),
std::string("book/").size(), "book/float16_");
TestInference<paddle::platform::CUDAPlace, false, true>(
fp16_dirname, cpu_feeds, cpu_fetchs3, FLAGS_repeat);
CheckError<float>(output2, output3);
}
#endif #endif
} }
...@@ -15,7 +15,7 @@ limitations under the License. */ ...@@ -15,7 +15,7 @@ limitations under the License. */
#include <thrust/execution_policy.h> #include <thrust/execution_policy.h>
#include <thrust/reduce.h> #include <thrust/reduce.h>
#include "paddle/fluid/operators/accuracy_op.h" #include "paddle/fluid/operators/accuracy_op.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/gpu_info.h" #include "paddle/fluid/platform/gpu_info.h"
namespace paddle { namespace paddle {
......
...@@ -16,7 +16,7 @@ limitations under the License. */ ...@@ -16,7 +16,7 @@ limitations under the License. */
#include "paddle/fluid/operators/adagrad_op.h" #include "paddle/fluid/operators/adagrad_op.h"
#include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -14,6 +14,7 @@ limitations under the License. */ ...@@ -14,6 +14,7 @@ limitations under the License. */
#pragma once #pragma once
#include <math.h> // for sqrt in CPU and CUDA #include <math.h> // for sqrt in CPU and CUDA
#include <Eigen/Dense>
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/detail/safe_ref.h" #include "paddle/fluid/operators/detail/safe_ref.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/operators/math/selected_rows_functor.h"
...@@ -24,8 +25,14 @@ namespace operators { ...@@ -24,8 +25,14 @@ namespace operators {
namespace scatter = paddle::operators::math::scatter; namespace scatter = paddle::operators::math::scatter;
struct GPUAdam;
struct CPUAdam;
template <typename T, typename Flavour>
struct AdamFunctor;
template <typename T> template <typename T>
struct AdamFunctor { struct AdamFunctor<T, GPUAdam> {
T beta1_; T beta1_;
T beta2_; T beta2_;
T epsilon_; T epsilon_;
...@@ -71,6 +78,7 @@ struct AdamFunctor { ...@@ -71,6 +78,7 @@ struct AdamFunctor {
// Calculation // Calculation
lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);
mom1 = beta1_ * mom1 + (1 - beta1_) * g; mom1 = beta1_ * mom1 + (1 - beta1_) * g;
mom2 = beta2_ * mom2 + (1 - beta2_) * g * g; mom2 = beta2_ * mom2 + (1 - beta2_) * g * g;
p -= lr * (mom1 / (sqrt(mom2) + epsilon_)); p -= lr * (mom1 / (sqrt(mom2) + epsilon_));
...@@ -82,6 +90,71 @@ struct AdamFunctor { ...@@ -82,6 +90,71 @@ struct AdamFunctor {
} }
}; };
template <typename T>
struct AdamFunctor<T, CPUAdam> {
T beta1_;
T beta2_;
T epsilon_;
const T* beta1_pow_;
const T* beta2_pow_;
const T* moment1_;
T* moment1_out_;
const T* moment2_;
T* moment2_out_;
const T* lr_;
const T* grad_;
const T* param_;
T* param_out_;
AdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2,
T* mom2_out, const T* lr, const T* grad, const T* param,
T* param_out)
: beta1_(beta1),
beta2_(beta2),
epsilon_(epsilon),
beta1_pow_(beta1_pow),
beta2_pow_(beta2_pow),
moment1_(mom1),
moment1_out_(mom1_out),
moment2_(mom2),
moment2_out_(mom2_out),
lr_(lr),
grad_(grad),
param_(param),
param_out_(param_out) {}
void operator()(size_t numel) const {
Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> g{
grad_, static_cast<Eigen::Index>(numel)};
Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> mom1{
moment1_, static_cast<Eigen::Index>(numel)};
Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> mom2{
moment2_, static_cast<Eigen::Index>(numel)};
Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> param{
param_, static_cast<Eigen::Index>(numel)};
Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> param_out{
param_out_, static_cast<Eigen::Index>(numel)};
Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> moment1_out{
moment1_out_, static_cast<Eigen::Index>(numel)};
Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> moment2_out{
moment2_out_, static_cast<Eigen::Index>(numel)};
T lr = *lr_;
T beta1_pow = *beta1_pow_;
T beta2_pow = *beta2_pow_;
// Calculation
lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);
moment1_out = beta1_ * mom1 + (1 - beta1_) * g;
moment2_out = beta2_ * mom2 + (1 - beta2_) * g * g;
param_out = param - lr * (moment1_out / (moment2_out.sqrt() + epsilon_));
}
};
template <typename T> template <typename T>
struct SparseAdamFunctor { struct SparseAdamFunctor {
T beta1_; T beta1_;
...@@ -134,6 +207,7 @@ struct SparseAdamFunctor { ...@@ -134,6 +207,7 @@ struct SparseAdamFunctor {
T p = param_[rows_[i] * row_numel_ + j]; T p = param_[rows_[i] * row_numel_ + j];
lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);
mom1 = beta1_ * mom1 + (1 - beta1_) * g; mom1 = beta1_ * mom1 + (1 - beta1_) * g;
mom2 = beta2_ * mom2 + (1 - beta2_) * g * g; mom2 = beta2_ * mom2 + (1 - beta2_) * g * g;
p -= lr * (mom1 / (sqrt(mom2) + epsilon_)); p -= lr * (mom1 / (sqrt(mom2) + epsilon_));
...@@ -177,7 +251,9 @@ class AdamOpKernel : public framework::OpKernel<T> { ...@@ -177,7 +251,9 @@ class AdamOpKernel : public framework::OpKernel<T> {
if (grad_var->IsType<framework::LoDTensor>()) { if (grad_var->IsType<framework::LoDTensor>()) {
auto& grad = Ref(ctx.Input<LoDTensor>("Grad"), "Must set Grad"); auto& grad = Ref(ctx.Input<LoDTensor>("Grad"), "Must set Grad");
AdamFunctor<T> functor(
if (platform::is_cpu_place(ctx.GetPlace())) {
AdamFunctor<T, CPUAdam> functor(
beta1, beta2, epsilon, beta1_pow.template data<T>(), beta1, beta2, epsilon, beta1_pow.template data<T>(),
beta2_pow.template data<T>(), mom1.template data<T>(), beta2_pow.template data<T>(), mom1.template data<T>(),
mom1_out.template mutable_data<T>(ctx.GetPlace()), mom1_out.template mutable_data<T>(ctx.GetPlace()),
...@@ -186,10 +262,23 @@ class AdamOpKernel : public framework::OpKernel<T> { ...@@ -186,10 +262,23 @@ class AdamOpKernel : public framework::OpKernel<T> {
lr.template data<T>(), grad.template data<T>(), lr.template data<T>(), grad.template data<T>(),
param.template data<T>(), param.template data<T>(),
param_out.template mutable_data<T>(ctx.GetPlace())); param_out.template mutable_data<T>(ctx.GetPlace()));
functor(param.numel());
} else if (platform::is_gpu_place(ctx.GetPlace())) {
AdamFunctor<T, GPUAdam> functor(
beta1, beta2, epsilon, beta1_pow.template data<T>(),
beta2_pow.template data<T>(), mom1.template data<T>(),
mom1_out.template mutable_data<T>(ctx.GetPlace()),
mom2.template data<T>(),
mom2_out.template mutable_data<T>(ctx.GetPlace()),
lr.template data<T>(), grad.template data<T>(),
param.template data<T>(),
param_out.template mutable_data<T>(ctx.GetPlace()));
platform::ForRange<DeviceContext> for_range( platform::ForRange<DeviceContext> for_range(
static_cast<const DeviceContext&>(ctx.device_context()), static_cast<const DeviceContext&>(ctx.device_context()),
param.numel()); param.numel());
for_range(functor); for_range(functor);
}
} else if (grad_var->IsType<framework::SelectedRows>()) { } else if (grad_var->IsType<framework::SelectedRows>()) {
auto& grad = auto& grad =
Ref(ctx.Input<framework::SelectedRows>("Grad"), "Must set Grad"); Ref(ctx.Input<framework::SelectedRows>("Grad"), "Must set Grad");
......
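For reference, the update that both AdamFunctor flavours implement, written out for a single element as a standalone sketch (illustration only; AdamState and AdamStep are not Paddle names):

#include <cmath>

struct AdamState {
  float mom1 = 0.f;  // first moment estimate
  float mom2 = 0.f;  // second moment estimate
};

inline float AdamStep(float param, float grad, float lr, float beta1,
                      float beta2, float epsilon, float beta1_pow,
                      float beta2_pow, AdamState* s) {
  // Bias-corrected step size, identical to the kernels above.
  lr *= std::sqrt(1 - beta2_pow) / (1 - beta1_pow);
  s->mom1 = beta1 * s->mom1 + (1 - beta1) * grad;
  s->mom2 = beta2 * s->mom2 + (1 - beta2) * grad * grad;
  return param - lr * (s->mom1 / (std::sqrt(s->mom2) + epsilon));
}

The CPUAdam specialization evaluates exactly this expression over whole Eigen arrays in one pass, while the GPUAdam flavour applies it element-wise through platform::ForRange.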
...@@ -195,10 +195,9 @@ std::string ItemToString(const BeamSearch::Item &item) { ...@@ -195,10 +195,9 @@ std::string ItemToString(const BeamSearch::Item &item) {
return stream.str(); return stream.str();
} }
class BeamSearchProtoAndCheckerMaker class BeamSearchOpMaker : public framework::OpProtoAndCheckerMaker {
: public framework::OpProtoAndCheckerMaker {
public: public:
BeamSearchProtoAndCheckerMaker(OpProto *proto, OpAttrChecker *op_checker) BeamSearchOpMaker(OpProto *proto, OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) { : OpProtoAndCheckerMaker(proto, op_checker) {
// inputs and outputs stored in proto // inputs and outputs stored in proto
AddInput("pre_ids", "ids in previous step"); AddInput("pre_ids", "ids in previous step");
...@@ -222,20 +221,32 @@ class BeamSearchProtoAndCheckerMaker ...@@ -222,20 +221,32 @@ class BeamSearchProtoAndCheckerMaker
} }
}; };
class BeamSearchInferShape : public framework::InferShapeBase { class BeamSearchOp : public framework::OperatorWithKernel {
public: public:
void operator()(framework::InferShapeContext *context) const override { using framework::OperatorWithKernel::OperatorWithKernel;
protected:
void InferShape(framework::InferShapeContext *ctx) const override {
for (const std::string &arg : for (const std::string &arg :
std::vector<std::string>({"pre_ids", "ids", "scores"})) { std::vector<std::string>({"pre_ids", "ids", "scores"})) {
PADDLE_ENFORCE(context->HasInput(arg), PADDLE_ENFORCE(ctx->HasInput(arg), "BeamSearch need input argument '%s'",
"BeamSearch need input argument '%s'", arg); arg);
} }
for (const std::string &arg : for (const std::string &arg :
std::vector<std::string>({"selected_ids", "selected_scores"})) { std::vector<std::string>({"selected_ids", "selected_scores"})) {
PADDLE_ENFORCE(context->HasOutput(arg), PADDLE_ENFORCE(ctx->HasOutput(arg),
"BeamSearch need output argument '%s'", arg); "BeamSearch need output argument '%s'", arg);
} }
} }
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
framework::OpKernelType kt = framework::OpKernelType(
framework::ToDataType(
ctx.Input<framework::LoDTensor>("pre_ids")->type()),
platform::CPUPlace());
return kt;
}
}; };
class BeamSearchInferVarType : public framework::VarTypeInference { class BeamSearchInferVarType : public framework::VarTypeInference {
...@@ -254,8 +265,13 @@ class BeamSearchInferVarType : public framework::VarTypeInference { ...@@ -254,8 +265,13 @@ class BeamSearchInferVarType : public framework::VarTypeInference {
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
REGISTER_OPERATOR(beam_search, paddle::operators::BeamSearchOp, namespace ops = paddle::operators;
paddle::operators::BeamSearchProtoAndCheckerMaker,
paddle::operators::BeamSearchInferShape, REGISTER_OPERATOR(beam_search, ops::BeamSearchOp, ops::BeamSearchOpMaker,
paddle::operators::BeamSearchInferVarType, ops::BeamSearchInferVarType);
paddle::framework::EmptyGradOpMaker); REGISTER_OP_CPU_KERNEL(
beam_search,
ops::BeamSearchOpKernel<paddle::platform::CPUDeviceContext, float>,
ops::BeamSearchOpKernel<paddle::platform::CPUDeviceContext, double>,
ops::BeamSearchOpKernel<paddle::platform::CPUDeviceContext, int>,
ops::BeamSearchOpKernel<paddle::platform::CPUDeviceContext, int64_t>);
...@@ -192,49 +192,29 @@ std::ostream& operator<<(std::ostream& os, const BeamSearch::Item& item); ...@@ -192,49 +192,29 @@ std::ostream& operator<<(std::ostream& os, const BeamSearch::Item& item);
std::string ItemToString(const BeamSearch::Item& item); std::string ItemToString(const BeamSearch::Item& item);
class BeamSearchOp : public framework::OperatorBase { template <typename DeviceContext, typename T>
class BeamSearchOpKernel : public framework::OpKernel<T> {
public: public:
BeamSearchOp(const std::string& type, void Compute(const framework::ExecutionContext& context) const override {
const framework::VariableNameMap& inputs, auto* ids_var = context.Input<framework::LoDTensor>("ids");
const framework::VariableNameMap& outputs, auto* scores_var = context.Input<framework::LoDTensor>("scores");
const framework::AttributeMap& attrs) auto* pre_ids_var = context.Input<framework::LoDTensor>("pre_ids");
: OperatorBase(type, inputs, outputs, attrs) {}
BeamSearchOp(const BeamSearchOp& o)
: framework::OperatorBase(
static_cast<const framework::OperatorBase&>(o)) {
PADDLE_THROW("Not Implemented");
}
private:
void RunImpl(const framework::Scope& scope,
const platform::Place& dev_place) const override {
auto ids_var = scope.FindVar(Input("ids"));
auto scores_var = scope.FindVar(Input("scores"));
auto pre_ids_var = scope.FindVar(Input("pre_ids"));
PADDLE_ENFORCE_NOT_NULL(ids_var); PADDLE_ENFORCE_NOT_NULL(ids_var);
PADDLE_ENFORCE_NOT_NULL(scores_var); PADDLE_ENFORCE_NOT_NULL(scores_var);
PADDLE_ENFORCE_NOT_NULL(pre_ids_var); PADDLE_ENFORCE_NOT_NULL(pre_ids_var);
auto& ids = ids_var->Get<framework::LoDTensor>(); size_t level = context.Attr<int>("level");
auto& scores = scores_var->Get<framework::LoDTensor>(); size_t beam_size = context.Attr<int>("beam_size");
auto& pre_ids = pre_ids_var->Get<framework::LoDTensor>(); int end_id = context.Attr<int>("end_id");
size_t level = Attr<int>("level"); BeamSearch alg(*ids_var, *scores_var, level, beam_size, end_id);
size_t beam_size = Attr<int>("beam_size"); auto selected_ids_var =
int end_id = Attr<int>("end_id"); context.Output<framework::LoDTensor>("selected_ids");
BeamSearch alg(ids, scores, level, beam_size, end_id); auto selected_scores_var =
context.Output<framework::LoDTensor>("selected_scores");
auto selected_ids_var = scope.FindVar(Output("selected_ids"));
auto selected_scores_var = scope.FindVar(Output("selected_scores"));
PADDLE_ENFORCE_NOT_NULL(selected_ids_var); PADDLE_ENFORCE_NOT_NULL(selected_ids_var);
PADDLE_ENFORCE_NOT_NULL(selected_scores_var); PADDLE_ENFORCE_NOT_NULL(selected_scores_var);
auto& selected_ids_tensor = alg(*pre_ids_var, selected_ids_var, selected_scores_var);
*selected_ids_var->GetMutable<framework::LoDTensor>();
auto& selected_scores_tensor =
*selected_scores_var->GetMutable<framework::LoDTensor>();
alg(pre_ids, &selected_ids_tensor, &selected_scores_tensor);
} }
}; };
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/operators/bilinear_interp_op.h" #include "paddle/fluid/operators/bilinear_interp_op.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -10,7 +10,7 @@ See the License for the specific language governing permissions and ...@@ -10,7 +10,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/operators/box_coder_op.h" #include "paddle/fluid/operators/box_coder_op.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -87,7 +87,7 @@ class ConcatGradKernel : public framework::OpKernel<T> { ...@@ -87,7 +87,7 @@ class ConcatGradKernel : public framework::OpKernel<T> {
auto& dev_ctx = ctx.template device_context<DeviceContext>(); auto& dev_ctx = ctx.template device_context<DeviceContext>();
paddle::operators::math::ConcatGradFunctor<DeviceContext, T> paddle::operators::math::ConcatGradFunctor<DeviceContext, T>
concat_grad_functor; concat_grad_functor;
concat_grad_functor(dev_ctx, *in, static_cast<int>(axis), outputs); concat_grad_functor(dev_ctx, *in, static_cast<int>(axis), &outputs);
} }
} }
}; };
......
...@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "channel_util.h" #include "paddle/fluid/operators/concurrency/channel_util.h"
#include "paddle/fluid/framework/var_type.h" #include "paddle/fluid/framework/var_type.h"
namespace poc = paddle::operators::concurrency; namespace poc = paddle::operators::concurrency;
......
...@@ -20,6 +20,11 @@ limitations under the License. */ ...@@ -20,6 +20,11 @@ limitations under the License. */
#include "paddle/fluid/platform/cudnn_helper.h" #include "paddle/fluid/platform/cudnn_helper.h"
#include "paddle/fluid/platform/float16.h" #include "paddle/fluid/platform/float16.h"
DEFINE_bool(cudnn_algo_use_autotune, true,
"Whether to allow using an autotuning algorithm for the convolution "
"operator. The autotuning algorithm may be non-deterministic. If "
"false, a deterministic algorithm is used.");
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -267,10 +272,12 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> { ...@@ -267,10 +272,12 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto handle = dev_ctx.cudnn_handle(); auto handle = dev_ctx.cudnn_handle();
if (input_grad) { if (input_grad) {
if (FLAGS_cudnn_algo_use_autotune) {
PADDLE_ENFORCE( PADDLE_ENFORCE(
platform::dynload::cudnnGetConvolutionBackwardDataAlgorithm( platform::dynload::cudnnGetConvolutionBackwardDataAlgorithm(
handle, cudnn_filter_desc, handle, cudnn_filter_desc,
// dyDesc: Handle to the previously initialized input differential // dyDesc: Handle to the previously initialized input
// differential
// tensor descriptor. // tensor descriptor.
cudnn_output_grad_desc, cudnn_conv_desc, cudnn_output_grad_desc, cudnn_conv_desc,
// dxDesc: Handle to the previously initialized output tensor // dxDesc: Handle to the previously initialized output tensor
...@@ -278,6 +285,10 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> { ...@@ -278,6 +285,10 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
cudnn_input_desc, cudnn_input_desc,
CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT, CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT,
workspace_size_limit, &data_algo)); workspace_size_limit, &data_algo));
} else {
data_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
}
PADDLE_ENFORCE( PADDLE_ENFORCE(
platform::dynload::cudnnGetConvolutionBackwardDataWorkspaceSize( platform::dynload::cudnnGetConvolutionBackwardDataWorkspaceSize(
handle, cudnn_filter_desc, cudnn_output_grad_desc, handle, cudnn_filter_desc, cudnn_output_grad_desc,
...@@ -286,12 +297,16 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> { ...@@ -286,12 +297,16 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
} }
if (filter_grad) { if (filter_grad) {
if (FLAGS_cudnn_algo_use_autotune) {
PADDLE_ENFORCE( PADDLE_ENFORCE(
platform::dynload::cudnnGetConvolutionBackwardFilterAlgorithm( platform::dynload::cudnnGetConvolutionBackwardFilterAlgorithm(
handle, cudnn_input_desc, cudnn_output_grad_desc, cudnn_conv_desc, handle, cudnn_input_desc, cudnn_output_grad_desc,
cudnn_filter_desc, cudnn_conv_desc, cudnn_filter_desc,
CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT, CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT,
workspace_size_limit, &filter_algo)); workspace_size_limit, &filter_algo));
} else {
filter_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
}
PADDLE_ENFORCE( PADDLE_ENFORCE(
platform::dynload::cudnnGetConvolutionBackwardFilterWorkspaceSize( platform::dynload::cudnnGetConvolutionBackwardFilterWorkspaceSize(
......
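A hedged way to flip the new flag from code that already routes its command line through framework::InitGflags (the inference Init shown earlier does this); the binary name and the header path paddle/fluid/framework/init.h are assumptions, and any gflags-aware entry point would do the same job.

#include <string>
#include <vector>
#include "paddle/fluid/framework/init.h"

void UseDeterministicConvBackward() {
  // Disable cuDNN autotuning so the backward algorithms are always ALGO_1.
  std::vector<std::string> argv = {"train_demo",
                                   "--cudnn_algo_use_autotune=false"};
  paddle::framework::InitGflags(argv);
}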
...@@ -14,7 +14,7 @@ limitations under the License. */ ...@@ -14,7 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/conv_shift_op.h" #include "paddle/fluid/operators/conv_shift_op.h"
#include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -30,9 +30,13 @@ enum CallStatus { PROCESS = 0, FINISH }; ...@@ -30,9 +30,13 @@ enum CallStatus { PROCESS = 0, FINISH };
class RequestBase { class RequestBase {
public: public:
explicit RequestBase(GrpcService::AsyncService* service, explicit RequestBase(GrpcService::AsyncService* service,
::grpc::ServerCompletionQueue* cq, ::grpc::ServerCompletionQueue* cq, bool sync_mode,
const platform::DeviceContext* dev_ctx) const platform::DeviceContext* dev_ctx)
: service_(service), cq_(cq), status_(PROCESS), dev_ctx_(dev_ctx) { : service_(service),
cq_(cq),
sync_mode_(sync_mode),
status_(PROCESS),
dev_ctx_(dev_ctx) {
PADDLE_ENFORCE(cq_); PADDLE_ENFORCE(cq_);
} }
virtual ~RequestBase() {} virtual ~RequestBase() {}
...@@ -49,6 +53,7 @@ class RequestBase { ...@@ -49,6 +53,7 @@ class RequestBase {
::grpc::ServerContext ctx_; ::grpc::ServerContext ctx_;
GrpcService::AsyncService* service_; GrpcService::AsyncService* service_;
::grpc::ServerCompletionQueue* cq_; ::grpc::ServerCompletionQueue* cq_;
const bool sync_mode_;
CallStatus status_; CallStatus status_;
const platform::DeviceContext* dev_ctx_; const platform::DeviceContext* dev_ctx_;
}; };
...@@ -56,11 +61,17 @@ class RequestBase { ...@@ -56,11 +61,17 @@ class RequestBase {
class RequestSend final : public RequestBase { class RequestSend final : public RequestBase {
public: public:
explicit RequestSend(GrpcService::AsyncService* service, explicit RequestSend(GrpcService::AsyncService* service,
::grpc::ServerCompletionQueue* cq, ::grpc::ServerCompletionQueue* cq, bool sync_mode,
framework::Scope* scope, ReceivedQueue* queue, framework::Scope* scope, ReceivedQueue* queue,
const platform::DeviceContext* dev_ctx) const platform::DeviceContext* dev_ctx)
: RequestBase(service, cq, dev_ctx), queue_(queue), responder_(&ctx_) { : RequestBase(service, cq, sync_mode, dev_ctx),
request_.reset(new VariableResponse(scope, dev_ctx_)); queue_(queue),
responder_(&ctx_) {
if (sync_mode_) {
request_.reset(new VariableResponse(scope, dev_ctx_, false));
} else {
request_.reset(new VariableResponse(scope, dev_ctx_, true));
}
int method_id = static_cast<int>(detail::GrpcMethod::kSendVariable); int method_id = static_cast<int>(detail::GrpcMethod::kSendVariable);
service_->RequestAsyncUnary(method_id, &ctx_, request_.get(), &responder_, service_->RequestAsyncUnary(method_id, &ctx_, request_.get(), &responder_,
cq_, cq_, this); cq_, cq_, this);
...@@ -87,11 +98,11 @@ class RequestSend final : public RequestBase { ...@@ -87,11 +98,11 @@ class RequestSend final : public RequestBase {
class RequestGet final : public RequestBase { class RequestGet final : public RequestBase {
public: public:
explicit RequestGet(GrpcService::AsyncService* service, explicit RequestGet(GrpcService::AsyncService* service,
::grpc::ServerCompletionQueue* cq, ::grpc::ServerCompletionQueue* cq, bool sync_mode,
framework::Scope* scope, framework::Scope* scope,
const platform::DeviceContext* dev_ctx, const platform::DeviceContext* dev_ctx,
framework::BlockingQueue<MessageWithName>* queue) framework::BlockingQueue<MessageWithName>* queue)
: RequestBase(service, cq, dev_ctx), : RequestBase(service, cq, sync_mode, dev_ctx),
responder_(&ctx_), responder_(&ctx_),
scope_(scope), scope_(scope),
queue_(queue) { queue_(queue) {
...@@ -134,19 +145,23 @@ class RequestGet final : public RequestBase { ...@@ -134,19 +145,23 @@ class RequestGet final : public RequestBase {
class RequestPrefetch final : public RequestBase { class RequestPrefetch final : public RequestBase {
public: public:
explicit RequestPrefetch(GrpcService::AsyncService* service, explicit RequestPrefetch(GrpcService::AsyncService* service,
::grpc::ServerCompletionQueue* cq, ::grpc::ServerCompletionQueue* cq, bool sync_mode,
framework::Scope* scope, framework::Scope* scope,
const platform::DeviceContext* dev_ctx, const platform::DeviceContext* dev_ctx,
framework::Executor* executor, framework::Executor* executor,
framework::ProgramDesc* program, framework::ProgramDesc* program,
framework::ExecutorPrepareContext* prefetch_ctx) framework::ExecutorPrepareContext* prefetch_ctx)
: RequestBase(service, cq, dev_ctx), : RequestBase(service, cq, sync_mode, dev_ctx),
responder_(&ctx_), responder_(&ctx_),
scope_(scope), scope_(scope),
executor_(executor), executor_(executor),
program_(program), program_(program),
prefetch_ctx_(prefetch_ctx) { prefetch_ctx_(prefetch_ctx) {
request_.reset(new VariableResponse(scope, dev_ctx_)); if (sync_mode_) {
request_.reset(new VariableResponse(scope, dev_ctx_, false));
} else {
request_.reset(new VariableResponse(scope, dev_ctx_, true));
}
int method_id = static_cast<int>(detail::GrpcMethod::kPrefetchVariable); int method_id = static_cast<int>(detail::GrpcMethod::kPrefetchVariable);
service_->RequestAsyncUnary(method_id, &ctx_, request_.get(), &responder_, service_->RequestAsyncUnary(method_id, &ctx_, request_.get(), &responder_,
cq_, cq_, this); cq_, cq_, this);
...@@ -181,7 +196,6 @@ class RequestPrefetch final : public RequestBase { ...@@ -181,7 +196,6 @@ class RequestPrefetch final : public RequestBase {
framework::Executor* executor_; framework::Executor* executor_;
framework::ProgramDesc* program_; framework::ProgramDesc* program_;
framework::ExecutorPrepareContext* prefetch_ctx_; framework::ExecutorPrepareContext* prefetch_ctx_;
int blkid_;
}; };
void AsyncGRPCServer::WaitClientGet(int count) { void AsyncGRPCServer::WaitClientGet(int count) {
...@@ -254,8 +268,8 @@ void AsyncGRPCServer::TryToRegisterNewSendOne() { ...@@ -254,8 +268,8 @@ void AsyncGRPCServer::TryToRegisterNewSendOne() {
VLOG(3) << "shutdown, do not TryToRegisterNewSendOne"; VLOG(3) << "shutdown, do not TryToRegisterNewSendOne";
return; return;
} }
RequestSend* send = new RequestSend(&service_, cq_send_.get(), scope_, RequestSend* send = new RequestSend(&service_, cq_send_.get(), sync_mode_,
&var_recv_queue_, dev_ctx_); scope_, &var_recv_queue_, dev_ctx_);
VLOG(4) << "Create RequestSend status:" << send->Status(); VLOG(4) << "Create RequestSend status:" << send->Status();
} }
...@@ -265,8 +279,8 @@ void AsyncGRPCServer::TryToRegisterNewGetOne() { ...@@ -265,8 +279,8 @@ void AsyncGRPCServer::TryToRegisterNewGetOne() {
VLOG(3) << "shutdown, do not TryToRegisterNewGetOne"; VLOG(3) << "shutdown, do not TryToRegisterNewGetOne";
return; return;
} }
RequestGet* get = new RequestGet(&service_, cq_get_.get(), scope_, dev_ctx_, RequestGet* get = new RequestGet(&service_, cq_get_.get(), sync_mode_, scope_,
&var_get_queue_); dev_ctx_, &var_get_queue_);
VLOG(4) << "Create RequestGet status:" << get->Status(); VLOG(4) << "Create RequestGet status:" << get->Status();
} }
...@@ -277,8 +291,8 @@ void AsyncGRPCServer::TryToRegisterNewPrefetchOne() { ...@@ -277,8 +291,8 @@ void AsyncGRPCServer::TryToRegisterNewPrefetchOne() {
return; return;
} }
RequestPrefetch* prefetch = RequestPrefetch* prefetch =
new RequestPrefetch(&service_, cq_prefetch_.get(), scope_, dev_ctx_, new RequestPrefetch(&service_, cq_prefetch_.get(), sync_mode_, scope_,
executor_, program_, prefetch_ctx_); dev_ctx_, executor_, program_, prefetch_ctx_);
VLOG(4) << "Create RequestPrefetch status:" << prefetch->Status(); VLOG(4) << "Create RequestPrefetch status:" << prefetch->Status();
} }
...@@ -301,9 +315,11 @@ void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq, ...@@ -301,9 +315,11 @@ void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq,
VLOG(3) << "HandleRequest for " << cq_name << " while after Next"; VLOG(3) << "HandleRequest for " << cq_name << " while after Next";
PADDLE_ENFORCE(tag); PADDLE_ENFORCE(tag);
if (sync_mode_) {
// FIXME(typhoonzero): de-couple the barriers with recv_op // FIXME(typhoonzero): de-couple the barriers with recv_op
if (!is_shut_down_ && cq_name == "cq_get") WaitCond(1); if (!is_shut_down_ && cq_name == "cq_get") WaitCond(1);
if (!is_shut_down_ && cq_name == "cq_send") WaitCond(0); if (!is_shut_down_ && cq_name == "cq_send") WaitCond(0);
}
RequestBase* base = reinterpret_cast<RequestBase*>(tag); RequestBase* base = reinterpret_cast<RequestBase*>(tag);
// reference: // reference:
...@@ -320,13 +336,13 @@ void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq, ...@@ -320,13 +336,13 @@ void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq,
switch (base->Status()) { switch (base->Status()) {
case PROCESS: { case PROCESS: {
VLOG(4) << cq_name << " status:" << base->Status(); VLOG(4) << cq_name << " PROCESS status:" << base->Status();
TryToRegisterNewOne(); TryToRegisterNewOne();
base->Process(); base->Process();
break; break;
} }
case FINISH: { case FINISH: {
VLOG(4) << cq_name << " status:" << base->Status(); VLOG(4) << cq_name << " FINISH status:" << base->Status();
delete base; delete base;
break; break;
} }
......
...@@ -44,7 +44,8 @@ class RequestBase; ...@@ -44,7 +44,8 @@ class RequestBase;
class AsyncGRPCServer final { class AsyncGRPCServer final {
public: public:
explicit AsyncGRPCServer(const std::string &address) : address_(address) {} explicit AsyncGRPCServer(const std::string &address, bool sync_mode)
: address_(address), sync_mode_(sync_mode) {}
void RunSyncUpdate(); void RunSyncUpdate();
...@@ -95,6 +96,7 @@ class AsyncGRPCServer final { ...@@ -95,6 +96,7 @@ class AsyncGRPCServer final {
std::unique_ptr<::grpc::Server> server_; std::unique_ptr<::grpc::Server> server_;
std::string address_; std::string address_;
const bool sync_mode_;
framework::Scope *scope_; framework::Scope *scope_;
const platform::DeviceContext *dev_ctx_; const platform::DeviceContext *dev_ctx_;
......
...@@ -89,7 +89,7 @@ void InitTensorsOnServer(framework::Scope* scope, platform::CPUPlace* place, ...@@ -89,7 +89,7 @@ void InitTensorsOnServer(framework::Scope* scope, platform::CPUPlace* place,
} }
void StartServer(const std::string& endpoint) { void StartServer(const std::string& endpoint) {
rpc_service_.reset(new detail::AsyncGRPCServer(endpoint)); rpc_service_.reset(new detail::AsyncGRPCServer(endpoint, true));
framework::ProgramDesc program; framework::ProgramDesc program;
framework::Scope scope; framework::Scope scope;
platform::CPUPlace place; platform::CPUPlace place;
......
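A small sketch of the two server modes enabled by the new constructor argument, mirroring the test above; the endpoints and the header path paddle/fluid/operators/detail/grpc_server.h are assumptions.

#include "paddle/fluid/operators/detail/grpc_server.h"

// Synchronous mode: send/get requests are gated by the barrier WaitCond calls.
paddle::operators::detail::AsyncGRPCServer sync_server("127.0.0.1:6174",
                                                       true /*sync_mode*/);

// Asynchronous mode: requests are processed as they arrive, no barriers.
paddle::operators::detail::AsyncGRPCServer async_server("127.0.0.1:6175",
                                                        false /*sync_mode*/);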
...@@ -46,7 +46,9 @@ class VariableResponse { ...@@ -46,7 +46,9 @@ class VariableResponse {
} }
virtual ~VariableResponse() { virtual ~VariableResponse() {
if (create_scope_) scope_->DeleteScope(local_scope_); if (create_scope_) {
scope_->DeleteScope(local_scope_);
}
} }
// return: // return:
...@@ -63,6 +65,8 @@ class VariableResponse { ...@@ -63,6 +65,8 @@ class VariableResponse {
const framework::Scope& GetLocalScope() const { return *local_scope_; } const framework::Scope& GetLocalScope() const { return *local_scope_; }
framework::Scope* GetMutableLocalScope() const { return local_scope_; }
inline std::string Varname() { return meta_.varname(); } inline std::string Varname() { return meta_.varname(); }
inline std::string OutVarname() { return meta_.out_varname(); } inline std::string OutVarname() { return meta_.out_varname(); }
......
...@@ -16,7 +16,7 @@ limitations under the License. */ ...@@ -16,7 +16,7 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/edit_distance_op.h" #include "paddle/fluid/operators/edit_distance_op.h"
#include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/gpu_info.h" #include "paddle/fluid/platform/gpu_info.h"
namespace paddle { namespace paddle {
......
...@@ -22,6 +22,7 @@ limitations under the License. */ ...@@ -22,6 +22,7 @@ limitations under the License. */
#ifdef __NVCC__ #ifdef __NVCC__
#include <cuda.h> #include <cuda.h>
#include <thrust/iterator/iterator_adaptor.h> #include <thrust/iterator/iterator_adaptor.h>
#include "paddle/fluid/platform/cuda_primitives.h"
constexpr int ELEMWISE_MAX_BLOCK_DIM = 1024; constexpr int ELEMWISE_MAX_BLOCK_DIM = 1024;
#endif #endif
...@@ -333,24 +334,12 @@ static void ElemwiseGradBroadcast1CPU(const T* x, const T* y, const T* out, ...@@ -333,24 +334,12 @@ static void ElemwiseGradBroadcast1CPU(const T* x, const T* y, const T* out,
} }
} }
} }
#ifdef __NVCC__
// __shfl_down has been deprecated as of CUDA 9.0. #ifdef __NVCC__
#if CUDA_VERSION < 9000
template <typename T>
__forceinline__ __device__ T __shfl_down_sync(unsigned, T val, int delta) {
return __shfl_down(val, delta);
}
#define CREATE_SHFL_MASK(mask, predicate) mask = 0u;
#else
#define FULL_WARP_MASK 0xFFFFFFFF
#define CREATE_SHFL_MASK(mask, predicate) \
mask = __ballot_sync(FULL_WARP_MASK, (predicate))
#endif
template <typename T> template <typename T>
__device__ T reduceSum(T val, int tid, int len) { __device__ T reduceSum(T val, int tid, int len) {
// TODO(zcd): The warp size should be taken from the // NOTE(zcd): The warp size should be taken from the
// parameters of the GPU but not specified as 32 simply. // parameters of the GPU but not specified as 32 simply.
// To make the reduceSum more efficiently, // To make the reduceSum more efficiently,
// I use Warp-Level Parallelism and assume the Warp size // I use Warp-Level Parallelism and assume the Warp size
...@@ -362,7 +351,7 @@ __device__ T reduceSum(T val, int tid, int len) { ...@@ -362,7 +351,7 @@ __device__ T reduceSum(T val, int tid, int len) {
CREATE_SHFL_MASK(mask, tid < len); CREATE_SHFL_MASK(mask, tid < len);
for (int offset = warpSize / 2; offset > 0; offset /= 2) for (int offset = warpSize / 2; offset > 0; offset /= 2)
val += __shfl_down_sync(mask, val, offset); val += platform::__shfl_down_sync(mask, val, offset);
if (tid < warpSize) shm[tid] = 0; if (tid < warpSize) shm[tid] = 0;
...@@ -378,7 +367,7 @@ __device__ T reduceSum(T val, int tid, int len) { ...@@ -378,7 +367,7 @@ __device__ T reduceSum(T val, int tid, int len) {
if (tid < warpSize) { if (tid < warpSize) {
val = shm[tid]; val = shm[tid];
for (int offset = warpSize / 2; offset > 0; offset /= 2) for (int offset = warpSize / 2; offset > 0; offset /= 2)
val += __shfl_down_sync(mask, val, offset); val += platform::__shfl_down_sync(mask, val, offset);
} }
return val; return val;
......
...@@ -57,10 +57,7 @@ class FetchOp : public framework::OperatorBase { ...@@ -57,10 +57,7 @@ class FetchOp : public framework::OperatorBase {
// FIXME(yuyang18): Should we assume the fetch operator always generate // FIXME(yuyang18): Should we assume the fetch operator always generate
// CPU outputs? // CPU outputs?
auto &dev_ctx = *pool.Get(src_item.place()); TensorCopySync(src_item, platform::CPUPlace(), &dst_item);
TensorCopy(src_item, platform::CPUPlace(), dev_ctx, &dst_item);
dev_ctx.Wait();
dst_item.set_lod(src_item.lod()); dst_item.set_lod(src_item.lod());
VLOG(3) << "Fetch variable " << fetch_var_name << " to " << out_name; VLOG(3) << "Fetch variable " << fetch_var_name << " to " << out_name;
......
...@@ -34,7 +34,7 @@ inline void ReorderInitState(const DeviceContext& ctx, ...@@ -34,7 +34,7 @@ inline void ReorderInitState(const DeviceContext& ctx,
framework::Tensor* dst, bool indexed_src) { framework::Tensor* dst, bool indexed_src) {
math::CopyMatrixRowsFunctor<DeviceContext, T> row_shuffle; math::CopyMatrixRowsFunctor<DeviceContext, T> row_shuffle;
dst->mutable_data<T>(src.dims(), ctx.GetPlace()); dst->mutable_data<T>(src.dims(), ctx.GetPlace());
row_shuffle(ctx, src, index_lod, *dst, indexed_src); row_shuffle(ctx, src, index_lod, dst, indexed_src);
} }
template <typename DeviceContext, typename T> template <typename DeviceContext, typename T>
...@@ -61,7 +61,7 @@ class GRUKernel : public framework::OpKernel<T> { ...@@ -61,7 +61,7 @@ class GRUKernel : public framework::OpKernel<T> {
bool is_reverse = context.Attr<bool>("is_reverse"); bool is_reverse = context.Attr<bool>("is_reverse");
math::LoDTensor2BatchFunctor<DeviceContext, T> to_batch; math::LoDTensor2BatchFunctor<DeviceContext, T> to_batch;
auto& dev_ctx = context.template device_context<DeviceContext>(); auto& dev_ctx = context.template device_context<DeviceContext>();
to_batch(dev_ctx, *input, *batch_gate, true, is_reverse); to_batch(dev_ctx, *input, batch_gate, true, is_reverse);
if (bias) { if (bias) {
math::RowwiseAdd<DeviceContext, T> add_bias; math::RowwiseAdd<DeviceContext, T> add_bias;
...@@ -113,7 +113,7 @@ class GRUKernel : public framework::OpKernel<T> { ...@@ -113,7 +113,7 @@ class GRUKernel : public framework::OpKernel<T> {
math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq; math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq;
batch_hidden->set_lod(batch_gate->lod()); batch_hidden->set_lod(batch_gate->lod());
to_seq(dev_ctx, *batch_hidden, *hidden); to_seq(dev_ctx, *batch_hidden, hidden);
} }
void Compute(const framework::ExecutionContext& context) const override { void Compute(const framework::ExecutionContext& context) const override {
...@@ -174,7 +174,7 @@ class GRUGradKernel : public framework::OpKernel<T> { ...@@ -174,7 +174,7 @@ class GRUGradKernel : public framework::OpKernel<T> {
bool is_reverse = context.Attr<bool>("is_reverse"); bool is_reverse = context.Attr<bool>("is_reverse");
batch_hidden_grad.set_lod(batch_hidden->lod()); batch_hidden_grad.set_lod(batch_hidden->lod());
to_batch(dev_ctx, *hidden_grad, batch_hidden_grad, false, is_reverse); to_batch(dev_ctx, *hidden_grad, &batch_hidden_grad, false, is_reverse);
math::GRUMetaValue<T> gru_value; math::GRUMetaValue<T> gru_value;
gru_value.gate_weight = const_cast<T*>(weight_data); gru_value.gate_weight = const_cast<T*>(weight_data);
...@@ -236,7 +236,7 @@ class GRUGradKernel : public framework::OpKernel<T> { ...@@ -236,7 +236,7 @@ class GRUGradKernel : public framework::OpKernel<T> {
input_grad->mutable_data<T>(context.GetPlace()); input_grad->mutable_data<T>(context.GetPlace());
math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq; math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq;
batch_gate_grad.set_lod(batch_gate->lod()); batch_gate_grad.set_lod(batch_gate->lod());
to_seq(dev_ctx, batch_gate_grad, *input_grad); to_seq(dev_ctx, batch_gate_grad, input_grad);
} }
if (bias_grad) { if (bias_grad) {
bias_grad->mutable_data<T>(context.GetPlace()); bias_grad->mutable_data<T>(context.GetPlace());
......
...@@ -41,22 +41,24 @@ struct IOUSimilarityFunctor { ...@@ -41,22 +41,24 @@ struct IOUSimilarityFunctor {
IOUSimilarityFunctor(const T* x, const T* y, T* z, int cols) IOUSimilarityFunctor(const T* x, const T* y, T* z, int cols)
: x_(x), y_(y), z_(z), cols_(static_cast<size_t>(cols)) {} : x_(x), y_(y), z_(z), cols_(static_cast<size_t>(cols)) {}
inline HOSTDEVICE void operator()(size_t row_id) const { inline HOSTDEVICE void operator()(size_t tid) const {
size_t row_id = tid / cols_;
size_t col_id = tid % cols_;
T x_min1 = x_[row_id * 4]; T x_min1 = x_[row_id * 4];
T y_min1 = x_[row_id * 4 + 1]; T y_min1 = x_[row_id * 4 + 1];
T x_max1 = x_[row_id * 4 + 2]; T x_max1 = x_[row_id * 4 + 2];
T y_max1 = x_[row_id * 4 + 3]; T y_max1 = x_[row_id * 4 + 3];
for (size_t i = 0; i < cols_; ++i) {
T x_min2 = y_[i * 4]; T x_min2 = y_[col_id * 4];
T y_min2 = y_[i * 4 + 1]; T y_min2 = y_[col_id * 4 + 1];
T x_max2 = y_[i * 4 + 2]; T x_max2 = y_[col_id * 4 + 2];
T y_max2 = y_[i * 4 + 3]; T y_max2 = y_[col_id * 4 + 3];
T sim = IOUSimilarity(x_min1, y_min1, x_max1, y_max1, x_min2, y_min2, T sim = IOUSimilarity(x_min1, y_min1, x_max1, y_max1, x_min2, y_min2,
x_max2, y_max2); x_max2, y_max2);
z_[row_id * cols_ + i] = sim; z_[row_id * cols_ + col_id] = sim;
}
} }
const T* x_; const T* x_;
const T* y_; const T* y_;
...@@ -81,7 +83,7 @@ class IOUSimilarityKernel : public framework::OpKernel<T> { ...@@ -81,7 +83,7 @@ class IOUSimilarityKernel : public framework::OpKernel<T> {
out->mutable_data<T>(ctx.GetPlace()), y_n); out->mutable_data<T>(ctx.GetPlace()), y_n);
platform::ForRange<DeviceContext> for_range( platform::ForRange<DeviceContext> for_range(
static_cast<const DeviceContext&>(ctx.device_context()), x_n); static_cast<const DeviceContext&>(ctx.device_context()), x_n * y_n);
for_range(functor); for_range(functor);
} }
}; // namespace operators }; // namespace operators
......
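The functor now receives one thread per (box in X, box in Y) pair instead of one per row, so ForRange runs over x_n * y_n. A tiny sketch of the index arithmetic it depends on (UnflattenIndex is illustrative, not a Paddle helper):

#include <cstddef>

// Recover the (row, col) pair from a flattened id in [0, rows * cols).
inline void UnflattenIndex(std::size_t tid, std::size_t cols,
                           std::size_t* row_id, std::size_t* col_id) {
  *row_id = tid / cols;  // which box of X
  *col_id = tid % cols;  // which box of Y
}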
...@@ -27,6 +27,38 @@ void RunServer(std::shared_ptr<detail::AsyncGRPCServer> service) { ...@@ -27,6 +27,38 @@ void RunServer(std::shared_ptr<detail::AsyncGRPCServer> service) {
VLOG(4) << "RunServer thread end"; VLOG(4) << "RunServer thread end";
} }
static void split(const std::string &str, char sep,
std::vector<std::string> *pieces) {
pieces->clear();
if (str.empty()) {
return;
}
size_t pos = 0;
size_t next = str.find(sep, pos);
while (next != std::string::npos) {
pieces->push_back(str.substr(pos, next - pos));
pos = next + 1;
next = str.find(sep, pos);
}
if (!str.substr(pos).empty()) {
pieces->push_back(str.substr(pos));
}
}
static void AsyncExecuteBlock(framework::Executor *executor,
framework::ExecutorPrepareContext *prepared,
framework::Scope *scope) {
std::future<void> future = framework::Async([&executor, &prepared, &scope]() {
try {
executor->RunPreparedContext(prepared, scope, false, false);
} catch (std::exception &e) {
LOG(ERROR) << "run sub program error " << e.what();
}
});
// TODO(qiao) maybe we can remove this
future.wait();
}
static void ParallelExecuteBlocks( static void ParallelExecuteBlocks(
const std::vector<size_t> &parallel_blkids, framework::Executor *executor, const std::vector<size_t> &parallel_blkids, framework::Executor *executor,
const std::vector<std::shared_ptr<framework::ExecutorPrepareContext>> const std::vector<std::shared_ptr<framework::ExecutorPrepareContext>>
...@@ -169,15 +201,82 @@ void ListenAndServOp::RunSyncLoop(framework::Executor *executor, ...@@ -169,15 +201,82 @@ void ListenAndServOp::RunSyncLoop(framework::Executor *executor,
} // while(true) } // while(true)
} }
void ListenAndServOp::RunAsyncLoop(framework::Executor *executor,
framework::ProgramDesc *program,
framework::Scope *recv_scope,
framework::BlockDesc *prefetch_block) const {
VLOG(3) << "RunAsyncLoop in";
// grad name to block id
std::unordered_map<std::string, int32_t> grad_to_block_id;
std::unordered_map<int32_t, std::string> id_to_grad;
auto grad_to_block_id_str =
Attr<std::vector<std::string>>("grad_to_block_id");
for (auto &grad_and_id : grad_to_block_id_str) {
std::vector<std::string> pieces;
split(grad_and_id, ':', &pieces);
VLOG(3) << "after split, grad = " << pieces[0] << ", id=" << pieces[1];
PADDLE_ENFORCE_EQ(pieces.size(), 2);
PADDLE_ENFORCE_EQ(grad_to_block_id.count(pieces[0]), 0);
int block_id = std::stoi(pieces[1]);
grad_to_block_id[pieces[0]] = block_id;
id_to_grad[block_id] = pieces[0];
}
size_t num_blocks = program->Size();
PADDLE_ENFORCE_GE(num_blocks, 2,
"server program should have at least 2 blocks");
std::vector<int> block_list;
for (size_t blkid = 1; blkid < num_blocks; ++blkid) {
block_list.push_back(blkid);
}
auto optimize_prepared = executor->Prepare(*program, block_list);
std::unordered_map<std::string,
std::shared_ptr<framework::ExecutorPrepareContext>>
grad_to_prepared_ctx;
for (size_t i = 0; i < block_list.size(); ++i) {
grad_to_prepared_ctx[id_to_grad[block_list[i]]] = optimize_prepared[i];
}
VLOG(3) << "RunAsyncLoop into while";
bool exit_flag = false;
while (!exit_flag) {
const detail::ReceivedMessage v = rpc_service_->Get();
auto recv_var_name = v.first;
if (recv_var_name == LISTEN_TERMINATE_MESSAGE) {
LOG(INFO) << "received terminate message and exit";
exit_flag = true;
break;
} else {
VLOG(3) << "received grad: " << recv_var_name;
auto var = v.second->GetVar();
if (var == nullptr) {
LOG(ERROR) << "Can not find server side var: " << recv_var_name;
PADDLE_THROW("Can not find server side var");
}
AsyncExecuteBlock(executor, grad_to_prepared_ctx[recv_var_name].get(),
v.second->GetMutableLocalScope());
}
if (exit_flag) {
rpc_service_->ShutDown();
break;
}
} // while(true)
}
void ListenAndServOp::RunImpl(const framework::Scope &scope, void ListenAndServOp::RunImpl(const framework::Scope &scope,
const platform::Place &dev_place) const { const platform::Place &dev_place) const {
platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
auto &dev_ctx = *pool.Get(dev_place); auto &dev_ctx = *pool.Get(dev_place);
framework::Scope &recv_scope = scope.NewScope(); framework::Scope &recv_scope = scope.NewScope();
bool sync_mode = Attr<bool>("sync_mode");
PADDLE_ENFORCE(!rpc_service_); PADDLE_ENFORCE(!rpc_service_);
std::string endpoint = Attr<std::string>("endpoint"); std::string endpoint = Attr<std::string>("endpoint");
rpc_service_.reset(new detail::AsyncGRPCServer(endpoint));
rpc_service_.reset(new detail::AsyncGRPCServer(endpoint, sync_mode));
auto *optimize_block = Attr<framework::BlockDesc *>(kOptimizeBlock); auto *optimize_block = Attr<framework::BlockDesc *>(kOptimizeBlock);
auto *prefetch_block = Attr<framework::BlockDesc *>(kPrefetchBlock); auto *prefetch_block = Attr<framework::BlockDesc *>(kPrefetchBlock);
...@@ -202,7 +301,11 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, ...@@ -202,7 +301,11 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope,
sleep(5); sleep(5);
// Write to a file of server selected port for python use. // Write to a file of server selected port for python use.
SavePort(rpc_service_); SavePort(rpc_service_);
if (sync_mode) {
RunSyncLoop(&executor, program, &recv_scope, prefetch_block); RunSyncLoop(&executor, program, &recv_scope, prefetch_block);
} else {
RunAsyncLoop(&executor, program, &recv_scope, prefetch_block);
}
} }
class ListenAndServOpMaker : public framework::OpProtoAndCheckerMaker { class ListenAndServOpMaker : public framework::OpProtoAndCheckerMaker {
...@@ -221,6 +324,12 @@ from send_op and send back variables to recv_op. ...@@ -221,6 +324,12 @@ from send_op and send back variables to recv_op.
"IP address to listen on.") "IP address to listen on.")
.SetDefault("127.0.0.1:6164") .SetDefault("127.0.0.1:6164")
.AddCustomChecker([](const std::string &ip) { return !ip.empty(); }); .AddCustomChecker([](const std::string &ip) { return !ip.empty(); });
AddAttr<std::vector<std::string>>(
"grad_to_block_id",
"['param1@GRAD.block0:1', 'param2@GRAD.blockn:2'] "
"a map from grad name to it's optimize block id")
.SetDefault({});
AddAttr<bool>("sync_mode", "if works at sync_mode or not").SetDefault(true);
AddAttr<framework::BlockDesc *>(kOptimizeBlock, AddAttr<framework::BlockDesc *>(kOptimizeBlock,
"BlockID to run on server side."); "BlockID to run on server side.");
AddAttr<framework::BlockDesc *>(kPrefetchBlock, AddAttr<framework::BlockDesc *>(kPrefetchBlock,
......
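A hedged sketch of the grad_to_block_id attribute format that RunAsyncLoop parses with the split helper above; the parameter names are illustrative and ParseGradToBlockId is not a Paddle function.

#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

// Each "<grad_var>:<block_id>" entry tells the server which optimize block
// to run when that gradient arrives, e.g.
//   ParseGradToBlockId({"param1@GRAD.block0:1", "param2@GRAD.blockn:2"});
std::unordered_map<std::string, int32_t> ParseGradToBlockId(
    const std::vector<std::string>& grad_to_block_id) {
  std::unordered_map<std::string, int32_t> grad_to_block;
  for (const auto& grad_and_id : grad_to_block_id) {
    std::vector<std::string> pieces;
    split(grad_and_id, ':', &pieces);  // e.g. {"param1@GRAD.block0", "1"}
    grad_to_block[pieces[0]] = std::stoi(pieces[1]);
  }
  return grad_to_block;
}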
...@@ -46,6 +46,11 @@ class ListenAndServOp : public framework::OperatorBase { ...@@ -46,6 +46,11 @@ class ListenAndServOp : public framework::OperatorBase {
framework::Scope* recv_scope, framework::Scope* recv_scope,
framework::BlockDesc* prefetch_block) const; framework::BlockDesc* prefetch_block) const;
void RunAsyncLoop(framework::Executor* executor,
framework::ProgramDesc* program,
framework::Scope* recv_scope,
framework::BlockDesc* prefetch_block) const;
void Stop() override; void Stop() override;
void RunImpl(const framework::Scope& scope, void RunImpl(const framework::Scope& scope,
......
...@@ -16,7 +16,7 @@ limitations under the License. */ ...@@ -16,7 +16,7 @@ limitations under the License. */
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/lookup_table_op.h" #include "paddle/fluid/operators/lookup_table_op.h"
#include "paddle/fluid/platform/assert.h" #include "paddle/fluid/platform/assert.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -33,7 +33,7 @@ inline void ReorderInitState(const DeviceContext& ctx, ...@@ -33,7 +33,7 @@ inline void ReorderInitState(const DeviceContext& ctx,
framework::Tensor* dst, bool indexed_src) { framework::Tensor* dst, bool indexed_src) {
math::CopyMatrixRowsFunctor<DeviceContext, T> row_shuffle; math::CopyMatrixRowsFunctor<DeviceContext, T> row_shuffle;
dst->mutable_data<T>(src.dims(), ctx.GetPlace()); dst->mutable_data<T>(src.dims(), ctx.GetPlace());
row_shuffle(ctx, src, index_lod, *dst, indexed_src); row_shuffle(ctx, src, index_lod, dst, indexed_src);
} }
template <typename DeviceContext, typename T> template <typename DeviceContext, typename T>
...@@ -57,7 +57,7 @@ class LSTMKernel : public framework::OpKernel<T> { ...@@ -57,7 +57,7 @@ class LSTMKernel : public framework::OpKernel<T> {
bool is_reverse = ctx.Attr<bool>("is_reverse"); bool is_reverse = ctx.Attr<bool>("is_reverse");
math::LoDTensor2BatchFunctor<DeviceContext, T> to_batch; math::LoDTensor2BatchFunctor<DeviceContext, T> to_batch;
auto& device_ctx = ctx.template device_context<DeviceContext>(); auto& device_ctx = ctx.template device_context<DeviceContext>();
to_batch(device_ctx, *input, *batch_gate, true, is_reverse); to_batch(device_ctx, *input, batch_gate, true, is_reverse);
auto in_dims = input->dims(); auto in_dims = input->dims();
int frame_size = static_cast<int>(in_dims[1] / 4); int frame_size = static_cast<int>(in_dims[1] / 4);
...@@ -161,11 +161,11 @@ class LSTMKernel : public framework::OpKernel<T> { ...@@ -161,11 +161,11 @@ class LSTMKernel : public framework::OpKernel<T> {
math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq; math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq;
batch_hidden.set_lod(batch_gate->lod()); batch_hidden.set_lod(batch_gate->lod());
// restore the output hidden in LoDTensor from the batch hidden // restore the output hidden in LoDTensor from the batch hidden
to_seq(device_ctx, batch_hidden, *hidden_out); to_seq(device_ctx, batch_hidden, hidden_out);
batch_cell.set_lod(batch_gate->lod()); batch_cell.set_lod(batch_gate->lod());
// restore the output cell state in LoDTensor from the batch cell // restore the output cell state in LoDTensor from the batch cell
to_seq(device_ctx, batch_cell, *cell_out); to_seq(device_ctx, batch_cell, cell_out);
} }
}; };
...@@ -257,7 +257,7 @@ class LSTMGradKernel : public framework::OpKernel<T> { ...@@ -257,7 +257,7 @@ class LSTMGradKernel : public framework::OpKernel<T> {
const framework::DDim& dims, framework::LoDTensor& dst) { const framework::DDim& dims, framework::LoDTensor& dst) {
dst.mutable_data<T>(dims, ctx.GetPlace()); dst.mutable_data<T>(dims, ctx.GetPlace());
dst.set_lod(batch_gate->lod()); dst.set_lod(batch_gate->lod());
to_batch(ctx, src, dst, false); to_batch(ctx, src, &dst, false);
}; };
LoDTensor batch_hidden, batch_hidden_g, batch_cell; LoDTensor batch_hidden, batch_hidden_g, batch_cell;
...@@ -351,7 +351,7 @@ class LSTMGradKernel : public framework::OpKernel<T> { ...@@ -351,7 +351,7 @@ class LSTMGradKernel : public framework::OpKernel<T> {
if (in_g) { if (in_g) {
/* backward data */ /* backward data */
in_g->mutable_data<T>(ctx.GetPlace()); in_g->mutable_data<T>(ctx.GetPlace());
to_seq(device_ctx, batch_gate_g, *in_g); to_seq(device_ctx, batch_gate_g, in_g);
} }
if (bias && bias_g) { if (bias && bias_g) {
/* backward bias */ /* backward bias */
......
...@@ -40,7 +40,7 @@ inline void ReorderInitState(const DeviceContext& ctx, ...@@ -40,7 +40,7 @@ inline void ReorderInitState(const DeviceContext& ctx,
framework::Tensor* dst, bool indexed_src) { framework::Tensor* dst, bool indexed_src) {
math::CopyMatrixRowsFunctor<DeviceContext, T> row_shuffle; math::CopyMatrixRowsFunctor<DeviceContext, T> row_shuffle;
dst->mutable_data<T>(src.dims(), ctx.GetPlace()); dst->mutable_data<T>(src.dims(), ctx.GetPlace());
row_shuffle(ctx, src, index, *dst, indexed_src); row_shuffle(ctx, src, index, dst, indexed_src);
} }
template <typename DeviceContext, typename T> template <typename DeviceContext, typename T>
...@@ -81,7 +81,7 @@ class LSTMPKernel : public framework::OpKernel<T> { ...@@ -81,7 +81,7 @@ class LSTMPKernel : public framework::OpKernel<T> {
bool is_reverse = ctx.Attr<bool>("is_reverse"); bool is_reverse = ctx.Attr<bool>("is_reverse");
math::LoDTensor2BatchFunctor<DeviceContext, T> to_batch; math::LoDTensor2BatchFunctor<DeviceContext, T> to_batch;
auto& device_ctx = ctx.template device_context<DeviceContext>(); auto& device_ctx = ctx.template device_context<DeviceContext>();
to_batch(device_ctx, *input, *batch_gate, true, is_reverse); to_batch(device_ctx, *input, batch_gate, true, is_reverse);
auto in_dims = input->dims(); auto in_dims = input->dims();
int frame_size = static_cast<int>(in_dims[1] / 4); int frame_size = static_cast<int>(in_dims[1] / 4);
...@@ -208,11 +208,11 @@ class LSTMPKernel : public framework::OpKernel<T> { ...@@ -208,11 +208,11 @@ class LSTMPKernel : public framework::OpKernel<T> {
math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq; math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq;
batch_proj.set_lod(batch_gate->lod()); batch_proj.set_lod(batch_gate->lod());
// restore the output hidden in LoDTensor from the batch hidden // restore the output hidden in LoDTensor from the batch hidden
to_seq(device_ctx, batch_proj, *proj_out); to_seq(device_ctx, batch_proj, proj_out);
batch_cell.set_lod(batch_gate->lod()); batch_cell.set_lod(batch_gate->lod());
// restore the output cell state in LoDTensor from the batch cell // restore the output cell state in LoDTensor from the batch cell
to_seq(device_ctx, batch_cell, *cell_out); to_seq(device_ctx, batch_cell, cell_out);
} }
}; };
...@@ -332,7 +332,7 @@ class LSTMPGradKernel : public framework::OpKernel<T> { ...@@ -332,7 +332,7 @@ class LSTMPGradKernel : public framework::OpKernel<T> {
const framework::DDim& dims, framework::LoDTensor& dst) { const framework::DDim& dims, framework::LoDTensor& dst) {
dst.mutable_data<T>(dims, ctx.GetPlace()); dst.mutable_data<T>(dims, ctx.GetPlace());
dst.set_lod(batch_gate->lod()); dst.set_lod(batch_gate->lod());
to_batch(ctx, src, dst, false); to_batch(ctx, src, &dst, false);
}; };
LoDTensor batch_hidden_g, batch_proj, batch_proj_g, batch_cell; LoDTensor batch_hidden_g, batch_proj, batch_proj_g, batch_cell;
...@@ -471,7 +471,7 @@ class LSTMPGradKernel : public framework::OpKernel<T> { ...@@ -471,7 +471,7 @@ class LSTMPGradKernel : public framework::OpKernel<T> {
if (in_g) { if (in_g) {
/* backward data */ /* backward data */
in_g->mutable_data<T>(ctx.GetPlace()); in_g->mutable_data<T>(ctx.GetPlace());
to_seq(device_ctx, batch_gate_g, *in_g); to_seq(device_ctx, batch_gate_g, in_g);
} }
if (bias && bias_g) { if (bias && bias_g) {
/* backward bias */ /* backward bias */
......
...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and ...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/operators/math/concat.h" #include "paddle/fluid/operators/math/concat.h"
#include <vector>
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -70,20 +71,20 @@ class ConcatGradFunctor<platform::CPUDeviceContext, T> { ...@@ -70,20 +71,20 @@ class ConcatGradFunctor<platform::CPUDeviceContext, T> {
public: public:
void operator()(const platform::CPUDeviceContext& context, void operator()(const platform::CPUDeviceContext& context,
const framework::Tensor& input, const int axis, const framework::Tensor& input, const int axis,
std::vector<framework::Tensor>& outputs) { std::vector<framework::Tensor>* outputs) {
// TODO(zcd): Add input data validity checking // TODO(zcd): Add input data validity checking
int num = outputs.size(); int num = outputs->size();
int input_rows = 1; int input_rows = 1;
auto dim_0 = outputs[0].dims(); auto dim_0 = outputs->at(0).dims();
for (int i = 0; i < axis; ++i) { for (int i = 0; i < axis; ++i) {
input_rows *= dim_0[i]; input_rows *= dim_0[i];
} }
int input_cols = 0; int input_cols = 0;
std::vector<int64_t> output_cols(outputs.size()); std::vector<int64_t> output_cols(outputs->size());
for (int i = 0; i < num; ++i) { for (int i = 0; i < num; ++i) {
int t_cols = outputs[i].numel() / input_rows; int t_cols = outputs->at(i).numel() / input_rows;
input_cols += t_cols; input_cols += t_cols;
output_cols[i] = t_cols; output_cols[i] = t_cols;
} }
...@@ -95,7 +96,7 @@ class ConcatGradFunctor<platform::CPUDeviceContext, T> { ...@@ -95,7 +96,7 @@ class ConcatGradFunctor<platform::CPUDeviceContext, T> {
int col_idx = 0; int col_idx = 0;
for (int j = 0; j < num; ++j) { for (int j = 0; j < num; ++j) {
int col_len = output_cols[j]; int col_len = output_cols[j];
T* dst_ptr = outputs[j].data<T>() + k * col_len; T* dst_ptr = outputs->at(j).data<T>() + k * col_len;
memory::Copy(cpu_place, dst_ptr, cpu_place, src_ptr + col_idx, memory::Copy(cpu_place, dst_ptr, cpu_place, src_ptr + col_idx,
sizeof(T) * col_len); sizeof(T) * col_len);
col_idx += col_len; col_idx += col_len;
......
...@@ -12,9 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,9 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include <algorithm>
#include <vector>
#include "paddle/fluid/framework/mixed_vector.h" #include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/fluid/operators/math/concat.h" #include "paddle/fluid/operators/math/concat.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -202,16 +204,16 @@ class ConcatGradFunctor<platform::CUDADeviceContext, T> { ...@@ -202,16 +204,16 @@ class ConcatGradFunctor<platform::CUDADeviceContext, T> {
public: public:
void operator()(const platform::CUDADeviceContext& context, void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input, const int axis, const framework::Tensor& input, const int axis,
std::vector<framework::Tensor>& outputs) { std::vector<framework::Tensor>* outputs) {
// TODO(zcd): Add input data validity checking // TODO(zcd): Add input data validity checking
int o_num = outputs.size(); int o_num = outputs->size();
int out_row = 1; int out_row = 1;
auto dim_0 = outputs[0].dims(); auto dim_0 = outputs->at(0).dims();
for (int i = 0; i < axis; ++i) { for (int i = 0; i < axis; ++i) {
out_row *= dim_0[i]; out_row *= dim_0[i];
} }
int out_col = outputs[0].numel() / out_row; int out_col = outputs->at(0).numel() / out_row;
int in_col = 0, in_row = out_row; int in_col = 0, in_row = out_row;
bool sameShape = true; bool sameShape = true;
...@@ -221,13 +223,13 @@ class ConcatGradFunctor<platform::CUDADeviceContext, T> { ...@@ -221,13 +223,13 @@ class ConcatGradFunctor<platform::CUDADeviceContext, T> {
outputs_cols[0] = 0; outputs_cols[0] = 0;
for (int i = 0; i < o_num; ++i) { for (int i = 0; i < o_num; ++i) {
int t_col = outputs[i].numel() / out_row; int t_col = outputs->at(i).numel() / out_row;
if (sameShape) { if (sameShape) {
if (t_col != out_col) sameShape = false; if (t_col != out_col) sameShape = false;
} }
in_col += t_col; in_col += t_col;
outputs_cols[i + 1] = in_col; outputs_cols[i + 1] = in_col;
outputs_ptr[i] = outputs[i].data<T>(); outputs_ptr[i] = outputs->at(i).data<T>();
} }
T** dev_out_gpu_data = T** dev_out_gpu_data =
......
...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and ...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#pragma once #pragma once
#include <vector>
#include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor.h"
...@@ -56,7 +57,7 @@ template <typename DeviceContext, typename T> ...@@ -56,7 +57,7 @@ template <typename DeviceContext, typename T>
class ConcatGradFunctor { class ConcatGradFunctor {
public: public:
void operator()(const DeviceContext& context, const framework::Tensor& input, void operator()(const DeviceContext& context, const framework::Tensor& input,
const int axis, std::vector<framework::Tensor>& outputs); const int axis, std::vector<framework::Tensor>* outputs);
}; };
} // namespace math } // namespace math
......
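ConcatGradFunctor now receives its outputs as std::vector<framework::Tensor>* rather than a non-const reference, so call sites pass &outputs and the functor dereferences it (outputs->at(i)), mirroring the to_batch(ctx, src, &dst, ...) changes above. A minimal standalone sketch of the same output-by-pointer convention, using hypothetical types unrelated to Paddle:

#include <cstdio>
#include <vector>

// Standalone sketch: mutable outputs are passed by pointer so the mutation is
// visible at the call site, as in the ConcatGradFunctor signature change.
void SplitInHalf(const std::vector<int> &input,
                 std::vector<std::vector<int>> *outputs) {
  outputs->resize(2);
  size_t half = input.size() / 2;
  outputs->at(0).assign(input.begin(), input.begin() + half);
  outputs->at(1).assign(input.begin() + half, input.end());
}

int main() {
  std::vector<int> data = {0, 1, 2, 3, 4, 5};
  std::vector<std::vector<int>> outs;
  SplitInHalf(data, &outs);  // call site passes &outs, mirroring "&outputs"
  std::printf("%zu %zu\n", outs[0].size(), outs[1].size());
  return 0;
}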
...@@ -17,17 +17,14 @@ limitations under the License. */ ...@@ -17,17 +17,14 @@ limitations under the License. */
#include <vector> #include <vector>
#include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/framework/tensor_util.h"
using namespace paddle::framework;
using namespace paddle::platform;
template <typename DeviceContext, typename Place> template <typename DeviceContext, typename Place>
void testConcat() { void testConcat() {
Tensor input_a_cpu; paddle::framework::Tensor input_a_cpu;
Tensor input_b_cpu; paddle::framework::Tensor input_b_cpu;
Tensor out_cpu; paddle::framework::Tensor out_cpu;
Tensor input_a; paddle::framework::Tensor input_a;
Tensor input_b; paddle::framework::Tensor input_b;
Tensor out; paddle::framework::Tensor out;
DeviceContext* context = new DeviceContext(Place()); DeviceContext* context = new DeviceContext(Place());
// DeviceContext context(Place()); // DeviceContext context(Place());
...@@ -40,18 +37,18 @@ void testConcat() { ...@@ -40,18 +37,18 @@ void testConcat() {
* output: * output:
* out.shape: [5, 3, 4] * out.shape: [5, 3, 4]
*/ */
auto dim_a = make_ddim({2, 3, 4}); auto dim_a = paddle::framework::make_ddim({2, 3, 4});
auto dim_b = make_ddim({3, 3, 4}); auto dim_b = paddle::framework::make_ddim({3, 3, 4});
auto dim_out = make_ddim({5, 3, 4}); auto dim_out = paddle::framework::make_ddim({5, 3, 4});
input_a.mutable_data<int>(dim_a, Place()); input_a.mutable_data<int>(dim_a, Place());
input_b.mutable_data<int>(dim_b, Place()); input_b.mutable_data<int>(dim_b, Place());
out.mutable_data<int>(dim_out, Place()); out.mutable_data<int>(dim_out, Place());
if (paddle::platform::is_gpu_place(Place())) { if (paddle::platform::is_gpu_place(Place())) {
input_a_cpu.mutable_data<int>(dim_a, CPUPlace()); input_a_cpu.mutable_data<int>(dim_a, paddle::platform::CPUPlace());
input_b_cpu.mutable_data<int>(dim_b, CPUPlace()); input_b_cpu.mutable_data<int>(dim_b, paddle::platform::CPUPlace());
out_cpu.mutable_data<int>(dim_out, CPUPlace()); out_cpu.mutable_data<int>(dim_out, paddle::platform::CPUPlace());
} }
int* a_ptr; int* a_ptr;
...@@ -72,11 +69,11 @@ void testConcat() { ...@@ -72,11 +69,11 @@ void testConcat() {
} }
if (paddle::platform::is_gpu_place(Place())) { if (paddle::platform::is_gpu_place(Place())) {
TensorCopy(input_a_cpu, Place(), *context, &input_a); paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a);
TensorCopy(input_b_cpu, Place(), *context, &input_b); paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b);
} }
std::vector<Tensor> input; std::vector<paddle::framework::Tensor> input;
input.push_back(input_a); input.push_back(input_a);
input.push_back(input_b); input.push_back(input_b);
...@@ -89,7 +86,8 @@ void testConcat() { ...@@ -89,7 +86,8 @@ void testConcat() {
int* out_ptr; int* out_ptr;
if (paddle::platform::is_gpu_place(Place())) { if (paddle::platform::is_gpu_place(Place())) {
TensorCopy(out, CPUPlace(), *context, &out_cpu); paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context,
&out_cpu);
out_ptr = out_cpu.data<int>(); out_ptr = out_cpu.data<int>();
} else { } else {
out_ptr = out.data<int>(); out_ptr = out.data<int>();
...@@ -115,9 +113,9 @@ void testConcat() { ...@@ -115,9 +113,9 @@ void testConcat() {
* output: * output:
* out.shape: [2, 7, 4] * out.shape: [2, 7, 4]
*/ */
dim_a = make_ddim({2, 3, 4}); dim_a = paddle::framework::make_ddim({2, 3, 4});
dim_b = make_ddim({2, 4, 4}); dim_b = paddle::framework::make_ddim({2, 4, 4});
dim_out = make_ddim({2, 7, 4}); dim_out = paddle::framework::make_ddim({2, 7, 4});
input_a.Resize(dim_a); input_a.Resize(dim_a);
input_b.Resize(dim_b); input_b.Resize(dim_b);
...@@ -144,8 +142,8 @@ void testConcat() { ...@@ -144,8 +142,8 @@ void testConcat() {
} }
if (paddle::platform::is_gpu_place(Place())) { if (paddle::platform::is_gpu_place(Place())) {
TensorCopy(input_a_cpu, Place(), *context, &input_a); paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a);
TensorCopy(input_b_cpu, Place(), *context, &input_b); paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b);
} }
input.clear(); input.clear();
...@@ -159,7 +157,8 @@ void testConcat() { ...@@ -159,7 +157,8 @@ void testConcat() {
PADDLE_ENFORCE_EQ(input_b.dims(), dim_b); PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);
if (paddle::platform::is_gpu_place(Place())) { if (paddle::platform::is_gpu_place(Place())) {
TensorCopy(out, CPUPlace(), *context, &out_cpu); paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context,
&out_cpu);
out_ptr = out_cpu.data<int>(); out_ptr = out_cpu.data<int>();
} else { } else {
out_ptr = out.data<int>(); out_ptr = out.data<int>();
...@@ -187,9 +186,9 @@ void testConcat() { ...@@ -187,9 +186,9 @@ void testConcat() {
* output: * output:
* out.shape: [2, 3, 9] * out.shape: [2, 3, 9]
*/ */
dim_a = make_ddim({2, 3, 4}); dim_a = paddle::framework::make_ddim({2, 3, 4});
dim_b = make_ddim({2, 3, 5}); dim_b = paddle::framework::make_ddim({2, 3, 5});
dim_out = make_ddim({2, 3, 9}); dim_out = paddle::framework::make_ddim({2, 3, 9});
input_a.Resize(dim_a); input_a.Resize(dim_a);
input_b.Resize(dim_b); input_b.Resize(dim_b);
...@@ -216,8 +215,8 @@ void testConcat() { ...@@ -216,8 +215,8 @@ void testConcat() {
} }
if (paddle::platform::is_gpu_place(Place())) { if (paddle::platform::is_gpu_place(Place())) {
TensorCopy(input_a_cpu, Place(), *context, &input_a); paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a);
TensorCopy(input_b_cpu, Place(), *context, &input_b); paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b);
} }
input.clear(); input.clear();
...@@ -231,7 +230,8 @@ void testConcat() { ...@@ -231,7 +230,8 @@ void testConcat() {
PADDLE_ENFORCE_EQ(input_b.dims(), dim_b); PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);
if (paddle::platform::is_gpu_place(Place())) { if (paddle::platform::is_gpu_place(Place())) {
TensorCopy(out, CPUPlace(), *context, &out_cpu); paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context,
&out_cpu);
out_ptr = out_cpu.data<int>(); out_ptr = out_cpu.data<int>();
} else { } else {
out_ptr = out.data<int>(); out_ptr = out.data<int>();
...@@ -261,9 +261,9 @@ void testConcat() { ...@@ -261,9 +261,9 @@ void testConcat() {
* output: * output:
* out.shape: [2, 6, 4] * out.shape: [2, 6, 4]
*/ */
dim_a = make_ddim({2, 3, 4}); dim_a = paddle::framework::make_ddim({2, 3, 4});
dim_b = make_ddim({2, 3, 4}); dim_b = paddle::framework::make_ddim({2, 3, 4});
dim_out = make_ddim({2, 6, 4}); dim_out = paddle::framework::make_ddim({2, 6, 4});
input_a.Resize(dim_a); input_a.Resize(dim_a);
input_b.Resize(dim_b); input_b.Resize(dim_b);
...@@ -290,8 +290,8 @@ void testConcat() { ...@@ -290,8 +290,8 @@ void testConcat() {
} }
if (paddle::platform::is_gpu_place(Place())) { if (paddle::platform::is_gpu_place(Place())) {
TensorCopy(input_a_cpu, Place(), *context, &input_a); paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a);
TensorCopy(input_b_cpu, Place(), *context, &input_b); paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b);
} }
input.clear(); input.clear();
...@@ -305,7 +305,8 @@ void testConcat() { ...@@ -305,7 +305,8 @@ void testConcat() {
PADDLE_ENFORCE_EQ(input_b.dims(), dim_b); PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);
if (paddle::platform::is_gpu_place(Place())) { if (paddle::platform::is_gpu_place(Place())) {
TensorCopy(out, CPUPlace(), *context, &out_cpu); paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context,
&out_cpu);
out_ptr = out_cpu.data<int>(); out_ptr = out_cpu.data<int>();
} else { } else {
out_ptr = out.data<int>(); out_ptr = out.data<int>();
......
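The test above concatenates pairs of 3-D tensors along axes 0, 1, and 2 (e.g. [2, 3, 4] + [3, 3, 4] -> [5, 3, 4] for axis 0). For row-major buffers, the axis-0 case reduces to appending one flat buffer after the other; a small standalone sketch of that case (hypothetical helper, not the Paddle functor):

#include <cstdio>
#include <vector>

// Hypothetical standalone sketch: concatenating row-major tensors of shapes
// [2,3,4] and [3,3,4] along axis 0 yields shape [5,3,4]; for axis 0 this is
// simply one flat buffer followed by the other.
std::vector<int> ConcatAxis0(const std::vector<int> &a,
                             const std::vector<int> &b) {
  std::vector<int> out(a);
  out.insert(out.end(), b.begin(), b.end());
  return out;
}

int main() {
  std::vector<int> a(2 * 3 * 4), b(3 * 3 * 4);
  for (size_t i = 0; i < a.size(); ++i) a[i] = static_cast<int>(i);
  for (size_t i = 0; i < b.size(); ++i) b[i] = static_cast<int>(i);
  std::vector<int> out = ConcatAxis0(a, b);  // shape [5, 3, 4], 60 elements
  std::printf("%zu\n", out.size());
  return 0;
}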
...@@ -14,6 +14,8 @@ limitations under the License. */ ...@@ -14,6 +14,8 @@ limitations under the License. */
#pragma once #pragma once
#include <algorithm>
#include <vector>
#include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/operators/math/im2col.h" #include "paddle/fluid/operators/math/im2col.h"
#include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/math_function.h"
......
...@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and ...@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/operators/math/cos_sim_functor.h" #include "paddle/fluid/operators/math/cos_sim_functor.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and ...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/operators/math/cross_entropy.h" #include "paddle/fluid/operators/math/cross_entropy.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -31,11 +32,11 @@ __global__ void CrossEntropyKernel(T* Y, const T* X, const int64_t* label, ...@@ -31,11 +32,11 @@ __global__ void CrossEntropyKernel(T* Y, const T* X, const int64_t* label,
template <typename T> template <typename T>
__device__ __forceinline__ T sum_single_warp(T val) { __device__ __forceinline__ T sum_single_warp(T val) {
val += __shfl_down(val, 16); val += platform::__shfl_down_sync(0, val, 16);
val += __shfl_down(val, 8); val += platform::__shfl_down_sync(0, val, 8);
val += __shfl_down(val, 4); val += platform::__shfl_down_sync(0, val, 4);
val += __shfl_down(val, 2); val += platform::__shfl_down_sync(0, val, 2);
val += __shfl_down(val, 1); val += platform::__shfl_down_sync(0, val, 1);
return val; return val;
} }
...@@ -108,7 +109,9 @@ class CrossEntropyFunctor<platform::CUDADeviceContext, T> { ...@@ -108,7 +109,9 @@ class CrossEntropyFunctor<platform::CUDADeviceContext, T> {
if (softLabel) { if (softLabel) {
const T* label_data = labels->data<T>(); const T* label_data = labels->data<T>();
int block = class_num > 512 ? 512 : pow(2, int(std::log2(class_num))); int block = class_num > 512
? 512
: pow(2, static_cast<int>(std::log2(class_num)));
SoftCrossEntropyKernel<T><<< SoftCrossEntropyKernel<T><<<
batch_size, block, block * sizeof(T), batch_size, block, block * sizeof(T),
......
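sum_single_warp above switches from the deprecated __shfl_down to Paddle's platform::__shfl_down_sync wrapper. A standalone CUDA sketch of the same warp-level tree reduction, written directly against the raw __shfl_down_sync intrinsic with a full-warp mask (an assumption for illustration; it is not the Paddle wrapper used in this diff):

#include <cstdio>
#include <cuda_runtime.h>

// Warp-level tree reduction: after the loop, lane 0 holds the sum of the
// values contributed by all 32 lanes of the warp.
__device__ __forceinline__ float WarpSum(float val) {
  for (int offset = 16; offset > 0; offset >>= 1) {
    val += __shfl_down_sync(0xffffffffu, val, offset);
  }
  return val;
}

__global__ void SumKernel(const float *in, float *out) {
  float s = WarpSum(in[threadIdx.x]);
  if (threadIdx.x == 0) *out = s;
}

int main() {
  float h_in[32], *d_in, *d_out, h_out = 0.f;
  for (int i = 0; i < 32; ++i) h_in[i] = 1.f;
  cudaMalloc(&d_in, 32 * sizeof(float));
  cudaMalloc(&d_out, sizeof(float));
  cudaMemcpy(d_in, h_in, 32 * sizeof(float), cudaMemcpyHostToDevice);
  SumKernel<<<1, 32>>>(d_in, d_out);  // one warp
  cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
  std::printf("%f\n", h_out);  // expected 32.0
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}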
...@@ -12,8 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,8 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include <vector>
#include "paddle/fluid/operators/math/depthwise_conv.h" #include "paddle/fluid/operators/math/depthwise_conv.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and ...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#pragma once #pragma once
#include <vector>
#include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/hostdevice.h" #include "paddle/fluid/platform/hostdevice.h"
......
...@@ -14,6 +14,7 @@ limitations under the License. */ ...@@ -14,6 +14,7 @@ limitations under the License. */
#pragma once #pragma once
#include <math.h> #include <math.h>
#include <string>
#include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/hostdevice.h" #include "paddle/fluid/platform/hostdevice.h"
......
...@@ -89,14 +89,14 @@ void hl_avx_gru_forward_reset_output(OpResetOutput op_reset_output, ...@@ -89,14 +89,14 @@ void hl_avx_gru_forward_reset_output(OpResetOutput op_reset_output,
__m256 r_value_reset_gate; __m256 r_value_reset_gate;
__m256 r_value_reset_output; __m256 r_value_reset_output;
__m256 r_prev_out = _mm256_set1_ps(0.0f); __m256 r_prev_out = _mm256_set1_ps(0.0f);
__m256 *update_gate = (__m256 *)gate_value; __m256 *update_gate = reinterpret_cast<__m256 *>(gate_value);
__m256 *reset_gate = (__m256 *)(gate_value + frame_size); __m256 *reset_gate = reinterpret_cast<__m256 *>(gate_value + frame_size);
for (int i = 0; i < frame_size / 8; i++) { for (int i = 0; i < frame_size / 8; i++) {
r_value_update_gate = update_gate[i]; r_value_update_gate = update_gate[i];
r_value_reset_gate = reset_gate[i]; r_value_reset_gate = reset_gate[i];
if (prev_output_value) { if (prev_output_value) {
r_prev_out = ((__m256 *)prev_output_value)[i]; r_prev_out = (reinterpret_cast<__m256 *>(prev_output_value))[i];
} }
op_reset_output(r_value_update_gate, r_value_reset_gate, r_prev_out, op_reset_output(r_value_update_gate, r_value_reset_gate, r_prev_out,
...@@ -104,7 +104,7 @@ void hl_avx_gru_forward_reset_output(OpResetOutput op_reset_output, ...@@ -104,7 +104,7 @@ void hl_avx_gru_forward_reset_output(OpResetOutput op_reset_output,
update_gate[i] = r_value_update_gate; update_gate[i] = r_value_update_gate;
reset_gate[i] = r_value_reset_gate; reset_gate[i] = r_value_reset_gate;
((__m256 *)reset_output_value)[i] = r_value_reset_output; (reinterpret_cast<__m256 *>(reset_output_value))[i] = r_value_reset_output;
} }
#endif #endif
} }
...@@ -119,21 +119,21 @@ void hl_avx_gru_forward_final_output(OpFinalOutput op_final_output, ...@@ -119,21 +119,21 @@ void hl_avx_gru_forward_final_output(OpFinalOutput op_final_output,
__m256 r_value_frame_state; __m256 r_value_frame_state;
__m256 r_prev_out = _mm256_set1_ps(0.0f); __m256 r_prev_out = _mm256_set1_ps(0.0f);
__m256 r_output; __m256 r_output;
__m256 *update_gate = (__m256 *)gate_value; __m256 *update_gate = reinterpret_cast<__m256 *>(gate_value);
__m256 *frame_state = (__m256 *)(gate_value + frame_size * 2); __m256 *frame_state = reinterpret_cast<__m256 *>(gate_value + frame_size * 2);
for (int i = 0; i < frame_size / 8; i++) { for (int i = 0; i < frame_size / 8; i++) {
r_value_update_gate = update_gate[i]; r_value_update_gate = update_gate[i];
r_value_frame_state = frame_state[i]; r_value_frame_state = frame_state[i];
if (prev_output_value) { if (prev_output_value) {
r_prev_out = ((__m256 *)prev_output_value)[i]; r_prev_out = (reinterpret_cast<__m256 *>(prev_output_value))[i];
} }
op_final_output(r_value_update_gate, r_value_frame_state, r_prev_out, op_final_output(r_value_update_gate, r_value_frame_state, r_prev_out,
r_output, active_node); r_output, active_node);
frame_state[i] = r_value_frame_state; frame_state[i] = r_value_frame_state;
((__m256 *)output_value)[i] = r_output; (reinterpret_cast<__m256 *>(output_value))[i] = r_output;
} }
#endif #endif
} }
...@@ -284,20 +284,22 @@ void hl_avx_gru_backward_state_grad(OpStateGrad op_state_grad, T *gate_value, ...@@ -284,20 +284,22 @@ void hl_avx_gru_backward_state_grad(OpStateGrad op_state_grad, T *gate_value,
__m256 r_out_grad; __m256 r_out_grad;
__m256 r_prev_out_value = _mm256_set1_ps(0.0f); __m256 r_prev_out_value = _mm256_set1_ps(0.0f);
__m256 r_prev_out_grad = _mm256_set1_ps(0.0f); __m256 r_prev_out_grad = _mm256_set1_ps(0.0f);
__m256 *update_gate_value = (__m256 *)gate_value; __m256 *update_gate_value = reinterpret_cast<__m256 *>(gate_value);
__m256 *update_gate_grad = (__m256 *)gate_grad; __m256 *update_gate_grad = reinterpret_cast<__m256 *>(gate_grad);
__m256 *frame_state_value = (__m256 *)(gate_value + frame_size * 2); __m256 *frame_state_value =
__m256 *frame_state_grad = (__m256 *)(gate_grad + frame_size * 2); reinterpret_cast<__m256 *>(gate_value + frame_size * 2);
__m256 *frame_state_grad =
reinterpret_cast<__m256 *>(gate_grad + frame_size * 2);
for (int i = 0; i < frame_size / 8; i++) { for (int i = 0; i < frame_size / 8; i++) {
r_update_gate_value = update_gate_value[i]; r_update_gate_value = update_gate_value[i];
r_frame_state_value = frame_state_value[i]; r_frame_state_value = frame_state_value[i];
r_out_grad = ((__m256 *)output_grad)[i]; r_out_grad = (reinterpret_cast<__m256 *>(output_grad))[i];
if (prev_out_value) { if (prev_out_value) {
r_prev_out_value = ((__m256 *)prev_out_value)[i]; r_prev_out_value = (reinterpret_cast<__m256 *>(prev_out_value))[i];
} }
if (prev_out_grad) { if (prev_out_grad) {
r_prev_out_grad = ((__m256 *)prev_out_grad)[i]; r_prev_out_grad = (reinterpret_cast<__m256 *>(prev_out_grad))[i];
} }
op_state_grad(r_update_gate_value, r_update_gate_grad, r_frame_state_value, op_state_grad(r_update_gate_value, r_update_gate_grad, r_frame_state_value,
...@@ -307,7 +309,7 @@ void hl_avx_gru_backward_state_grad(OpStateGrad op_state_grad, T *gate_value, ...@@ -307,7 +309,7 @@ void hl_avx_gru_backward_state_grad(OpStateGrad op_state_grad, T *gate_value,
update_gate_grad[i] = r_update_gate_grad; update_gate_grad[i] = r_update_gate_grad;
frame_state_grad[i] = r_frame_state_grad; frame_state_grad[i] = r_frame_state_grad;
if (prev_out_grad) { if (prev_out_grad) {
((__m256 *)prev_out_grad)[i] = r_prev_out_grad; (reinterpret_cast<__m256 *>(prev_out_grad))[i] = r_prev_out_grad;
} }
} }
#endif #endif
...@@ -327,10 +329,11 @@ void hl_avx_gru_backward_reset_grad(OpResetGrad op_reset_grad, T *gate_value, ...@@ -327,10 +329,11 @@ void hl_avx_gru_backward_reset_grad(OpResetGrad op_reset_grad, T *gate_value,
__m256 r_reset_output_grad = _mm256_set1_ps(0.0f); __m256 r_reset_output_grad = _mm256_set1_ps(0.0f);
__m256 r_prev_out_value = _mm256_set1_ps(0.0f); __m256 r_prev_out_value = _mm256_set1_ps(0.0f);
__m256 r_prev_out_grad = _mm256_set1_ps(0.0f); __m256 r_prev_out_grad = _mm256_set1_ps(0.0f);
__m256 *update_gate_value = (__m256 *)gate_value; __m256 *update_gate_value = reinterpret_cast<__m256 *>(gate_value);
__m256 *update_gate_grad = (__m256 *)gate_grad; __m256 *update_gate_grad = reinterpret_cast<__m256 *>(gate_grad);
__m256 *reset_gate_value = (__m256 *)(gate_value + frame_size); __m256 *reset_gate_value =
__m256 *reset_gate_grad = (__m256 *)(gate_grad + frame_size); reinterpret_cast<__m256 *>(gate_value + frame_size);
__m256 *reset_gate_grad = reinterpret_cast<__m256 *>(gate_grad + frame_size);
for (int i = 0; i < frame_size / 8; i++) { for (int i = 0; i < frame_size / 8; i++) {
r_update_gate_value = update_gate_value[i]; r_update_gate_value = update_gate_value[i];
...@@ -338,13 +341,13 @@ void hl_avx_gru_backward_reset_grad(OpResetGrad op_reset_grad, T *gate_value, ...@@ -338,13 +341,13 @@ void hl_avx_gru_backward_reset_grad(OpResetGrad op_reset_grad, T *gate_value,
r_reset_gate_value = reset_gate_value[i]; r_reset_gate_value = reset_gate_value[i];
if (prev_out_value && prev_out_grad) { if (prev_out_value && prev_out_grad) {
r_reset_output_grad = ((__m256 *)reset_output_grad)[i]; r_reset_output_grad = (reinterpret_cast<__m256 *>(reset_output_grad))[i];
} }
if (prev_out_value) { if (prev_out_value) {
r_prev_out_value = ((__m256 *)prev_out_value)[i]; r_prev_out_value = (reinterpret_cast<__m256 *>(prev_out_value))[i];
} }
if (prev_out_grad) { if (prev_out_grad) {
r_prev_out_grad = ((__m256 *)prev_out_grad)[i]; r_prev_out_grad = (reinterpret_cast<__m256 *>(prev_out_grad))[i];
} }
op_reset_grad(r_update_gate_value, r_update_gate_grad, r_reset_gate_value, op_reset_grad(r_update_gate_value, r_update_gate_grad, r_reset_gate_value,
...@@ -354,7 +357,7 @@ void hl_avx_gru_backward_reset_grad(OpResetGrad op_reset_grad, T *gate_value, ...@@ -354,7 +357,7 @@ void hl_avx_gru_backward_reset_grad(OpResetGrad op_reset_grad, T *gate_value,
update_gate_grad[i] = r_update_gate_grad; update_gate_grad[i] = r_update_gate_grad;
reset_gate_grad[i] = r_reset_gate_grad; reset_gate_grad[i] = r_reset_gate_grad;
if (prev_out_grad) { if (prev_out_grad) {
((__m256 *)prev_out_grad)[i] = r_prev_out_grad; (reinterpret_cast<__m256 *>(prev_out_grad))[i] = r_prev_out_grad;
} }
} }
#endif #endif
......
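The AVX GRU (and, below, LSTM) kernels replace C-style casts with reinterpret_cast when viewing float buffers as packed __m256 lanes. A minimal standalone sketch of that pattern (hypothetical function; assumes an AVX-capable build, e.g. compiled with -mavx, and 32-byte-aligned data):

#include <cstdio>
#include <immintrin.h>

// Treat a float array as packed __m256 lanes via reinterpret_cast, the cast
// style the kernels above now use, and scale each lane by two.
void ScaleByTwo(float *data, int frame_size) {
  __m256 *lanes = reinterpret_cast<__m256 *>(data);
  const __m256 two = _mm256_set1_ps(2.0f);
  for (int i = 0; i < frame_size / 8; ++i) {
    lanes[i] = _mm256_mul_ps(lanes[i], two);
  }
}

int main() {
  alignas(32) float buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  ScaleByTwo(buf, 8);
  std::printf("%f %f\n", buf[0], buf[7]);  // 0.0 14.0
  return 0;
}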
...@@ -16,7 +16,7 @@ limitations under the License. */ ...@@ -16,7 +16,7 @@ limitations under the License. */
#include <type_traits> #include <type_traits>
#include "paddle/fluid/operators/math/detail/activation_functions.h" #include "paddle/fluid/operators/math/detail/activation_functions.h"
#include "paddle/fluid/operators/math/gru_compute.h" #include "paddle/fluid/operators/math/gru_compute.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/device_context.h"
namespace paddle { namespace paddle {
......
...@@ -164,10 +164,12 @@ void avx_lstm_forward_one_sequence(Op op, LstmMetaValue<T> value, ...@@ -164,10 +164,12 @@ void avx_lstm_forward_one_sequence(Op op, LstmMetaValue<T> value,
__m256 r_state_atv; __m256 r_state_atv;
__m256 r_out; __m256 r_out;
__m256 *value_in = (__m256 *)value.gate_value; __m256 *value_in = reinterpret_cast<__m256 *>(value.gate_value);
__m256 *value_ig = (__m256 *)(value.gate_value + frame_size); __m256 *value_ig = reinterpret_cast<__m256 *>(value.gate_value + frame_size);
__m256 *value_fg = (__m256 *)(value.gate_value + frame_size * 2); __m256 *value_fg =
__m256 *value_og = (__m256 *)(value.gate_value + frame_size * 3); reinterpret_cast<__m256 *>(value.gate_value + frame_size * 2);
__m256 *value_og =
reinterpret_cast<__m256 *>(value.gate_value + frame_size * 3);
for (int i = 0; i < frame_size / 8; i++) { for (int i = 0; i < frame_size / 8; i++) {
r_value_in = value_in[i]; r_value_in = value_in[i];
...@@ -175,13 +177,13 @@ void avx_lstm_forward_one_sequence(Op op, LstmMetaValue<T> value, ...@@ -175,13 +177,13 @@ void avx_lstm_forward_one_sequence(Op op, LstmMetaValue<T> value,
r_value_fg = value_fg[i]; r_value_fg = value_fg[i];
r_value_og = value_og[i]; r_value_og = value_og[i];
if (value.check_ig) { if (value.check_ig) {
r_checkI = ((__m256 *)value.check_ig)[i]; r_checkI = (reinterpret_cast<__m256 *>(value.check_ig))[i];
r_checkF = ((__m256 *)value.check_fg)[i]; r_checkF = (reinterpret_cast<__m256 *>(value.check_fg))[i];
r_checkO = ((__m256 *)value.check_og)[i]; r_checkO = (reinterpret_cast<__m256 *>(value.check_og))[i];
} }
if (value.prev_state_value) { if (value.prev_state_value) {
r_prev_state = ((__m256 *)value.prev_state_value)[i]; r_prev_state = (reinterpret_cast<__m256 *>(value.prev_state_value))[i];
} }
op(r_value_in, r_value_ig, r_value_fg, r_value_og, r_prev_state, r_state, op(r_value_in, r_value_ig, r_value_fg, r_value_og, r_prev_state, r_state,
...@@ -192,9 +194,9 @@ void avx_lstm_forward_one_sequence(Op op, LstmMetaValue<T> value, ...@@ -192,9 +194,9 @@ void avx_lstm_forward_one_sequence(Op op, LstmMetaValue<T> value,
value_ig[i] = r_value_ig; value_ig[i] = r_value_ig;
value_fg[i] = r_value_fg; value_fg[i] = r_value_fg;
value_og[i] = r_value_og; value_og[i] = r_value_og;
((__m256 *)value.state_value)[i] = r_state; (reinterpret_cast<__m256 *>(value.state_value))[i] = r_state;
((__m256 *)value.state_active_value)[i] = r_state_atv; (reinterpret_cast<__m256 *>(value.state_active_value))[i] = r_state_atv;
((__m256 *)value.output_value)[i] = r_out; (reinterpret_cast<__m256 *>(value.output_value))[i] = r_out;
} }
#endif #endif
} }
...@@ -227,14 +229,16 @@ void avx_lstm_backward_one_sequence(Op op, LstmMetaValue<T> value, ...@@ -227,14 +229,16 @@ void avx_lstm_backward_one_sequence(Op op, LstmMetaValue<T> value,
__m256 r_checkFGrad; __m256 r_checkFGrad;
__m256 r_checkOGrad; __m256 r_checkOGrad;
__m256 *value_in = (__m256 *)value.gate_value; __m256 *value_in = reinterpret_cast<__m256 *>(value.gate_value);
__m256 *value_ig = (__m256 *)(value.gate_value + frame_size); __m256 *value_ig = reinterpret_cast<__m256 *>(value.gate_value + frame_size);
__m256 *value_fg = (__m256 *)(value.gate_value + frame_size * 2); __m256 *value_fg =
__m256 *value_og = (__m256 *)(value.gate_value + frame_size * 3); reinterpret_cast<__m256 *>(value.gate_value + frame_size * 2);
__m256 *grad_in = (__m256 *)grad.gate_grad; __m256 *value_og =
__m256 *grad_ig = (__m256 *)(grad.gate_grad + frame_size); reinterpret_cast<__m256 *>(value.gate_value + frame_size * 3);
__m256 *grad_fg = (__m256 *)(grad.gate_grad + frame_size * 2); __m256 *grad_in = reinterpret_cast<__m256 *>(grad.gate_grad);
__m256 *grad_og = (__m256 *)(grad.gate_grad + frame_size * 3); __m256 *grad_ig = reinterpret_cast<__m256 *>(grad.gate_grad + frame_size);
__m256 *grad_fg = reinterpret_cast<__m256 *>(grad.gate_grad + frame_size * 2);
__m256 *grad_og = reinterpret_cast<__m256 *>(grad.gate_grad + frame_size * 3);
for (int i = 0; i < frame_size / 8; i++) { for (int i = 0; i < frame_size / 8; i++) {
r_value_in = value_in[i]; r_value_in = value_in[i];
...@@ -242,16 +246,16 @@ void avx_lstm_backward_one_sequence(Op op, LstmMetaValue<T> value, ...@@ -242,16 +246,16 @@ void avx_lstm_backward_one_sequence(Op op, LstmMetaValue<T> value,
r_value_fg = value_fg[i]; r_value_fg = value_fg[i];
r_value_og = value_og[i]; r_value_og = value_og[i];
if (value.check_ig) { if (value.check_ig) {
r_checkI = ((__m256 *)value.check_ig)[i]; r_checkI = (reinterpret_cast<__m256 *>(value.check_ig))[i];
r_checkF = ((__m256 *)value.check_fg)[i]; r_checkF = (reinterpret_cast<__m256 *>(value.check_fg))[i];
r_checkO = ((__m256 *)value.check_og)[i]; r_checkO = (reinterpret_cast<__m256 *>(value.check_og))[i];
} }
r_state = ((__m256 *)value.state_value)[i]; r_state = (reinterpret_cast<__m256 *>(value.state_value))[i];
r_state_atv = ((__m256 *)value.state_active_value)[i]; r_state_atv = (reinterpret_cast<__m256 *>(value.state_active_value))[i];
r_output_grad = ((__m256 *)grad.output_grad)[i]; r_output_grad = (reinterpret_cast<__m256 *>(grad.output_grad))[i];
r_state_grad = ((__m256 *)grad.state_grad)[i]; r_state_grad = (reinterpret_cast<__m256 *>(grad.state_grad))[i];
if (value.prev_state_value) { if (value.prev_state_value) {
r_prev_state = ((__m256 *)value.prev_state_value)[i]; r_prev_state = (reinterpret_cast<__m256 *>(value.prev_state_value))[i];
} }
op(r_value_in, r_value_ig, r_value_fg, r_value_og, r_grad_in, r_grad_ig, op(r_value_in, r_value_ig, r_value_fg, r_value_og, r_grad_in, r_grad_ig,
...@@ -264,15 +268,18 @@ void avx_lstm_backward_one_sequence(Op op, LstmMetaValue<T> value, ...@@ -264,15 +268,18 @@ void avx_lstm_backward_one_sequence(Op op, LstmMetaValue<T> value,
grad_ig[i] = r_grad_ig; grad_ig[i] = r_grad_ig;
grad_fg[i] = r_grad_fg; grad_fg[i] = r_grad_fg;
grad_og[i] = r_grad_og; grad_og[i] = r_grad_og;
((__m256 *)grad.state_grad)[i] = r_state_grad; (reinterpret_cast<__m256 *>(grad.state_grad))[i] = r_state_grad;
if (grad.prev_state_grad) if (grad.prev_state_grad)
((__m256 *)grad.prev_state_grad)[i] = r_prev_state_grad; (reinterpret_cast<__m256 *>(grad.prev_state_grad))[i] = r_prev_state_grad;
if (value.prev_state_value) { if (value.prev_state_value) {
if (grad.check_ig_grad) ((__m256 *)grad.check_ig_grad)[i] += r_checkIGrad; if (grad.check_ig_grad)
if (grad.check_fg_grad) ((__m256 *)grad.check_fg_grad)[i] += r_checkFGrad; (reinterpret_cast<__m256 *>(grad.check_ig_grad))[i] += r_checkIGrad;
if (grad.check_fg_grad)
(reinterpret_cast<__m256 *>(grad.check_fg_grad))[i] += r_checkFGrad;
} }
if (grad.check_og_grad) ((__m256 *)grad.check_og_grad)[i] += r_checkOGrad; if (grad.check_og_grad)
(reinterpret_cast<__m256 *>(grad.check_og_grad))[i] += r_checkOGrad;
} }
#endif #endif
} }
......
...@@ -13,13 +13,13 @@ See the License for the specific language governing permissions and ...@@ -13,13 +13,13 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#pragma once #pragma once
#include <type_traits>
#include "paddle/fluid/operators/math/detail/activation_functions.h" #include "paddle/fluid/operators/math/detail/activation_functions.h"
#include "paddle/fluid/operators/math/lstm_compute.h" #include "paddle/fluid/operators/math/lstm_compute.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/device_context.h"
#include <type_traits>
namespace paddle { namespace paddle {
namespace operators { namespace operators {
namespace math { namespace math {
......
...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and ...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/operators/math/im2col.h" #include "paddle/fluid/operators/math/im2col.h"
#include <vector>
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -12,8 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,8 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include <algorithm>
#include <vector>
#include "paddle/fluid/operators/math/im2col.h" #include "paddle/fluid/operators/math/im2col.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -14,6 +14,7 @@ limitations under the License. */ ...@@ -14,6 +14,7 @@ limitations under the License. */
#pragma once #pragma once
#include <vector>
#include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/device_context.h"
......
...@@ -14,6 +14,7 @@ limitations under the License. */ ...@@ -14,6 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/math/im2col.h" #include "paddle/fluid/operators/math/im2col.h"
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <vector>
template <typename DeviceContext, typename Place> template <typename DeviceContext, typename Place>
void testIm2col() { void testIm2col() {
...@@ -62,7 +63,7 @@ void testIm2col() { ...@@ -62,7 +63,7 @@ void testIm2col() {
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
input = input_tmp; input = input_tmp;
} else { } else {
TensorCopy(input_tmp, *place, *context, &input); TensorCopySync(input_tmp, *place, &input);
} }
output_cfo.mutable_data<float>( output_cfo.mutable_data<float>(
{1, filter_size, filter_size, output_height, output_width}, *place); {1, filter_size, filter_size, output_height, output_width}, *place);
...@@ -87,7 +88,7 @@ void testIm2col() { ...@@ -87,7 +88,7 @@ void testIm2col() {
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
out_cfo_ptr = output_cfo.data<float>(); out_cfo_ptr = output_cfo.data<float>();
} else { } else {
TensorCopy(output_cfo, paddle::platform::CPUPlace(), *context, &output_tmp); TensorCopySync(output_cfo, paddle::platform::CPUPlace(), &output_tmp);
out_cfo_ptr = output_tmp.data<float>(); out_cfo_ptr = output_tmp.data<float>();
} }
for (int i = 0; i < 6; ++i) { for (int i = 0; i < 6; ++i) {
...@@ -98,7 +99,7 @@ void testIm2col() { ...@@ -98,7 +99,7 @@ void testIm2col() {
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
out_ocf_ptr = output_ocf.data<float>(); out_ocf_ptr = output_ocf.data<float>();
} else { } else {
TensorCopy(output_ocf, paddle::platform::CPUPlace(), *context, &output_tmp); TensorCopySync(output_ocf, paddle::platform::CPUPlace(), &output_tmp);
out_ocf_ptr = output_tmp.data<float>(); out_ocf_ptr = output_tmp.data<float>();
} }
...@@ -119,7 +120,7 @@ void testIm2col() { ...@@ -119,7 +120,7 @@ void testIm2col() {
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
input = input_tmp; input = input_tmp;
} else { } else {
TensorCopy(input_tmp, *place, *context, &input); TensorCopySync(input_tmp, *place, &input);
} }
col2im(*context, output_cfo, dilation, stride, padding, &input); col2im(*context, output_cfo, dilation, stride, padding, &input);
...@@ -128,7 +129,7 @@ void testIm2col() { ...@@ -128,7 +129,7 @@ void testIm2col() {
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
in_ptr = input.data<float>(); in_ptr = input.data<float>();
} else { } else {
TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp); TensorCopySync(input, paddle::platform::CPUPlace(), &input_tmp);
in_ptr = input_tmp.data<float>(); in_ptr = input_tmp.data<float>();
} }
for (int i = 0; i < 6; ++i) { for (int i = 0; i < 6; ++i) {
...@@ -140,7 +141,7 @@ void testIm2col() { ...@@ -140,7 +141,7 @@ void testIm2col() {
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
input = input_tmp; input = input_tmp;
} else { } else {
TensorCopy(input_tmp, *place, *context, &input); TensorCopySync(input_tmp, *place, &input);
} }
col2im_ocf(*context, output_ocf, dilation, stride, padding, &input); col2im_ocf(*context, output_ocf, dilation, stride, padding, &input);
...@@ -148,7 +149,7 @@ void testIm2col() { ...@@ -148,7 +149,7 @@ void testIm2col() {
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
in_ptr = input.data<float>(); in_ptr = input.data<float>();
} else { } else {
TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp); TensorCopySync(input, paddle::platform::CPUPlace(), &input_tmp);
in_ptr = input_tmp.data<float>(); in_ptr = input_tmp.data<float>();
} }
for (int i = 0; i < 6; ++i) { for (int i = 0; i < 6; ++i) {
......
...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and ...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#pragma once #pragma once
#include <vector>
#include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/math_function.h"
......
...@@ -23,32 +23,29 @@ void fill_fp16_data(paddle::platform::float16* in_ptr, size_t size, ...@@ -23,32 +23,29 @@ void fill_fp16_data(paddle::platform::float16* in_ptr, size_t size,
} }
TEST(math_function, notrans_mul_trans_fp32) { TEST(math_function, notrans_mul_trans_fp32) {
using namespace paddle::framework; paddle::framework::Tensor input1;
using namespace paddle::platform; paddle::framework::Tensor input1_gpu;
paddle::framework::Tensor input2_gpu;
paddle::framework::Tensor out_gpu;
paddle::framework::Tensor out;
Tensor input1; paddle::platform::CPUPlace cpu_place;
Tensor input1_gpu; paddle::platform::CUDAPlace gpu_place(0);
Tensor input2_gpu; paddle::platform::CUDADeviceContext context(gpu_place);
Tensor out_gpu;
Tensor out;
CPUPlace cpu_place;
CUDAPlace gpu_place(0);
CUDADeviceContext context(gpu_place);
float* input1_ptr = input1.mutable_data<float>({2, 3}, cpu_place); float* input1_ptr = input1.mutable_data<float>({2, 3}, cpu_place);
float arr[6] = {0, 1, 2, 3, 4, 5}; float arr[6] = {0, 1, 2, 3, 4, 5};
memcpy(input1_ptr, arr, 6 * sizeof(float)); memcpy(input1_ptr, arr, 6 * sizeof(float));
TensorCopy(input1, gpu_place, context, &input1_gpu); paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu);
TensorCopy(input1, gpu_place, context, &input2_gpu); paddle::framework::TensorCopySync(input1, gpu_place, &input2_gpu);
out_gpu.mutable_data<float>({2, 2}, gpu_place); out_gpu.mutable_data<float>({2, 2}, gpu_place);
paddle::operators::math::matmul<CUDADeviceContext, float>( paddle::operators::math::matmul<paddle::platform::CUDADeviceContext, float>(
context, input1_gpu, false, input2_gpu, true, 1, &out_gpu, 0); context, input1_gpu, false, input2_gpu, true, 1, &out_gpu, 0);
TensorCopy(out_gpu, cpu_place, context, &out); paddle::framework::TensorCopySync(out_gpu, cpu_place, &out);
float* out_ptr = out.data<float>(); float* out_ptr = out.data<float>();
context.Wait(); context.Wait();
...@@ -59,39 +56,38 @@ TEST(math_function, notrans_mul_trans_fp32) { ...@@ -59,39 +56,38 @@ TEST(math_function, notrans_mul_trans_fp32) {
} }
TEST(math_function, notrans_mul_trans_fp16) { TEST(math_function, notrans_mul_trans_fp16) {
using namespace paddle::framework; paddle::framework::Tensor input1;
using namespace paddle::platform; paddle::framework::Tensor input1_gpu;
paddle::framework::Tensor input2_gpu;
Tensor input1; paddle::framework::Tensor out_gpu;
Tensor input1_gpu; paddle::framework::Tensor out;
Tensor input2_gpu;
Tensor out_gpu;
Tensor out;
CPUPlace cpu_place; paddle::platform::CPUPlace cpu_place;
CUDAPlace gpu_place(0); paddle::platform::CUDAPlace gpu_place(0);
CUDADeviceContext context(gpu_place); paddle::platform::CUDADeviceContext context(gpu_place);
// fp16 GEMM in cublas requires GPU compute capability >= 53 // fp16 GEMM in cublas requires GPU compute capability >= 53
if (context.GetComputeCapability() < 53) { if (context.GetComputeCapability() < 53) {
return; return;
} }
float16* input1_ptr = input1.mutable_data<float16>({2, 3}, cpu_place); paddle::platform::float16* input1_ptr =
input1.mutable_data<paddle::platform::float16>({2, 3}, cpu_place);
fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5}); fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5});
TensorCopy(input1, gpu_place, context, &input1_gpu); paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu);
TensorCopy(input1, gpu_place, context, &input2_gpu); paddle::framework::TensorCopySync(input1, gpu_place, &input2_gpu);
out_gpu.mutable_data<float16>({2, 2}, gpu_place); out_gpu.mutable_data<paddle::platform::float16>({2, 2}, gpu_place);
paddle::operators::math::matmul<CUDADeviceContext, float16>( paddle::operators::math::matmul<paddle::platform::CUDADeviceContext,
context, input1_gpu, false, input2_gpu, true, float16(1), &out_gpu, paddle::platform::float16>(
float16(0)); context, input1_gpu, false, input2_gpu, true,
paddle::platform::float16(1), &out_gpu, paddle::platform::float16(0));
TensorCopy(out_gpu, cpu_place, context, &out); paddle::framework::TensorCopySync(out_gpu, cpu_place, &out);
float16* out_ptr = out.data<float16>(); paddle::platform::float16* out_ptr = out.data<paddle::platform::float16>();
context.Wait(); context.Wait();
EXPECT_EQ(static_cast<float>(out_ptr[0]), 5); EXPECT_EQ(static_cast<float>(out_ptr[0]), 5);
EXPECT_EQ(static_cast<float>(out_ptr[1]), 14); EXPECT_EQ(static_cast<float>(out_ptr[1]), 14);
...@@ -100,32 +96,29 @@ TEST(math_function, notrans_mul_trans_fp16) { ...@@ -100,32 +96,29 @@ TEST(math_function, notrans_mul_trans_fp16) {
} }
TEST(math_function, trans_mul_notrans_fp32) { TEST(math_function, trans_mul_notrans_fp32) {
using namespace paddle::framework; paddle::framework::Tensor input1;
using namespace paddle::platform; paddle::framework::Tensor input1_gpu;
paddle::framework::Tensor input2_gpu;
paddle::framework::Tensor out_gpu;
paddle::framework::Tensor out;
Tensor input1; paddle::platform::CPUPlace cpu_place;
Tensor input1_gpu; paddle::platform::CUDAPlace gpu_place(0);
Tensor input2_gpu; paddle::platform::CUDADeviceContext context(gpu_place);
Tensor out_gpu;
Tensor out;
CPUPlace cpu_place;
CUDAPlace gpu_place(0);
CUDADeviceContext context(gpu_place);
float* input1_ptr = input1.mutable_data<float>({2, 3}, cpu_place); float* input1_ptr = input1.mutable_data<float>({2, 3}, cpu_place);
float arr[6] = {0, 1, 2, 3, 4, 5}; float arr[6] = {0, 1, 2, 3, 4, 5};
memcpy(input1_ptr, arr, 6 * sizeof(float)); memcpy(input1_ptr, arr, 6 * sizeof(float));
TensorCopy(input1, gpu_place, context, &input1_gpu); paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu);
TensorCopy(input1, gpu_place, context, &input2_gpu); paddle::framework::TensorCopySync(input1, gpu_place, &input2_gpu);
out_gpu.mutable_data<float>({3, 3}, gpu_place); out_gpu.mutable_data<float>({3, 3}, gpu_place);
paddle::operators::math::matmul<paddle::platform::CUDADeviceContext, float>( paddle::operators::math::matmul<paddle::platform::CUDADeviceContext, float>(
context, input1_gpu, true, input2_gpu, false, 1, &out_gpu, 0); context, input1_gpu, true, input2_gpu, false, 1, &out_gpu, 0);
TensorCopy(out_gpu, cpu_place, context, &out); paddle::framework::TensorCopySync(out_gpu, cpu_place, &out);
float* out_ptr = out.data<float>(); float* out_ptr = out.data<float>();
context.Wait(); context.Wait();
...@@ -141,39 +134,38 @@ TEST(math_function, trans_mul_notrans_fp32) { ...@@ -141,39 +134,38 @@ TEST(math_function, trans_mul_notrans_fp32) {
} }
TEST(math_function, trans_mul_notrans_fp16) { TEST(math_function, trans_mul_notrans_fp16) {
using namespace paddle::framework; paddle::framework::Tensor input1;
using namespace paddle::platform; paddle::framework::Tensor input1_gpu;
paddle::framework::Tensor input2_gpu;
Tensor input1; paddle::framework::Tensor out_gpu;
Tensor input1_gpu; paddle::framework::Tensor out;
Tensor input2_gpu;
Tensor out_gpu;
Tensor out;
CPUPlace cpu_place; paddle::platform::CPUPlace cpu_place;
CUDAPlace gpu_place(0); paddle::platform::CUDAPlace gpu_place(0);
CUDADeviceContext context(gpu_place); paddle::platform::CUDADeviceContext context(gpu_place);
// fp16 GEMM in cublas requires GPU compute capability >= 53 // fp16 GEMM in cublas requires GPU compute capability >= 53
if (context.GetComputeCapability() < 53) { if (context.GetComputeCapability() < 53) {
return; return;
} }
float16* input1_ptr = input1.mutable_data<float16>({2, 3}, cpu_place); paddle::platform::float16* input1_ptr =
input1.mutable_data<paddle::platform::float16>({2, 3}, cpu_place);
fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5}); fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5});
TensorCopy(input1, gpu_place, context, &input1_gpu); paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu);
TensorCopy(input1, gpu_place, context, &input2_gpu); paddle::framework::TensorCopySync(input1, gpu_place, &input2_gpu);
out_gpu.mutable_data<float16>({3, 3}, gpu_place); out_gpu.mutable_data<paddle::platform::float16>({3, 3}, gpu_place);
paddle::operators::math::matmul<paddle::platform::CUDADeviceContext, float16>( paddle::operators::math::matmul<paddle::platform::CUDADeviceContext,
context, input1_gpu, true, input2_gpu, false, float16(1), &out_gpu, paddle::platform::float16>(
float16(0)); context, input1_gpu, true, input2_gpu, false,
paddle::platform::float16(1), &out_gpu, paddle::platform::float16(0));
TensorCopy(out_gpu, cpu_place, context, &out); paddle::framework::TensorCopySync(out_gpu, cpu_place, &out);
float16* out_ptr = out.data<float16>(); paddle::platform::float16* out_ptr = out.data<paddle::platform::float16>();
context.Wait(); context.Wait();
EXPECT_EQ(static_cast<float>(out_ptr[0]), 9); EXPECT_EQ(static_cast<float>(out_ptr[0]), 9);
EXPECT_EQ(static_cast<float>(out_ptr[1]), 12); EXPECT_EQ(static_cast<float>(out_ptr[1]), 12);
...@@ -187,19 +179,16 @@ TEST(math_function, trans_mul_notrans_fp16) { ...@@ -187,19 +179,16 @@ TEST(math_function, trans_mul_notrans_fp16) {
} }
TEST(math_function, gemm_notrans_cublas_fp32) { TEST(math_function, gemm_notrans_cublas_fp32) {
using namespace paddle::framework; paddle::framework::Tensor input1;
using namespace paddle::platform; paddle::framework::Tensor input2;
paddle::framework::Tensor input3;
paddle::framework::Tensor input1_gpu;
paddle::framework::Tensor input2_gpu;
paddle::framework::Tensor input3_gpu;
Tensor input1; paddle::platform::CPUPlace cpu_place;
Tensor input2; paddle::platform::CUDAPlace gpu_place(0);
Tensor input3; paddle::platform::CUDADeviceContext context(gpu_place);
Tensor input1_gpu;
Tensor input2_gpu;
Tensor input3_gpu;
CPUPlace cpu_place;
CUDAPlace gpu_place(0);
CUDADeviceContext context(gpu_place);
int m = 2; int m = 2;
int n = 3; int n = 3;
...@@ -214,9 +203,9 @@ TEST(math_function, gemm_notrans_cublas_fp32) { ...@@ -214,9 +203,9 @@ TEST(math_function, gemm_notrans_cublas_fp32) {
float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7}; float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7};
memcpy(input3_ptr, arr3, 8 * sizeof(float)); memcpy(input3_ptr, arr3, 8 * sizeof(float));
TensorCopy(input1, gpu_place, context, &input1_gpu); paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu);
TensorCopy(input2, gpu_place, context, &input2_gpu); paddle::framework::TensorCopySync(input2, gpu_place, &input2_gpu);
TensorCopy(input3, gpu_place, context, &input3_gpu); paddle::framework::TensorCopySync(input3, gpu_place, &input3_gpu);
float* a = input1_gpu.data<float>(); float* a = input1_gpu.data<float>();
float* b = input2_gpu.data<float>(); float* b = input2_gpu.data<float>();
float* c = input3_gpu.mutable_data<float>(gpu_place); float* c = input3_gpu.mutable_data<float>(gpu_place);
...@@ -224,7 +213,7 @@ TEST(math_function, gemm_notrans_cublas_fp32) { ...@@ -224,7 +213,7 @@ TEST(math_function, gemm_notrans_cublas_fp32) {
paddle::operators::math::gemm<paddle::platform::CUDADeviceContext, float>( paddle::operators::math::gemm<paddle::platform::CUDADeviceContext, float>(
context, false, false, m, n, k, 1, a, 3, b + 1, 4, 1, c + 1, 4); context, false, false, m, n, k, 1, a, 3, b + 1, 4, 1, c + 1, 4);
TensorCopy(input3_gpu, cpu_place, context, &input3); paddle::framework::TensorCopySync(input3_gpu, cpu_place, &input3);
// numpy code: // numpy code:
// a = np.arange(6).reshape(2, 3) // a = np.arange(6).reshape(2, 3)
...@@ -244,19 +233,16 @@ TEST(math_function, gemm_notrans_cublas_fp32) { ...@@ -244,19 +233,16 @@ TEST(math_function, gemm_notrans_cublas_fp32) {
} }
TEST(math_function, gemm_notrans_cublas_fp16) { TEST(math_function, gemm_notrans_cublas_fp16) {
using namespace paddle::framework; paddle::framework::Tensor input1;
using namespace paddle::platform; paddle::framework::Tensor input2;
paddle::framework::Tensor input3;
Tensor input1; paddle::framework::Tensor input1_gpu;
Tensor input2; paddle::framework::Tensor input2_gpu;
Tensor input3; paddle::framework::Tensor input3_gpu;
Tensor input1_gpu;
Tensor input2_gpu;
Tensor input3_gpu;
CPUPlace cpu_place; paddle::platform::CPUPlace cpu_place;
CUDAPlace gpu_place(0); paddle::platform::CUDAPlace gpu_place(0);
CUDADeviceContext context(gpu_place); paddle::platform::CUDADeviceContext context(gpu_place);
// fp16 GEMM in cublas requires GPU compute capability >= 53 // fp16 GEMM in cublas requires GPU compute capability >= 53
if (context.GetComputeCapability() < 53) { if (context.GetComputeCapability() < 53) {
...@@ -266,26 +252,31 @@ TEST(math_function, gemm_notrans_cublas_fp16) { ...@@ -266,26 +252,31 @@ TEST(math_function, gemm_notrans_cublas_fp16) {
int m = 2; int m = 2;
int n = 3; int n = 3;
int k = 3; int k = 3;
float16* input1_ptr = input1.mutable_data<float16>({2, 3}, cpu_place); paddle::platform::float16* input1_ptr =
input1.mutable_data<paddle::platform::float16>({2, 3}, cpu_place);
fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5}); fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5});
float16* input2_ptr = input2.mutable_data<float16>({3, 4}, cpu_place); paddle::platform::float16* input2_ptr =
input2.mutable_data<paddle::platform::float16>({3, 4}, cpu_place);
fill_fp16_data(input2_ptr, input2.numel(), fill_fp16_data(input2_ptr, input2.numel(),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}); {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11});
float16* input3_ptr = input3.mutable_data<float16>({2, 4}, cpu_place); paddle::platform::float16* input3_ptr =
input3.mutable_data<paddle::platform::float16>({2, 4}, cpu_place);
fill_fp16_data(input3_ptr, input3.numel(), {0, 1, 2, 3, 4, 5, 6, 7}); fill_fp16_data(input3_ptr, input3.numel(), {0, 1, 2, 3, 4, 5, 6, 7});
TensorCopy(input1, gpu_place, context, &input1_gpu); paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu);
TensorCopy(input2, gpu_place, context, &input2_gpu); paddle::framework::TensorCopySync(input2, gpu_place, &input2_gpu);
TensorCopy(input3, gpu_place, context, &input3_gpu); paddle::framework::TensorCopySync(input3, gpu_place, &input3_gpu);
float16* a = input1_gpu.data<float16>(); paddle::platform::float16* a = input1_gpu.data<paddle::platform::float16>();
float16* b = input2_gpu.data<float16>(); paddle::platform::float16* b = input2_gpu.data<paddle::platform::float16>();
float16* c = input3_gpu.mutable_data<float16>(gpu_place); paddle::platform::float16* c =
input3_gpu.mutable_data<paddle::platform::float16>(gpu_place);
paddle::operators::math::gemm<paddle::platform::CUDADeviceContext, float16>( paddle::operators::math::gemm<paddle::platform::CUDADeviceContext,
context, false, false, m, n, k, float16(1), a, 3, b + 1, 4, float16(1), paddle::platform::float16>(
c + 1, 4); context, false, false, m, n, k, paddle::platform::float16(1), a, 3, b + 1,
4, paddle::platform::float16(1), c + 1, 4);
TensorCopy(input3_gpu, cpu_place, context, &input3); paddle::framework::TensorCopySync(input3_gpu, cpu_place, &input3);
// numpy code: // numpy code:
// a = np.arange(6).reshape(2, 3) // a = np.arange(6).reshape(2, 3)
...@@ -305,19 +296,16 @@ TEST(math_function, gemm_notrans_cublas_fp16) { ...@@ -305,19 +296,16 @@ TEST(math_function, gemm_notrans_cublas_fp16) {
} }
TEST(math_function, gemm_trans_cublas_fp32) { TEST(math_function, gemm_trans_cublas_fp32) {
using namespace paddle::framework; paddle::framework::Tensor input1;
using namespace paddle::platform; paddle::framework::Tensor input2;
paddle::framework::Tensor input3;
Tensor input1; paddle::framework::Tensor input1_gpu;
Tensor input2; paddle::framework::Tensor input2_gpu;
Tensor input3; paddle::framework::Tensor input3_gpu;
Tensor input1_gpu;
Tensor input2_gpu;
Tensor input3_gpu;
CPUPlace cpu_place; paddle::platform::CPUPlace cpu_place;
CUDAPlace gpu_place(0); paddle::platform::CUDAPlace gpu_place(0);
CUDADeviceContext context(gpu_place); paddle::platform::CUDADeviceContext context(gpu_place);
int m = 2; int m = 2;
int n = 3; int n = 3;
...@@ -332,9 +320,9 @@ TEST(math_function, gemm_trans_cublas_fp32) { ...@@ -332,9 +320,9 @@ TEST(math_function, gemm_trans_cublas_fp32) {
float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7}; float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7};
memcpy(input3_ptr, arr3, 8 * sizeof(float)); memcpy(input3_ptr, arr3, 8 * sizeof(float));
TensorCopy(input1, gpu_place, context, &input1_gpu); paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu);
TensorCopy(input2, gpu_place, context, &input2_gpu); paddle::framework::TensorCopySync(input2, gpu_place, &input2_gpu);
TensorCopy(input3, gpu_place, context, &input3_gpu); paddle::framework::TensorCopySync(input3, gpu_place, &input3_gpu);
float* a = input1_gpu.data<float>(); float* a = input1_gpu.data<float>();
float* b = input2_gpu.data<float>(); float* b = input2_gpu.data<float>();
float* c = input3_gpu.mutable_data<float>(gpu_place); float* c = input3_gpu.mutable_data<float>(gpu_place);
...@@ -342,7 +330,7 @@ TEST(math_function, gemm_trans_cublas_fp32) { ...@@ -342,7 +330,7 @@ TEST(math_function, gemm_trans_cublas_fp32) {
paddle::operators::math::gemm<paddle::platform::CUDADeviceContext, float>( paddle::operators::math::gemm<paddle::platform::CUDADeviceContext, float>(
context, false, true, m, n, k, 1, a, 3, b + 3, 3, 1, c + 1, 4); context, false, true, m, n, k, 1, a, 3, b + 3, 3, 1, c + 1, 4);
TensorCopy(input3_gpu, cpu_place, context, &input3); paddle::framework::TensorCopySync(input3_gpu, cpu_place, &input3);
context.Wait(); context.Wait();
EXPECT_EQ(input3_ptr[0], 0); EXPECT_EQ(input3_ptr[0], 0);
...@@ -356,19 +344,16 @@ TEST(math_function, gemm_trans_cublas_fp32) { ...@@ -356,19 +344,16 @@ TEST(math_function, gemm_trans_cublas_fp32) {
} }
TEST(math_function, gemm_trans_cublas_fp16) { TEST(math_function, gemm_trans_cublas_fp16) {
using namespace paddle::framework; paddle::framework::Tensor input1;
using namespace paddle::platform; paddle::framework::Tensor input2;
paddle::framework::Tensor input3;
paddle::framework::Tensor input1_gpu;
paddle::framework::Tensor input2_gpu;
paddle::framework::Tensor input3_gpu;
Tensor input1; paddle::platform::CPUPlace cpu_place;
Tensor input2; paddle::platform::CUDAPlace gpu_place(0);
Tensor input3; paddle::platform::CUDADeviceContext context(gpu_place);
Tensor input1_gpu;
Tensor input2_gpu;
Tensor input3_gpu;
CPUPlace cpu_place;
CUDAPlace gpu_place(0);
CUDADeviceContext context(gpu_place);
// fp16 GEMM in cublas requires GPU compute capability >= 53 // fp16 GEMM in cublas requires GPU compute capability >= 53
if (context.GetComputeCapability() < 53) { if (context.GetComputeCapability() < 53) {
...@@ -378,26 +363,31 @@ TEST(math_function, gemm_trans_cublas_fp16) { ...@@ -378,26 +363,31 @@ TEST(math_function, gemm_trans_cublas_fp16) {
int m = 2; int m = 2;
int n = 3; int n = 3;
int k = 3; int k = 3;
float16* input1_ptr = input1.mutable_data<float16>({2, 3}, cpu_place); paddle::platform::float16* input1_ptr =
input1.mutable_data<paddle::platform::float16>({2, 3}, cpu_place);
fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5}); fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5});
float16* input2_ptr = input2.mutable_data<float16>({4, 3}, cpu_place); paddle::platform::float16* input2_ptr =
input2.mutable_data<paddle::platform::float16>({4, 3}, cpu_place);
fill_fp16_data(input2_ptr, input2.numel(), fill_fp16_data(input2_ptr, input2.numel(),
{0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11}); {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11});
float16* input3_ptr = input3.mutable_data<float16>({2, 4}, cpu_place); paddle::platform::float16* input3_ptr =
input3.mutable_data<paddle::platform::float16>({2, 4}, cpu_place);
fill_fp16_data(input3_ptr, input3.numel(), {0, 1, 2, 3, 4, 5, 6, 7}); fill_fp16_data(input3_ptr, input3.numel(), {0, 1, 2, 3, 4, 5, 6, 7});
TensorCopy(input1, gpu_place, context, &input1_gpu); paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu);
TensorCopy(input2, gpu_place, context, &input2_gpu); paddle::framework::TensorCopySync(input2, gpu_place, &input2_gpu);
TensorCopy(input3, gpu_place, context, &input3_gpu); paddle::framework::TensorCopySync(input3, gpu_place, &input3_gpu);
float16* a = input1_gpu.data<float16>(); paddle::platform::float16* a = input1_gpu.data<paddle::platform::float16>();
float16* b = input2_gpu.data<float16>(); paddle::platform::float16* b = input2_gpu.data<paddle::platform::float16>();
float16* c = input3_gpu.mutable_data<float16>(gpu_place); paddle::platform::float16* c =
input3_gpu.mutable_data<paddle::platform::float16>(gpu_place);
paddle::operators::math::gemm<paddle::platform::CUDADeviceContext, float16>( paddle::operators::math::gemm<paddle::platform::CUDADeviceContext,
context, false, true, m, n, k, float16(1), a, 3, b + 3, 3, float16(1), paddle::platform::float16>(
c + 1, 4); context, false, true, m, n, k, paddle::platform::float16(1), a, 3, b + 3,
3, paddle::platform::float16(1), c + 1, 4);
TensorCopy(input3_gpu, cpu_place, context, &input3); paddle::framework::TensorCopySync(input3_gpu, cpu_place, &input3);
context.Wait(); context.Wait();
EXPECT_EQ(static_cast<float>(input3_ptr[0]), 0); EXPECT_EQ(static_cast<float>(input3_ptr[0]), 0);
...@@ -412,24 +402,21 @@ TEST(math_function, gemm_trans_cublas_fp16) { ...@@ -412,24 +402,21 @@ TEST(math_function, gemm_trans_cublas_fp16) {
template <typename T> template <typename T>
void GemvTest(int m, int n, bool trans) { void GemvTest(int m, int n, bool trans) {
using namespace paddle::framework; paddle::framework::Tensor mat_a;
using namespace paddle::platform; paddle::framework::Tensor vec_b;
paddle::framework::Tensor vec_c;
Tensor mat_a;
Tensor vec_b;
Tensor vec_c;
CPUPlace cpu_place; paddle::platform::CPUPlace cpu_place;
CUDAPlace gpu_place(0); paddle::platform::CUDAPlace gpu_place(0);
CUDADeviceContext context(gpu_place); paddle::platform::CUDADeviceContext context(gpu_place);
T* data_a = mat_a.mutable_data<T>({m, n}, cpu_place); T* data_a = mat_a.mutable_data<T>({m, n}, cpu_place);
T* data_b = vec_b.mutable_data<T>({trans ? m : n}, cpu_place); T* data_b = vec_b.mutable_data<T>({trans ? m : n}, cpu_place);
T* data_c = vec_c.mutable_data<T>({trans ? n : m}, cpu_place); T* data_c = vec_c.mutable_data<T>({trans ? n : m}, cpu_place);
Tensor g_mat_a; paddle::framework::Tensor g_mat_a;
Tensor g_vec_b; paddle::framework::Tensor g_vec_b;
Tensor g_vec_c; paddle::framework::Tensor g_vec_c;
T* g_data_a = g_mat_a.mutable_data<T>(mat_a.dims(), gpu_place); T* g_data_a = g_mat_a.mutable_data<T>(mat_a.dims(), gpu_place);
T* g_data_b = g_vec_b.mutable_data<T>(vec_b.dims(), gpu_place); T* g_data_b = g_vec_b.mutable_data<T>(vec_b.dims(), gpu_place);
T* g_data_c = g_vec_c.mutable_data<T>(vec_c.dims(), gpu_place); T* g_data_c = g_vec_c.mutable_data<T>(vec_c.dims(), gpu_place);
...@@ -441,14 +428,14 @@ void GemvTest(int m, int n, bool trans) { ...@@ -441,14 +428,14 @@ void GemvTest(int m, int n, bool trans) {
data_b[i] = static_cast<T>(i); data_b[i] = static_cast<T>(i);
} }
TensorCopy(mat_a, gpu_place, context, &g_mat_a); paddle::framework::TensorCopySync(mat_a, gpu_place, &g_mat_a);
TensorCopy(vec_b, gpu_place, context, &g_vec_b); paddle::framework::TensorCopySync(vec_b, gpu_place, &g_vec_b);
paddle::operators::math::gemv<CUDADeviceContext, T>( paddle::operators::math::gemv<paddle::platform::CUDADeviceContext, T>(
context, trans, static_cast<int>(m), static_cast<int>(n), 1., g_data_a, context, trans, static_cast<int>(m), static_cast<int>(n), 1., g_data_a,
g_data_b, 0., g_data_c); g_data_b, 0., g_data_c);
TensorCopy(g_vec_c, cpu_place, context, &vec_c); paddle::framework::TensorCopySync(g_vec_c, cpu_place, &vec_c);
if (!trans) { if (!trans) {
for (int i = 0; i < m; ++i) { for (int i = 0; i < m; ++i) {
......
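For reference, a minimal sketch of the synchronous copy pattern the test hunks above switch to: TensorCopySync takes no device context and blocks until the copy finishes, so staging copies no longer need a context or an explicit Wait() just to publish the data. The headers, shapes, and values below are illustrative assumptions, not part of the change.

#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/place.h"

void CopyRoundTripSketch() {
  paddle::framework::Tensor cpu_in, gpu_buf, cpu_out;
  paddle::platform::CPUPlace cpu_place;
  paddle::platform::CUDAPlace gpu_place(0);

  float* in = cpu_in.mutable_data<float>({2, 3}, cpu_place);
  for (int i = 0; i < 6; ++i) in[i] = static_cast<float>(i);

  // Blocking copies: no stream or context argument, and no explicit Wait()
  // is needed before reading the staged data back on the host.
  paddle::framework::TensorCopySync(cpu_in, gpu_place, &gpu_buf);
  paddle::framework::TensorCopySync(gpu_buf, cpu_place, &cpu_out);
}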
...@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and ...@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#pragma once #pragma once
#include <algorithm>
#include <vector>
#include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/math_function.h"
namespace paddle { namespace paddle {
......
...@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and ...@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/operators/math/maxouting.h" #include "paddle/fluid/operators/math/maxouting.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and ...@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/operators/math/pooling.h" #include "paddle/fluid/operators/math/pooling.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "sampler.h" #include "paddle/fluid/operators/math/sampler.h"
namespace paddle { namespace paddle {
namespace random { namespace random {
......
...@@ -13,9 +13,9 @@ See the License for the specific language governing permissions and ...@@ -13,9 +13,9 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#pragma once #pragma once
#include <cstdint>
#include <memory> #include <memory>
#include <random> #include <random>
typedef long int64;
namespace paddle { namespace paddle {
namespace operators { namespace operators {
namespace math { namespace math {
...@@ -27,25 +27,25 @@ namespace math { ...@@ -27,25 +27,25 @@ namespace math {
*/ */
class Sampler { class Sampler {
public: public:
explicit Sampler(int64 range) : range_(range) { explicit Sampler(int64_t range) : range_(range) {
PADDLE_ENFORCE_GT(range, 0); PADDLE_ENFORCE_GT(range, 0);
std::random_device r; std::random_device r;
seed_ = r(); seed_ = r();
} }
explicit Sampler(int64 range, unsigned int seed) explicit Sampler(int64_t range, unsigned int seed)
: range_(range), seed_(seed) { : range_(range), seed_(seed) {
PADDLE_ENFORCE_GT(range, 0); PADDLE_ENFORCE_GT(range, 0);
} }
virtual ~Sampler(); virtual ~Sampler();
// Sample a single value // Sample a single value
virtual int64 Sample() const = 0; virtual int64_t Sample() const = 0;
// The probability that a single call to Sample() returns the given value. // The probability that a single call to Sample() returns the given value.
virtual float Probability(int64 value) const = 0; virtual float Probability(int64_t value) const = 0;
int64 range() { return range_; }; int64_t range() { return range_; }
protected: protected:
const int64 range_; const int64_t range_;
unsigned int seed_; unsigned int seed_;
}; };
...@@ -56,15 +56,15 @@ class Sampler { ...@@ -56,15 +56,15 @@ class Sampler {
*/ */
class UniformSampler : public Sampler { class UniformSampler : public Sampler {
public: public:
explicit UniformSampler(int64 range); explicit UniformSampler(int64_t range);
explicit UniformSampler(int64 range, unsigned int seed); explicit UniformSampler(int64_t range, unsigned int seed);
~UniformSampler() override {} ~UniformSampler() override {}
int64 Sample() const override; int64_t Sample() const override;
float Probability(int64 value) const override; float Probability(int64_t value) const override;
private: private:
const float inv_range_; const float inv_range_;
...@@ -79,15 +79,15 @@ class UniformSampler : public Sampler { ...@@ -79,15 +79,15 @@ class UniformSampler : public Sampler {
*/ */
class LogUniformSampler : public Sampler { class LogUniformSampler : public Sampler {
public: public:
explicit LogUniformSampler(int64 range); explicit LogUniformSampler(int64_t range);
explicit LogUniformSampler(int64 range, unsigned int seed); explicit LogUniformSampler(int64_t range, unsigned int seed);
~LogUniformSampler() override {} ~LogUniformSampler() override {}
int64 Sample() const override; int64_t Sample() const override;
float Probability(int64 value) const override; float Probability(int64_t value) const override;
private: private:
const float log_range_; const float log_range_;
...@@ -95,6 +95,6 @@ class LogUniformSampler : public Sampler { ...@@ -95,6 +95,6 @@ class LogUniformSampler : public Sampler {
std::shared_ptr<std::uniform_real_distribution<>> dist_; std::shared_ptr<std::uniform_real_distribution<>> dist_;
}; };
} // math } // namespace math
} // namespace operators } // namespace operators
} // namespace paddle } // namespace paddle
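A short aside on the range type swapped in above, with the reasoning as a compile-time sketch: a hand-rolled typedef long int64 is only 64 bits under LP64 data models, whereas <cstdint>'s int64_t is guaranteed to be exactly 64 bits wherever it exists, which is presumably why the sampler range moves to it.

#include <climits>
#include <cstdint>

// `long` is 32 bits under LLP64 data models (e.g. 64-bit Windows), so it is
// not a portable 64-bit range type; int64_t is exactly 64 bits by definition.
static_assert(sizeof(int64_t) * CHAR_BIT == 64, "int64_t is always 64 bits");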
...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and ...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include <set> #include <set>
#include <vector>
#include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/operators/math/selected_rows_functor.h"
......
...@@ -13,10 +13,11 @@ See the License for the specific language governing permissions and ...@@ -13,10 +13,11 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include <set> #include <set>
#include <vector>
#include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -13,41 +13,50 @@ See the License for the specific language governing permissions and ...@@ -13,41 +13,50 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/operators/math/selected_rows_functor.h"
#include <vector>
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/math_function.h"
TEST(selected_rows_functor, cpu_add) { TEST(selected_rows_functor, cpu_add) {
using namespace paddle::framework; paddle::platform::CPUPlace cpu_place;
using namespace paddle::platform; paddle::platform::CPUDeviceContext ctx(cpu_place);
using namespace paddle::operators::math; paddle::operators::math::SetConstant<paddle::platform::CPUDeviceContext,
float>
CPUPlace cpu_place; functor;
CPUDeviceContext ctx(cpu_place);
SetConstant<CPUDeviceContext, float> functor;
int64_t height = 10; int64_t height = 10;
int64_t row_numel = 10; int64_t row_numel = 10;
std::vector<int64_t> rows1{0, 4, 7}; std::vector<int64_t> rows1{0, 4, 7};
std::unique_ptr<SelectedRows> selected_rows1{new SelectedRows(rows1, height)}; std::unique_ptr<paddle::framework::SelectedRows> selected_rows1{
new paddle::framework::SelectedRows(rows1, height)};
auto* in1_value = selected_rows1->mutable_value(); auto* in1_value = selected_rows1->mutable_value();
in1_value->mutable_data<float>( in1_value->mutable_data<float>(
make_ddim({static_cast<int64_t>(rows1.size()), row_numel}), cpu_place); paddle::framework::make_ddim(
{static_cast<int64_t>(rows1.size()), row_numel}),
cpu_place);
functor(ctx, in1_value, 1.0); functor(ctx, in1_value, 1.0);
std::vector<int64_t> rows2{0, 5, 7, 9}; std::vector<int64_t> rows2{0, 5, 7, 9};
std::unique_ptr<SelectedRows> selected_rows2{new SelectedRows(rows2, height)}; std::unique_ptr<paddle::framework::SelectedRows> selected_rows2{
new paddle::framework::SelectedRows(rows2, height)};
auto* in2_value = selected_rows2->mutable_value(); auto* in2_value = selected_rows2->mutable_value();
in2_value->mutable_data<float>( in2_value->mutable_data<float>(
make_ddim({static_cast<int64_t>(rows2.size()), row_numel}), cpu_place); paddle::framework::make_ddim(
{static_cast<int64_t>(rows2.size()), row_numel}),
cpu_place);
functor(ctx, in2_value, 2.0); functor(ctx, in2_value, 2.0);
std::unique_ptr<SelectedRows> output{new SelectedRows()}; std::unique_ptr<paddle::framework::SelectedRows> output{
new paddle::framework::SelectedRows()};
auto* out_value = output->mutable_value(); auto* out_value = output->mutable_value();
// simply concat two SelectedRows // simply concat two SelectedRows
out_value->mutable_data<float>(make_ddim({7, 10}), cpu_place); out_value->mutable_data<float>(paddle::framework::make_ddim({7, 10}),
cpu_place);
SelectedRowsAdd<CPUDeviceContext, float> add_functor; paddle::operators::math::SelectedRowsAdd<paddle::platform::CPUDeviceContext,
float>
add_functor;
add_functor(ctx, *selected_rows1, *selected_rows2, output.get()); add_functor(ctx, *selected_rows1, *selected_rows2, output.get());
auto out_height = output->height(); auto out_height = output->height();
...@@ -78,14 +87,20 @@ TEST(selected_rows_functor, cpu_add) { ...@@ -78,14 +87,20 @@ TEST(selected_rows_functor, cpu_add) {
EXPECT_EQ(out_data[5 * row_numel + 7], 2.0); EXPECT_EQ(out_data[5 * row_numel + 7], 2.0);
EXPECT_EQ(out_data[6 * row_numel + 9], 2.0); EXPECT_EQ(out_data[6 * row_numel + 9], 2.0);
std::unique_ptr<Tensor> tensor1{new Tensor()}; std::unique_ptr<paddle::framework::Tensor> tensor1{
tensor1->mutable_data<float>(make_ddim({height, row_numel}), cpu_place); new paddle::framework::Tensor()};
tensor1->mutable_data<float>(
paddle::framework::make_ddim({height, row_numel}), cpu_place);
functor(ctx, tensor1.get(), 3.0); functor(ctx, tensor1.get(), 3.0);
std::unique_ptr<Tensor> tensor2{new Tensor()}; std::unique_ptr<paddle::framework::Tensor> tensor2{
tensor2->mutable_data<float>(make_ddim({height, row_numel}), cpu_place); new paddle::framework::Tensor()};
tensor2->mutable_data<float>(
paddle::framework::make_ddim({height, row_numel}), cpu_place);
SelectedRowsAddTensor<CPUDeviceContext, float> add_tensor_functor; paddle::operators::math::SelectedRowsAddTensor<
paddle::platform::CPUDeviceContext, float>
add_tensor_functor;
add_tensor_functor(ctx, *output, *tensor1, tensor2.get()); add_tensor_functor(ctx, *output, *tensor1, tensor2.get());
auto* tensor2_data = tensor2->data<float>(); auto* tensor2_data = tensor2->data<float>();
...@@ -106,38 +121,46 @@ TEST(selected_rows_functor, cpu_add) { ...@@ -106,38 +121,46 @@ TEST(selected_rows_functor, cpu_add) {
} }
TEST(selected_rows_functor, cpu_add_to) { TEST(selected_rows_functor, cpu_add_to) {
using namespace paddle::framework; paddle::platform::CPUPlace cpu_place;
using namespace paddle::platform; paddle::platform::CPUDeviceContext ctx(cpu_place);
using namespace paddle::operators::math; paddle::operators::math::SetConstant<paddle::platform::CPUDeviceContext,
float>
CPUPlace cpu_place; functor;
CPUDeviceContext ctx(cpu_place);
SetConstant<CPUDeviceContext, float> functor;
int64_t height = 10; int64_t height = 10;
int64_t row_numel = 10; int64_t row_numel = 10;
std::vector<int64_t> rows1{0, 4, 7}; std::vector<int64_t> rows1{0, 4, 7};
std::unique_ptr<SelectedRows> selected_rows1{new SelectedRows(rows1, height)}; std::unique_ptr<paddle::framework::SelectedRows> selected_rows1{
new paddle::framework::SelectedRows(rows1, height)};
auto* in1_value = selected_rows1->mutable_value(); auto* in1_value = selected_rows1->mutable_value();
in1_value->mutable_data<float>( in1_value->mutable_data<float>(
make_ddim({static_cast<int64_t>(rows1.size()), row_numel}), cpu_place); paddle::framework::make_ddim(
{static_cast<int64_t>(rows1.size()), row_numel}),
cpu_place);
functor(ctx, in1_value, 1.0); functor(ctx, in1_value, 1.0);
std::vector<int64_t> rows2{0, 5, 7, 9}; std::vector<int64_t> rows2{0, 5, 7, 9};
std::unique_ptr<SelectedRows> selected_rows2{new SelectedRows(rows2, height)}; std::unique_ptr<paddle::framework::SelectedRows> selected_rows2{
new paddle::framework::SelectedRows(rows2, height)};
auto* in2_value = selected_rows2->mutable_value(); auto* in2_value = selected_rows2->mutable_value();
in2_value->mutable_data<float>( in2_value->mutable_data<float>(
make_ddim({static_cast<int64_t>(rows2.size()), row_numel}), cpu_place); paddle::framework::make_ddim(
{static_cast<int64_t>(rows2.size()), row_numel}),
cpu_place);
functor(ctx, in2_value, 2.0); functor(ctx, in2_value, 2.0);
std::unique_ptr<SelectedRows> output{new SelectedRows()}; std::unique_ptr<paddle::framework::SelectedRows> output{
new paddle::framework::SelectedRows()};
output->set_height(height); output->set_height(height);
auto* out_value = output->mutable_value(); auto* out_value = output->mutable_value();
// simply concat two SelectedRows // simply concat two SelectedRows
out_value->mutable_data<float>(make_ddim({7, 10}), cpu_place); out_value->mutable_data<float>(paddle::framework::make_ddim({7, 10}),
cpu_place);
SelectedRowsAddTo<CPUDeviceContext, float> add_to_functor; paddle::operators::math::SelectedRowsAddTo<paddle::platform::CPUDeviceContext,
float>
add_to_functor;
add_to_functor(ctx, *selected_rows1, 0, output.get()); add_to_functor(ctx, *selected_rows1, 0, output.get());
add_to_functor(ctx, *selected_rows2, in1_value->numel(), output.get()); add_to_functor(ctx, *selected_rows2, in1_value->numel(), output.get());
...@@ -169,11 +192,15 @@ TEST(selected_rows_functor, cpu_add_to) { ...@@ -169,11 +192,15 @@ TEST(selected_rows_functor, cpu_add_to) {
EXPECT_EQ(out_data[5 * row_numel + 7], 2.0); EXPECT_EQ(out_data[5 * row_numel + 7], 2.0);
EXPECT_EQ(out_data[6 * row_numel + 9], 2.0); EXPECT_EQ(out_data[6 * row_numel + 9], 2.0);
std::unique_ptr<Tensor> tensor1{new Tensor()}; std::unique_ptr<paddle::framework::Tensor> tensor1{
tensor1->mutable_data<float>(make_ddim({height, row_numel}), cpu_place); new paddle::framework::Tensor()};
tensor1->mutable_data<float>(
paddle::framework::make_ddim({height, row_numel}), cpu_place);
functor(ctx, tensor1.get(), 3.0); functor(ctx, tensor1.get(), 3.0);
SelectedRowsAddToTensor<CPUDeviceContext, float> add_to_tensor_functor; paddle::operators::math::SelectedRowsAddToTensor<
paddle::platform::CPUDeviceContext, float>
add_to_tensor_functor;
add_to_tensor_functor(ctx, *output, tensor1.get()); add_to_tensor_functor(ctx, *output, tensor1.get());
auto* tensor1_data = tensor1->data<float>(); auto* tensor1_data = tensor1->data<float>();
......
...@@ -12,43 +12,52 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,43 +12,52 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include <vector>
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/operators/math/selected_rows_functor.h"
TEST(selected_rows_functor, gpu_add) { TEST(selected_rows_functor, gpu_add) {
using namespace paddle::framework; paddle::platform::CUDAPlace gpu_place(0);
using namespace paddle::platform; paddle::platform::CPUPlace cpu_place;
using namespace paddle::operators::math; paddle::platform::CUDADeviceContext ctx(gpu_place);
paddle::operators::math::SetConstant<paddle::platform::CUDADeviceContext,
CUDAPlace gpu_place(0); float>
CPUPlace cpu_place; functor;
CUDADeviceContext ctx(gpu_place);
SetConstant<CUDADeviceContext, float> functor;
int64_t height = 10; int64_t height = 10;
int64_t row_numel = 10; int64_t row_numel = 10;
std::vector<int64_t> rows1{0, 4, 7}; std::vector<int64_t> rows1{0, 4, 7};
std::unique_ptr<SelectedRows> selected_rows1{new SelectedRows(rows1, height)}; std::unique_ptr<paddle::framework::SelectedRows> selected_rows1{
new paddle::framework::SelectedRows(rows1, height)};
auto* in1_value = selected_rows1->mutable_value(); auto* in1_value = selected_rows1->mutable_value();
in1_value->mutable_data<float>( in1_value->mutable_data<float>(
make_ddim({static_cast<int64_t>(rows1.size()), row_numel}), gpu_place); paddle::framework::make_ddim(
{static_cast<int64_t>(rows1.size()), row_numel}),
gpu_place);
functor(ctx, in1_value, 1.0); functor(ctx, in1_value, 1.0);
std::vector<int64_t> rows2{0, 5, 7, 9}; std::vector<int64_t> rows2{0, 5, 7, 9};
std::unique_ptr<SelectedRows> selected_rows2{new SelectedRows(rows2, height)}; std::unique_ptr<paddle::framework::SelectedRows> selected_rows2{
new paddle::framework::SelectedRows(rows2, height)};
auto* in2_value = selected_rows2->mutable_value(); auto* in2_value = selected_rows2->mutable_value();
in2_value->mutable_data<float>( in2_value->mutable_data<float>(
make_ddim({static_cast<int64_t>(rows2.size()), row_numel}), gpu_place); paddle::framework::make_ddim(
{static_cast<int64_t>(rows2.size()), row_numel}),
gpu_place);
functor(ctx, in2_value, 2.0); functor(ctx, in2_value, 2.0);
std::unique_ptr<SelectedRows> output{new SelectedRows()}; std::unique_ptr<paddle::framework::SelectedRows> output{
new paddle::framework::SelectedRows()};
auto* out_value = output->mutable_value(); auto* out_value = output->mutable_value();
// simplely concat two SelectedRows // simply concat two SelectedRows
out_value->mutable_data<float>(make_ddim({7, 10}), gpu_place); out_value->mutable_data<float>(paddle::framework::make_ddim({7, 10}),
gpu_place);
SelectedRowsAdd<CUDADeviceContext, float> add_functor; paddle::operators::math::SelectedRowsAdd<paddle::platform::CUDADeviceContext,
float>
add_functor;
add_functor(ctx, *selected_rows1, *selected_rows2, output.get()); add_functor(ctx, *selected_rows1, *selected_rows2, output.get());
auto out_height = output->height(); auto out_height = output->height();
...@@ -66,8 +75,8 @@ TEST(selected_rows_functor, gpu_add) { ...@@ -66,8 +75,8 @@ TEST(selected_rows_functor, gpu_add) {
EXPECT_EQ(out_rows[5], 7); EXPECT_EQ(out_rows[5], 7);
EXPECT_EQ(out_rows[6], 9); EXPECT_EQ(out_rows[6], 9);
Tensor out_cpu; paddle::framework::Tensor out_cpu;
TensorCopy(*out_value, cpu_place, ctx, &out_cpu); paddle::framework::TensorCopy(*out_value, cpu_place, ctx, &out_cpu);
ctx.Wait(); ctx.Wait();
auto* out_cpu_data = out_cpu.data<float>(); auto* out_cpu_data = out_cpu.data<float>();
...@@ -83,18 +92,24 @@ TEST(selected_rows_functor, gpu_add) { ...@@ -83,18 +92,24 @@ TEST(selected_rows_functor, gpu_add) {
EXPECT_EQ(out_cpu_data[5 * row_numel + 7], 2.0); EXPECT_EQ(out_cpu_data[5 * row_numel + 7], 2.0);
EXPECT_EQ(out_cpu_data[6 * row_numel + 9], 2.0); EXPECT_EQ(out_cpu_data[6 * row_numel + 9], 2.0);
std::unique_ptr<Tensor> tensor1{new Tensor()}; std::unique_ptr<paddle::framework::Tensor> tensor1{
tensor1->mutable_data<float>(make_ddim({height, row_numel}), gpu_place); new paddle::framework::Tensor()};
tensor1->mutable_data<float>(
paddle::framework::make_ddim({height, row_numel}), gpu_place);
functor(ctx, tensor1.get(), 3.0); functor(ctx, tensor1.get(), 3.0);
std::unique_ptr<Tensor> tensor2{new Tensor()}; std::unique_ptr<paddle::framework::Tensor> tensor2{
tensor2->mutable_data<float>(make_ddim({height, row_numel}), gpu_place); new paddle::framework::Tensor()};
tensor2->mutable_data<float>(
paddle::framework::make_ddim({height, row_numel}), gpu_place);
SelectedRowsAddTensor<CUDADeviceContext, float> add_tensor_functor; paddle::operators::math::SelectedRowsAddTensor<
paddle::platform::CUDADeviceContext, float>
add_tensor_functor;
add_tensor_functor(ctx, *output, *tensor1, tensor2.get()); add_tensor_functor(ctx, *output, *tensor1, tensor2.get());
Tensor tensor2_cpu; paddle::framework::Tensor tensor2_cpu;
TensorCopy(*tensor2, cpu_place, ctx, &tensor2_cpu); paddle::framework::TensorCopy(*tensor2, cpu_place, ctx, &tensor2_cpu);
ctx.Wait(); ctx.Wait();
auto* tensor2_cpu_data = tensor2_cpu.data<float>(); auto* tensor2_cpu_data = tensor2_cpu.data<float>();
...@@ -115,39 +130,47 @@ TEST(selected_rows_functor, gpu_add) { ...@@ -115,39 +130,47 @@ TEST(selected_rows_functor, gpu_add) {
} }
TEST(selected_rows_functor, gpu_add_to) { TEST(selected_rows_functor, gpu_add_to) {
using namespace paddle::framework; paddle::platform::CUDAPlace gpu_place(0);
using namespace paddle::platform; paddle::platform::CPUPlace cpu_place;
using namespace paddle::operators::math; paddle::platform::CUDADeviceContext ctx(gpu_place);
paddle::operators::math::SetConstant<paddle::platform::CUDADeviceContext,
CUDAPlace gpu_place(0); float>
CPUPlace cpu_place; functor;
CUDADeviceContext ctx(gpu_place);
SetConstant<CUDADeviceContext, float> functor;
int64_t height = 10; int64_t height = 10;
int64_t row_numel = 10; int64_t row_numel = 10;
std::vector<int64_t> rows1{0, 4, 7}; std::vector<int64_t> rows1{0, 4, 7};
std::unique_ptr<SelectedRows> selected_rows1{new SelectedRows(rows1, height)}; std::unique_ptr<paddle::framework::SelectedRows> selected_rows1{
new paddle::framework::SelectedRows(rows1, height)};
auto* in1_value = selected_rows1->mutable_value(); auto* in1_value = selected_rows1->mutable_value();
in1_value->mutable_data<float>( in1_value->mutable_data<float>(
make_ddim({static_cast<int64_t>(rows1.size()), row_numel}), gpu_place); paddle::framework::make_ddim(
{static_cast<int64_t>(rows1.size()), row_numel}),
gpu_place);
functor(ctx, in1_value, 1.0); functor(ctx, in1_value, 1.0);
std::vector<int64_t> rows2{0, 5, 7, 9}; std::vector<int64_t> rows2{0, 5, 7, 9};
std::unique_ptr<SelectedRows> selected_rows2{new SelectedRows(rows2, height)}; std::unique_ptr<paddle::framework::SelectedRows> selected_rows2{
new paddle::framework::SelectedRows(rows2, height)};
auto* in2_value = selected_rows2->mutable_value(); auto* in2_value = selected_rows2->mutable_value();
in2_value->mutable_data<float>( in2_value->mutable_data<float>(
make_ddim({static_cast<int64_t>(rows2.size()), row_numel}), gpu_place); paddle::framework::make_ddim(
{static_cast<int64_t>(rows2.size()), row_numel}),
gpu_place);
functor(ctx, in2_value, 2.0); functor(ctx, in2_value, 2.0);
std::unique_ptr<SelectedRows> output{new SelectedRows()}; std::unique_ptr<paddle::framework::SelectedRows> output{
new paddle::framework::SelectedRows()};
output->set_height(height); output->set_height(height);
auto* out_value = output->mutable_value(); auto* out_value = output->mutable_value();
// simplely concat two SelectedRows // simply concat two SelectedRows
out_value->mutable_data<float>(make_ddim({7, 10}), gpu_place); out_value->mutable_data<float>(paddle::framework::make_ddim({7, 10}),
gpu_place);
SelectedRowsAddTo<CUDADeviceContext, float> add_to_functor; paddle::operators::math::SelectedRowsAddTo<
paddle::platform::CUDADeviceContext, float>
add_to_functor;
add_to_functor(ctx, *selected_rows1, 0, output.get()); add_to_functor(ctx, *selected_rows1, 0, output.get());
add_to_functor(ctx, *selected_rows2, in1_value->numel(), output.get()); add_to_functor(ctx, *selected_rows2, in1_value->numel(), output.get());
...@@ -166,8 +189,8 @@ TEST(selected_rows_functor, gpu_add_to) { ...@@ -166,8 +189,8 @@ TEST(selected_rows_functor, gpu_add_to) {
EXPECT_EQ(out_rows[5], 7); EXPECT_EQ(out_rows[5], 7);
EXPECT_EQ(out_rows[6], 9); EXPECT_EQ(out_rows[6], 9);
Tensor out_cpu; paddle::framework::Tensor out_cpu;
TensorCopy(*out_value, cpu_place, ctx, &out_cpu); paddle::framework::TensorCopy(*out_value, cpu_place, ctx, &out_cpu);
ctx.Wait(); ctx.Wait();
auto* out_cpu_data = out_cpu.data<float>(); auto* out_cpu_data = out_cpu.data<float>();
...@@ -183,15 +206,19 @@ TEST(selected_rows_functor, gpu_add_to) { ...@@ -183,15 +206,19 @@ TEST(selected_rows_functor, gpu_add_to) {
EXPECT_EQ(out_cpu_data[5 * row_numel + 7], 2.0); EXPECT_EQ(out_cpu_data[5 * row_numel + 7], 2.0);
EXPECT_EQ(out_cpu_data[6 * row_numel + 9], 2.0); EXPECT_EQ(out_cpu_data[6 * row_numel + 9], 2.0);
std::unique_ptr<Tensor> tensor1{new Tensor()}; std::unique_ptr<paddle::framework::Tensor> tensor1{
tensor1->mutable_data<float>(make_ddim({height, row_numel}), gpu_place); new paddle::framework::Tensor()};
tensor1->mutable_data<float>(
paddle::framework::make_ddim({height, row_numel}), gpu_place);
functor(ctx, tensor1.get(), 3.0); functor(ctx, tensor1.get(), 3.0);
SelectedRowsAddToTensor<CUDADeviceContext, float> add_to_tensor_functor; paddle::operators::math::SelectedRowsAddToTensor<
paddle::platform::CUDADeviceContext, float>
add_to_tensor_functor;
add_to_tensor_functor(ctx, *output, tensor1.get()); add_to_tensor_functor(ctx, *output, tensor1.get());
Tensor tensor1_cpu; paddle::framework::Tensor tensor1_cpu;
TensorCopy(*tensor1, cpu_place, ctx, &tensor1_cpu); paddle::framework::TensorCopy(*tensor1, cpu_place, ctx, &tensor1_cpu);
ctx.Wait(); ctx.Wait();
auto* tensor1_cpu_data = tensor1_cpu.data<float>(); auto* tensor1_cpu_data = tensor1_cpu.data<float>();
......
...@@ -23,11 +23,11 @@ class CopyMatrixRowsFunctor<platform::CPUDeviceContext, T> { ...@@ -23,11 +23,11 @@ class CopyMatrixRowsFunctor<platform::CPUDeviceContext, T> {
public: public:
void operator()(const platform::CPUDeviceContext& context, void operator()(const platform::CPUDeviceContext& context,
const framework::Tensor& src, const framework::Tensor& src,
framework::Vector<size_t> index_lod, framework::Tensor& dst, framework::Vector<size_t> index_lod, framework::Tensor* dst,
bool is_src_index) { bool is_src_index) {
size_t* index = index_lod.data(); size_t* index = index_lod.data();
auto src_dims = src.dims(); auto src_dims = src.dims();
auto dst_dims = dst.dims(); auto dst_dims = dst->dims();
PADDLE_ENFORCE_EQ(src_dims.size(), 2UL, PADDLE_ENFORCE_EQ(src_dims.size(), 2UL,
"The src must be matrix with rank 2."); "The src must be matrix with rank 2.");
PADDLE_ENFORCE_EQ(dst_dims.size(), 2UL, PADDLE_ENFORCE_EQ(dst_dims.size(), 2UL,
...@@ -37,7 +37,7 @@ class CopyMatrixRowsFunctor<platform::CPUDeviceContext, T> { ...@@ -37,7 +37,7 @@ class CopyMatrixRowsFunctor<platform::CPUDeviceContext, T> {
auto height = dst_dims[0]; auto height = dst_dims[0];
auto width = dst_dims[1]; auto width = dst_dims[1];
auto* src_data = src.data<T>(); auto* src_data = src.data<T>();
auto* dst_data = dst.data<T>(); auto* dst_data = dst->data<T>();
for (int i = 0; i < height; ++i) { for (int i = 0; i < height; ++i) {
if (is_src_index) { if (is_src_index) {
memcpy(dst_data + i * width, src_data + index[i] * width, memcpy(dst_data + i * width, src_data + index[i] * width,
......
...@@ -43,10 +43,10 @@ class CopyMatrixRowsFunctor<platform::CUDADeviceContext, T> { ...@@ -43,10 +43,10 @@ class CopyMatrixRowsFunctor<platform::CUDADeviceContext, T> {
public: public:
void operator()(const platform::CUDADeviceContext& context, void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& src, const framework::Tensor& src,
framework::Vector<size_t> index_lod, framework::Tensor& dst, framework::Vector<size_t> index_lod, framework::Tensor* dst,
bool is_src_index) { bool is_src_index) {
auto src_dims = src.dims(); auto src_dims = src.dims();
auto dst_dims = dst.dims(); auto dst_dims = dst->dims();
PADDLE_ENFORCE_EQ(src_dims.size(), 2, PADDLE_ENFORCE_EQ(src_dims.size(), 2,
"The src must be matrix with rank 2."); "The src must be matrix with rank 2.");
PADDLE_ENFORCE_EQ(dst_dims.size(), 2, PADDLE_ENFORCE_EQ(dst_dims.size(), 2,
...@@ -56,7 +56,7 @@ class CopyMatrixRowsFunctor<platform::CUDADeviceContext, T> { ...@@ -56,7 +56,7 @@ class CopyMatrixRowsFunctor<platform::CUDADeviceContext, T> {
auto height = dst_dims[0]; auto height = dst_dims[0];
auto width = dst_dims[1]; auto width = dst_dims[1];
auto* src_data = src.data<T>(); auto* src_data = src.data<T>();
auto* dst_data = dst.data<T>(); auto* dst_data = dst->data<T>();
dim3 threads(128, 8); dim3 threads(128, 8);
dim3 grid(8, 1); dim3 grid(8, 1);
......
...@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and ...@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#pragma once #pragma once
#include <algorithm>
#include <vector>
#include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor.h"
...@@ -35,7 +37,7 @@ class CopyMatrixRowsFunctor { ...@@ -35,7 +37,7 @@ class CopyMatrixRowsFunctor {
// copy the input src to the indexed rows of output dst. // copy the input src to the indexed rows of output dst.
// The indexed rows are based on the input index. // The indexed rows are based on the input index.
void operator()(const DeviceContext& context, const framework::Tensor& src, void operator()(const DeviceContext& context, const framework::Tensor& src,
framework::Vector<size_t> index_lod, framework::Tensor& dst, framework::Vector<size_t> index_lod, framework::Tensor* dst,
bool is_src_index); bool is_src_index);
}; };
...@@ -58,10 +60,10 @@ class LoDTensor2BatchFunctor { ...@@ -58,10 +60,10 @@ class LoDTensor2BatchFunctor {
public: public:
void operator()(const DeviceContext& context, void operator()(const DeviceContext& context,
const framework::LoDTensor& lod_tensor, const framework::LoDTensor& lod_tensor,
framework::LoDTensor& batch, bool is_cal_batch_lod, framework::LoDTensor* batch, bool is_cal_batch_lod,
bool is_reverse = false) const { bool is_reverse = false) const {
if (!is_cal_batch_lod) { if (!is_cal_batch_lod) {
auto lods = batch.lod(); auto lods = batch->lod();
PADDLE_ENFORCE_GT(lods.size(), 2UL); PADDLE_ENFORCE_GT(lods.size(), 2UL);
PADDLE_ENFORCE_EQ(lods[1].size(), PADDLE_ENFORCE_EQ(lods[1].size(),
static_cast<size_t>(lod_tensor.dims()[0])); static_cast<size_t>(lod_tensor.dims()[0]));
...@@ -141,7 +143,7 @@ class LoDTensor2BatchFunctor { ...@@ -141,7 +143,7 @@ class LoDTensor2BatchFunctor {
for (size_t i = 0; i < seq_info.size(); ++i) { for (size_t i = 0; i < seq_info.size(); ++i) {
seq_order[i] = seq_info[i].seq_idx; seq_order[i] = seq_info[i].seq_idx;
} }
batch.set_lod(batch_lods); batch->set_lod(batch_lods);
CopyMatrixRowsFunctor<DeviceContext, T> to_batch; CopyMatrixRowsFunctor<DeviceContext, T> to_batch;
to_batch(context, lod_tensor, batch_lods[1], batch, true); to_batch(context, lod_tensor, batch_lods[1], batch, true);
...@@ -153,11 +155,11 @@ class Batch2LoDTensorFunctor { ...@@ -153,11 +155,11 @@ class Batch2LoDTensorFunctor {
public: public:
void operator()(const DeviceContext& context, void operator()(const DeviceContext& context,
const framework::LoDTensor& batch, const framework::LoDTensor& batch,
framework::LoDTensor& lod_tensor) const { framework::LoDTensor* lod_tensor) const {
auto in_lod = batch.lod(); auto in_lod = batch.lod();
PADDLE_ENFORCE_GT(in_lod.size(), 2UL); PADDLE_ENFORCE_GT(in_lod.size(), 2UL);
PADDLE_ENFORCE_EQ(in_lod[1].size(), PADDLE_ENFORCE_EQ(in_lod[1].size(),
static_cast<size_t>(lod_tensor.dims()[0])); static_cast<size_t>(lod_tensor->dims()[0]));
CopyMatrixRowsFunctor<DeviceContext, T> to_seq; CopyMatrixRowsFunctor<DeviceContext, T> to_seq;
to_seq(context, batch, in_lod[1], lod_tensor, false); to_seq(context, batch, in_lod[1], lod_tensor, false);
} }
......
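A hypothetical call site for the pointer-output signatures above, only to make the new convention concrete (mutable outputs passed as Tensor*/LoDTensor* rather than non-const references). The function name, float element type, pre-allocation, and the assumption that seq carries a level-0 LoD are all illustrative, not taken from the change.

#include "paddle/fluid/operators/math/sequence2batch.h"
#include "paddle/fluid/platform/device_context.h"

void ToBatchAndBackSketch(const paddle::platform::CPUDeviceContext& ctx,
                          const paddle::framework::LoDTensor& seq) {
  // Assumes `seq` carries a level-0 LoD describing its sequences, and that
  // the caller pre-allocates outputs, as the row-copy functor expects.
  paddle::framework::LoDTensor batch;
  batch.Resize(seq.dims());
  batch.mutable_data<float>(ctx.GetPlace());

  // Outputs are now taken by pointer, so mutation is visible at the call site.
  paddle::operators::math::LoDTensor2BatchFunctor<
      paddle::platform::CPUDeviceContext, float>
      to_batch;
  to_batch(ctx, seq, &batch, /*is_cal_batch_lod=*/true);

  paddle::framework::LoDTensor restored;
  restored.Resize(seq.dims());
  restored.mutable_data<float>(ctx.GetPlace());
  restored.set_lod(seq.lod());
  paddle::operators::math::Batch2LoDTensorFunctor<
      paddle::platform::CPUDeviceContext, float>
      to_seq;
  to_seq(ctx, batch, &restored);
}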
...@@ -14,6 +14,7 @@ limitations under the License. */ ...@@ -14,6 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/math/sequence_padding.h" #include "paddle/fluid/operators/math/sequence_padding.h"
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <vector>
template <typename DeviceContext, typename Place, typename T> template <typename DeviceContext, typename Place, typename T>
void TestSequencePadding(const paddle::framework::LoD& lod, void TestSequencePadding(const paddle::framework::LoD& lod,
...@@ -75,7 +76,7 @@ void TestSequencePadding(const paddle::framework::LoD& lod, ...@@ -75,7 +76,7 @@ void TestSequencePadding(const paddle::framework::LoD& lod,
delete place; delete place;
delete context; delete context;
}; }
TEST(Seq2BatchPadding, CPU) { TEST(Seq2BatchPadding, CPU) {
paddle::framework::LoD lod1; paddle::framework::LoD lod1;
......
...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and ...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/operators/math/sequence_pooling.h" #include "paddle/fluid/operators/math/sequence_pooling.h"
#include <string>
#include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/math_function.h"
namespace paddle { namespace paddle {
......
...@@ -12,9 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,9 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include <string>
#include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/sequence_pooling.h" #include "paddle/fluid/operators/math/sequence_pooling.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and ...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#pragma once #pragma once
#include <string>
#include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/device_context.h"
......
...@@ -21,15 +21,15 @@ namespace math { ...@@ -21,15 +21,15 @@ namespace math {
template <typename T> template <typename T>
class ScaleLoDTensorFunctor<platform::CPUDeviceContext, T> { class ScaleLoDTensorFunctor<platform::CPUDeviceContext, T> {
public: public:
void operator()(const platform::CPUDeviceContext& context, void operator()(const platform::CPUDeviceContext& context, const T* scales,
framework::LoDTensor& seq, const T* scales) { framework::LoDTensor* seq) {
const size_t level = 0; const size_t level = 0;
auto lod = seq.lod(); auto lod = seq->lod();
const size_t num_seq = lod[level].size() - 1; const size_t num_seq = lod[level].size() - 1;
size_t seq_width = seq.dims()[1]; size_t seq_width = seq->dims()[1];
framework::LoD abs_offset_lod = framework::ToAbsOffset(lod); framework::LoD abs_offset_lod = framework::ToAbsOffset(lod);
T* seq_data = seq.mutable_data<T>(context.GetPlace()); T* seq_data = seq->mutable_data<T>(context.GetPlace());
for (size_t i = 0; i < num_seq; ++i) { for (size_t i = 0; i < num_seq; ++i) {
for (size_t j = lod[level][i] * seq_width; for (size_t j = lod[level][i] * seq_width;
j < lod[level][i + 1] * seq_width; ++j) { j < lod[level][i + 1] * seq_width; ++j) {
......
...@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and ...@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/operators/math/sequence_scale.h" #include "paddle/fluid/operators/math/sequence_scale.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -35,14 +35,14 @@ __global__ void SequenceScaleKernel(T* seq, size_t* lod, const T* scales, ...@@ -35,14 +35,14 @@ __global__ void SequenceScaleKernel(T* seq, size_t* lod, const T* scales,
template <typename T> template <typename T>
class ScaleLoDTensorFunctor<platform::CUDADeviceContext, T> { class ScaleLoDTensorFunctor<platform::CUDADeviceContext, T> {
public: public:
void operator()(const platform::CUDADeviceContext& context, void operator()(const platform::CUDADeviceContext& context, const T* scales,
framework::LoDTensor& seq, const T* scales) { framework::LoDTensor* seq) {
const size_t level = 0; const size_t level = 0;
auto lod = seq.lod(); auto lod = seq->lod();
const size_t num_seq = lod[level].size() - 1; const size_t num_seq = lod[level].size() - 1;
const size_t seq_width = seq.numel() / seq.dims()[0]; const size_t seq_width = seq->numel() / seq->dims()[0];
framework::LoD abs_offset_lod = framework::ToAbsOffset(lod); framework::LoD abs_offset_lod = framework::ToAbsOffset(lod);
T* seq_data = seq.mutable_data<T>(context.GetPlace()); T* seq_data = seq->mutable_data<T>(context.GetPlace());
SequenceScaleKernel<T, PADDLE_CUDA_NUM_THREADS><<< SequenceScaleKernel<T, PADDLE_CUDA_NUM_THREADS><<<
num_seq, PADDLE_CUDA_NUM_THREADS, 0, context.stream()>>>( num_seq, PADDLE_CUDA_NUM_THREADS, 0, context.stream()>>>(
......
...@@ -46,8 +46,8 @@ namespace math { ...@@ -46,8 +46,8 @@ namespace math {
template <typename DeviceContext, typename T> template <typename DeviceContext, typename T>
class ScaleLoDTensorFunctor { class ScaleLoDTensorFunctor {
public: public:
void operator()(const DeviceContext& context, framework::LoDTensor& seq, void operator()(const DeviceContext& context, const T* scales,
const T* scales); framework::LoDTensor* seq);
}; };
} // namespace math } // namespace math
......
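A hypothetical caller for the reordered ScaleLoDTensorFunctor signature above (read-only scales first, the mutated LoDTensor last, as a pointer); the wrapper name and the float specialization are illustrative assumptions.

#include "paddle/fluid/operators/math/sequence_scale.h"
#include "paddle/fluid/platform/device_context.h"

void ScaleSequencesSketch(const paddle::platform::CPUDeviceContext& ctx,
                          const float* scales,
                          paddle::framework::LoDTensor* seq) {
  // `scales` holds one factor per level-0 sequence in `seq`; the tensor is
  // scaled in place through the pointer argument.
  paddle::operators::math::ScaleLoDTensorFunctor<
      paddle::platform::CPUDeviceContext, float>
      scale_fn;
  scale_fn(ctx, scales, seq);
}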
...@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and ...@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/operators/math/unpooling.h" #include "paddle/fluid/operators/math/unpooling.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and ...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/operators/math/vol2col.h" #include "paddle/fluid/operators/math/vol2col.h"
#include <vector>
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -12,8 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,8 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include <algorithm>
#include <vector>
#include "paddle/fluid/operators/math/vol2col.h" #include "paddle/fluid/operators/math/vol2col.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -14,6 +14,7 @@ limitations under the License. */ ...@@ -14,6 +14,7 @@ limitations under the License. */
#pragma once #pragma once
#include <vector>
#include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/device_context.h"
......
...@@ -15,6 +15,7 @@ limitations under the License. */ ...@@ -15,6 +15,7 @@ limitations under the License. */
#include "paddle/fluid/operators/math/vol2col.h" #include "paddle/fluid/operators/math/vol2col.h"
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include <iostream> #include <iostream>
#include <vector>
template <typename DeviceContext, typename Place> template <typename DeviceContext, typename Place>
void testVol2col() { void testVol2col() {
...@@ -71,7 +72,7 @@ void testVol2col() { ...@@ -71,7 +72,7 @@ void testVol2col() {
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
input = input_tmp; input = input_tmp;
} else { } else {
paddle::framework::TensorCopy(input_tmp, *place, *context, &input); paddle::framework::TensorCopySync(input_tmp, *place, &input);
} }
output.mutable_data<float>({1, filter_size, filter_size, filter_size, output.mutable_data<float>({1, filter_size, filter_size, filter_size,
output_depth, output_height, output_width}, output_depth, output_height, output_width},
...@@ -85,7 +86,7 @@ void testVol2col() { ...@@ -85,7 +86,7 @@ void testVol2col() {
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
out_cfo_ptr = output.data<float>(); out_cfo_ptr = output.data<float>();
} else { } else {
TensorCopy(output, paddle::platform::CPUPlace(), *context, &output_tmp); TensorCopySync(output, paddle::platform::CPUPlace(), &output_tmp);
out_cfo_ptr = output_tmp.data<float>(); out_cfo_ptr = output_tmp.data<float>();
} }
...@@ -99,7 +100,7 @@ void testVol2col() { ...@@ -99,7 +100,7 @@ void testVol2col() {
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
input = input_tmp; input = input_tmp;
} else { } else {
TensorCopy(input_tmp, *place, *context, &input); TensorCopySync(input_tmp, *place, &input);
} }
paddle::operators::math::Col2VolFunctor<DeviceContext, float> col2vol; paddle::operators::math::Col2VolFunctor<DeviceContext, float> col2vol;
...@@ -109,7 +110,7 @@ void testVol2col() { ...@@ -109,7 +110,7 @@ void testVol2col() {
if (paddle::platform::is_cpu_place(*place)) { if (paddle::platform::is_cpu_place(*place)) {
in_ptr = input.data<float>(); in_ptr = input.data<float>();
} else { } else {
TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp); TensorCopySync(input, paddle::platform::CPUPlace(), &input_tmp);
in_ptr = input_tmp.data<float>(); in_ptr = input_tmp.data<float>();
} }
......
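The test changes above swap the asynchronous TensorCopy, which takes a device context and generally requires waiting on it before the copied data can be read, for the blocking TensorCopySync. A minimal sketch of that copy-then-read pattern, assuming only the TensorCopySync(src, dst_place, dst) signature used in this diff; FetchToCPU is an illustrative helper, not a Paddle API:

#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/place.h"

// Copies a tensor to the host synchronously (if it is not already there) and
// returns a pointer that is safe to dereference immediately, with no explicit
// device-context Wait() afterwards.
template <typename T>
const T* FetchToCPU(const paddle::framework::Tensor& src,
                    const paddle::platform::Place& src_place,
                    paddle::framework::Tensor* cpu_buf) {
  if (paddle::platform::is_cpu_place(src_place)) {
    return src.data<T>();
  }
  paddle::framework::TensorCopySync(src, paddle::platform::CPUPlace(), cpu_buf);
  return cpu_buf->data<T>();
}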
...@@ -15,9 +15,9 @@ limitations under the License. */ ...@@ -15,9 +15,9 @@ limitations under the License. */
#pragma once #pragma once
#include <algorithm> #include <algorithm>
#include <condition_variable> #include <condition_variable> // NOLINT
#include <memory> #include <memory>
#include <mutex> #include <mutex> // NOLINT
#include <string> #include <string>
#include <unordered_map> #include <unordered_map>
#include <vector> #include <vector>
......
...@@ -228,10 +228,8 @@ TEST_F(NCCLTester, ncclReduceOp) { ...@@ -228,10 +228,8 @@ TEST_F(NCCLTester, ncclReduceOp) {
result_tensor->Resize(kDims); result_tensor->Resize(kDims);
auto *ct = result_tensor->mutable_data<float>(cpu_place); auto *ct = result_tensor->mutable_data<float>(cpu_place);
paddle::memory::Copy( paddle::memory::Copy(cpu_place, ct, p::CUDAPlace(gpu_list_[kRoot]), rt,
cpu_place, ct, p::CUDAPlace(gpu_list_[kRoot]), rt, recv_tensor.numel() * sizeof(float), nullptr);
recv_tensor.numel() * sizeof(float),
static_cast<p::CUDADeviceContext *>(dev_ctxs_[kRoot])->stream());
for (int64_t j = 0; j < f::product(kDims); ++j) { for (int64_t j = 0; j < f::product(kDims); ++j) {
ASSERT_NEAR(ct[j], expected_result, 1e-5); ASSERT_NEAR(ct[j], expected_result, 1e-5);
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
// limitations under the License. // limitations under the License.
#include "paddle/fluid/operators/one_hot_op.h" #include "paddle/fluid/operators/one_hot_op.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/gpu_info.h" #include "paddle/fluid/platform/gpu_info.h"
namespace paddle { namespace paddle {
......
...@@ -23,5 +23,7 @@ reader_library(create_recordio_file_reader_op SRCS create_recordio_file_reader_o ...@@ -23,5 +23,7 @@ reader_library(create_recordio_file_reader_op SRCS create_recordio_file_reader_o
reader_library(create_double_buffer_reader_op SRCS create_double_buffer_reader_op.cc) reader_library(create_double_buffer_reader_op SRCS create_double_buffer_reader_op.cc)
reader_library(create_multi_pass_reader_op SRCS create_multi_pass_reader_op.cc) reader_library(create_multi_pass_reader_op SRCS create_multi_pass_reader_op.cc)
reader_library(create_threaded_reader_op SRCS create_threaded_reader_op.cc) reader_library(create_threaded_reader_op SRCS create_threaded_reader_op.cc)
cc_test(reader_blocking_queue_test SRCS reader_blocking_queue_test.cc)
# Export local libraries to parent # Export local libraries to parent
set(READER_LIBRARY ${LOCAL_READER_LIBS} PARENT_SCOPE) set(READER_LIBRARY ${LOCAL_READER_LIBS} PARENT_SCOPE)
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <condition_variable> // NOLINT
#include <deque>
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
namespace operators {
namespace reader {
template <typename T>
class BlockingQueue {
  // BlockingQueue is for buffered reading and is supposed to be used only
  // inside the reader package. It is true that we could and should have been
  // using framework::Channel, but it currently has a deadlock bug.
  // BlockingQueue is a workaround: a simplified version of framework::Channel
  // that does not support GPU and only implements a buffered blocking queue.
public:
explicit BlockingQueue(size_t capacity)
: capacity_(capacity), closed_(false) {
PADDLE_ENFORCE_GT(
capacity_, 0,
"The capacity of a reader::BlockingQueue must be greater than 0.");
}
bool Send(const T& elem) {
std::unique_lock<std::mutex> lock(mutex_);
send_cv_.wait(lock, [&] { return queue_.size() < capacity_ || closed_; });
if (closed_) {
VLOG(5)
<< "WARNING: Sending an element to a closed reader::BlokcingQueue.";
return false;
}
PADDLE_ENFORCE_LT(queue_.size(), capacity_);
queue_.push_back(elem);
receive_cv_.notify_one();
return true;
}
bool Send(T&& elem) {
std::unique_lock<std::mutex> lock(mutex_);
send_cv_.wait(lock, [&] { return queue_.size() < capacity_ || closed_; });
if (closed_) {
VLOG(5)
<< "WARNING: Sending an element to a closed reader::BlokcingQueue.";
return false;
}
PADDLE_ENFORCE_LT(queue_.size(), capacity_);
queue_.emplace_back(std::move(elem));
receive_cv_.notify_one();
return true;
}
bool Receive(T* elem) {
std::unique_lock<std::mutex> lock(mutex_);
receive_cv_.wait(lock, [&] { return !queue_.empty() || closed_; });
if (!queue_.empty()) {
PADDLE_ENFORCE_NOT_NULL(elem);
*elem = queue_.front();
queue_.pop_front();
send_cv_.notify_one();
return true;
} else {
PADDLE_ENFORCE(closed_);
return false;
}
}
void Close() {
std::lock_guard<std::mutex> lock(mutex_);
closed_ = true;
send_cv_.notify_all();
receive_cv_.notify_all();
}
bool IsClosed() {
std::lock_guard<std::mutex> lock(mutex_);
return closed_;
}
size_t Cap() {
std::lock_guard<std::mutex> lock(mutex_);
return capacity_;
}
private:
size_t capacity_;
bool closed_;
std::deque<T> queue_;
std::mutex mutex_;
std::condition_variable receive_cv_;
std::condition_variable send_cv_;
};
} // namespace reader
} // namespace operators
} // namespace paddle
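The class comment above describes what BlockingQueue is for; here is a minimal usage sketch, assuming nothing beyond the Send/Receive/Close interface defined in this header (the producer/consumer wiring is illustrative):

#include <thread>
#include <vector>
#include "paddle/fluid/operators/reader/blocking_queue.h"

// One producer fills the queue, one consumer drains it. Receive() returns
// false only after Close() has been called and the queue is empty, which is
// how the consumer learns that no more data will arrive.
void BlockingQueueSketch() {
  paddle::operators::reader::BlockingQueue<int> q(4);  // capacity must be > 0

  std::thread producer([&q] {
    for (int i = 0; i < 10; ++i) {
      if (!q.Send(i)) break;  // Send() fails once the queue has been closed
    }
    q.Close();
  });

  std::vector<int> received;
  int v;
  while (q.Receive(&v)) {  // blocks until an element arrives or Close()
    received.push_back(v);
  }
  producer.join();
}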
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
#include <thread> // NOLINT #include <thread> // NOLINT
#include "paddle/fluid/framework/channel.h" #include "paddle/fluid/operators/reader/blocking_queue.h"
#include "paddle/fluid/operators/reader/reader_op_registry.h" #include "paddle/fluid/operators/reader/reader_op_registry.h"
namespace paddle { namespace paddle {
...@@ -23,13 +23,13 @@ namespace reader { ...@@ -23,13 +23,13 @@ namespace reader {
// 'Double buffer' means we shall maintain two batches of input data at the same // 'Double buffer' means we shall maintain two batches of input data at the same
// time. So the kCacheSize should be at least 2. // time. So the kCacheSize should be at least 2.
static constexpr size_t kCacheSize = 2; static constexpr size_t kCacheSize = 3;
// There will be two batches out of the channel during training: // There will be two batches out of the channel during training:
// 1. the one waiting to be sent to the channel // 1. the one waiting to be sent to the channel
// 2. the one just received from the channel, which is also being used by // 2. the one just received from the channel, which is also being used by
// subsequent operators. // subsequent operators.
// So the channel size should be kCacheSize - 2 // So the channel size should be kCacheSize - 2
static constexpr size_t kChannelSize = 0; // kCacheSize - 2 static constexpr size_t kChannelSize = 1; // kCacheSize - 2
class DoubleBufferReader : public framework::DecoratedReader { class DoubleBufferReader : public framework::DecoratedReader {
public: public:
...@@ -55,10 +55,8 @@ class DoubleBufferReader : public framework::DecoratedReader { ...@@ -55,10 +55,8 @@ class DoubleBufferReader : public framework::DecoratedReader {
~DoubleBufferReader() { EndPrefetcher(); } ~DoubleBufferReader() { EndPrefetcher(); }
private: private:
bool HasNext() const;
void StartPrefetcher() { void StartPrefetcher() {
channel_ = framework::MakeChannel<size_t>(kChannelSize); channel_ = new reader::BlockingQueue<size_t>(kChannelSize);
prefetcher_ = std::thread([this] { PrefetchThreadFunc(); }); prefetcher_ = std::thread([this] { PrefetchThreadFunc(); });
} }
...@@ -74,7 +72,7 @@ class DoubleBufferReader : public framework::DecoratedReader { ...@@ -74,7 +72,7 @@ class DoubleBufferReader : public framework::DecoratedReader {
void PrefetchThreadFunc(); void PrefetchThreadFunc();
std::thread prefetcher_; std::thread prefetcher_;
framework::Channel<size_t>* channel_; reader::BlockingQueue<size_t>* channel_;
platform::Place place_; platform::Place place_;
std::vector<std::vector<framework::LoDTensor>> cpu_tensor_cache_; std::vector<std::vector<framework::LoDTensor>> cpu_tensor_cache_;
std::vector<std::vector<framework::LoDTensor>> gpu_tensor_cache_; std::vector<std::vector<framework::LoDTensor>> gpu_tensor_cache_;
...@@ -139,17 +137,16 @@ class CreateDoubleBufferReaderOpMaker : public DecoratedReaderMakerBase { ...@@ -139,17 +137,16 @@ class CreateDoubleBufferReaderOpMaker : public DecoratedReaderMakerBase {
}; };
void DoubleBufferReader::ReadNext(std::vector<framework::LoDTensor>* out) { void DoubleBufferReader::ReadNext(std::vector<framework::LoDTensor>* out) {
out->clear();
if (HasNext()) {
size_t cached_tensor_id; size_t cached_tensor_id;
channel_->Receive(&cached_tensor_id); if (channel_->Receive(&cached_tensor_id)) {
if (platform::is_gpu_place(place_)) { if (platform::is_gpu_place(place_)) {
*out = gpu_tensor_cache_[cached_tensor_id]; *out = gpu_tensor_cache_[cached_tensor_id];
ctxs_[cached_tensor_id]->Wait();
} else { } else {
// CPU place // CPU place
*out = cpu_tensor_cache_[cached_tensor_id]; *out = cpu_tensor_cache_[cached_tensor_id];
} }
} else {
out->clear();
} }
} }
...@@ -159,12 +156,6 @@ void DoubleBufferReader::ReInit() { ...@@ -159,12 +156,6 @@ void DoubleBufferReader::ReInit() {
StartPrefetcher(); StartPrefetcher();
} }
bool DoubleBufferReader::HasNext() const {
while (!channel_->IsClosed() && !channel_->CanReceive()) {
}
return channel_->CanReceive();
}
void DoubleBufferReader::PrefetchThreadFunc() { void DoubleBufferReader::PrefetchThreadFunc() {
VLOG(5) << "A new prefetch thread starts."; VLOG(5) << "A new prefetch thread starts.";
size_t cached_tensor_id = 0; size_t cached_tensor_id = 0;
...@@ -177,18 +168,14 @@ void DoubleBufferReader::PrefetchThreadFunc() { ...@@ -177,18 +168,14 @@ void DoubleBufferReader::PrefetchThreadFunc() {
} }
if (platform::is_gpu_place(place_)) { if (platform::is_gpu_place(place_)) {
auto& gpu_batch = gpu_tensor_cache_[cached_tensor_id]; auto& gpu_batch = gpu_tensor_cache_[cached_tensor_id];
auto* gpu_ctx = ctxs_[cached_tensor_id].get();
gpu_batch.resize(cpu_batch.size()); gpu_batch.resize(cpu_batch.size());
for (size_t i = 0; i < cpu_batch.size(); ++i) { for (size_t i = 0; i < cpu_batch.size(); ++i) {
framework::TensorCopy(cpu_batch[i], place_, *gpu_ctx, &gpu_batch[i], // TODO(fengjiayi): Use asynchronous TensorCopy instead
true); framework::TensorCopySync(cpu_batch[i], place_, &gpu_batch[i]);
gpu_batch[i].set_lod(cpu_batch[i].lod()); gpu_batch[i].set_lod(cpu_batch[i].lod());
} }
} }
try { if (!channel_->Send(cached_tensor_id)) {
size_t tmp = cached_tensor_id;
channel_->Send(&tmp);
} catch (paddle::platform::EnforceNotMet e) {
VLOG(5) << "WARNING: The double buffer channel has been closed. The " VLOG(5) << "WARNING: The double buffer channel has been closed. The "
"prefetch thread will terminate."; "prefetch thread will terminate.";
break; break;
......
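The sizing comments above are the key to this file: two batches live outside the channel at any time (one about to be sent by the prefetcher, one just received by the consumer), so the channel only needs capacity kCacheSize - 2, i.e. 1 with kCacheSize = 3. A stripped-down sketch of the same hand-off, with the tensor caches replaced by plain integers (all names below are illustrative):

#include <thread>
#include "paddle/fluid/operators/reader/blocking_queue.h"

// The prefetch thread cycles through cache slots and sends each slot id; the
// consumer receives an id and reads that slot, mirroring ReadNext() and
// PrefetchThreadFunc() above. Closing the queue ends both sides cleanly.
void DoubleBufferSketch() {
  constexpr size_t kSlots = 3;  // plays the role of kCacheSize
  paddle::operators::reader::BlockingQueue<size_t> chan(kSlots - 2);
  int cache[kSlots] = {0, 0, 0};

  std::thread prefetcher([&] {
    size_t slot = 0;
    for (int step = 0; step < 100; ++step) {
      cache[slot] = step;           // "fill" the slot
      if (!chan.Send(slot)) break;  // the consumer side was closed
      slot = (slot + 1) % kSlots;
    }
    chan.Close();
  });

  size_t slot;
  while (chan.Receive(&slot)) {
    int batch = cache[slot];  // "use" the prefetched slot
    (void)batch;
  }
  prefetcher.join();
}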
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
#include <thread> // NOLINT #include <thread> // NOLINT
#include "paddle/fluid/framework/channel.h" #include "paddle/fluid/operators/reader/blocking_queue.h"
#include "paddle/fluid/operators/reader/reader_op_registry.h" #include "paddle/fluid/operators/reader/reader_op_registry.h"
namespace paddle { namespace paddle {
...@@ -37,7 +37,6 @@ class MultiFileReader : public framework::ReaderBase { ...@@ -37,7 +37,6 @@ class MultiFileReader : public framework::ReaderBase {
~MultiFileReader() { EndScheduler(); } ~MultiFileReader() { EndScheduler(); }
private: private:
bool HasNext();
void StartNewScheduler(); void StartNewScheduler();
void EndScheduler(); void EndScheduler();
void ScheduleThreadFunc(); void ScheduleThreadFunc();
...@@ -48,15 +47,14 @@ class MultiFileReader : public framework::ReaderBase { ...@@ -48,15 +47,14 @@ class MultiFileReader : public framework::ReaderBase {
std::thread scheduler_; std::thread scheduler_;
std::vector<std::thread> prefetchers_; std::vector<std::thread> prefetchers_;
size_t buffer_size_; size_t buffer_size_;
framework::Channel<size_t>* waiting_file_idx_; reader::BlockingQueue<size_t>* waiting_file_idx_;
framework::Channel<size_t>* available_thread_idx_; reader::BlockingQueue<size_t>* available_thread_idx_;
framework::Channel<std::vector<framework::LoDTensor>>* buffer_; reader::BlockingQueue<std::vector<framework::LoDTensor>>* buffer_;
}; };
void MultiFileReader::ReadNext(std::vector<framework::LoDTensor>* out) { void MultiFileReader::ReadNext(std::vector<framework::LoDTensor>* out) {
if (!buffer_->Receive(out)) {
out->clear(); out->clear();
if (HasNext()) {
buffer_->Receive(out);
} }
} }
...@@ -65,25 +63,19 @@ void MultiFileReader::ReInit() { ...@@ -65,25 +63,19 @@ void MultiFileReader::ReInit() {
StartNewScheduler(); StartNewScheduler();
} }
bool MultiFileReader::HasNext() {
while (!buffer_->IsClosed() && !buffer_->CanReceive()) {
}
return buffer_->CanReceive();
}
void MultiFileReader::StartNewScheduler() { void MultiFileReader::StartNewScheduler() {
size_t thread_num = prefetchers_.size(); size_t thread_num = prefetchers_.size();
waiting_file_idx_ = framework::MakeChannel<size_t>(file_names_.size()); waiting_file_idx_ = new reader::BlockingQueue<size_t>(file_names_.size());
available_thread_idx_ = framework::MakeChannel<size_t>(thread_num); available_thread_idx_ = new reader::BlockingQueue<size_t>(thread_num);
buffer_ = buffer_ = new reader::BlockingQueue<std::vector<framework::LoDTensor>>(
framework::MakeChannel<std::vector<framework::LoDTensor>>(buffer_size_); buffer_size_);
for (size_t i = 0; i < file_names_.size(); ++i) { for (size_t i = 0; i < file_names_.size(); ++i) {
waiting_file_idx_->Send(&i); waiting_file_idx_->Send(i);
} }
waiting_file_idx_->Close(); waiting_file_idx_->Close();
for (size_t i = 0; i < thread_num; ++i) { for (size_t i = 0; i < thread_num; ++i) {
available_thread_idx_->Send(&i); available_thread_idx_->Send(i);
} }
scheduler_ = std::thread([this] { ScheduleThreadFunc(); }); scheduler_ = std::thread([this] { ScheduleThreadFunc(); });
...@@ -149,7 +141,7 @@ void MultiFileReader::PrefetchThreadFunc(std::string file_name, ...@@ -149,7 +141,7 @@ void MultiFileReader::PrefetchThreadFunc(std::string file_name,
break; break;
} }
try { try {
buffer_->Send(&ins); buffer_->Send(std::move(ins));
} catch (paddle::platform::EnforceNotMet e) { } catch (paddle::platform::EnforceNotMet e) {
VLOG(5) << "WARNING: The buffer channel has been closed. The prefetch " VLOG(5) << "WARNING: The buffer channel has been closed. The prefetch "
"thread of file '" "thread of file '"
...@@ -158,9 +150,7 @@ void MultiFileReader::PrefetchThreadFunc(std::string file_name, ...@@ -158,9 +150,7 @@ void MultiFileReader::PrefetchThreadFunc(std::string file_name,
} }
} }
try { if (!available_thread_idx_->Send(thread_idx)) {
available_thread_idx_->Send(&thread_idx);
} catch (paddle::platform::EnforceNotMet e) {
VLOG(5) << "WARNING: The available_thread_idx_ channel has been closed. " VLOG(5) << "WARNING: The available_thread_idx_ channel has been closed. "
"Fail to send thread_idx."; "Fail to send thread_idx.";
} }
......
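The MultiFileReader above coordinates its threads through three BlockingQueues: waiting_file_idx_ holds the files still to be read (filled once and then closed), available_thread_idx_ hands prefetcher slots back to the scheduler, and buffer_ carries the produced batches to ReadNext(). A reduced sketch of that work-distribution pattern with a single worker; ProcessFile and the other names below are illustrative stand-ins, not the real scheduler logic:

#include <string>
#include <thread>
#include <vector>
#include "paddle/fluid/operators/reader/blocking_queue.h"

// The work queue is filled up front and closed, so the worker simply calls
// Receive() until it reports the queue is drained; produced items go into a
// bounded buffer that the consumer drains with the same Receive() idiom.
void MultiFileSketch(const std::vector<std::string>& files) {
  using paddle::operators::reader::BlockingQueue;
  if (files.empty()) return;  // BlockingQueue requires capacity > 0

  BlockingQueue<size_t> waiting(files.size());
  BlockingQueue<std::string> buffer(2);

  for (size_t i = 0; i < files.size(); ++i) waiting.Send(i);
  waiting.Close();  // no more work will ever be added

  std::thread worker([&] {
    size_t idx;
    while (waiting.Receive(&idx)) {
      buffer.Send("batch from " + files[idx]);  // i.e. ProcessFile(files[idx])
    }
    buffer.Close();
  });

  std::string batch;
  while (buffer.Receive(&batch)) {
    // hand the batch to whoever called ReadNext()
  }
  worker.join();
}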
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <chrono> // NOLINT
#include <set>
#include <thread> // NOLINT
#include <vector>
#include "gtest/gtest.h"
#include "paddle/fluid/operators/reader/blocking_queue.h"
using paddle::operators::reader::BlockingQueue;
TEST(BlockingQueue, CapacityTest) {
size_t cap = 10;
BlockingQueue<int> q(cap);
EXPECT_EQ(q.Cap(), cap);
}
void FirstInFirstOut(size_t queue_cap, size_t elem_num, size_t send_time_gap,
size_t receive_time_gap) {
BlockingQueue<size_t> q(queue_cap);
std::thread sender([&]() {
for (size_t i = 0; i < elem_num; ++i) {
std::this_thread::sleep_for(std::chrono::milliseconds(send_time_gap));
EXPECT_TRUE(q.Send(i));
}
q.Close();
});
size_t count = 0;
while (true) {
std::this_thread::sleep_for(std::chrono::milliseconds(receive_time_gap));
size_t elem;
if (!q.Receive(&elem)) {
break;
}
EXPECT_EQ(elem, count++);
}
sender.join();
EXPECT_EQ(count, elem_num);
EXPECT_TRUE(q.IsClosed());
}
TEST(BlockingQueue, FirstInFirstOutTest) {
FirstInFirstOut(2, 5, 2, 50);
FirstInFirstOut(2, 5, 50, 2);
FirstInFirstOut(10, 3, 50, 2);
FirstInFirstOut(10, 3, 2, 50);
}
TEST(BlockingQueue, SenderBlockingTest) {
const size_t queue_cap = 2;
BlockingQueue<size_t> q(queue_cap);
size_t send_count = 0;
std::thread sender([&]() {
for (size_t i = 0; i < 5; ++i) {
if (!q.Send(i)) {
break;
}
++send_count;
}
});
std::this_thread::sleep_for(std::chrono::milliseconds(200));
q.Close();
sender.join();
EXPECT_EQ(send_count, queue_cap);
std::vector<size_t> res;
while (true) {
size_t elem;
if (!q.Receive(&elem)) {
break;
}
res.push_back(elem);
}
EXPECT_EQ(res.size(), queue_cap);
for (size_t i = 0; i < res.size(); ++i) {
EXPECT_EQ(res[i], i);
}
}
TEST(BlockingQueue, ReceiverBlockingTest) {
const size_t queue_cap = 5;
BlockingQueue<size_t> q(queue_cap);
std::vector<size_t> receive_res;
std::thread receiver([&]() {
size_t elem;
while (true) {
if (!q.Receive(&elem)) {
break;
}
receive_res.push_back(elem);
}
});
std::vector<size_t> to_send{2, 1, 7};
for (auto e : to_send) {
q.Send(e);
}
q.Close();
receiver.join();
EXPECT_EQ(receive_res.size(), to_send.size());
for (size_t i = 0; i < to_send.size(); ++i) {
EXPECT_EQ(receive_res[i], to_send[i]);
}
}
void CheckIsUnorderedSame(const std::vector<std::vector<size_t>>& v1,
const std::vector<std::vector<size_t>>& v2) {
std::set<size_t> s1;
std::set<size_t> s2;
for (auto vec : v1) {
for (size_t elem : vec) {
s1.insert(elem);
}
}
for (auto vec : v2) {
for (size_t elem : vec) {
s2.insert(elem);
}
}
EXPECT_EQ(s1.size(), s2.size());
auto it1 = s1.begin();
auto it2 = s2.begin();
while (it1 != s1.end()) {
EXPECT_EQ(*it1, *it2);
++it1;
++it2;
}
}
void MultiSenderMultiReceiver(const size_t queue_cap,
const std::vector<std::vector<size_t>>& to_send,
size_t receiver_num, size_t send_time_gap,
size_t receive_time_gap) {
BlockingQueue<size_t> q(queue_cap);
size_t sender_num = to_send.size();
std::vector<std::thread> senders;
for (size_t s_idx = 0; s_idx < sender_num; ++s_idx) {
senders.emplace_back(std::thread([&, s_idx] {
for (size_t elem : to_send[s_idx]) {
std::this_thread::sleep_for(std::chrono::milliseconds(send_time_gap));
EXPECT_TRUE(q.Send(elem));
}
}));
}
std::vector<std::thread> receivers;
std::mutex mu;
std::vector<std::vector<size_t>> res;
for (size_t r_idx = 0; r_idx < receiver_num; ++r_idx) {
receivers.emplace_back(std::thread([&] {
std::vector<size_t> receiver_res;
while (true) {
std::this_thread::sleep_for(
std::chrono::milliseconds(receive_time_gap));
size_t elem;
if (!q.Receive(&elem)) {
break;
}
receiver_res.push_back(elem);
}
std::lock_guard<std::mutex> lock(mu);
res.push_back(receiver_res);
}));
}
for (auto& t : senders) {
t.join();
}
q.Close();
for (auto& t : receivers) {
t.join();
}
CheckIsUnorderedSame(to_send, res);
}
TEST(BlockingQueue, MultiSenderMultiReaderTest) {
std::vector<std::vector<size_t>> to_send_1{{2, 3, 4}, {9}, {0, 7, 15, 6}};
MultiSenderMultiReceiver(2, to_send_1, 2, 0, 0);
MultiSenderMultiReceiver(10, to_send_1, 2, 0, 0);
MultiSenderMultiReceiver(2, to_send_1, 20, 0, 0);
MultiSenderMultiReceiver(2, to_send_1, 2, 50, 0);
MultiSenderMultiReceiver(2, to_send_1, 2, 0, 50);
std::vector<std::vector<size_t>> to_send_2{
{2, 3, 4}, {}, {0, 7, 15, 6, 9, 32}};
MultiSenderMultiReceiver(2, to_send_2, 3, 0, 0);
MultiSenderMultiReceiver(20, to_send_2, 3, 0, 0);
MultiSenderMultiReceiver(2, to_send_2, 30, 0, 0);
MultiSenderMultiReceiver(2, to_send_2, 3, 50, 0);
MultiSenderMultiReceiver(2, to_send_2, 3, 0, 50);
}
struct MyClass {
MyClass() : val_(0) {}
explicit MyClass(int val) : val_(val) {}
MyClass(const MyClass& b) { val_ = b.val_; }
MyClass(MyClass&& b) { val_ = b.val_; }
void operator=(const MyClass& b) { val_ = b.val_; }
int val_;
};
TEST(BlockingQueue, MyClassTest) {
BlockingQueue<MyClass> q(2);
MyClass a(200);
q.Send(std::move(a));
MyClass b;
q.Receive(&b);
EXPECT_EQ(a.val_, b.val_);
}
...@@ -12,7 +12,9 @@ ...@@ -12,7 +12,9 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "reader_op_registry.h" #include "paddle/fluid/operators/reader/reader_op_registry.h"
#include <string>
#include <vector>
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -14,6 +14,8 @@ ...@@ -14,6 +14,8 @@
#pragma once #pragma once
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/reader.h" #include "paddle/fluid/framework/reader.h"
......
...@@ -93,8 +93,14 @@ class ReshapeOp : public framework::OperatorWithKernel { ...@@ -93,8 +93,14 @@ class ReshapeOp : public framework::OperatorWithKernel {
if (unk_dim_idx != -1) { if (unk_dim_idx != -1) {
output_shape[unk_dim_idx] = -in_size / capacity; output_shape[unk_dim_idx] = -in_size / capacity;
      // If in_size < 0, the shape is undetermined at compile time, so skip
      // the check. For example, with in_dims = [-1, 8, 1, 1] and
      // shape = [-1, 3, 8]: capacity = -24, in_size = -8, output_shape[0] = 0,
      // and the check below would fail.
if (in_size > 0) {
PADDLE_ENFORCE_EQ(output_shape[unk_dim_idx] * capacity, -in_size, PADDLE_ENFORCE_EQ(output_shape[unk_dim_idx] * capacity, -in_size,
"Invalid shape is given."); "Invalid shape is given.");
}
} else { } else {
PADDLE_ENFORCE_EQ(capacity, in_size, "Invalid shape is given."); PADDLE_ENFORCE_EQ(capacity, in_size, "Invalid shape is given.");
} }
...@@ -124,10 +130,8 @@ class ReshapeKernel : public framework::OpKernel<T> { ...@@ -124,10 +130,8 @@ class ReshapeKernel : public framework::OpKernel<T> {
auto *shape_data = shape_tensor->data<int>(); auto *shape_data = shape_tensor->data<int>();
framework::Tensor cpu_shape_tensor; framework::Tensor cpu_shape_tensor;
if (platform::is_gpu_place(ctx.GetPlace())) { if (platform::is_gpu_place(ctx.GetPlace())) {
TensorCopy(*shape_tensor, platform::CPUPlace(), ctx.device_context(), TensorCopySync(*shape_tensor, platform::CPUPlace(), &cpu_shape_tensor);
&cpu_shape_tensor);
shape_data = cpu_shape_tensor.data<int>(); shape_data = cpu_shape_tensor.data<int>();
ctx.device_context().Wait();
} }
auto shape = auto shape =
std::vector<int>(shape_data, shape_data + shape_tensor->numel()); std::vector<int>(shape_data, shape_data + shape_tensor->numel());
...@@ -146,9 +150,7 @@ class ReshapeKernel : public framework::OpKernel<T> { ...@@ -146,9 +150,7 @@ class ReshapeKernel : public framework::OpKernel<T> {
out->Resize(out_dims); out->Resize(out_dims);
if (!inplace) { if (!inplace) {
out->mutable_data<T>(ctx.GetPlace()); out->mutable_data<T>(ctx.GetPlace());
framework::TensorCopy(*in, ctx.GetPlace(), ctx.device_context(), out); framework::TensorCopySync(*in, ctx.GetPlace(), out);
ctx.device_context().Wait();
// TensorCopy will resize to in_dims.
out->Resize(out_dims); out->Resize(out_dims);
} else { } else {
out->ShareDataWith(*in); out->ShareDataWith(*in);
......
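The comment added in ReshapeOp::InferShape explains why the consistency check is guarded by in_size > 0. A small worked sketch of the same arithmetic, outside of Paddle, using the numbers from that comment plus one fully known case:

#include <cassert>
#include <cstdint>

// "capacity" is the product of the target shape with the single -1 kept as
// -1; "in_size" is the product of the input dims, negative when some dim is
// unknown at compile time.
void ReshapeInferSketch() {
  // Fully known input: in_dims = {2, 8}, shape = {-1, 4}.
  int64_t in_size = 2 * 8;                  // 16
  int64_t capacity = (-1) * 4;              // -4
  int64_t inferred = -in_size / capacity;   // 4
  assert(inferred * capacity == -in_size);  // 4 * -4 == -16, the check holds

  // The case from the comment: in_dims = {-1, 8, 1, 1}, shape = {-1, 3, 8}.
  in_size = (-1) * 8 * 1 * 1;               // -8, unknown at compile time
  capacity = (-1) * 3 * 8;                  // -24
  inferred = -in_size / capacity;           // 8 / -24 == 0 in integer division
  // 0 * -24 != 8, so the equality check would fail spuriously; that is why
  // the kernel above skips it whenever in_size <= 0.
}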
...@@ -18,8 +18,7 @@ namespace paddle { ...@@ -18,8 +18,7 @@ namespace paddle {
namespace operators { namespace operators {
using Tensor = framework::Tensor; using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
static constexpr int kROISize = 5;
class ROIPoolOp : public framework::OperatorWithKernel { class ROIPoolOp : public framework::OperatorWithKernel {
public: public:
...@@ -40,11 +39,11 @@ class ROIPoolOp : public framework::OperatorWithKernel { ...@@ -40,11 +39,11 @@ class ROIPoolOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE(input_dims.size() == 4, PADDLE_ENFORCE(input_dims.size() == 4,
"The format of input tensor is NCHW."); "The format of input tensor is NCHW.");
PADDLE_ENFORCE(rois_dims.size() == 2, PADDLE_ENFORCE(rois_dims.size() == 2,
"ROIs should be a 2-D tensor of shape (num_rois, 5)" "ROIs should be a 2-D LoDTensor of shape (num_rois, 4)"
"given as [[batch_id, x1, y1, x2, y2], …]."); "given as [[x1, y1, x2, y2], …].");
PADDLE_ENFORCE(rois_dims[1] == kROISize, PADDLE_ENFORCE(rois_dims[1] == kROISize,
"ROIs should be a 2-D tensor of shape (num_rois, 5)" "ROIs should be a 2-D LoDTensor of shape (num_rois, 4)"
"given as [[batch_id, x1, y1, x2, y2], …]."); "given as [[x1, y1, x2, y2], …].");
int pooled_height = ctx->Attrs().Get<int>("pooled_height"); int pooled_height = ctx->Attrs().Get<int>("pooled_height");
int pooled_width = ctx->Attrs().Get<int>("pooled_width"); int pooled_width = ctx->Attrs().Get<int>("pooled_width");
...@@ -109,10 +108,10 @@ class ROIPoolOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -109,10 +108,10 @@ class ROIPoolOpMaker : public framework::OpProtoAndCheckerMaker {
"H is the height of the feature, and " "H is the height of the feature, and "
"W is the width of the feature."); "W is the width of the feature.");
AddInput("ROIs", AddInput("ROIs",
"(Tensor), " "(LoDTensor), "
"ROIs (Regions of Interest) to pool over. " "ROIs (Regions of Interest) to pool over. "
"should be a 2-D tensor of shape (num_rois, 5)" "should be a 2-D LoDTensor of shape (num_rois, 4)"
"given as [[batch_id, x1, y1, x2, y2], …]. " "given as [[x1, y1, x2, y2], …]. "
"Where batch_id is the id of the data, " "Where batch_id is the id of the data, "
"(x1, y1) is the top left coordinates, and " "(x1, y1) is the top left coordinates, and "
"(x2, y2) is the bottom right coordinates."); "(x2, y2) is the bottom right coordinates.");
......
...@@ -13,16 +13,16 @@ See the License for the specific language governing permissions and ...@@ -13,16 +13,16 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/operators/roi_pool_op.h" #include "paddle/fluid/operators/roi_pool_op.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
using Tensor = framework::Tensor; using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
static constexpr int kNumCUDAThreads = 512; static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096; static constexpr int kNumMaxinumNumBlocks = 4096;
static constexpr int kROISize = 5;
static inline int NumBlocks(const int N) { static inline int NumBlocks(const int N) {
return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
...@@ -30,13 +30,11 @@ static inline int NumBlocks(const int N) { ...@@ -30,13 +30,11 @@ static inline int NumBlocks(const int N) {
} }
template <typename T> template <typename T>
__global__ void GPUROIPoolForward(const int nthreads, const T* input_data, __global__ void GPUROIPoolForward(
const int64_t* input_rois, const int nthreads, const T* input_data, const int64_t* input_rois,
const float spatial_scale, const int channels, const float spatial_scale, const int channels, const int height,
const int height, const int width, const int width, const int pooled_height, const int pooled_width,
const int pooled_height, int* roi_batch_id_data, T* output_data, int64_t* argmax_data) {
const int pooled_width, T* output_data,
int64_t* argmax_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x; int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x; int offset = blockDim.x * gridDim.x;
for (size_t i = index; i < nthreads; i += offset) { for (size_t i = index; i < nthreads; i += offset) {
...@@ -46,11 +44,11 @@ __global__ void GPUROIPoolForward(const int nthreads, const T* input_data, ...@@ -46,11 +44,11 @@ __global__ void GPUROIPoolForward(const int nthreads, const T* input_data,
int n = index / pooled_width / pooled_height / channels; int n = index / pooled_width / pooled_height / channels;
const int64_t* offset_input_rois = input_rois + n * kROISize; const int64_t* offset_input_rois = input_rois + n * kROISize;
int roi_batch_ind = offset_input_rois[0]; int roi_batch_ind = roi_batch_id_data[n];
int roi_start_w = round(offset_input_rois[1] * spatial_scale); int roi_start_w = round(offset_input_rois[0] * spatial_scale);
int roi_start_h = round(offset_input_rois[2] * spatial_scale); int roi_start_h = round(offset_input_rois[1] * spatial_scale);
int roi_end_w = round(offset_input_rois[3] * spatial_scale); int roi_end_w = round(offset_input_rois[2] * spatial_scale);
int roi_end_h = round(offset_input_rois[4] * spatial_scale); int roi_end_h = round(offset_input_rois[3] * spatial_scale);
int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1);
...@@ -93,7 +91,8 @@ __global__ void GPUROIPoolBackward( ...@@ -93,7 +91,8 @@ __global__ void GPUROIPoolBackward(
const int nthreads, const int64_t* input_rois, const T* output_grad, const int nthreads, const int64_t* input_rois, const T* output_grad,
const int64_t* argmax_data, const int num_rois, const float spatial_scale, const int64_t* argmax_data, const int num_rois, const float spatial_scale,
const int channels, const int height, const int width, const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, T* input_grad) { const int pooled_height, const int pooled_width, int* roi_batch_id_data,
T* input_grad) {
int index = blockIdx.x * blockDim.x + threadIdx.x; int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x; int offset = blockDim.x * gridDim.x;
for (int i = index; i < nthreads; i += offset) { for (int i = index; i < nthreads; i += offset) {
...@@ -102,8 +101,7 @@ __global__ void GPUROIPoolBackward( ...@@ -102,8 +101,7 @@ __global__ void GPUROIPoolBackward(
int c = (index / pooled_width / pooled_height) % channels; int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels; int n = index / pooled_width / pooled_height / channels;
const int64_t* offset_input_rois = input_rois + n * kROISize; int roi_batch_ind = roi_batch_id_data[n];
int roi_batch_ind = offset_input_rois[0];
int input_offset = (roi_batch_ind * channels + c) * height * width; int input_offset = (roi_batch_ind * channels + c) * height * width;
int output_offset = (n * channels + c) * pooled_height * pooled_width; int output_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_output_grad = output_grad + output_offset; const T* offset_output_grad = output_grad + output_offset;
...@@ -124,7 +122,7 @@ class GPUROIPoolOpKernel : public framework::OpKernel<T> { ...@@ -124,7 +122,7 @@ class GPUROIPoolOpKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& ctx) const override { void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X"); auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<Tensor>("ROIs"); auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out = ctx.Output<Tensor>("Out"); auto* out = ctx.Output<Tensor>("Out");
auto* argmax = ctx.Output<Tensor>("Argmax"); auto* argmax = ctx.Output<Tensor>("Argmax");
...@@ -133,23 +131,46 @@ class GPUROIPoolOpKernel : public framework::OpKernel<T> { ...@@ -133,23 +131,46 @@ class GPUROIPoolOpKernel : public framework::OpKernel<T> {
auto spatial_scale = ctx.Attr<float>("spatial_scale"); auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto in_dims = in->dims(); auto in_dims = in->dims();
int batch_size = in_dims[0];
auto in_stride = framework::stride(in_dims); auto in_stride = framework::stride(in_dims);
int channels = in_dims[1]; int channels = in_dims[1];
int height = in_dims[2]; int height = in_dims[2];
int width = in_dims[3]; int width = in_dims[3];
size_t rois_num = rois->dims()[0]; int rois_num = rois->dims()[0];
if (rois_num == 0) return; if (rois_num == 0) return;
int output_size = out->numel(); int output_size = out->numel();
int blocks = NumBlocks(output_size); int blocks = NumBlocks(output_size);
int threads = kNumCUDAThreads; int threads = kNumCUDAThreads;
framework::Tensor roi_batch_id_list;
roi_batch_id_list.Resize({rois_num});
int* roi_batch_id_data =
roi_batch_id_list.mutable_data<int>(platform::CPUPlace());
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch_size,
"The rois_batch_size and imgs batch_size must be the same.");
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod,
"The rois_num from input and lod must be the same.");
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
framework::Tensor roi_batch_id_list_gpu;
framework::TensorCopy(roi_batch_id_list, ctx.GetPlace(),
ctx.device_context(), &roi_batch_id_list_gpu);
GPUROIPoolForward< GPUROIPoolForward<
T><<<blocks, threads, 0, ctx.cuda_device_context().stream()>>>( T><<<blocks, threads, 0, ctx.cuda_device_context().stream()>>>(
output_size, in->data<T>(), rois->data<int64_t>(), spatial_scale, output_size, in->data<T>(), rois->data<int64_t>(), spatial_scale,
channels, height, width, pooled_height, pooled_width, channels, height, width, pooled_height, pooled_width,
out->mutable_data<T>(ctx.GetPlace()), roi_batch_id_list_gpu.data<int>(), out->mutable_data<T>(ctx.GetPlace()),
argmax->mutable_data<int64_t>(ctx.GetPlace())); argmax->mutable_data<int64_t>(ctx.GetPlace()));
} }
}; };
...@@ -159,7 +180,7 @@ class GPUROIPoolGradOpKernel : public framework::OpKernel<T> { ...@@ -159,7 +180,7 @@ class GPUROIPoolGradOpKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& ctx) const override { void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X"); auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<Tensor>("ROIs"); auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* argmax = ctx.Input<Tensor>("Argmax"); auto* argmax = ctx.Input<Tensor>("Argmax");
auto* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
...@@ -169,12 +190,27 @@ class GPUROIPoolGradOpKernel : public framework::OpKernel<T> { ...@@ -169,12 +190,27 @@ class GPUROIPoolGradOpKernel : public framework::OpKernel<T> {
auto pooled_width = ctx.Attr<int>("pooled_width"); auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale"); auto spatial_scale = ctx.Attr<float>("spatial_scale");
size_t rois_num = rois->dims()[0]; int rois_num = rois->dims()[0];
int channels = in->dims()[1]; int channels = in->dims()[1];
int height = in->dims()[2]; int height = in->dims()[2];
int width = in->dims()[3]; int width = in->dims()[3];
if (x_grad) { if (x_grad) {
framework::Tensor roi_batch_id_list;
roi_batch_id_list.Resize({rois_num});
int* roi_batch_id_data =
roi_batch_id_list.mutable_data<int>(platform::CPUPlace());
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
framework::Tensor roi_batch_id_list_gpu;
framework::TensorCopy(roi_batch_id_list, ctx.GetPlace(),
ctx.device_context(), &roi_batch_id_list_gpu);
x_grad->mutable_data<T>(ctx.GetPlace()); x_grad->mutable_data<T>(ctx.GetPlace());
math::SetConstant<Place, T> set_zero; math::SetConstant<Place, T> set_zero;
set_zero(ctx.cuda_device_context(), x_grad, static_cast<T>(0)); set_zero(ctx.cuda_device_context(), x_grad, static_cast<T>(0));
...@@ -189,6 +225,7 @@ class GPUROIPoolGradOpKernel : public framework::OpKernel<T> { ...@@ -189,6 +225,7 @@ class GPUROIPoolGradOpKernel : public framework::OpKernel<T> {
output_grad_size, rois->data<int64_t>(), out_grad->data<T>(), output_grad_size, rois->data<int64_t>(), out_grad->data<T>(),
argmax->data<int64_t>(), rois_num, spatial_scale, channels, height, argmax->data<int64_t>(), rois_num, spatial_scale, channels, height,
width, pooled_height, pooled_width, width, pooled_height, pooled_width,
roi_batch_id_list_gpu.data<int>(),
x_grad->mutable_data<T>(ctx.GetPlace())); x_grad->mutable_data<T>(ctx.GetPlace()));
} }
} }
......
...@@ -21,12 +21,14 @@ limitations under the License. */ ...@@ -21,12 +21,14 @@ limitations under the License. */
namespace paddle { namespace paddle {
namespace operators { namespace operators {
static constexpr int kROISize = 4;
template <typename DeviceContext, typename T> template <typename DeviceContext, typename T>
class CPUROIPoolOpKernel : public framework::OpKernel<T> { class CPUROIPoolOpKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& ctx) const override { void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<framework::Tensor>("X"); auto* in = ctx.Input<framework::Tensor>("X");
auto* rois = ctx.Input<framework::Tensor>("ROIs"); auto* rois = ctx.Input<framework::LoDTensor>("ROIs");
auto* out = ctx.Output<framework::Tensor>("Out"); auto* out = ctx.Output<framework::Tensor>("Out");
auto* argmax = ctx.Output<framework::Tensor>("Argmax"); auto* argmax = ctx.Output<framework::Tensor>("Argmax");
...@@ -47,24 +49,36 @@ class CPUROIPoolOpKernel : public framework::OpKernel<T> { ...@@ -47,24 +49,36 @@ class CPUROIPoolOpKernel : public framework::OpKernel<T> {
auto out_stride = framework::stride(out->dims()); auto out_stride = framework::stride(out->dims());
const T* input_data = in->data<T>(); const T* input_data = in->data<T>();
const int64_t* rois_data = rois->data<int64_t>();
T* output_data = out->mutable_data<T>(ctx.GetPlace());
int64_t* argmax_data = argmax->mutable_data<int64_t>(ctx.GetPlace());
for (int n = 0; n < rois_num; ++n) { framework::Tensor roi_batch_id_list;
int roi_batch_id = rois_data[0]; roi_batch_id_list.Resize({rois_num});
PADDLE_ENFORCE_GE(roi_batch_id, 0); int* roi_batch_id_data =
PADDLE_ENFORCE_LT(roi_batch_id, batch_size); roi_batch_id_list.mutable_data<int>(ctx.GetPlace());
rois_data += roi_stride[0];
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch_size,
"The rois_batch_size and imgs batch_size must be the same.");
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod,
"The rois_num from input and lod must be the same.");
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
} }
}
T* output_data = out->mutable_data<T>(ctx.GetPlace());
int64_t* argmax_data = argmax->mutable_data<int64_t>(ctx.GetPlace());
rois_data = rois->data<int64_t>(); const int64_t* rois_data = rois->data<int64_t>();
for (int n = 0; n < rois_num; ++n) { for (int n = 0; n < rois_num; ++n) {
int roi_batch_id = rois_data[0]; int roi_batch_id = roi_batch_id_data[n];
int roi_start_w = round(rois_data[1] * spatial_scale); int roi_start_w = round(rois_data[0] * spatial_scale);
int roi_start_h = round(rois_data[2] * spatial_scale); int roi_start_h = round(rois_data[1] * spatial_scale);
int roi_end_w = round(rois_data[3] * spatial_scale); int roi_end_w = round(rois_data[2] * spatial_scale);
int roi_end_h = round(rois_data[4] * spatial_scale); int roi_end_h = round(rois_data[3] * spatial_scale);
// Force malformed ROIs to be 1x1 // Force malformed ROIs to be 1x1
int roi_height = std::max(roi_end_h - roi_start_h + 1, 1); int roi_height = std::max(roi_end_h - roi_start_h + 1, 1);
...@@ -133,7 +147,7 @@ class CPUROIPoolGradOpKernel : public framework::OpKernel<T> { ...@@ -133,7 +147,7 @@ class CPUROIPoolGradOpKernel : public framework::OpKernel<T> {
public: public:
void Compute(const framework::ExecutionContext& ctx) const override { void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<framework::Tensor>("X"); auto* in = ctx.Input<framework::Tensor>("X");
auto* rois = ctx.Input<framework::Tensor>("ROIs"); auto* rois = ctx.Input<framework::LoDTensor>("ROIs");
auto* argmax = ctx.Input<framework::Tensor>("Argmax"); auto* argmax = ctx.Input<framework::Tensor>("Argmax");
auto* out_grad = auto* out_grad =
ctx.Input<framework::Tensor>(framework::GradVarName("Out")); ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
...@@ -143,6 +157,20 @@ class CPUROIPoolGradOpKernel : public framework::OpKernel<T> { ...@@ -143,6 +157,20 @@ class CPUROIPoolGradOpKernel : public framework::OpKernel<T> {
auto pooled_width = ctx.Attr<int>("pooled_width"); auto pooled_width = ctx.Attr<int>("pooled_width");
if (in_grad) { if (in_grad) {
int rois_num = rois->dims()[0];
framework::Tensor roi_batch_id_list;
roi_batch_id_list.Resize({rois_num});
int* roi_batch_id_data =
roi_batch_id_list.mutable_data<int>(ctx.GetPlace());
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
const int64_t* rois_data = rois->data<int64_t>(); const int64_t* rois_data = rois->data<int64_t>();
const T* out_grad_data = out_grad->data<T>(); const T* out_grad_data = out_grad->data<T>();
const int64_t* argmax_data = argmax->data<int64_t>(); const int64_t* argmax_data = argmax->data<int64_t>();
...@@ -156,11 +184,10 @@ class CPUROIPoolGradOpKernel : public framework::OpKernel<T> { ...@@ -156,11 +184,10 @@ class CPUROIPoolGradOpKernel : public framework::OpKernel<T> {
auto roi_stride = framework::stride(rois->dims()); auto roi_stride = framework::stride(rois->dims());
auto out_stride = framework::stride(out_grad->dims()); auto out_stride = framework::stride(out_grad->dims());
int rois_num = rois->dims()[0];
int channels = in->dims()[1]; int channels = in->dims()[1];
for (int n = 0; n < rois_num; ++n) { for (int n = 0; n < rois_num; ++n) {
int roi_batch_idx = rois_data[0]; int roi_batch_idx = roi_batch_id_data[n];
T* batch_grad_data = in_grad_data + roi_batch_idx * in_stride[0]; T* batch_grad_data = in_grad_data + roi_batch_idx * in_stride[0];
for (int c = 0; c < channels; ++c) { for (int c = 0; c < channels; ++c) {
for (int ph = 0; ph < pooled_height; ++ph) { for (int ph = 0; ph < pooled_height; ++ph) {
......
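Both the CPU and GPU kernels above now derive each ROI's batch index from the LoD of the ROIs LoDTensor instead of a leading batch_id column. A standalone sketch of that expansion (RoiBatchIds is an illustrative name, not a Paddle function):

#include <cstddef>
#include <vector>

// rois_lod has batch_size + 1 offsets; every ROI whose index falls inside
// [rois_lod[n], rois_lod[n + 1]) belongs to image n, exactly as the nested
// loops in the kernels above compute it.
std::vector<int> RoiBatchIds(const std::vector<size_t>& rois_lod) {
  int rois_batch_size = static_cast<int>(rois_lod.size()) - 1;
  std::vector<int> roi_batch_id_data(rois_lod.back());
  for (int n = 0; n < rois_batch_size; ++n) {
    for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
      roi_batch_id_data[i] = n;
    }
  }
  return roi_batch_id_data;
}

// Example: a LoD of {0, 2, 5} means image 0 owns ROIs 0-1 and image 1 owns
// ROIs 2-4, so the result is {0, 0, 1, 1, 1}.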
...@@ -14,7 +14,7 @@ limitations under the License. */ ...@@ -14,7 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/row_conv_op.h" #include "paddle/fluid/operators/row_conv_op.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -220,7 +220,7 @@ __global__ void RowConvGradFilterImproved(const T *in, const T *dout, ...@@ -220,7 +220,7 @@ __global__ void RowConvGradFilterImproved(const T *in, const T *dout,
for (int offset = 16; offset > 0; for (int offset = 16; offset > 0;
offset = offset / 2) { // blockDim.x is 32. offset = offset / 2) { // blockDim.x is 32.
val += __shfl_down(val, offset); val += platform::__shfl_down_sync(0, val, offset);
} }
__syncthreads(); __syncthreads();
...@@ -276,7 +276,7 @@ __global__ void RowConvGradFilter(const T *in, const T *dout, int num_sequence, ...@@ -276,7 +276,7 @@ __global__ void RowConvGradFilter(const T *in, const T *dout, int num_sequence,
for (int offset = 16; offset > 0; for (int offset = 16; offset > 0;
offset = offset / 2) { // blockDim.x is 32. offset = offset / 2) { // blockDim.x is 32.
val += __shfl_down(val, offset); val += platform::__shfl_down_sync(0, val, offset);
} }
__syncthreads(); __syncthreads();
......
...@@ -14,6 +14,7 @@ limitations under the License. */ ...@@ -14,6 +14,7 @@ limitations under the License. */
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/float16.h"
USE_NO_KERNEL_OP(save); USE_NO_KERNEL_OP(save);
USE_NO_KERNEL_OP(load); USE_NO_KERNEL_OP(load);
...@@ -61,3 +62,35 @@ TEST(SaveLoadOp, CPU) { ...@@ -61,3 +62,35 @@ TEST(SaveLoadOp, CPU) {
} }
} }
} }
TEST(SaveLoadFP16Op, CPU) {
paddle::framework::Scope scope;
paddle::platform::CPUPlace place;
auto var = scope.Var("test_var");
auto tensor = var->GetMutable<paddle::framework::LoDTensor>();
tensor->Resize({3, 10});
float* expect = tensor->mutable_data<float>(place);
for (int64_t i = 0; i < tensor->numel(); ++i) {
expect[i] = static_cast<float>(paddle::platform::float16(i));
}
paddle::framework::AttributeMap attrs;
attrs.insert({"file_path", std::string("tensor.save")});
attrs.insert({"save_as_fp16", true});
auto save_op = paddle::framework::OpRegistry::CreateOp(
"save", {{"X", {"test_var"}}}, {}, attrs);
save_op->Run(scope, place);
auto load_var = scope.Var("out_var");
auto target = load_var->GetMutable<paddle::framework::LoDTensor>();
auto load_op = paddle::framework::OpRegistry::CreateOp(
"load", {}, {{"Out", {"out_var"}}}, attrs);
load_op->Run(scope, place);
paddle::platform::float16* actual = target->data<paddle::platform::float16>();
for (int64_t i = 0; i < tensor->numel(); ++i) {
EXPECT_EQ(expect[i], static_cast<float>(actual[i]));
}
}
...@@ -18,6 +18,7 @@ limitations under the License. */ ...@@ -18,6 +18,7 @@ limitations under the License. */
#include <numeric> #include <numeric>
#include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/framework.pb.h" #include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
...@@ -68,6 +69,7 @@ class SaveOp : public framework::OperatorBase { ...@@ -68,6 +69,7 @@ class SaveOp : public framework::OperatorBase {
const platform::Place &place) const override { const platform::Place &place) const override {
auto filename = Attr<std::string>("file_path"); auto filename = Attr<std::string>("file_path");
auto overwrite = Attr<bool>("overwrite"); auto overwrite = Attr<bool>("overwrite");
auto save_as_fp16 = Attr<bool>("save_as_fp16");
if (FileExists(filename) && !overwrite) { if (FileExists(filename) && !overwrite) {
PADDLE_THROW("%s is existed, cannot save to it when overwrite=false", PADDLE_THROW("%s is existed, cannot save to it when overwrite=false",
...@@ -96,8 +98,19 @@ class SaveOp : public framework::OperatorBase { ...@@ -96,8 +98,19 @@ class SaveOp : public framework::OperatorBase {
platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
auto &dev_ctx = *pool.Get(place); auto &dev_ctx = *pool.Get(place);
auto in_dtype = framework::ToDataType(tensor.type());
auto out_dtype = save_as_fp16 ? framework::proto::VarType::FP16 : in_dtype;
if (in_dtype != out_dtype) {
auto in_kernel_type = framework::OpKernelType(in_dtype, place);
auto out_kernel_type = framework::OpKernelType(out_dtype, place);
framework::LoDTensor out;
framework::TransDataType(in_kernel_type, out_kernel_type, tensor, &out);
framework::SerializeToStream(fout, out, dev_ctx);
} else {
framework::SerializeToStream(fout, tensor, dev_ctx); framework::SerializeToStream(fout, tensor, dev_ctx);
} }
}
}; };
class SaveOpProtoMaker : public framework::OpProtoAndCheckerMaker { class SaveOpProtoMaker : public framework::OpProtoAndCheckerMaker {
...@@ -114,6 +127,12 @@ This operator will serialize and write a tensor variable to file on disk. ...@@ -114,6 +127,12 @@ This operator will serialize and write a tensor variable to file on disk.
"(boolean, default true)" "(boolean, default true)"
"Overwrite the output file if exist") "Overwrite the output file if exist")
.SetDefault(true); .SetDefault(true);
AddAttr<bool>("save_as_fp16",
"(boolean, default false)"
"If true, the tensor will be converted to float16 data "
"type and then saved. Otherwise, the tensor will be "
"directly saved without data type conversion.")
.SetDefault(false);
AddAttr<std::string>("file_path", AddAttr<std::string>("file_path",
"(string)" "(string)"
"The \"file_path\" where the variable will be saved.") "The \"file_path\" where the variable will be saved.")
......
...@@ -41,6 +41,8 @@ class SendOp : public framework::OperatorBase { ...@@ -41,6 +41,8 @@ class SendOp : public framework::OperatorBase {
std::vector<std::string> endpoints = std::vector<std::string> endpoints =
Attr<std::vector<std::string>>("endpoints"); Attr<std::vector<std::string>>("endpoints");
bool sync_mode = Attr<bool>("sync_mode");
platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
auto& ctx = *pool.Get(place); auto& ctx = *pool.Get(place);
...@@ -64,11 +66,13 @@ class SendOp : public framework::OperatorBase { ...@@ -64,11 +66,13 @@ class SendOp : public framework::OperatorBase {
} }
PADDLE_ENFORCE(rpc_client->Wait()); PADDLE_ENFORCE(rpc_client->Wait());
if (sync_mode) {
for (auto& ep : endpoints) { for (auto& ep : endpoints) {
VLOG(3) << "batch barrier, ep: " << ep; VLOG(3) << "batch barrier, ep: " << ep;
rpc_client->AsyncSendBatchBarrier(ep); rpc_client->AsyncSendBatchBarrier(ep);
} }
PADDLE_ENFORCE(rpc_client->Wait()); PADDLE_ENFORCE(rpc_client->Wait());
}
if (outs.size() > 0) { if (outs.size() > 0) {
for (size_t i = 0; i < outs.size(); i++) { for (size_t i = 0; i < outs.size(); i++) {
...@@ -112,6 +116,7 @@ This operator will send tensor to recv_op at the parameter server. ...@@ -112,6 +116,7 @@ This operator will send tensor to recv_op at the parameter server.
"Server endpoints in the order of input " "Server endpoints in the order of input "
"variables for mapping") "variables for mapping")
.SetDefault({}); .SetDefault({});
AddAttr<bool>("sync_mode", "work in sync_mode or not").SetDefault(true);
} }
}; };
......
...@@ -137,6 +137,8 @@ void StartServerNet(bool is_sparse) { ...@@ -137,6 +137,8 @@ void StartServerNet(bool is_sparse) {
attrs.insert({"GradList", std::vector<std::string>({"x1"})}); attrs.insert({"GradList", std::vector<std::string>({"x1"})});
attrs.insert({"OptimizeBlock", optimize_block}); attrs.insert({"OptimizeBlock", optimize_block});
attrs.insert({"PrefetchBlock", prefetch_block}); attrs.insert({"PrefetchBlock", prefetch_block});
attrs.insert({"grad_to_block_id", std::vector<std::string>({""})});
attrs.insert({"sync_mode", true});
listen_and_serv_op = listen_and_serv_op =
f::OpRegistry::CreateOp("listen_and_serv", {{"X", {"x1"}}}, {}, attrs); f::OpRegistry::CreateOp("listen_and_serv", {{"X", {"x1"}}}, {}, attrs);
listen_and_serv_op->Run(scope, place); listen_and_serv_op->Run(scope, place);
......
...@@ -15,7 +15,7 @@ limitations under the License. */ ...@@ -15,7 +15,7 @@ limitations under the License. */
#include <thrust/device_vector.h> #include <thrust/device_vector.h>
#include <thrust/host_vector.h> #include <thrust/host_vector.h>
#include "paddle/fluid/operators/sequence_erase_op.h" #include "paddle/fluid/operators/sequence_erase_op.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -14,7 +14,7 @@ limitations under the License. */ ...@@ -14,7 +14,7 @@ limitations under the License. */
#include <algorithm> #include <algorithm>
#include "paddle/fluid/operators/sequence_expand_op.h" #include "paddle/fluid/operators/sequence_expand_op.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -14,7 +14,7 @@ limitations under the License. */ ...@@ -14,7 +14,7 @@ limitations under the License. */
#define EIGEN_USE_GPU #define EIGEN_USE_GPU
#include "paddle/fluid/operators/sgd_op.h" #include "paddle/fluid/operators/sgd_op.h"
#include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
......
...@@ -222,8 +222,8 @@ class WarpCTCGradKernel : public framework::OpKernel<T> { ...@@ -222,8 +222,8 @@ class WarpCTCGradKernel : public framework::OpKernel<T> {
const T* loss_grad_data = loss_grad->data<T>(); const T* loss_grad_data = loss_grad->data<T>();
math::ScaleLoDTensorFunctor<DeviceContext, T>()( math::ScaleLoDTensorFunctor<DeviceContext, T>()(
ctx.template device_context<DeviceContext>(), *logits_grad, ctx.template device_context<DeviceContext>(), loss_grad_data,
loss_grad_data); logits_grad);
} }
}; };
......
...@@ -66,5 +66,18 @@ CUDA_ATOMIC_WRAPPER(Add, double) { ...@@ -66,5 +66,18 @@ CUDA_ATOMIC_WRAPPER(Add, double) {
} }
#endif #endif
// __shfl_down has been deprecated as of CUDA 9.0.
#if CUDA_VERSION < 9000
template <typename T>
__forceinline__ __device__ T __shfl_down_sync(unsigned, T val, int delta) {
return __shfl_down(val, delta);
}
#define CREATE_SHFL_MASK(mask, predicate) mask = 0u;
#else
#define FULL_WARP_MASK 0xFFFFFFFF
#define CREATE_SHFL_MASK(mask, predicate) \
mask = __ballot_sync(FULL_WARP_MASK, (predicate))
#endif
} // namespace platform } // namespace platform
} // namespace paddle } // namespace paddle
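The shim above lets warp shuffles be written uniformly for CUDA 8 and CUDA 9+: below 9.0 the mask is ignored and the call falls back to __shfl_down, while from 9.0 on CREATE_SHFL_MASK builds the mask with __ballot_sync. A minimal device-side sketch of the warp-sum pattern that row_conv_op.cu uses with it (WarpReduceSumSketch is illustrative, not a Paddle API):

#include "paddle/fluid/platform/cuda_primitives.h"

namespace paddle {
namespace operators {

// Sums a value over the 32 lanes of a warp; after the loop, lane 0 holds the
// warp-wide sum. Mirrors the reduction loops in RowConvGradFilter above.
template <typename T>
__device__ T WarpReduceSumSketch(T val) {
  unsigned mask = 0u;
  CREATE_SHFL_MASK(mask, true);
  for (int offset = 16; offset > 0; offset /= 2) {  // blockDim.x is 32
    val += platform::__shfl_down_sync(mask, val, offset);
  }
  return val;
}

}  // namespace operators
}  // namespace paddle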
...@@ -14,10 +14,12 @@ ...@@ -14,10 +14,12 @@
#pragma once #pragma once
#include <cublasXt.h>
#include <cublas_v2.h> #include <cublas_v2.h>
#include <cuda.h> #include <cuda.h>
#include <dlfcn.h> #include <dlfcn.h>
#include <mutex> // NOLINT #include <mutex> // NOLINT
#include <type_traits>
#include "paddle/fluid/platform/dynload/dynamic_loader.h" #include "paddle/fluid/platform/dynload/dynamic_loader.h"
namespace paddle { namespace paddle {
...@@ -37,14 +39,14 @@ extern void *cublas_dso_handle; ...@@ -37,14 +39,14 @@ extern void *cublas_dso_handle;
#ifdef PADDLE_USE_DSO #ifdef PADDLE_USE_DSO
#define DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP(__name) \ #define DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP(__name) \
struct DynLoad__##__name { \ struct DynLoad__##__name { \
using FUNC_TYPE = decltype(&::__name); \
template <typename... Args> \ template <typename... Args> \
inline cublasStatus_t operator()(Args... args) { \ inline cublasStatus_t operator()(Args... args) { \
typedef cublasStatus_t (*cublasFunc)(Args...); \
std::call_once(cublas_dso_flag, []() { \ std::call_once(cublas_dso_flag, []() { \
cublas_dso_handle = paddle::platform::dynload::GetCublasDsoHandle(); \ cublas_dso_handle = paddle::platform::dynload::GetCublasDsoHandle(); \
}); \ }); \
void *p_##__name = dlsym(cublas_dso_handle, #__name); \ void *p_##__name = dlsym(cublas_dso_handle, #__name); \
return reinterpret_cast<cublasFunc>(p_##__name)(args...); \ return reinterpret_cast<FUNC_TYPE>(p_##__name)(args...); \
} \ } \
}; \ }; \
extern DynLoad__##__name __name extern DynLoad__##__name __name
...@@ -71,8 +73,8 @@ extern void *cublas_dso_handle; ...@@ -71,8 +73,8 @@ extern void *cublas_dso_handle;
__macro(cublasDgemm_v2); \ __macro(cublasDgemm_v2); \
__macro(cublasHgemm); \ __macro(cublasHgemm); \
__macro(cublasSgemmEx); \ __macro(cublasSgemmEx); \
__macro(cublasSgeam_v2); \ __macro(cublasSgeam); \
__macro(cublasDgeam_v2); \ __macro(cublasDgeam); \
__macro(cublasCreate_v2); \ __macro(cublasCreate_v2); \
__macro(cublasDestroy_v2); \ __macro(cublasDestroy_v2); \
__macro(cublasSetStream_v2); \ __macro(cublasSetStream_v2); \
......
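The macro change above replaces a function-pointer type deduced from the call arguments with decltype(&::__name), so the dlsym result is cast to the library function's exact declared type. A minimal sketch of the same idea outside the macro, using a hypothetical C function foo rather than a real cuBLAS symbol:

#include <dlfcn.h>

// Only the declaration is needed: decltype(&::foo) then yields the precise
// pointer type int (*)(double, int), independent of how the call site's
// arguments would otherwise have been deduced.
extern "C" int foo(double x, int n);

int CallFooViaDlsym(void* dso_handle, double x, int n) {
  using FUNC_TYPE = decltype(&::foo);  // same alias name as the macro uses
  void* p_foo = dlsym(dso_handle, "foo");
  return reinterpret_cast<FUNC_TYPE>(p_foo)(x, n);
}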
...@@ -34,7 +34,7 @@ extern void EnforceCUDNNLoaded(const char* fn_name); ...@@ -34,7 +34,7 @@ extern void EnforceCUDNNLoaded(const char* fn_name);
struct DynLoad__##__name { \ struct DynLoad__##__name { \
template <typename... Args> \ template <typename... Args> \
auto operator()(Args... args) -> decltype(__name(args...)) { \ auto operator()(Args... args) -> decltype(__name(args...)) { \
using cudnn_func = decltype(__name(args...)) (*)(Args...); \ using cudnn_func = decltype(&::__name); \
std::call_once(cudnn_dso_flag, []() { \ std::call_once(cudnn_dso_flag, []() { \
cudnn_dso_handle = paddle::platform::dynload::GetCUDNNDsoHandle(); \ cudnn_dso_handle = paddle::platform::dynload::GetCUDNNDsoHandle(); \
}); \ }); \
......
...@@ -41,7 +41,7 @@ extern void *cupti_dso_handle; ...@@ -41,7 +41,7 @@ extern void *cupti_dso_handle;
struct DynLoad__##__name { \ struct DynLoad__##__name { \
template <typename... Args> \ template <typename... Args> \
inline CUptiResult CUPTIAPI operator()(Args... args) { \ inline CUptiResult CUPTIAPI operator()(Args... args) { \
typedef CUptiResult CUPTIAPI (*cuptiFunc)(Args...); \ using cuptiFunc = decltype(&::__name); \
std::call_once(cupti_dso_flag, []() { \ std::call_once(cupti_dso_flag, []() { \
cupti_dso_handle = paddle::platform::dynload::GetCUPTIDsoHandle(); \ cupti_dso_handle = paddle::platform::dynload::GetCUPTIDsoHandle(); \
}); \ }); \
......
...@@ -30,7 +30,7 @@ extern void *curand_dso_handle; ...@@ -30,7 +30,7 @@ extern void *curand_dso_handle;
struct DynLoad__##__name { \ struct DynLoad__##__name { \
template <typename... Args> \ template <typename... Args> \
curandStatus_t operator()(Args... args) { \ curandStatus_t operator()(Args... args) { \
typedef curandStatus_t (*curandFunc)(Args...); \ using curandFunc = decltype(&::__name); \
std::call_once(curand_dso_flag, []() { \ std::call_once(curand_dso_flag, []() { \
curand_dso_handle = paddle::platform::dynload::GetCurandDsoHandle(); \ curand_dso_handle = paddle::platform::dynload::GetCurandDsoHandle(); \
}); \ }); \
......
...@@ -33,7 +33,7 @@ extern void* nccl_dso_handle; ...@@ -33,7 +33,7 @@ extern void* nccl_dso_handle;
struct DynLoad__##__name { \ struct DynLoad__##__name { \
template <typename... Args> \ template <typename... Args> \
auto operator()(Args... args) -> decltype(__name(args...)) { \ auto operator()(Args... args) -> decltype(__name(args...)) { \
using nccl_func = decltype(__name(args...)) (*)(Args...); \ using nccl_func = decltype(&::__name); \
std::call_once(nccl_dso_flag, []() { \ std::call_once(nccl_dso_flag, []() { \
nccl_dso_handle = paddle::platform::dynload::GetNCCLDsoHandle(); \ nccl_dso_handle = paddle::platform::dynload::GetNCCLDsoHandle(); \
}); \ }); \
......
...@@ -36,7 +36,7 @@ extern void* warpctc_dso_handle; ...@@ -36,7 +36,7 @@ extern void* warpctc_dso_handle;
struct DynLoad__##__name { \ struct DynLoad__##__name { \
template <typename... Args> \ template <typename... Args> \
auto operator()(Args... args) -> decltype(__name(args...)) { \ auto operator()(Args... args) -> decltype(__name(args...)) { \
using warpctcFunc = decltype(__name(args...)) (*)(Args...); \ using warpctcFunc = decltype(&::__name); \
std::call_once(warpctc_dso_flag, []() { \ std::call_once(warpctc_dso_flag, []() { \
warpctc_dso_handle = paddle::platform::dynload::GetWarpCTCDsoHandle(); \ warpctc_dso_handle = paddle::platform::dynload::GetWarpCTCDsoHandle(); \
}); \ }); \
......
...@@ -502,11 +502,11 @@ All parameter, weight, gradient are variables in Paddle. ...@@ -502,11 +502,11 @@ All parameter, weight, gradient are variables in Paddle.
const std::unordered_set<std::string> &bcast_vars, const std::unordered_set<std::string> &bcast_vars,
const ProgramDesc &main_program, const std::string &loss_var_name, const ProgramDesc &main_program, const std::string &loss_var_name,
Scope *scope, std::vector<Scope *> &local_scopes, Scope *scope, std::vector<Scope *> &local_scopes,
bool allow_op_delay, bool customize_loss_grad) { bool allow_op_delay, bool use_default_grad_scale) {
new (&self) ParallelExecutor(num_threads, use_event, places, new (&self) ParallelExecutor(
params, bcast_vars, main_program, num_threads, use_event, places, params, bcast_vars,
loss_var_name, scope, local_scopes, main_program, loss_var_name, scope, local_scopes,
allow_op_delay, customize_loss_grad); allow_op_delay, use_default_grad_scale);
}) })
.def("bcast_params", &ParallelExecutor::BCastParamsToGPUs) .def("bcast_params", &ParallelExecutor::BCastParamsToGPUs)
// NOTE: even we return a vec<Scope*>* to Python use reference policy. // NOTE: even we return a vec<Scope*>* to Python use reference policy.
......
...@@ -63,15 +63,9 @@ struct CastToPyBufferImpl<true, I, ARGS...> { ...@@ -63,15 +63,9 @@ struct CastToPyBufferImpl<true, I, ARGS...> {
auto *dst_ptr = static_cast<void *>(dst_tensor.mutable_data<CUR_TYPE>( auto *dst_ptr = static_cast<void *>(dst_tensor.mutable_data<CUR_TYPE>(
tensor.dims(), platform::CPUPlace())); tensor.dims(), platform::CPUPlace()));
platform::DeviceContextPool &pool = paddle::platform::GpuMemcpySync(dst_ptr, src_ptr,
platform::DeviceContextPool::Instance(); sizeof(CUR_TYPE) * tensor.numel(),
auto dev_ctx = static_cast<const platform::CUDADeviceContext *>( cudaMemcpyDeviceToHost);
pool.Get(tensor.place()));
paddle::platform::GpuMemcpyAsync(
dst_ptr, src_ptr, sizeof(CUR_TYPE) * tensor.numel(),
cudaMemcpyDeviceToHost, dev_ctx->stream());
dev_ctx->Wait();
#else #else
PADDLE_THROW("'CUDAPlace' is not supported in CPU only device."); PADDLE_THROW("'CUDAPlace' is not supported in CPU only device.");
#endif #endif
...@@ -184,17 +178,8 @@ void PyCUDATensorSetFromArray( ...@@ -184,17 +178,8 @@ void PyCUDATensorSetFromArray(
self->Resize(framework::make_ddim(dims)); self->Resize(framework::make_ddim(dims));
auto *dst = self->mutable_data<T>(place); auto *dst = self->mutable_data<T>(place);
paddle::platform::GpuMemcpySync(dst, array.data(), sizeof(T) * array.size(),
platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); cudaMemcpyHostToDevice);
auto dev_ctx =
static_cast<const platform::CUDADeviceContext *>(pool.Get(place));
paddle::platform::GpuMemcpyAsync(dst, array.data(), sizeof(T) * array.size(),
cudaMemcpyHostToDevice, dev_ctx->stream());
// NOTE: For safety, here wait the copy complete.
// It because the CPU array.data() could be destroyed after this method.
// If we make this method async, it could be copied data from a memory buffer
// that has been freed.
dev_ctx->Wait();
} }
template <> template <>
...@@ -214,18 +199,9 @@ void PyCUDATensorSetFromArray( ...@@ -214,18 +199,9 @@ void PyCUDATensorSetFromArray(
self->Resize(framework::make_ddim(dims)); self->Resize(framework::make_ddim(dims));
auto *dst = self->mutable_data<platform::float16>(place); auto *dst = self->mutable_data<platform::float16>(place);
paddle::platform::GpuMemcpySync(dst, array.data(),
platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
auto dev_ctx =
static_cast<const platform::CUDADeviceContext *>(pool.Get(place));
paddle::platform::GpuMemcpyAsync(dst, array.data(),
sizeof(uint16_t) * array.size(), sizeof(uint16_t) * array.size(),
cudaMemcpyHostToDevice, dev_ctx->stream()); cudaMemcpyHostToDevice);
// NOTE: For safety, here wait the copy complete.
// It because the CPU array.data() could be destroyed after this method.
// If we make this method async, it could be copied data from a memory buffer
// that has been freed.
dev_ctx->Wait();
} }
template <typename T> template <typename T>
......
...@@ -155,7 +155,7 @@ EOF ...@@ -155,7 +155,7 @@ EOF
function gen_dockerfile() { function gen_dockerfile() {
# Set BASE_IMAGE according to env variables # Set BASE_IMAGE according to env variables
if [[ ${WITH_GPU} == "ON" ]]; then if [[ ${WITH_GPU} == "ON" ]]; then
BASE_IMAGE="nvidia/cuda:8.0-cudnn7-runtime-ubuntu16.04" BASE_IMAGE="nvidia/cuda:8.0-cudnn7-devel-ubuntu16.04"
else else
BASE_IMAGE="ubuntu:16.04" BASE_IMAGE="ubuntu:16.04"
fi fi
......
...@@ -208,8 +208,8 @@ EOF ...@@ -208,8 +208,8 @@ EOF
--platform=android-$ANDROID_API \ --platform=android-$ANDROID_API \
--install-dir=$ANDROID_STANDALONE_TOOLCHAIN --install-dir=$ANDROID_STANDALONE_TOOLCHAIN
BUILD_ROOT=${PADDLE_ROOT}/build BUILD_ROOT=${PADDLE_ROOT}/build_android
DEST_ROOT={PADDLE_ROOT}/install DEST_ROOT=${PADDLE_ROOT}/install_android
mkdir -p $BUILD_ROOT mkdir -p $BUILD_ROOT
cd $BUILD_ROOT cd $BUILD_ROOT
...@@ -349,13 +349,18 @@ function gen_docs() { ...@@ -349,13 +349,18 @@ function gen_docs() {
======================================== ========================================
EOF EOF
cmake .. \ cmake .. \
-DCMAKE_BUILD_TYPE=Release \
-DWITH_DOC=ON \ -DWITH_DOC=ON \
-DWITH_GPU=OFF \ -DWITH_GPU=OFF \
-DWITH_AVX=${WITH_AVX:-ON} \ -DWITH_MKL=OFF \
-DWITH_SWIG_PY=ON \
-DWITH_STYLE_CHECK=OFF -DWITH_STYLE_CHECK=OFF
make -j `nproc` paddle_docs paddle_apis make -j `nproc` paddle_docs paddle_apis
# check websites for broken links
linkchecker doc/v2/en/html/index.html
linkchecker doc/v2/cn/html/index.html
linkchecker doc/v2/api/en/html/index.html
} }
function gen_html() { function gen_html() {
......
...@@ -28,11 +28,16 @@ function start_build_docker() { ...@@ -28,11 +28,16 @@ function start_build_docker() {
docker rm -f "${CONTAINER_ID}" 1>/dev/null docker rm -f "${CONTAINER_ID}" 1>/dev/null
fi fi
apt_mirror='s#http://archive.ubuntu.com/ubuntu#mirror://mirrors.ubuntu.com/mirrors.txt#g'
DOCKER_ENV=$(cat <<EOL DOCKER_ENV=$(cat <<EOL
-e FLAGS_fraction_of_gpu_memory_to_use=0.15 \ -e FLAGS_fraction_of_gpu_memory_to_use=0.15 \
-e CTEST_OUTPUT_ON_FAILURE=1 \ -e CTEST_OUTPUT_ON_FAILURE=1 \
-e CTEST_PARALLEL_LEVEL=5 \ -e CTEST_PARALLEL_LEVEL=5 \
-e APT_MIRROR=${apt_mirror} \
-e WITH_GPU=ON \ -e WITH_GPU=ON \
-e CUDA_ARCH_NAME=Auto \
-e WITH_AVX=ON \
-e WITH_GOLANG=OFF \
-e WITH_TESTING=ON \ -e WITH_TESTING=ON \
-e WITH_C_API=OFF \ -e WITH_C_API=OFF \
-e WITH_COVERAGE=ON \ -e WITH_COVERAGE=ON \
...@@ -42,18 +47,23 @@ function start_build_docker() { ...@@ -42,18 +47,23 @@ function start_build_docker() {
-e PADDLE_FRACTION_GPU_MEMORY_TO_USE=0.15 \ -e PADDLE_FRACTION_GPU_MEMORY_TO_USE=0.15 \
-e CUDA_VISIBLE_DEVICES=0,1 \ -e CUDA_VISIBLE_DEVICES=0,1 \
-e WITH_DISTRIBUTE=ON \ -e WITH_DISTRIBUTE=ON \
-e WITH_FLUID_ONLY=ON \
-e RUN_TEST=ON -e RUN_TEST=ON
EOL EOL
) )
DOCKER_CMD="nvidia-docker"
if ! [ -x "$(command -v ${DOCKER_CMD})" ]; then
DOCKER_CMD="docker"
fi
set -x set -x
nvidia-docker run -it \ ${DOCKER_CMD} run -it \
-d \
--name $CONTAINER_ID \ --name $CONTAINER_ID \
${DOCKER_ENV} \ ${DOCKER_ENV} \
-v $PADDLE_ROOT:/paddle \ -v $PADDLE_ROOT:/paddle \
-w /paddle \ -w /paddle \
$IMG \ $IMG \
/bin/bash paddle/scripts/paddle_build.sh $@
set +x set +x
} }
...@@ -67,23 +77,7 @@ function main() { ...@@ -67,23 +77,7 @@ function main() {
VERSION="latest-dev-android" VERSION="latest-dev-android"
fi fi
IMG=${DOCKER_REPO}:${VERSION} IMG=${DOCKER_REPO}:${VERSION}
start_build_docker $@
case $1 in
start)
start_build_docker
;;
build_android)
start_build_docker
docker exec ${CONTAINER_ID} bash -c "./paddle/scripts/paddle_build.sh $@"
*)
if container_running "${CONTAINER_ID}"; then
docker exec ${CONTAINER_ID} bash -c "./paddle/scripts/paddle_build.sh $@"
else
echo "Please start container first, with command:"
echo "$0 start"
fi
;;
esac
} }
main $@ main $@
...@@ -12,10 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,10 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include <chrono> #include <gflags/gflags.h> // NOLINT
#include <gtest/gtest.h> // NOLINT
#include <gflags/gflags.h>
#include <gtest/gtest.h>
#include "paddle/utils/CustomStackTrace.h" #include "paddle/utils/CustomStackTrace.h"
#include "paddle/utils/Locks.h" #include "paddle/utils/Locks.h"
...@@ -39,12 +37,8 @@ void testNormalImpl( ...@@ -39,12 +37,8 @@ void testNormalImpl(
threads.reserve(FLAGS_test_thread_num); threads.reserve(FLAGS_test_thread_num);
for (int32_t i = 0; i < FLAGS_test_thread_num; ++i) { for (int32_t i = 0; i < FLAGS_test_thread_num; ++i) {
threads.emplace_back(new std::thread([&tracer, threads.emplace_back(
&countDown, new std::thread([&tracer, &startBarrier, &doneBarrier, &callback] {
&layerSize,
&startBarrier,
&doneBarrier,
&callback] {
callback(tracer, countDown, layerSize, startBarrier, doneBarrier); callback(tracer, countDown, layerSize, startBarrier, doneBarrier);
})); }));
} }
......
...@@ -20,6 +20,16 @@ from framework import * ...@@ -20,6 +20,16 @@ from framework import *
import executor import executor
from executor import * from executor import *
import trainer
from trainer import Trainer
from trainer import Event
import inferencer
from inferencer import Inferencer
import params
from params import Params
import io import io
import evaluator import evaluator
import initializer import initializer
...@@ -47,7 +57,8 @@ from parallel_executor import ParallelExecutor ...@@ -47,7 +57,8 @@ from parallel_executor import ParallelExecutor
Tensor = LoDTensor Tensor = LoDTensor
__all__ = framework.__all__ + executor.__all__ + concurrency.__all__ + [ __all__ = framework.__all__ + executor.__all__ + concurrency.__all__ +\
trainer.__all__ + inferencer.__all__ + params.__all__ + [
'io', 'io',
'initializer', 'initializer',
'layers', 'layers',
...@@ -111,7 +122,9 @@ def __bootstrap__(): ...@@ -111,7 +122,9 @@ def __bootstrap__():
'eager_delete_scope' 'eager_delete_scope'
] ]
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
read_env_flags += ['fraction_of_gpu_memory_to_use'] read_env_flags += [
'fraction_of_gpu_memory_to_use', 'cudnn_algo_use_autotune'
]
core.init_gflags([sys.argv[0]] + core.init_gflags([sys.argv[0]] +
["--tryfromenv=" + ",".join(read_env_flags)]) ["--tryfromenv=" + ",".join(read_env_flags)])
core.init_glog(sys.argv[0]) core.init_glog(sys.argv[0])
......
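The hunk above adds 'cudnn_algo_use_autotune' to the GPU-only read_env_flags list, and __bootstrap__ hands these names to gflags through --tryfromenv, which reads FLAGS_-prefixed environment variables. A minimal sketch of toggling the new flag, assuming it is a boolean flag (the FLAGS_ prefix matches FLAGS_fraction_of_gpu_memory_to_use used elsewhere in this change):
import os
# The variables must be set before paddle.fluid is imported, because
# __bootstrap__ reads the environment only once at import time.
os.environ['FLAGS_cudnn_algo_use_autotune'] = 'false'
os.environ['FLAGS_fraction_of_gpu_memory_to_use'] = '0.5'
import paddle.fluid as fluid  # __bootstrap__ picks up the FLAGS_* values above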
...@@ -143,7 +143,8 @@ class DistributeTranspiler: ...@@ -143,7 +143,8 @@ class DistributeTranspiler:
program=None, program=None,
pservers="127.0.0.1:6174", pservers="127.0.0.1:6174",
trainers=1, trainers=1,
split_method=splitter.round_robin): split_method=splitter.round_robin,
sync_mode=True):
""" """
Transpile the program to distributed data-parallelism programs. Transpile the program to distributed data-parallelism programs.
The main_program will be transformed to use a remote parameter server The main_program will be transformed to use a remote parameter server
...@@ -184,6 +185,9 @@ class DistributeTranspiler: ...@@ -184,6 +185,9 @@ class DistributeTranspiler:
:param split_method: A function to determine how to split variables :param split_method: A function to determine how to split variables
to different servers equally. to different servers equally.
:type split_method: function :type split_method: function
:type sync_mode: bool, default True
:param sync_mode: if sync_mode is set to True, the dist transpiler will
transpile the program into synchronous-mode pserver and trainer programs.
""" """
assert (callable(split_method)) assert (callable(split_method))
if program is None: if program is None:
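A hedged usage sketch of the new sync_mode switch, not part of the diff: the leading positional arguments of transpile() in this release (optimize_ops, params_grads, trainer_id) are an assumption here, and only the keyword arguments visible above are taken from the source.
import paddle.fluid as fluid

t = fluid.DistributeTranspiler()
# optimize_ops and params_grads are assumed to come from
# optimizer.minimize(avg_cost); sync_mode=False asks for the asynchronous
# pserver/trainer programs, i.e. no per-trainer gradient aggregation.
t.transpile(
    optimize_ops,
    params_grads,
    trainer_id=0,
    pservers="192.168.0.1:6174,192.168.0.2:6174",
    trainers=2,
    sync_mode=False)
pserver_prog = t.get_pserver_program("192.168.0.1:6174")
trainer_prog = t.get_trainer_program()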
...@@ -191,6 +195,7 @@ class DistributeTranspiler: ...@@ -191,6 +195,7 @@ class DistributeTranspiler:
self.origin_program = program self.origin_program = program
self.trainer_num = trainers self.trainer_num = trainers
self.optimize_ops = optimize_ops self.optimize_ops = optimize_ops
self.sync_mode = sync_mode
# TODO(typhoonzero): currently trainer_id is fetched from cluster system # TODO(typhoonzero): currently trainer_id is fetched from cluster system
# like Kubernetes, we should port this to use etcd later when developing # like Kubernetes, we should port this to use etcd later when developing
# fluid distributed training with fault-tolerance. # fluid distributed training with fault-tolerance.
...@@ -295,8 +300,11 @@ class DistributeTranspiler: ...@@ -295,8 +300,11 @@ class DistributeTranspiler:
inputs={"X": send_inputs}, inputs={"X": send_inputs},
outputs={"Out": send_outputs, outputs={"Out": send_outputs,
"RPCClient": rpc_client_var}, "RPCClient": rpc_client_var},
attrs={"endpoints": pserver_endpoints, attrs={
"epmap": eplist}) "endpoints": pserver_endpoints,
"epmap": eplist,
"sync_mode": self.sync_mode
})
# step4: Concat the parameters splits together after recv. # step4: Concat the parameters splits together after recv.
for varname, splited_var in param_var_mapping.iteritems(): for varname, splited_var in param_var_mapping.iteritems():
if len(splited_var) <= 1: if len(splited_var) <= 1:
...@@ -356,7 +364,7 @@ class DistributeTranspiler: ...@@ -356,7 +364,7 @@ class DistributeTranspiler:
type=v.type, type=v.type,
dtype=v.dtype, dtype=v.dtype,
shape=v.shape) shape=v.shape)
if self.trainer_num > 1: if self.sync_mode and self.trainer_num > 1:
for trainer_id in xrange(self.trainer_num): for trainer_id in xrange(self.trainer_num):
var = pserver_program.global_block().create_var( var = pserver_program.global_block().create_var(
name="%s.trainer_%d" % (orig_var_name, trainer_id), name="%s.trainer_%d" % (orig_var_name, trainer_id),
...@@ -402,13 +410,13 @@ class DistributeTranspiler: ...@@ -402,13 +410,13 @@ class DistributeTranspiler:
for op in self.optimize_ops: for op in self.optimize_ops:
if op.type == "scale": if op.type == "scale":
for in_name in op.input_arg_names: for in_name in op.input_arg_names:
if in_name.startswith("beta1_pow_acc") or\ if in_name.startswith("beta1_pow_acc") or \
in_name.startswith("beta2_pow_acc"): in_name.startswith("beta2_pow_acc"):
global_ops.append(op) global_ops.append(op)
def __append_optimize_op__(op, block): def __append_optimize_op__(op, block, grad_to_block_id):
if self._is_opt_op(op): if self._is_opt_op(op):
self._append_pserver_ops(block, op, endpoint, self._append_pserver_ops(block, op, endpoint, grad_to_block_id,
default_main_program()) default_main_program())
else: else:
self._append_pserver_non_opt_ops(block, op) self._append_pserver_non_opt_ops(block, op)
...@@ -422,21 +430,22 @@ class DistributeTranspiler: ...@@ -422,21 +430,22 @@ class DistributeTranspiler:
self._append_pserver_non_opt_ops(lr_decay_block, op) self._append_pserver_non_opt_ops(lr_decay_block, op)
# append op to the current block # append op to the current block
grad_to_block_id = []
pre_block_idx = pserver_program.num_blocks - 1 pre_block_idx = pserver_program.num_blocks - 1
for idx, opt_op in enumerate(opt_op_on_pserver): for idx, opt_op in enumerate(opt_op_on_pserver):
per_opt_block = pserver_program.create_block(pre_block_idx) per_opt_block = pserver_program.create_block(pre_block_idx)
for _, op in enumerate(self.optimize_ops): for _, op in enumerate(self.optimize_ops):
# optimizer is connected to itself # optimizer is connected to itself
if ufind.is_connected(op, opt_op) and op not in global_ops: if ufind.is_connected(op, opt_op) and op not in global_ops:
__append_optimize_op__(op, per_opt_block) __append_optimize_op__(op, per_opt_block, grad_to_block_id)
# append global ops # append global ops
opt_state_block = None
if global_ops: if global_ops:
opt_state_block = pserver_program.create_block( opt_state_block = pserver_program.create_block(
pserver_program.num_blocks - 1) pserver_program.num_blocks - 1)
for glb_op in global_ops: for glb_op in global_ops:
__append_optimize_op__(glb_op, opt_state_block) __append_optimize_op__(glb_op, opt_state_block,
grad_to_block_id)
# NOT USED: single block version: # NOT USED: single block version:
# #
...@@ -472,7 +481,9 @@ class DistributeTranspiler: ...@@ -472,7 +481,9 @@ class DistributeTranspiler:
"OptimizeBlock": pserver_program.block(1), "OptimizeBlock": pserver_program.block(1),
"endpoint": endpoint, "endpoint": endpoint,
"Fanin": self.trainer_num, "Fanin": self.trainer_num,
"PrefetchBlock": prefetch_block "PrefetchBlock": prefetch_block,
"sync_mode": self.sync_mode,
"grad_to_block_id": grad_to_block_id
}) })
pserver_program.sync_with_cpp() pserver_program.sync_with_cpp()
...@@ -683,6 +694,16 @@ class DistributeTranspiler: ...@@ -683,6 +694,16 @@ class DistributeTranspiler:
self.table_name)], self.table_name)],
persistable=False) persistable=False)
# create table optimize block in pserver program
table_opt_op = [
op for op in self.optimize_ops
if op.input("Param")[0] == self.table_name
][0]
table_opt_block = pserver_program.create_block(pre_block_idx)
# only support sgd now
assert table_opt_op.type == "sgd"
if self.sync_mode:
# create grad vars in pserver program # create grad vars in pserver program
table_grad_var = self.table_param_grad[1] table_grad_var = self.table_param_grad[1]
table_grad_list = [ table_grad_list = [
...@@ -691,18 +712,10 @@ class DistributeTranspiler: ...@@ -691,18 +712,10 @@ class DistributeTranspiler:
(table_grad_var.name, index, pserver_index), (table_grad_var.name, index, pserver_index),
type=table_grad_var.type, type=table_grad_var.type,
shape=table_grad_var.shape, shape=table_grad_var.shape,
dtype=table_grad_var.dtype) for index in range(self.trainer_num) dtype=table_grad_var.dtype)
for index in range(self.trainer_num)
] ]
# create table optimize block in pserver program
table_opt_op = [
op for op in self.optimize_ops
if op.input("Param")[0] == self.table_name
][0]
table_opt_block = pserver_program.create_block(pre_block_idx)
# only support sgd now
assert table_opt_op.type == "sgd"
# append sum op for table_grad_list # append sum op for table_grad_list
table_opt_block.append_op( table_opt_block.append_op(
type="sum", type="sum",
...@@ -746,7 +759,7 @@ class DistributeTranspiler: ...@@ -746,7 +759,7 @@ class DistributeTranspiler:
for varname, splited in block_map.iteritems(): for varname, splited in block_map.iteritems():
orig_var = program.global_block().var(varname) orig_var = program.global_block().var(varname)
if len(splited) == 1: if len(splited) == 1:
if add_trainer_suffix: if self.sync_mode and add_trainer_suffix:
new_var_name = "%s.trainer_%d" % \ new_var_name = "%s.trainer_%d" % \
(orig_var.name, self.trainer_id) (orig_var.name, self.trainer_id)
program.global_block().rename_var(varname, new_var_name) program.global_block().rename_var(varname, new_var_name)
...@@ -770,7 +783,7 @@ class DistributeTranspiler: ...@@ -770,7 +783,7 @@ class DistributeTranspiler:
if len(orig_shape) >= 2: if len(orig_shape) >= 2:
splited_shape.extend(orig_shape[1:]) splited_shape.extend(orig_shape[1:])
new_var_name = "" new_var_name = ""
if add_trainer_suffix: if self.sync_mode and add_trainer_suffix:
new_var_name = "%s.block%d.trainer_%d" % \ new_var_name = "%s.block%d.trainer_%d" % \
(varname, i, self.trainer_id) (varname, i, self.trainer_id)
else: else:
...@@ -879,7 +892,7 @@ class DistributeTranspiler: ...@@ -879,7 +892,7 @@ class DistributeTranspiler:
return orig_var_name return orig_var_name
def _append_pserver_ops(self, optimize_block, opt_op, endpoint, def _append_pserver_ops(self, optimize_block, opt_op, endpoint,
origin_program): grad_to_block_id, origin_program):
program = optimize_block.program program = optimize_block.program
pserver_block = program.global_block() pserver_block = program.global_block()
new_inputs = dict() new_inputs = dict()
...@@ -900,7 +913,9 @@ class DistributeTranspiler: ...@@ -900,7 +913,9 @@ class DistributeTranspiler:
return return
merged_var = \ merged_var = \
pserver_block.vars[self._orig_varname(grad_block.name)] pserver_block.vars[self._orig_varname(grad_block.name)]
if self.trainer_num > 1: grad_to_block_id.append(merged_var.name + ":" + str(
optimize_block.idx))
if self.sync_mode and self.trainer_num > 1:
vars2merge = [] vars2merge = []
for i in xrange(self.trainer_num): for i in xrange(self.trainer_num):
per_trainer_name = "%s.trainer_%d" % \ per_trainer_name = "%s.trainer_%d" % \
...@@ -918,6 +933,7 @@ class DistributeTranspiler: ...@@ -918,6 +933,7 @@ class DistributeTranspiler:
inputs={"X": merged_var}, inputs={"X": merged_var},
outputs={"Out": merged_var}, outputs={"Out": merged_var},
attrs={"scale": 1.0 / float(self.trainer_num)}) attrs={"scale": 1.0 / float(self.trainer_num)})
new_inputs[key] = merged_var new_inputs[key] = merged_var
elif key == "Param": elif key == "Param":
# param is already created on global program # param is already created on global program
......
...@@ -658,10 +658,10 @@ class Operator(object): ...@@ -658,10 +658,10 @@ class Operator(object):
class Block(object): class Block(object):
def __init__(self, program, idx): def __init__(self, program, idx):
self.desc = program.desc.block(idx) self.desc = program.desc.block(idx)
self.vars = dict() # var_name --> var self.vars = collections.OrderedDict() # var_name --> var
self.ops = list() # operator list self.ops = list() # operator list
self.program = program self.program = program
self.removed_vars = dict() self.removed_vars = collections.OrderedDict()
def __str__(self): def __str__(self):
return self.to_string(True) return self.to_string(True)
...@@ -1070,16 +1070,25 @@ class Program(object): ...@@ -1070,16 +1070,25 @@ class Program(object):
for t in targets: for t in targets:
if not isinstance(t, Operator): if not isinstance(t, Operator):
if isinstance(t, Variable): if isinstance(t, Variable):
if t.op is None: # After transpiler processing, the op that outputs this
# variable may have been changed, so t.op is not reliable
# and we need to find the current op that generates this
# variable here.
t.op = None
global_block = self.global_block() global_block = self.global_block()
for op in global_block.ops: for idx, op in enumerate(global_block.ops):
if t.name in op.output_arg_names: if t.name in op.output_arg_names:
t.op = op t.op = op
break break
t = t.op t = t.op
if t is None:
raise ValueError(
"The target variable must have an "
"associated operator that generates it.")
else: else:
raise ValueError(("All targets of prune() can only be " raise ValueError("All targets of prune() can only be "
"Variable or Operator.")) "Variable or Operator.")
targets_idx.append([t.block.idx, t.idx]) targets_idx.append([t.block.idx, t.idx])
res = Program() res = Program()
......
...@@ -121,7 +121,60 @@ class InferenceTranspiler: ...@@ -121,7 +121,60 @@ class InferenceTranspiler:
# And a better solution will be considered later. # And a better solution will be considered later.
program = program.clone() program = program.clone()
def float16_transpile(self, program, place, scope=None):
'''
Transpile the program desc and cast the weights to float16 data type to
enable float16 inference.
Since each operator in a program desc automatically chooses the
right compute kernel to run based on the data type of its input tensors,
we do not actually need to change the program desc to run in float16 mode.
However, in that case, users who are used to feeding and fetching tensors
of float32 data type when running typical inference may find it confusing
and difficult to run inference in float16 mode, as they would need to convert
the input data to float16 dtype and then convert the results back to float32
dtype to match the rest of the code.
So this function appends cast ops to the program desc where necessary so
that users are able to run inference in float16 mode while providing input
tensor (feed_holder) of float data type and obtaining output tensor
(fetch_holder) of float data type.
Moreover, it is desirable that, given the scope and program desc used to run
inference in float32 mode, a single API call performs the necessary
modifications so that the user can run float16 inference on the fly. To make
this happen, this function also creates new parameters in the scope to hold the
converted float16 weights and changes the operators in the program desc to use
these new parameters.
:param program: program to transpile
:type program: Program
:param place: inference place
:type place: Place
:param scope: inference scope
:type scope: Scope
'''
if scope is None:
scope = global_scope()
self.scope = scope
self.place = place
self.block = program.block(0)
self.input_map = {} # store the input names should be adjusted
self._modify_feed_fetch()
self._convert_param_to_float16()
self._adjust_input(skip=True)
self._remove_unused_var()
# TODO(luotao): use clone() method to flush the program.desc in force,
# since some large program.desc will not be flushed immediately.
# And a better solution will be considered later.
program = program.clone()
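A hedged sketch of the intended float16 inference workflow, mirroring the image-classification test further below; fluid.InferenceTranspiler, load_inference_model and is_float16_supported are assumed to be the entry points of this release, and dirname/tensor_img stand in for a saved model directory and a float32 input batch:
import paddle.fluid as fluid

place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
[program, feed_names, fetch_targets] = fluid.io.load_inference_model(dirname, exe)

if fluid.core.is_float16_supported(place):
    t = fluid.InferenceTranspiler()
    fp16_program = program.clone()
    t.float16_transpile(fp16_program, place)
    # Feed and fetch still use float32 tensors; the transpiler inserted the casts.
    results = exe.run(fp16_program,
                      feed={feed_names[0]: tensor_img},
                      fetch_list=fetch_targets)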
# ====================== private transpiler functions ===================== # ====================== private transpiler functions =====================
def _insert_bias_op(self, index, current_op, bn_op): def _insert_bias_op(self, index, current_op, bn_op):
''' '''
Construct elementwise_add operator for adding bias Construct elementwise_add operator for adding bias
...@@ -216,9 +269,27 @@ class InferenceTranspiler: ...@@ -216,9 +269,27 @@ class InferenceTranspiler:
# collect the renamed input # collect the renamed input
self.input_map[bn_op.output("Y")[0]] = bias_op.output("Out")[0] self.input_map[bn_op.output("Y")[0]] = bias_op.output("Out")[0]
def _adjust_input(self): def _adjust_input(self, skip=False):
'''
Change the input variable names in operators.
When we are in the process of modifying a program desc, we usually
replace some variables with some other variables, and we create a
dictionary input_map to record the one-to-one correspondence
between each old variable and the new one.
After that, this function searches all the operators that use the
old variables and changes their inputs to the new variables. There
may be some exceptions to this rule when we are using the float16 transpiler
and insert cast ops to cast a float32 variable to a float16 one. After we
insert the cast op that casts var_1 to var_1_fp16, we don't want this function
to change the input of that cast op to var_1_fp16.
'''
skip_ops = {"cast"}
for i in range(len(self.block.ops)): for i in range(len(self.block.ops)):
current_op = self.block.ops[i] current_op = self.block.ops[i]
if skip and current_op.type in skip_ops:
continue
for input_arg in current_op.input_arg_names: for input_arg in current_op.input_arg_names:
if input_arg in self.input_map: if input_arg in self.input_map:
current_op.rename_input(input_arg, current_op.rename_input(input_arg,
...@@ -238,3 +309,138 @@ class InferenceTranspiler: ...@@ -238,3 +309,138 @@ class InferenceTranspiler:
for var in self.block.vars.keys(): for var in self.block.vars.keys():
if var not in args: if var not in args:
self.block.remove_var(var) self.block.remove_var(var)
def _modify_feed_fetch(self):
'''
Modify feed fetch op/vars for float16 inference.
For each feed op:
feed_op->feed_target_var
Change it to:
feed_op->feed_target_var->cast_op(from other dtype to float16)->tmp_var
For each fetch op:
fetch_target_var->fetch_op
Change it to:
tmp_var->cast_op(from float16 to other dtype)->fetch_target_var->fetch_op
:return: None
'''
def find_op(var):
# It is possible that var.op is not up to date after some
# modifications to the program desc, so here we recompute it to make it up to date.
var.op = None
for op in self.block.ops:
if var.name in op.output_arg_names:
var.op = op
break
if var.op is None:
raise ValueError("The target variable must have an "
"associated operator that generates it.")
i = 0
while i < len(self.block.ops):
cur_op = self.block.ops[i]
if cur_op.type == "feed":
var_name = cur_op.output("Out")[0]
tmp_var_name = var_name + ".fp16"
var = self.block.vars[var_name]
tmp_var = self.block.create_var(
name=tmp_var_name.encode('ascii'),
type=var.type,
dtype=core.VarDesc.VarType.FP16,
shape=var.shape,
persistable=var.persistable)
self.block.insert_op(
i + 1,
type="cast",
inputs={"X": var},
outputs={"Out": tmp_var},
attrs={
'in_dtype': int(var.dtype),
'out_dtype': int(tmp_var.dtype)
})
self.input_map[var_name] = tmp_var_name
i = i + 1
elif cur_op.type == "fetch":
var_name = cur_op.input("X")[0]
tmp_var_name = var_name + ".fp16"
var = self.block.vars[var_name]
tmp_var = self.block.create_var(
name=tmp_var_name.encode('ascii'),
type=var.type,
dtype=core.VarDesc.VarType.FP16,
shape=var.shape,
persistable=var.persistable)
find_op(var)
var.op.rename_output(var_name, tmp_var_name)
self.block.insert_op(
i,
type="cast",
inputs={"X": tmp_var},
outputs={"Out": var},
attrs={
'in_dtype': int(tmp_var.dtype),
'out_dtype': int(var.dtype)
})
i = i + 1
i = i + 1
def _convert_param_to_float16(self):
def _get_no_fp16_conversion_var_names():
'''
Get the set of input variable names that shouldn't be converted to float16.
When we want to run inference in float16 mode, most parameters need to be
first converted to float16. However, there are some parameters that
shouldn't be converted to float16 because the corresponding operator
requires float32 parameters even in float16 mode (when the input data is
of float16 data type). Currently, the only operator that has this exclusion
is the batch norm op.
:return: set of input variable names
:type var_names: set
'''
op_names = {'batch_norm'}
var_names = []
for op in self.block.ops:
if op.type in op_names:
var_names += op.input_arg_names
return set(var_names)
def _should_be_converted(var):
return var.persistable and \
var.name not in self.no_conversion_vars and \
var.type != core.VarDesc.VarType.FEED_MINIBATCH and \
var.type != core.VarDesc.VarType.FETCH_LIST
self.no_conversion_vars = _get_no_fp16_conversion_var_names()
conversion_var_list = filter(_should_be_converted,
self.block.vars.values())
for var in conversion_var_list:
fp16_var_name = var.name + ".fp16"
fp16_var = self.block.create_parameter(
name=fp16_var_name.encode('ascii'),
type=var.type,
dtype=core.VarDesc.VarType.FP16,
shape=var.shape)
# cast the data in the tensor of the original var to float16
# data type and store it in the tensor of the new float16 var
self.scope.var(fp16_var_name)
fp16_tensor = self.scope.find_var(fp16_var_name).get_tensor()
tensor = np.array(self.scope.find_var(var.name).get_tensor())
# After the old tensor data is converted to np.float16, view(np.uint16)
# is used so that the internal memory of the numpy array is
# reinterpreted as np.uint16 data, which is bound to the fluid
# float16 data type with the help of pybind in tensor_py.h.
fp16_tensor.set(
tensor.astype(np.float16).view(np.uint16), self.place)
# old var will be replaced by the fp16 var in program desc
self.input_map[var.name] = fp16_var_name
self.block.remove_var(var.name)
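The comment above relies on numpy's view() reinterpreting bytes rather than converting values; a small self-contained sketch of that distinction (not part of the transpiler):
import numpy as np

w = np.array([1.0, 0.5, -2.0], dtype=np.float32)
w_fp16 = w.astype(np.float16)       # numeric conversion, the bits change
w_as_u16 = w_fp16.view(np.uint16)   # reinterpretation only, the bytes are identical
assert w_as_u16.dtype == np.uint16
assert w_fp16.tobytes() == w_as_u16.tobytes()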
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['Inferencer', ]
class Inferencer(object):
def __init__(self, network_func, params, place=None):
# 1. we need to generate a framework.Program by calling
# network_func. Reference: fluid.program_guard in test_word2vec.py
# 2. move the default_main_program to self.program.
# 3. run the default_startup program.
self.params = params
self.place = place
def infer(self, inputs):
# run self.program
pass
...@@ -336,7 +336,7 @@ def save_inference_model(dirname, ...@@ -336,7 +336,7 @@ def save_inference_model(dirname,
if main_program is None: if main_program is None:
main_program = default_main_program() main_program = default_main_program()
copy_program = main_program copy_program = main_program.clone()
if not os.path.isdir(dirname): if not os.path.isdir(dirname):
os.makedirs(dirname) os.makedirs(dirname)
......
...@@ -79,6 +79,7 @@ __all__ = [ ...@@ -79,6 +79,7 @@ __all__ = [
'lrn', 'lrn',
'pad', 'pad',
'label_smooth', 'label_smooth',
'roi_pool',
] ]
...@@ -3759,3 +3760,53 @@ def label_smooth(label, ...@@ -3759,3 +3760,53 @@ def label_smooth(label,
outputs={"Out": smooth_label}, outputs={"Out": smooth_label},
attrs={"epsilon": float(epsilon)}) attrs={"epsilon": float(epsilon)})
return smooth_label return smooth_label
def roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0):
"""
Region of interest pooling (also known as RoI pooling) performs
max pooling on inputs of nonuniform sizes to obtain
fixed-size feature maps (e.g. 7*7).
The operator has three steps:
1. Dividing each region proposal into equal-sized sections with
the pooled_width and pooled_height
2. Finding the largest value in each section
3. Copying these max values to the output buffer
Args:
input (Variable): The input for ROI pooling.
rois (Variable): ROIs (Regions of Interest) to pool over. It should
be a 2-D LoDTensor with one LoD level, of shape [num_rois, 4].
The layout is [x1, y1, x2, y2], where (x1, y1)
is the top left coordinates, and (x2, y2) is the
bottom right coordinates. The num_rois is the
total number of ROIs in this batch data.
pooled_height (integer): The pooled output height. Default: 1
pooled_width (integer): The pooled output width. Default: 1
spatial_scale (float): Multiplicative spatial scale factor. To
translate ROI coords from their input scale
to the scale used when pooling. Default: 1.0
Returns:
pool_out (Variable): The output is a 4-D tensor of the shape
(num_rois, channels, pooled_h, pooled_w).
Examples:
pool_out = fluid.layers.roi_pool(input=x, rois=rois, pooled_height=7, pooled_width=7, spatial_scale=1.0)
"""
helper = LayerHelper('roi_pool', **locals())
dtype = helper.input_dtype()
pool_out = helper.create_tmp_variable(dtype)
argmaxes = helper.create_tmp_variable(dtype='int32')
helper.append_op(
type="roi_pool",
inputs={"X": input,
"ROIs": rois},
outputs={"Out": pool_out,
"Argmax": argmaxes},
attrs={
"pooled_height": pooled_height,
"pooled_width": pooled_width,
"spatial_scale": spatial_scale
})
return pool_out
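A short usage sketch of the new layer, consistent with the unit test added later in this change; x is a conv feature map and rois is a one-level LoD tensor of [x1, y1, x2, y2] boxes:
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[256, 30, 30], dtype='float32')
rois = fluid.layers.data(name='rois', shape=[4], dtype='float32', lod_level=1)
# Output shape is (num_rois, 256, 7, 7); spatial_scale maps the ROI coordinates
# onto the scale of the feature map x.
pool_out = fluid.layers.roi_pool(
    input=x, rois=rois, pooled_height=7, pooled_width=7, spatial_scale=0.6)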
...@@ -193,10 +193,7 @@ def assign(input, output): ...@@ -193,10 +193,7 @@ def assign(input, output):
helper = LayerHelper('assign', **locals()) helper = LayerHelper('assign', **locals())
if isinstance(input, Variable): if isinstance(input, Variable):
helper.append_op( helper.append_op(
type='scale', type='assign', inputs={'X': [input]}, outputs={'Out': [output]})
inputs={'X': [input]},
outputs={'Out': [output]},
attrs={'scale': 1.0})
elif isinstance(input, numpy.ndarray): elif isinstance(input, numpy.ndarray):
dtype = convert_np_dtype_to_dtype_(input.dtype) dtype = convert_np_dtype_to_dtype_(input.dtype)
if dtype == VarDesc.VarType.FP32: if dtype == VarDesc.VarType.FP32:
......
...@@ -30,7 +30,7 @@ class ParallelExecutor(object): ...@@ -30,7 +30,7 @@ class ParallelExecutor(object):
num_threads=None, num_threads=None,
allow_op_delay=False, allow_op_delay=False,
share_vars_from=None, share_vars_from=None,
customize_loss_grad=False): use_default_grad_scale=True):
""" """
ParallelExecutor can run program in parallel. ParallelExecutor can run program in parallel.
...@@ -46,6 +46,11 @@ class ParallelExecutor(object): ...@@ -46,6 +46,11 @@ class ParallelExecutor(object):
improve performance in some cases, default False. improve performance in some cases, default False.
share_vars_from(ParallelExecutor, default None): If provided, share_vars_from(ParallelExecutor, default None): If provided,
it will share variables from the specified ParallelExecutor. it will share variables from the specified ParallelExecutor.
use_default_grad_scale(bool, default True): If set to True, a default
scale value equal to `1./device_count` is multiplied into the
gradients of each device before the scaled gradients are
aggregated. Otherwise, a customized scale value should be fed
to the network.
Returns: Returns:
A ParallelExecutor object. A ParallelExecutor object.
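A hedged, self-contained sketch of the renamed argument; use_cuda, loss_name and the square_error_cost/SGD helpers are assumptions about this release, while use_default_grad_scale and run(fetch_list, feed=...) are taken from the surrounding diff:
import numpy
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[4], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
pred = fluid.layers.fc(input=x, size=1)
avg_cost = fluid.layers.mean(fluid.layers.square_error_cost(input=pred, label=y))
fluid.optimizer.SGD(learning_rate=0.01).minimize(avg_cost)

place = fluid.CUDAPlace(0)
fluid.Executor(place).run(fluid.default_startup_program())

# With use_default_grad_scale=True each device's gradient is scaled by
# 1/device_count before aggregation; pass False to feed a customized scale.
pe = fluid.ParallelExecutor(use_cuda=True,
                            loss_name=avg_cost.name,
                            use_default_grad_scale=True)
loss, = pe.run(fetch_list=[avg_cost.name],
               feed={'x': numpy.random.rand(8, 4).astype('float32'),
                     'y': numpy.random.rand(8, 1).astype('float32')})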
...@@ -124,7 +129,7 @@ class ParallelExecutor(object): ...@@ -124,7 +129,7 @@ class ParallelExecutor(object):
scope, scope,
local_scopes, local_scopes,
allow_op_delay, allow_op_delay,
customize_loss_grad) use_default_grad_scale)
self.scope = scope self.scope = scope
def run(self, fetch_list, feed=None, feed_dict=None): def run(self, fetch_list, feed=None, feed_dict=None):
......
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import core
__all__ = ['Params', ]
class Params(object):
def __init__(self, path=None):
self.scope = core.Scope()
if path:
self._load(path)
def _load(self, path):
# reference: load_persistables in io.py
pass
def save(self, path):
# reference: save_persistables in io.py
pass
def add_params(self, scope):
# take the keys from the scope,
# if not already exists in self.scope,
# add the key and value into self.scope.
pass
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle
import paddle.fluid as fluid
import numpy
def resnet_cifar10(input, depth=32):
def conv_bn_layer(input,
ch_out,
filter_size,
stride,
padding,
act='relu',
bias_attr=False):
tmp = fluid.layers.conv2d(
input=input,
filter_size=filter_size,
num_filters=ch_out,
stride=stride,
padding=padding,
act=None,
bias_attr=bias_attr)
return fluid.layers.batch_norm(input=tmp, act=act)
def shortcut(input, ch_in, ch_out, stride):
if ch_in != ch_out:
return conv_bn_layer(input, ch_out, 1, stride, 0, None)
else:
return input
def basicblock(input, ch_in, ch_out, stride):
tmp = conv_bn_layer(input, ch_out, 3, stride, 1)
tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None, bias_attr=True)
short = shortcut(input, ch_in, ch_out, stride)
return fluid.layers.elementwise_add(x=tmp, y=short, act='relu')
def layer_warp(block_func, input, ch_in, ch_out, count, stride):
tmp = block_func(input, ch_in, ch_out, stride)
for i in range(1, count):
tmp = block_func(tmp, ch_out, ch_out, 1)
return tmp
assert (depth - 2) % 6 == 0
n = (depth - 2) / 6
conv1 = conv_bn_layer(
input=input, ch_out=16, filter_size=3, stride=1, padding=1)
res1 = layer_warp(basicblock, conv1, 16, 16, n, 1)
res2 = layer_warp(basicblock, res1, 16, 32, n, 2)
res3 = layer_warp(basicblock, res2, 32, 64, n, 2)
pool = fluid.layers.pool2d(
input=res3, pool_size=8, pool_type='avg', pool_stride=1)
return pool
def inference_network():
classdim = 10
data_shape = [3, 32, 32]
images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
net = resnet_cifar10(images, 32)
predict = fluid.layers.fc(input=net, size=classdim, act='softmax')
return predict
def train_network():
predict = inference_network()
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(cost)
accuracy = fluid.layers.accuracy(input=predict, label=label)
return avg_cost, accuracy
def train(use_cuda, save_path):
BATCH_SIZE = 128
EPOCH_NUM = 1
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.cifar.train10(), buf_size=128 * 10),
batch_size=BATCH_SIZE)
test_reader = paddle.batch(
paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE)
def event_handler(event):
if isinstance(event, fluid.EndIteration):
if (event.batch_id % 10) == 0:
avg_cost, accuracy = trainer.test(reader=test_reader)
print('BatchID {0:04}, Loss {1:2.2}, Acc {2:2.2}'.format(
event.batch_id + 1, avg_cost, accuracy))
if accuracy > 0.01: # Low threshold for speeding up CI
trainer.params.save(save_path)
return
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
train_network,
optimizer=fluid.optimizer.Adam(learning_rate=0.001),
place=place,
event_handler=event_handler)
trainer.train(train_reader, EPOCH_NUM, event_handler=event_handler)
def infer(use_cuda, save_path):
params = fluid.Params(save_path)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(inference_network, params, place=place)
# The input's dimension of conv should be 4-D or 5-D.
# Use normalized image pixels as input data, which should be in the range
# [0, 1.0].
tensor_img = numpy.random.rand(1, 3, 32, 32).astype("float32")
results = inferencer.infer({'pixel': tensor_img})
print("infer results: ", results)
def main(use_cuda):
if use_cuda and not fluid.core.is_compiled_with_cuda():
return
save_path = "image_classification_resnet.inference.model"
train(use_cuda, save_path)
infer(use_cuda, save_path)
if __name__ == '__main__':
for use_cuda in (False, True):
main(use_cuda=use_cuda)
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle
import paddle.fluid as fluid
import numpy
def vgg16_bn_drop(input):
def conv_block(input, num_filter, groups, dropouts):
return fluid.nets.img_conv_group(
input=input,
pool_size=2,
pool_stride=2,
conv_num_filter=[num_filter] * groups,
conv_filter_size=3,
conv_act='relu',
conv_with_batchnorm=True,
conv_batchnorm_drop_rate=dropouts,
pool_type='max')
conv1 = conv_block(input, 64, 2, [0.3, 0])
conv2 = conv_block(conv1, 128, 2, [0.4, 0])
conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0])
conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])
drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5)
fc1 = fluid.layers.fc(input=drop, size=4096, act=None)
bn = fluid.layers.batch_norm(input=fc1, act='relu')
drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5)
fc2 = fluid.layers.fc(input=drop2, size=4096, act=None)
return fc2
def inference_network():
classdim = 10
data_shape = [3, 32, 32]
images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
net = vgg16_bn_drop(images)
predict = fluid.layers.fc(input=net, size=classdim, act='softmax')
return predict
def train_network():
predict = inference_network()
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(cost)
accuracy = fluid.layers.accuracy(input=predict, label=label)
return avg_cost, accuracy
def train(use_cuda, save_path):
BATCH_SIZE = 128
EPOCH_NUM = 1
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.cifar.train10(), buf_size=128 * 10),
batch_size=BATCH_SIZE)
test_reader = paddle.batch(
paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE)
def event_handler(event):
if isinstance(event, fluid.EndIteration):
if (event.batch_id % 10) == 0:
avg_cost, accuracy = trainer.test(reader=test_reader)
print('BatchID {0:04}, Loss {1:2.2}, Acc {2:2.2}'.format(
event.batch_id + 1, avg_cost, accuracy))
if accuracy > 0.01: # Low threshold for speeding up CI
trainer.params.save(save_path)
return
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
train_network,
optimizer=fluid.optimizer.Adam(learning_rate=0.001),
place=place,
event_handler=event_handler)
trainer.train(train_reader, EPOCH_NUM, event_handler=event_handler)
def infer(use_cuda, save_path):
params = fluid.Params(save_path)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(inference_network, params, place=place)
# The input's dimension of conv should be 4-D or 5-D.
# Use normalized image pixels as input data, which should be in the range
# [0, 1.0].
tensor_img = numpy.random.rand(1, 3, 32, 32).astype("float32")
results = inferencer.infer({'pixel': tensor_img})
print("infer results: ", results)
def main(use_cuda):
if use_cuda and not fluid.core.is_compiled_with_cuda():
return
save_path = "image_classification_vgg.inference.model"
train(use_cuda, save_path)
infer(use_cuda, save_path)
if __name__ == '__main__':
for use_cuda in (False, True):
main(use_cuda=use_cuda)
...@@ -252,6 +252,26 @@ def infer(use_cuda, save_dirname=None): ...@@ -252,6 +252,26 @@ def infer(use_cuda, save_dirname=None):
fetch_targets, exe, fetch_targets, exe,
inference_transpiler_program) inference_transpiler_program)
if use_cuda and fluid.core.is_float16_supported(place):
# Use float16_transpiler to speedup
fp16_transpiler_program = inference_transpiler_program.clone()
t.float16_transpile(fp16_transpiler_program, place)
fp16_results = exe.run(fp16_transpiler_program,
feed={feed_target_names[0]: tensor_img},
fetch_list=fetch_targets)
assert len(results[0]) == len(fp16_results[0])
for i in range(len(results[0])):
np.testing.assert_almost_equal(
results[0][i], fp16_results[0][i], decimal=2)
print("float16 infer results: ", fp16_results[0])
fluid.io.save_inference_model("float16_" + save_dirname,
feed_target_names, fetch_targets, exe,
fp16_transpiler_program)
def main(net_type, use_cuda, is_local=True): def main(net_type, use_cuda, is_local=True):
if use_cuda and not fluid.core.is_compiled_with_cuda(): if use_cuda and not fluid.core.is_compiled_with_cuda():
......
...@@ -275,10 +275,7 @@ class TestFP16BatchNormOpInference(TestBatchNormOpInference): ...@@ -275,10 +275,7 @@ class TestFP16BatchNormOpInference(TestBatchNormOpInference):
class TestBatchNormOpTraining(unittest.TestCase): class TestBatchNormOpTraining(unittest.TestCase):
def __assert_close(self, tensor, np_array, msg, atol=1e-4): def __assert_close(self, tensor, np_array, msg, atol=1e-4):
if not np.allclose(np.array(tensor), np_array, atol=atol): np.allclose(np.array(tensor), np_array, atol=atol)
import pdb
pdb.set_trace()
self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg)
def test_forward_backward(self): def test_forward_backward(self):
def test_with_place(place, data_layout, shape): def test_with_place(place, data_layout, shape):
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
import unittest import unittest
import numpy as np import numpy as np
import numpy.random as random
import sys import sys
import math import math
from op_test import OpTest from op_test import OpTest
...@@ -25,14 +26,27 @@ class TestIOUSimilarityOp(OpTest): ...@@ -25,14 +26,27 @@ class TestIOUSimilarityOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "iou_similarity" self.op_type = "iou_similarity"
self.boxes1 = np.array( self.boxes1 = random.rand(2, 4).astype('float32')
[[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]).astype('float32') self.boxes2 = random.rand(3, 4).astype('float32')
self.boxes2 = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], self.output = random.rand(2, 3).astype('float32')
[0.0, 0.0, 20.0, 20.0]]).astype('float32') for row in range(self.boxes1.shape[0]):
self.output = np.array( for col in range(self.boxes2.shape[0]):
[[2.0 / 16.0, 0, 6.0 / 400.0], xmin1, ymin1, xmax1, ymax1 = self.boxes1[row]
[1.0 / 16.0, 0.0, 5.0 / 400.0]]).astype('float32') xmin2, ymin2, xmax2, ymax2 = self.boxes2[col]
area1 = (ymax1 - ymin1) * (xmax1 - xmin1)
area2 = (ymax2 - ymin2) * (xmax2 - xmin2)
inter_xmax = min(xmax1, xmax2)
inter_ymax = min(ymax1, ymax2)
inter_xmin = max(xmin1, xmin2)
inter_ymin = max(ymin1, ymin2)
inter_height = inter_ymax - inter_ymin
inter_width = inter_xmax - inter_xmin
inter_height = max(inter_height, 0)
inter_width = max(inter_width, 0)
inter_area = inter_width * inter_height
union_area = area1 + area2 - inter_area
sim_score = inter_area / union_area
self.output[row, col] = sim_score
self.inputs = {'X': self.boxes1, 'Y': self.boxes2} self.inputs = {'X': self.boxes1, 'Y': self.boxes2}
self.outputs = {'Out': self.output} self.outputs = {'Out': self.output}
......
...@@ -359,6 +359,16 @@ class TestBook(unittest.TestCase): ...@@ -359,6 +359,16 @@ class TestBook(unittest.TestCase):
self.assertIsNotNone(indices) self.assertIsNotNone(indices)
print(str(program)) print(str(program))
def test_roi_pool(self):
program = Program()
with program_guard(program):
x = layers.data(name="x", shape=[256, 30, 30], dtype="float32")
rois = layers.data(
name="rois", shape=[4], dtype="float32", lod_level=1)
output = layers.roi_pool(x, rois, 7, 7, 0.6)
self.assertIsNotNone(output)
print(str(program))
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
...@@ -25,7 +25,7 @@ class TestROIPoolOp(OpTest): ...@@ -25,7 +25,7 @@ class TestROIPoolOp(OpTest):
self.make_rois() self.make_rois()
self.calc_roi_pool() self.calc_roi_pool()
self.inputs = {'X': self.x, 'ROIs': self.rois} self.inputs = {'X': self.x, 'ROIs': (self.rois[:, 1:5], self.rois_lod)}
self.attrs = { self.attrs = {
'spatial_scale': self.spatial_scale, 'spatial_scale': self.spatial_scale,
...@@ -36,7 +36,7 @@ class TestROIPoolOp(OpTest): ...@@ -36,7 +36,7 @@ class TestROIPoolOp(OpTest):
self.outputs = {'Out': self.outs, 'Argmax': self.argmaxes} self.outputs = {'Out': self.outs, 'Argmax': self.argmaxes}
def init_test_case(self): def init_test_case(self):
self.batch_size = 5 self.batch_size = 3
self.channels = 3 self.channels = 3
self.height = 6 self.height = 6
self.width = 4 self.width = 4
...@@ -47,7 +47,6 @@ class TestROIPoolOp(OpTest): ...@@ -47,7 +47,6 @@ class TestROIPoolOp(OpTest):
self.spatial_scale = 1.0 / 4.0 self.spatial_scale = 1.0 / 4.0
self.pooled_height = 2 self.pooled_height = 2
self.pooled_width = 2 self.pooled_width = 2
self.rois_num = 2
self.x = np.random.random(self.x_dim).astype('float32') self.x = np.random.random(self.x_dim).astype('float32')
...@@ -106,8 +105,10 @@ class TestROIPoolOp(OpTest): ...@@ -106,8 +105,10 @@ class TestROIPoolOp(OpTest):
def make_rois(self): def make_rois(self):
rois = [] rois = []
batch_ids = np.random.randint(0, self.batch_size, size=self.rois_num) self.rois_lod = [[]]
for i in range(self.rois_num): for bno in range(self.batch_size):
self.rois_lod[0].append(len(rois))
for i in range(bno + 1):
x1 = np.random.random_integers( x1 = np.random.random_integers(
0, self.width / self.spatial_scale - self.pooled_width) 0, self.width / self.spatial_scale - self.pooled_width)
y1 = np.random.random_integers( y1 = np.random.random_integers(
...@@ -118,8 +119,10 @@ class TestROIPoolOp(OpTest): ...@@ -118,8 +119,10 @@ class TestROIPoolOp(OpTest):
y2 = np.random.random_integers(y1 + self.pooled_height, y2 = np.random.random_integers(y1 + self.pooled_height,
self.height / self.spatial_scale) self.height / self.spatial_scale)
roi = [batch_ids[i], x1, y1, x2, y2] roi = [bno, x1, y1, x2, y2]
rois.append(roi) rois.append(roi)
self.rois_lod[0].append(len(rois))
self.rois_num = len(rois)
self.rois = np.array(rois).astype("int64") self.rois = np.array(rois).astype("int64")
def setUp(self): def setUp(self):
......
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'Event',
'Trainer',
]
class Event(object):
BEGIN_EPOCH = 0
END_EPOCH = 1
BEGIN_STEP = 2
END_STEP = 3
def __init__(self):
self.step = 0
self.epoch = 0
self.type = Event.BEGIN_EPOCH
class Trainer(object):
def __init__(self, network_func, optimizer, params=None, place=None):
# 1. we need to generate a framework.Program by calling
# network_func. Reference: fluid.program_guard in
# test_word2vec.py
# 2. move the default_main_program to self.program and run the
# default_startup program on an empty core.Scope()
# 3. call self.params.add_vars with the initialized scope, it
# will add the new vars of the initialized scope into
# self.params.
self.network_func = network_func
self.optimizer = optimizer
self.params = params
self.place = place
# TODO(helin): support distributed training
def train(self, reader, num_epochs, event_handler):
pass
def test(self, reader):
pass
...@@ -8,3 +8,4 @@ scipy>=0.19.0 ...@@ -8,3 +8,4 @@ scipy>=0.19.0
Pillow Pillow
nltk>=3.2.2 nltk>=3.2.2
graphviz graphviz
LinkChecker
...@@ -640,6 +640,7 @@ def start_server(args): ...@@ -640,6 +640,7 @@ def start_server(args):
elif request_path == "/cleanup": elif request_path == "/cleanup":
self._set_headers() self._set_headers()
logging.info("Received request to cleanup cluster") logging.info("Received request to cleanup cluster")
args.no_clean_up = False
cleanup(args.task_name) cleanup(args.task_name)
self.wfile.write("cleanup in progress") self.wfile.write("cleanup in progress")
......
#!/bin/bash #!/bin/bash
DEB="nccl-repo-ubuntu1604-2.1.4-ga-cuda8.0_1-1_amd64.deb" VERSION=$(nvcc --version | grep release | grep -oEi "release ([0-9]+)\.([0-9])"| sed "s/release //")
if [ "$VERSION" == "9.0" ]; then
DEB="nccl-repo-ubuntu1604-2.1.15-ga-cuda9.0_1-1_amd64.deb"
URL="http://nccl2-deb.gz.bcebos.com/nccl-repo-ubuntu1604-2.1.15-ga-cuda9.0_1-1_amd64.deb"
else
DEB="nccl-repo-ubuntu1604-2.1.15-ga-cuda8.0_1-1_amd64.deb"
URL="http://nccl2-deb.gz.bcebos.com/nccl-repo-ubuntu1604-2.1.15-ga-cuda8.0_1-1_amd64.deb"
fi
DIR="/nccl2" DIR="/nccl2"
mkdir -p $DIR mkdir -p $DIR
# we cached the nccl2 deb package in BOS, so we can download it with wget # we cached the nccl2 deb package in BOS, so we can download it with wget
# install nccl2: http://docs.nvidia.com/deeplearning/sdk/nccl-install-guide/index.html#down # install nccl2: http://docs.nvidia.com/deeplearning/sdk/nccl-install-guide/index.html#down
wget -O $DIR/$DEB \ wget -O $DIR/$DEB $URL
"http://nccl2-deb.gz.bcebos.com/nccl-repo-ubuntu1604-2.1.4-ga-cuda8.0_1-1_amd64.deb?responseContentDisposition=attachment"
cd $DIR && ar x $DEB && tar xf data.tar.xz cd $DIR && ar x $DEB && tar xf data.tar.xz
DEBS=$(find ./var/ -name "*.deb") DEBS=$(find ./var/ -name "*.deb")
......