diff --git a/Dockerfile b/Dockerfile index 9ac58f37f2893613ca9f82be08136d9da674737e..c257dbfc2987323f8ed2a24dfffa8b3c15e09399 100644 --- a/Dockerfile +++ b/Dockerfile @@ -49,7 +49,11 @@ ENV PATH=${PATH}:${GOROOT}/bin:${GOPATH}/bin RUN curl -s -q https://glide.sh/get | sh # Install TensorRT -# The unnecessary files has been removed to make the library small. It only contains include and lib now. +# The following TensorRT.tar.gz is not the default official one; we make two minor changes: +# 1. Remove the unnecessary files to make the library small. TensorRT.tar.gz only contains include and lib now, +# and its size is only one-third of the official one. +# 2. Manually add ~IPluginFactory() in the IPluginFactory class of NvInfer.h; otherwise it cannot work in Paddle. +# See https://github.com/PaddlePaddle/Paddle/issues/10129 for details. RUN wget -qO- http://paddlepaddledeps.bj.bcebos.com/TensorRT-4.0.0.3.Ubuntu-16.04.4.x86_64-gnu.cuda-8.0.cudnn7.0.tar.gz | \ tar -xz -C /usr/local && \ cp -rf /usr/local/TensorRT/include /usr && \ diff --git a/benchmark/fluid/machine_translation.py b/benchmark/fluid/machine_translation.py index d7a421c10979c3b9d6865a8c0b99a6410e0f46a8..adde5f21acd4e77d58a453d6868abeccfca4bb5a 100644 --- a/benchmark/fluid/machine_translation.py +++ b/benchmark/fluid/machine_translation.py @@ -21,7 +21,7 @@ import argparse import time import distutils.util -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import paddle.fluid.core as core import paddle.fluid.framework as framework diff --git a/benchmark/fluid/mnist.py b/benchmark/fluid/mnist.py index dc10ac2ec195acc9a5693718141ddb32417dfb71..1e2185dfac1072d1f1046f4616a9d53a8fc76061 100644 --- a/benchmark/fluid/mnist.py +++ b/benchmark/fluid/mnist.py @@ -20,7 +20,7 @@ import numpy as np import argparse import time -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import paddle.fluid.profiler as profiler diff --git a/benchmark/fluid/resnet.py b/benchmark/fluid/resnet.py index 1af5eaf6b46be47cb6b778cedcf53830c201ef39..831fa2c019fc2868cd85b1ca7b2c8c76a2f1628c 100644 --- a/benchmark/fluid/resnet.py +++ b/benchmark/fluid/resnet.py @@ -23,7 +23,7 @@ import time import cProfile, pstats, StringIO -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import paddle.fluid.core as core import paddle.fluid.profiler as profiler diff --git a/benchmark/fluid/stacked_dynamic_lstm.py b/benchmark/fluid/stacked_dynamic_lstm.py index 5fcbdd64af9dc196c9d5b2b82ce4213478ea1418..73bcc47b4d404af2c01d61ca3dfb11971bbcfe9c 100644 --- a/benchmark/fluid/stacked_dynamic_lstm.py +++ b/benchmark/fluid/stacked_dynamic_lstm.py @@ -23,10 +23,10 @@ import random import time import numpy -import paddle.v2 as paddle -import paddle.v2.dataset.imdb as imdb +import paddle +import paddle.dataset.imdb as imdb import paddle.fluid as fluid -from paddle.v2 import batch +import paddle.batch as batch import paddle.fluid.profiler as profiler diff --git a/benchmark/fluid/vgg.py b/benchmark/fluid/vgg.py index 9d990eff62ec368dc7033f55cc0862fa974a64e0..53e34e0cbd15914791c305db6797f826ebfae34e 100644 --- a/benchmark/fluid/vgg.py +++ b/benchmark/fluid/vgg.py @@ -17,7 +17,7 @@ from __future__ import print_function import sys import time import numpy as np -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import paddle.fluid.core as core import argparse diff --git a/cmake/tensorrt.cmake b/cmake/tensorrt.cmake index 0c07d36bed65400164853b99f18ec0335341cd94..ac19b1651893f18b14c62a0986df75bed25d7e80 100644 ---
a/cmake/tensorrt.cmake +++ b/cmake/tensorrt.cmake @@ -30,4 +30,6 @@ if(TENSORRT_FOUND) message(STATUS "Current TensorRT header is ${TENSORRT_INCLUDE_DIR}/NvInfer.h. " "Current TensorRT version is v${TENSORRT_MAJOR_VERSION}. ") + include_directories(${TENSORRT_INCLUDE_DIR}) + list(APPEND EXTERNAL_LIBS ${TENSORRT_LIBRARY}) endif() diff --git a/doc/fluid/api/data.rst b/doc/fluid/api/data.rst new file mode 100644 index 0000000000000000000000000000000000000000..b56c7332cc284649c7e04328e51a7faa78593a39 --- /dev/null +++ b/doc/fluid/api/data.rst @@ -0,0 +1,10 @@ +================================== +Data Reader Interface and DataSets +================================== + +.. toctree:: + :maxdepth: 1 + + data/data_reader.rst + data/image.rst + data/dataset.rst diff --git a/doc/fluid/api/data/data_reader.rst b/doc/fluid/api/data/data_reader.rst new file mode 100644 index 0000000000000000000000000000000000000000..1a35d0bbc8f9d751f49c7e1fc26feb1bcb3ae7f0 --- /dev/null +++ b/doc/fluid/api/data/data_reader.rst @@ -0,0 +1,72 @@ +===================== +Data Reader Interface +===================== + + +DataTypes +========= + +.. autofunction:: paddle.v2.data_type.dense_array + :noindex: + +.. autofunction:: paddle.v2.data_type.integer_value + :noindex: + +.. autofunction:: paddle.v2.data_type.integer_value_sequence + :noindex: + +.. autofunction:: paddle.v2.data_type.integer_value_sub_sequence + :noindex: + +.. autofunction:: paddle.v2.data_type.sparse_binary_vector + :noindex: + +.. autofunction:: paddle.v2.data_type.sparse_binary_vector_sequence + :noindex: + +.. autofunction:: paddle.v2.data_type.sparse_binary_vector_sub_sequence + :noindex: + +.. autofunction:: paddle.v2.data_type.sparse_float_vector + :noindex: + +.. autofunction:: paddle.v2.data_type.sparse_float_vector_sequence + :noindex: + +.. autofunction:: paddle.v2.data_type.sparse_float_vector_sub_sequence + :noindex: + +.. autofunction:: paddle.v2.data_type.sparse_non_value_slot + :noindex: + +.. autofunction:: paddle.v2.data_type.sparse_value_slot + :noindex: + +.. autoclass:: paddle.v2.data_type.InputType + :members: + :noindex: + +DataFeeder +========== + +.. automodule:: paddle.v2.data_feeder + :members: + :noindex: + +Reader +====== + +.. automodule:: paddle.reader + :members: + :noindex: + +.. automodule:: paddle.reader.creator + :members: + :noindex: + +minibatch +========= + +.. automodule:: paddle.v2.minibatch + :members: + :noindex: diff --git a/doc/fluid/api/data/dataset.rst b/doc/fluid/api/data/dataset.rst new file mode 100644 index 0000000000000000000000000000000000000000..e7c8be4452bf55e0967d750c2e624e8e316e9330 --- /dev/null +++ b/doc/fluid/api/data/dataset.rst @@ -0,0 +1,82 @@ +Dataset +======= + +.. automodule:: paddle.dataset + :members: + :noindex: + +mnist ++++++ + +.. automodule:: paddle.dataset.mnist + :members: + :noindex: + +cifar ++++++ + +.. automodule:: paddle.dataset.cifar + :members: + :noindex: + +conll05 ++++++++ + +.. automodule:: paddle.dataset.conll05 + :members: get_dict,get_embedding,test + :noindex: + +imdb +++++ + +.. automodule:: paddle.dataset.imdb + :members: + :noindex: + +imikolov +++++++++ + +.. automodule:: paddle.dataset.imikolov + :members: + :noindex: + +movielens ++++++++++ + +.. automodule:: paddle.dataset.movielens + :members: + :noindex: + +.. autoclass:: paddle.dataset.movielens.MovieInfo + :noindex: + +.. autoclass:: paddle.dataset.movielens.UserInfo + :noindex: + +sentiment ++++++++++ + +.. automodule:: paddle.dataset.sentiment + :members: + :noindex: + +uci_housing ++++++++++++ + +.. 
automodule:: paddle.dataset.uci_housing + :members: + :noindex: + +wmt14 ++++++ + +.. automodule:: paddle.dataset.wmt14 + :members: + :noindex: + +wmt16 ++++++ + +.. automodule:: paddle.dataset.wmt16 + :members: + :noindex: diff --git a/doc/fluid/api/data/image.rst b/doc/fluid/api/data/image.rst new file mode 100644 index 0000000000000000000000000000000000000000..97651ffa6be56cf3ecaca2caca38a353fa5c1f49 --- /dev/null +++ b/doc/fluid/api/data/image.rst @@ -0,0 +1,5 @@ +Image Interface +=============== + +.. automodule:: paddle.v2.image + :members: diff --git a/doc/fluid/api/index_en.rst b/doc/fluid/api/index_en.rst index b0710d8b19956eb235890fdb2a2d764084416aa5..06c686d9508635abd41571983e00be174e94743e 100644 --- a/doc/fluid/api/index_en.rst +++ b/doc/fluid/api/index_en.rst @@ -16,3 +16,4 @@ Fluid profiler.rst regularizer.rst io.rst + data.rst diff --git a/doc/fluid/api/layers.rst b/doc/fluid/api/layers.rst index 3790f09c84563fe541bd8d0bc08e23b19d4287ca..ff3c9346a2cd777a5294d536911f39de9032fe52 100644 --- a/doc/fluid/api/layers.rst +++ b/doc/fluid/api/layers.rst @@ -479,6 +479,13 @@ label_smooth .. autofunction:: paddle.fluid.layers.label_smooth :noindex: +roi_pool +--------- + +.. autofunction:: paddle.fluid.layers.roi_pool + :noindex: + + ops === @@ -820,3 +827,5 @@ topk .. autofunction:: paddle.fluid.layers.topk :noindex: + + diff --git a/doc/fluid/design/data_type/float16.md b/doc/fluid/design/data_type/float16.md index 1ea95ed6b5d6792171569b6ff76d09be92fcb13e..844d2aafcf257b85057e1ac200ed3d5cf0be2ff0 100644 --- a/doc/fluid/design/data_type/float16.md +++ b/doc/fluid/design/data_type/float16.md @@ -3,7 +3,7 @@ ## Why float16 Half precision (float16) is a binary floating-point format that occupies 16 bits in memory. float16 is half the size of traditional 32-bit single precision format (float) and has lower precision and smaller range. -When high precision computation is not required, using float16 data type could potentially +When high precision computation is not required (which is usually the case at least in the deep learning inference stage), using float16 data type could potentially - reduce storage space, memory bandwidth, and power usages; - increase the chance of data fitting into a smaller cache of lower latency; @@ -12,7 +12,7 @@ When high precision computation is not required, using float16 data type could p ## Survey of current float16 support A brief survey of float16 support on different compilers, hardwares, and libraries can be found below. Interested readers can refer to [link1](https://github.com/PaddlePaddle/Paddle/issues/4853) and [link2](https://github.com/Xreki/Xreki.github.io/blob/master/multi_data_types_in_dl_framework/ppt/float16_and_quantized_type.md) for more info. -The goal of float16 is to serve as a key for the executor to find and run the correct version of compute method specialized for float16 in operator kernel. It should be compatible with various natively supported float16 implementations including `__half` for cuda, `float16_t` for ARM, and `Eigen::half` for Eigen to make writing customized float16 kernels easier. +The goal of float16 is to serve as a key for the executor to find and run the correct version of compute method specialized for float16 in operator kernels. It should be compatible with various natively supported float16 implementations including `__half` for cuda, `float16_t` for ARM, and `Eigen::half` for Eigen to make writing customized float16 kernels easier. ### Compiler - nvcc supports `__half` data type after CUDA 7.5. 
@@ -95,11 +95,89 @@ float half_to_float(float16 h); ``` which provides one-to-one conversion between float32 and float16. These two functions will do different conversion routines based on the current hardware. CUDA/ARM intrinsics will be used when the corresponding hardware is available. If the hardware or compiler level does not support float32 to float16 conversion, software emulation will be performed to do the conversion. -## To do -After float16 class is available, some of the future items are below: +## float16 inference +In Fluid, a neural network is represented as a protobuf message called [ProgramDesc](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/program.md), whose Python wrapper is a [Program](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#program). The basic structure of a program is some nested [blocks](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#block), where each block consists of some [variable](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#variable) definitions and a sequence of [operators](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#operator). An [executor](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/executor.md) will run a given program desc by executing the sequence of operators in the entrance block of the program one by one. -- Update pybind/tensor_py.h to bind c++ float16 with numpy float16. +### Operator level requirement +Each operator has many kernels for different data types, devices, and library types. The operator will select the appropriate kernel to run based on, among other things, the data type of the input variables. By default, every Fluid operator has a float data type kernel that takes float variables as input and generates float output. -This means that if we provide float input to the first operator in a program, then each operator will use the float kernel to compute float output and send it as input to the next operator to trigger the float kernel. Overall, the program will run in float mode and give us a final output of float data type. -Modify `GetKernelType()` method in `framework/operator.h` to make it compatible with float16. -The same principle applies if we want a program to run in float16 mode. We provide an input variable of float16 data type to the first operator, and then one by one, each operator in the program will run the float16 kernel (provided that each operator in this program has float16 kernels registered) until we finally obtain a float16 output variable. -- Create a type-casting operator that can convert the data type in tensor between float16 and other types. +So the preliminary requirement for float16 inference is to add float16 kernels to the operators that are needed in a specific kind of program. For example, float16 inference on an image classification neural network like Vgg or Resnet typically requires the following operators to have float16 kernels: convolution, pooling, multiplication, addition, batch norm, dropout, relu, and softmax. Please refer to [new_op_en](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/dev/new_op_en.md) for details of how to add new kernels to an operator.
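+The snippet below is only an illustrative sketch of this step (the operator name, kernel class, and header paths are placeholders for illustration, not a definitive implementation): registering an additional float16 GPU kernel next to the existing float one typically follows this pattern.
+```c++
+// Illustrative sketch only: register both a float and a float16 CUDA kernel
+// for one operator. "mul" and MulKernel stand in for a real operator here.
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/operators/mul_op.h"   // header declaring the kernel functor (assumed)
+#include "paddle/fluid/platform/float16.h"
+
+namespace ops = paddle::operators;
+namespace plat = paddle::platform;
+
+REGISTER_OP_CUDA_KERNEL(
+    mul, ops::MulKernel<plat::CUDADeviceContext, float>,
+    ops::MulKernel<plat::CUDADeviceContext, plat::float16>);
+```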
+ +### Variable level requirement +Operators including convolution and multiplication (used in fully-connected layers) take as input not only the variables generated by the preceding operators but also [parameter](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#parameter) variables, which contain the trained weights to apply to the input data. These weights are obtained in the Fluid training process and are by default of float data type. + +When these operators are running in float16 mode, the float16 kernel requires those parameter variables to contain weights of Fluid float16 data type. Thus, we need a convenient way to convert the original float weights to float16 weights. + +In Fluid, we use a tensor to hold the actual data of a variable on the c++ end. [Pybind](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/pybind/tensor_py.h) is used to bind c++ tensors of a certain data type with numpy arrays of the corresponding numpy data type on the Python end. Each common c++ built-in data type has a corresponding numpy data type of the same name. However, since there is no built-in float16 type in c++, we cannot directly bind the numpy float16 data type with the Fluid float16 class. Since both Fluid float16 and numpy float16 use uint16 as the internal data storage type, we use the c++ built-in type `uint16_t` and the corresponding numpy uint16 data type to bridge the gap via [Pybind](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/pybind/tensor_py.h). + +The following code demonstrates how to do the tensor conversion. +```Python +# var is the variable of float weights +# tensor is a numpy array of data copied from the tensor data in var +# fp16_var is the variable that will contain float16 weights converted from var +tensor = numpy.array(var.get_tensor()) +fp16_tensor = fp16_var.get_tensor() + +# After the original tensor data is converted to numpy float16 data type, +# view(numpy.uint16) is used so that the internal memory of the numpy array +# will be reinterpreted to be of uint16 data type, which is bound to +# the Fluid float16 class via pybind with the help of the uint16_t built-in c++ type +fp16_tensor.set(tensor.astype(numpy.float16).view(numpy.uint16), GPUPlace) +``` + +### Consistent API requirement +The basic inference in float16 mode requires users to feed input and obtain output both of float16 data type. However, in this way, the inference APIs are not consistent between float16 mode and float mode, and users may find it confusing and difficult to use float16 inference since they need to do extra steps to provide float16 input data and convert float16 output data back to float. To have a consistent API for different inference modes, we need to transpile the program desc in some way so that we can run float16 inference by feeding and fetching variables of float data type. + +This problem can be solved by introducing a type-casting operator which takes an input variable of a certain data type, casts it to another specified data type, and puts the casted data into the output variable. Inserting cast operators where needed can make a program internally run in float16 mode. + +### float16 transpiler +With all the above requirements in mind, we designed a float16 inference transpiler that can transpile a float32 mode inference program desc to a float16 mode one.
+ +Given a float inference program and the corresponding variables of float32 weights in the [scope](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/scope.md), +this transpiler mainly does the following modifications: + +1. Insert cast operators at the beginning of the program so that the input float data will be converted to float16 data type before feeding to subsequent operators to invoke the float16 kernel. + +2. Insert cast operators at the end of the program so that the output float16 data will be converted back to float data type before users obtain the result. + +3. For each parameter variable of float weights, create in the scope a corresponding variable of float16 weights which are converted from the corresponding float weights and add this new float16 variable to the program. + +4. Update the operator information in the program so that each relevant operator uses the newly created float16 variable instead of its float counterpart. + +Below is an example of usage: +```Python +# Get the float inference program +[float_inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + +# Prepare the float input data +tensor_img = numpy.random.rand(1, 3, 32, 32).astype(numpy.float32) + +# Running inference_program in float mode +float_results = exe.run(float_inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) + +# Use the float16 transpiler to speed up inference +float16_inference_program = float_inference_program.clone() +t = fluid.InferenceTranspiler() +t.float16_transpile(float16_inference_program, GPUPlace) + +# Running the program in float16 mode +float16_results = exe.run(float16_inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) +``` + +As we can see from the example above, users can simply use the `float16_transpile` method provided by the inference transpiler class on an existing float inference program to run inference in float16 mode. + +### Speedup on GPU +Currently, Fluid inference in float16 mode is only supported on Nvidia GPU devices. There is no motivation to support float16 inference on non-ARM CPUs because float16 is not natively supported there and float16 calculation will only be slower than its float counterpart. + +Nvidia started to support its native float16 data type (which has the same internal memory representation as the Fluid float16 class) on CUDA 7.5. Moreover, float16 speedups on common computationally intensive tasks including GEMM (general matrix-matrix multiplication) and convolution are supported since cuBLAS 7.5 and cuDNN 5.0. + +Recently, the introduction of [tensor core](https://devblogs.nvidia.com/programming-tensor-cores-cuda-9/) in Volta architecture GPUs and the support of tensor core calculation in CUDA 9.0 and cuDNN 7.0 make float16 truly superior to float in certain deep learning applications. Please refer to this [benchmark report](https://github.com/kexinzhao/Paddle_benchmark/blob/master/float16_benchmark.md) for more details. diff --git a/doc/fluid/design/dynamic_rnn/rnn_design_en.md b/doc/fluid/design/dynamic_rnn/rnn_design_en.md new file mode 100644 index 0000000000000000000000000000000000000000..9493908f4f73b3e7d91f5f6364a2a3660257d508 --- /dev/null +++ b/doc/fluid/design/dynamic_rnn/rnn_design_en.md @@ -0,0 +1,175 @@ +# Variable Length Supported RNN Design +For the learning of variable length sequences, the existing mainstream frameworks such as TensorFlow, PyTorch, Caffe2, and MXNet all use padding.
+ +Different-length sequences in a mini-batch will be padded with zeros and transformed to the same length. + +The existing RNN implementation in PaddlePaddle is `RecurrentLayerGroup`, +which supports variable length sequences without padding. +This doc designs Fluid's RNN based on this idea. + +## Multi-layer sequence data format `LODTensor` +At present, Paddle stores the data of one mini-batch in a one-dimensional array. + +`Argument.sequenceStartPositions` is used to store information for each sentence. + +In Paddle, `Argument.subSequenceStartPositions` is used to store 2 levels of sequence information, while higher dimensional sequences cannot be supported. + +In order to support the storage of `N-level` sequences, we define sequence information as the following data structure. + + +```c++ +std::shared_ptr<std::vector<std::vector<int>>> lod_start_pos_; +``` + +Or more clearly defined here + +```c++ +typedef std::vector<int> level_t; +std::vector<level_t> lod_start_pos; +``` +Each `level_t` here stores a level of offset information consistent with paddle's current practice. + +In order to transmit sequence information more transparently, we have introduced a new tensor called `LODTensor`[1]. +Its tensor-related interfaces all inherit directly from `Tensor`, but it also adds sequence-related interfaces. +Thus, an ordinary `Op` can use a `LODTensor` directly as a `Tensor`, +while a sequence-aware `Op` will additionally use the variable-length sequence interfaces of `LODTensor`. + +The definition of `LODTensor` is as follows: + + +```c++ +class LODTensor : public Tensor { +public: + size_t Levels() const { return lod_start_pos_->size(); } + size_t Elements(int level = 0) const { + return (*lod_start_pos_)[level].size(); + } + // slice of level[elem_begin: elem_end] + // NOTE low performance in slicing lod_start_pos_. + // TODO should call Tensor's Slice. + LODTensor LODSlice(int level, int elem_begin, int elem_end) const; + + // slice with tensor's data shared with this. + LODTensor LODSliceShared(int level, int elem_begin, int elem_end) const; + + // copy other's lod_start_pos_, to share LOD info. + // NOTE the LOD info should not be changed. + void ShareConstLODFrom(const LODTensor &other) { + lod_start_pos_ = other.lod_start_pos_; + } + // copy other's lod_start_pos_'s content, free to mutate. + void ShareMutableLODFrom(const LODTensor &other) { + lod_start_pos_ = std::make_shared<std::vector<std::vector<int>>>( + other.lod_start_pos_->begin(), other.lod_start_pos_->end()); + } + +private: + std::shared_ptr<std::vector<std::vector<int>>> lod_start_pos_; +}; +``` +Here, `lod_start_pos_` uses a `shared_ptr` to reduce the cost of storage and replication. +`LODTensor` can be thought of as an extension of `Tensor`, which is almost completely compatible with the original `Tensor`. + +## How to support the framework +### Replace `Tensor` with `LoDTensor` +To implement the passing of `LODTensor`, most `Tensor`s in the framework need to be replaced with `LODTensor`. +The simplest implementation is to directly **replace all previous `Tensor` with `LODTensor`**, where you can directly modify the `Tensor` interface created in `pybind.cc`. + +In addition, the user may need to perceive the existence of a sequence (for example, visualization may need to parse the output sequence of the model), so some of the sequence operation APIs also need to be exposed to the Python layer.
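+As a minimal usage sketch (based only on the `LODTensor` interface proposed above, not on existing framework code), a sequence-aware consumer could inspect and slice a mini-batch like this:
+```c++
+// Sketch: how a sequence-aware Op might use the proposed LODTensor interface.
+void ConsumeBatch(const LODTensor &batch) {
+  size_t levels = batch.Levels();       // number of sequence levels stored
+  size_t num_seqs = batch.Elements(0);  // number of top-level sequences
+  if (levels > 0 && num_seqs >= 2) {
+    // Slice out the first two sequences; the underlying data is shared.
+    LODTensor first_two = batch.LODSliceShared(0, 0, 2);
+    // A sequence-agnostic Op would simply use `batch` as a plain Tensor.
+  }
+}
+```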
+ +### Transmit `lod_start_pos` along with the Op call chain +`lod_start_pos` is passed along with the Op call chain. +The framework needs to support the following features to implement the transmission of `lod_start_pos`: + +1. Implement the transfer as `shared_ptr` + - Do not modify the contents of `lod_start_pos` as a consumer + - Modify the contents of `lod_start_pos` as a producer + - By convention, a consumer only needs to copy the `shared_ptr` passed over + - A producer needs to create its own independent memory to store its own independent modifications and expose the `shared_ptr` to subsequent consumers + - Since the transfer process is implemented by copying `shared_ptr`, the framework only needs to pass `lod_start_pos` once. + +2. Op is transparent enough not to sense `lod_start_pos` +3. A producer Op that needs to modify `lod_start_pos` can update its `lod_start_pos` data when `Run` is called + +## Sorted by length +After sorting by length, the batch size from the forward time step will naturally decrement, and you can directly plug it into Net to do the batch calculation. + +For example, the original input: + +``` +origin: +xxxx +xx +xxx + +-> sorted: +xxxx +xxx +xx +``` + +After `SegmentInputs`, there will be 4 time steps; the input of each time step is as follows (arranged vertically) + +``` +0 1 2 3 +x x x x +x x x +x x +``` + +In order to track the changes before and after sorting, we use + +```c++ +struct SortedSeqItem { + void *start{nullptr}; + void *end{nullptr}; +}; + +std::vector<SortedSeqItem> sorted_seqs; +``` +to track the position of each sequence after sorting, and add a new interface + +```c++ +std::vector<SortedSeqItem> SortBySeqLen(const LODTensor& tensor); +``` +Because the order of the input sequences changes, the following existing interfaces need to be modified: + +- InitMemories, memory needs to be rearranged according to `sorted_seqs` +- SegmentInputs +- ConcatOutputs + +In addition, because `sorted_seqs` needs to be reused by `RecurrentGradientOp`, it will become a new output of `RecurrentOp`. +It is passed in as an input to `RecurrentGradientOp`. + +## InitMemories +Due to the sequence change, the order of the elements in the `boot_memories` batch also needs to be rearranged accordingly. + +## SegmentInputs + +`SegmentInputs` relies on the information in `sorted_seqs` to cut the original sequences horizontally into the input of each time step, in the sorted sequence order. + +The transition is as follows: +``` +origin: +xxxx +xx +xxx + + | + | + \ / + ! +0 1 2 3 +x x x x +x x x +x x +``` +## ConcatOutputs +`ConcatOutputs` needs to + +- Restore the output of each time step back to the original input sequence order (to prevent the output order of the Infer phase from being disturbed) +- Concat each sequence as a regular mini-batch representation + +## References +1. 
[Level of details](https://en.wikipedia.org/wiki/Level_of_detail) diff --git a/doc/fluid/design/onnx/images/project_structure.png b/doc/fluid/design/onnx/images/project_structure.png new file mode 100644 index 0000000000000000000000000000000000000000..ab1c2ff23cfff586516876684348bb15bd2084fc Binary files /dev/null and b/doc/fluid/design/onnx/images/project_structure.png differ diff --git a/doc/fluid/design/onnx/onnx_convertor.md b/doc/fluid/design/onnx/onnx_convertor.md new file mode 100644 index 0000000000000000000000000000000000000000..bc1665d7c33eb54cb63e5306a439c1ca67016d1e --- /dev/null +++ b/doc/fluid/design/onnx/onnx_convertor.md @@ -0,0 +1,131 @@ +# Background + +[ONNX (Open Neural Network Exchange)](https://github.com/onnx/onnx) bridges different deep learning frameworks by providing an open source graph format for models. The models trained in other frameworks can be converted into the ONNX format to execute inference by utilizing the built-in operators in ONNX - this is called a **frontend**. With the inverse conversion (called a **backend**), different frameworks can share any models supported by ONNX in principle. Now most mainstream frameworks have joined the ONNX community, e.g. Caffe2, PyTorch, and MXNet. There is momentum driving more and more vendors to begin supporting ONNX or even choose ONNX as the only machine learning runtime in their devices. + +Therefore, it is necessary to enable the conversion between PaddlePaddle and ONNX. This design doc is aimed at implementing a convertor, mainly for converting between **Fluid** models and ONNX (it is very likely that we may support older v2 models in the future). A complete convertor should be bidirectional - with a frontend AND a backend, but considering the importance, we will start with the frontend, i.e., converting Fluid models to ONNX models. + + +# How it works + +ONNX has a [working list of operators](https://github.com/onnx/onnx/blob/master/docs/Operators.md) which is versioned. + +When prioritizing implementation of a frontend over a backend, the choice of coverage of Fluid -> ONNX operators comes down to the choice of models to be supported (see section `Supported models`). Eventually, this will allow us to reach a really wide coverage of all operators. + +Here are a few major considerations when it comes to converting models: + +- **Op-level conversion**: How to map the inputs, attributes, and outputs of each Paddle operator to those of the ONNX operator. In several cases, these require transformations. For each direction (frontend vs. backend), a different conversion mapping is needed. +- **Parameters (weights) initialization**: Setting initial parameters on different nodes. +- **Tensor data type mapping** (Note: Some ONNX data types are not supported in Fluid) +- **Network representation adaptation**: Fluid `ProgramDesc` includes nested blocks. Since ONNX is free of nesting, the `ProgramDesc` ops need to be traversed to only include ops from the global scope in the root block. The variables used as inputs and outputs should also be in this scope. +- **Model validation**: There are two kinds of validations that are necessary: + 1. We need to ensure that the inference outputs of the ops run inside a model are the same as those obtained when running the converted ONNX ops through an alternative ONNX backend. + 2. Checking to see if the generated nodes on the graph are validated by the internal ONNX checkers. +- **Versioning**: ONNX versions its op listing over versions. 
In fact, it has versioning on 3 different levels: ops, graphs, and ONNX models. This requires that we are conscious about versioning the convertor and updating tests and op convertor logic for each release. It also implies that we release pre-trained ONNX models upon each version release. + +One thing that makes this conversion more feasible in Fluid's case is the use of a static IR - the `ProgramDesc` - as opposed to a dynamic graph, as created in the cases of frameworks like PyTorch. + + +# Project structure + +
+![Project structure](./images/project_structure.png)
+ +The project contains four important parts: + +* **fluid**: The directory that contains wrappers for fluid related APIs. Fluid has provided some low-level APIs to parse or generate the inference model. However, directly using these low-level APIs makes the code tediously long. This module wraps low-level APIs to provide simplified interfaces. + +* **onnx**: This is a Python package provided by ONNX containing helpers for creating nodes, graphs, and eventually binary protobuf models with initializer parameters. + +* **onnx_fluid**: Contains two-way mapping (Fluid -> ONNX ops and ONNX -> Fluid ops). Called from `convert.py`, the program uses this mapping along with modifier functions to construct ONNX nodes with the help of ONNX's `make_node` helper. It also contains mapping between datatypes and tensor deprecation / amplification logic. + +* **convert.py**: The interface exposed to users. This will traverse the global program blocks/variables and construct the write-able model. + + +# Usage +The converter should be designed to very easy-to-use. Bidirectional conversion between a Fluid inference model and an ONNX binary model will be supported. Model validation will also provided to verify the correctness of converted model. + +* Convert Fluid inference model to ONNX binary model + + ``` + python convert.py --fluid_model --onnx_model validate True + ``` + +* Validate the converted model + + ``` + python validate.py --fluid_model --onnx_model + ``` + +The conversion and model validation will be completed consecutively, finally output a readable model structure description. And for the converse conversion, users only need to exchange the input and output. + + +# Challenges and mitigation + +## Cycles + +Cycles are unsupported in ONNX. In Paddle, the `while` op is the most prominent example of a cycle. + +*Resolution*: We won't support models with `while`s which can't be substituted until ONNX adds support for such ops. + +## Sequences + +Sequence processing operators like `sequence_expand`, `sequence_reshape`, `sequence_concat`, and `sequence_pool` are not supported by ONNX as well, because they do not support non-padded datatypes like LoDTensors. + +*Resolution*: Since the runtimes using our ONNX exported graphs won't be using LoDTensors in the first place, such sequence operators should be mapped to ONNX ops that will do the necessary transposing ops with the knowledge of the padding and shape of the Tensors. + +## Ops that can't easily be mapped + +There are ops that just aren't possible to map today: + +**Control flow operators** + +Paddle supports control flow ops like `If/Else` and `Switch` (if we ignore the CSP operations like `select` for now). ONNX has `If` support in the experimental phase. + +*Resolution*: Map Paddle's `If/Else` to ONNX's `If`, but ignore other control flow operators until ONNX brings support for them. + + +**Non-existent in Fluid** + +There are several ONNX operators that are not available in Fluid today, e.g. `InstanceNormalization`, `RandomUniform`, `Unsqueeze`, etc. + +*Resolution*: For the initial phase, we can choose to not support ops that our models don't care for and are subsequently not available in Fluid. However, for ops that we think might be necessary for Fluid users also, we must implement them on our side and support the ONNX conversion to them. This list is TBD. + + +**Concurrency** + +ONNX does not have any considerations for concurrency right now. + +*Resolution*: There are two ways to approach this: + +a. 
We choose to not support concurrent models. +b. We only support `go_op`s (basically threads) shallowly. This could mean that we enqueue `go_op` ops prior to gradient calculations OR even prior to the entire graph, and that's it - since `go_op`s do not have support for backprop anyways. One of the core target use cases of `go_op`: batch reading - can be handled through this approach. + + +**Overloaded in Fluid** + +There are ops in ONNX whose job can't be accomplished by a single corresponding Paddle operator (e.g. ), but a collection of operators. + +*Resolution*: Chain multiple Paddle operators. + + +## Lack of LoDTensors + +As stated above, ONNX only supports simple Tensor values. + +*Resolution*: Deprecate to plain old numpy-able tensors. + + +## Reconstruction from deprecated ONNX ops + +For higher-level Fluid ops, such as a few offered by the `nn` layer that do not have direct corresponding mappings but can be converted to ONNX by chaining a series of ops without cycles, it would be useful to map them back to the higher-level Fluid ops once converted back from the deprecated ONNX graphs. + +*Resolution*: Graphs that have the deprecation from Paddle -> ONNX. When converting back from ONNX, if we encounter the identical graphs by doing a forward search, we can replace the subgraphs with the matching ONNX op. + + +# Supported models + +As mentioned above, potential risks may come from the conversion of sequence-related models, including the LodTensor, ```if/else``` and ```while``` operator. So a good choice is to focus on some important feedforward models first, then implement some simple recurrent models. + +- Feedforward models: common models selected in PaddleBook, e.g. VGG, ResNet and some other models proposed by application teams. +- Recurrent models: language model, stacked LSTMs etc. diff --git a/doc/v2/api/data/data_reader.rst b/doc/v2/api/data/data_reader.rst index d7c896a6270b488ca4449e5211d0d0879eda6ac5..1a35d0bbc8f9d751f49c7e1fc26feb1bcb3ae7f0 100644 --- a/doc/v2/api/data/data_reader.rst +++ b/doc/v2/api/data/data_reader.rst @@ -56,11 +56,11 @@ DataFeeder Reader ====== -.. automodule:: paddle.v2.reader +.. automodule:: paddle.reader :members: :noindex: -.. automodule:: paddle.v2.reader.creator +.. automodule:: paddle.reader.creator :members: :noindex: diff --git a/doc/v2/api/data/dataset.rst b/doc/v2/api/data/dataset.rst index 02e41564b1e48c07da6ac071fc4b60089169e05a..e7c8be4452bf55e0967d750c2e624e8e316e9330 100644 --- a/doc/v2/api/data/dataset.rst +++ b/doc/v2/api/data/dataset.rst @@ -1,82 +1,82 @@ Dataset ======= -.. automodule:: paddle.v2.dataset +.. automodule:: paddle.dataset :members: :noindex: mnist +++++ -.. automodule:: paddle.v2.dataset.mnist +.. automodule:: paddle.dataset.mnist :members: :noindex: cifar +++++ -.. automodule:: paddle.v2.dataset.cifar +.. automodule:: paddle.dataset.cifar :members: :noindex: conll05 +++++++ -.. automodule:: paddle.v2.dataset.conll05 +.. automodule:: paddle.dataset.conll05 :members: get_dict,get_embedding,test :noindex: imdb ++++ -.. automodule:: paddle.v2.dataset.imdb +.. automodule:: paddle.dataset.imdb :members: :noindex: imikolov ++++++++ -.. automodule:: paddle.v2.dataset.imikolov +.. automodule:: paddle.dataset.imikolov :members: :noindex: movielens +++++++++ -.. automodule:: paddle.v2.dataset.movielens +.. automodule:: paddle.dataset.movielens :members: :noindex: -.. autoclass:: paddle.v2.dataset.movielens.MovieInfo +.. autoclass:: paddle.dataset.movielens.MovieInfo :noindex: - -.. 
autoclass:: paddle.v2.dataset.movielens.UserInfo + +.. autoclass:: paddle.dataset.movielens.UserInfo :noindex: sentiment +++++++++ -.. automodule:: paddle.v2.dataset.sentiment +.. automodule:: paddle.dataset.sentiment :members: :noindex: uci_housing +++++++++++ -.. automodule:: paddle.v2.dataset.uci_housing +.. automodule:: paddle.dataset.uci_housing :members: :noindex: wmt14 +++++ -.. automodule:: paddle.v2.dataset.wmt14 +.. automodule:: paddle.dataset.wmt14 :members: :noindex: wmt16 +++++ -.. automodule:: paddle.v2.dataset.wmt16 +.. automodule:: paddle.dataset.wmt16 :members: :noindex: diff --git a/doc/v2/dev/index_en.rst b/doc/v2/dev/index_en.rst index 36516b7953224e799e1065fd7930509eec0aa650..cbff313fc5b9468b58159cf2b04e8464f9bebc78 100644 --- a/doc/v2/dev/index_en.rst +++ b/doc/v2/dev/index_en.rst @@ -6,6 +6,7 @@ PaddlePaddle adheres to the following three sections of code and document specif PaddlePaddle uses git for version control and Docker is used for building and testing environment. The code includes Cuda, C++, Python, Shell and other programming languages,which comply with Google C++ Style, Pep-8, and the code base includes style checking by an automatic inspection tool. Code comments need to follow the Doxygen specification. The code that does not meet the style requirements will fail to compile. We provide the following guidelines for the use of Git, build tests and code development. + .. toctree:: :maxdepth: 1 diff --git a/doc/v2/howto/cluster/multi_cluster/index_en.rst b/doc/v2/howto/cluster/multi_cluster/index_en.rst index dac7aaef085c80851c1bbb89250faf2151de4ca6..b69bd5b2dbf1967d65558da06812d76f431c1d5a 100644 --- a/doc/v2/howto/cluster/multi_cluster/index_en.rst +++ b/doc/v2/howto/cluster/multi_cluster/index_en.rst @@ -1,19 +1,35 @@ Use different clusters ====================== -PaddlePaddle supports running jobs on several platforms including: -- `Kubernetes `_ open-source system for automating deployment, scaling, and management of containerized applications from Google. -- `OpenMPI `_ Mature high performance parallel computing framework. -- `Fabric `_ A cluster management tool. Write scripts to submit jobs or manage the cluster. +The user's cluster environment is not the same. To facilitate everyone's deployment, we provide a variety of cluster deployment methods to facilitate the submission of cluster training tasks, which will be introduced as follows: -We'll introduce cluster job management on these platforms. The examples can be found under `cluster_train_v2 `_ . +`Kubernetes `_ is a scheduling framework of Google open source container cluster, supporting a complete cluster solution for large-scale cluster production environment. The following guidelines show PaddlePaddle's support for Kubernetes: -These cluster platforms provide API or environment variables for training processes, when the job is dispatched to different nodes. Like node ID, IP or total number of nodes etc. +.. toctree:: + :maxdepth: 1 + + k8s_cn.md + k8s_distributed_cn.md + +`OpenMPI `_ is a mature high-performance parallel computing framework, which is widely used in the field of HPC. The following guide describes how to use OpenMPI to build PaddlePaddle's cluster training task: .. toctree:: :maxdepth: 1 - fabric_en.md - openmpi_en.md - k8s_en.md - k8s_aws_en.md + openmpi_cn.md + +`Fabric `_ is a convenient tool for program deployment and management. We provide a way to deploy and manage with Fabric. If you want to know more about it, please read the following guidelines: + +.. 
toctree:: + :maxdepth: 1 + + fabric_cn.md + +We also support the deployment of PaddlePaddle on AWS. Learn more about: + +.. toctree:: + :maxdepth: 1 + + k8s_aws_cn.md + +The examples can be found under `cluster_train_v2 `_ . \ No newline at end of file diff --git a/paddle/fluid/framework/blocking_queue.h b/paddle/fluid/framework/blocking_queue.h new file mode 100644 index 0000000000000000000000000000000000000000..a19558c0ae59005bee575e8c469c7f95d8780ab1 --- /dev/null +++ b/paddle/fluid/framework/blocking_queue.h @@ -0,0 +1,74 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include // NOLINT +#include +#include // NOLINT +#include + +namespace paddle { +namespace framework { + +template +class BlockingQueue { + public: + void Push(const T &item) { + { + std::lock_guard g(mutex_); + q_.emplace_back(item); + } + cv_.notify_one(); + } + + template + void Extend(const U &items) { + { + std::lock_guard g(mutex_); + for (auto &item : items) { + q_.emplace_back(item); + } + } + cv_.notify_all(); + } + + std::deque PopAll(size_t ms, bool *timeout) { + auto time = + std::chrono::system_clock::now() + std::chrono::milliseconds(ms); + std::unique_lock lock(mutex_); + *timeout = !cv_.wait_until(lock, time, [this] { return !q_.empty(); }); + std::deque ret; + if (!*timeout) { + std::swap(ret, q_); + } + return ret; + } + + T Pop() { + std::unique_lock lock(mutex_); + cv_.wait(lock, [=] { return !q_.empty(); }); + T rc(std::move(q_.front())); + q_.pop_front(); + return rc; + } + + private: + std::mutex mutex_; + std::condition_variable cv_; + std::deque q_; +}; + +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/data_transform.cc b/paddle/fluid/framework/data_transform.cc index bfad9ac1e9cad1936ed961ad1da55787d2faa23e..9c277a27da5af34fc9fb18ca073e369c05ecdf22 100644 --- a/paddle/fluid/framework/data_transform.cc +++ b/paddle/fluid/framework/data_transform.cc @@ -63,16 +63,16 @@ void DataTransform(const OpKernelType& expected_kernel_type, } void CopyVariableWithTensor(const Variable& in_var, const Tensor& tensor, - Variable& out_var) { + Variable* out_var) { if (in_var.IsType()) { auto& in_lod_tensor = in_var.Get(); - auto* tran_lod_tensor = out_var.GetMutable(); + auto* tran_lod_tensor = out_var->GetMutable(); tran_lod_tensor->set_lod(in_lod_tensor.lod()); tran_lod_tensor->set_layout(in_lod_tensor.layout()); tran_lod_tensor->ShareDataWith(tensor); } else if (in_var.IsType()) { auto& in_selected_rows = in_var.Get(); - auto* trans_selected_rows = out_var.GetMutable(); + auto* trans_selected_rows = out_var->GetMutable(); trans_selected_rows->set_height(in_selected_rows.height()); trans_selected_rows->set_rows(in_selected_rows.rows()); trans_selected_rows->mutable_value()->ShareDataWith(tensor); diff --git a/paddle/fluid/framework/data_transform.h b/paddle/fluid/framework/data_transform.h index 9ec67e6f3d6358cd658e198602f5e802a0ba4cc9..dee5d8c7c1126013742460df1d94bb364220ad09 100644 --- 
a/paddle/fluid/framework/data_transform.h +++ b/paddle/fluid/framework/data_transform.h @@ -35,7 +35,7 @@ void DataTransform(const OpKernelType& expected_kernel_type, const Tensor& input_tensor, Tensor* out); void CopyVariableWithTensor(const Variable& in_var, const Tensor& tensor, - Variable& out_var); + Variable* out_var); } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/broadcast_op_handle_test.cc b/paddle/fluid/framework/details/broadcast_op_handle_test.cc index 3f2dcde3e9597287d72046dd4f8b07faab1ede25..8f1b6d1615312fced0887f9ff14ae17877371b7e 100644 --- a/paddle/fluid/framework/details/broadcast_op_handle_test.cc +++ b/paddle/fluid/framework/details/broadcast_op_handle_test.cc @@ -139,7 +139,7 @@ struct TestBroadcastOpHandle { PADDLE_ENFORCE_EQ(out_tensor.lod(), lod, "lod is not equal."); f::Tensor result_tensor; - f::TensorCopy(out_tensor, cpu_place, *(ctxs_[j]), &result_tensor); + f::TensorCopySync(out_tensor, cpu_place, &result_tensor); float* ct = result_tensor.mutable_data(cpu_place); for (int64_t i = 0; i < f::product(kDims); ++i) { @@ -185,7 +185,7 @@ struct TestBroadcastOpHandle { } f::Tensor result_tensor; - f::TensorCopy(rt, cpu_place, *(ctxs_[j]), &result_tensor); + f::TensorCopySync(rt, cpu_place, &result_tensor); float* ct = result_tensor.data(); for (int64_t i = 0; i < f::product(kDims); ++i) { diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc index b57c7dab3a086212deb9714b81a0f861dfeac5e2..1e8ca20b51d43554cf1898b41b31c27b90e6c642 100644 --- a/paddle/fluid/framework/details/fetch_op_handle.cc +++ b/paddle/fluid/framework/details/fetch_op_handle.cc @@ -66,7 +66,7 @@ void FetchOpHandle::RunImpl() { auto &t = var->Get(); if (platform::is_gpu_place(t.place())) { #ifdef PADDLE_WITH_CUDA - TensorCopy(t, cpu, *dev_ctxes_[t.place()], &tensors_[i], true); + TensorCopySync(t, cpu, &tensors_[i]); #endif } else { tensors_[i].ShareDataWith(t); diff --git a/paddle/fluid/framework/details/reduce_op_handle_test.cc b/paddle/fluid/framework/details/reduce_op_handle_test.cc index c17aabee53680fba10eac289cf8f8bd5f7d419e8..ffdd7c14eb5097cc8285da090e4a72e1e3f43d86 100644 --- a/paddle/fluid/framework/details/reduce_op_handle_test.cc +++ b/paddle/fluid/framework/details/reduce_op_handle_test.cc @@ -194,7 +194,7 @@ struct TestReduceOpHandle { } f::Tensor result_tensor; - f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor); + f::TensorCopySync(rt, cpu_place, &result_tensor); float *ct = result_tensor.data(); for (int64_t j = 0; j < f::product(result_tensor.dims()); ++j) { @@ -239,7 +239,7 @@ struct TestReduceOpHandle { auto &rt = out_var->Get(); f::Tensor result_tensor; - f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor); + f::TensorCopySync(rt, cpu_place, &result_tensor); float *ct = result_tensor.data(); for (int64_t j = 0; j < f::product(result_tensor.dims()); ++j) { diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc index 3b7d61607301e685e67b5f4bc97fc837471e5722..5e6ed5cb7cdc534332d402380458f39aecd841b8 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc @@ -140,7 +140,9 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( if (timeout) { if (exception_) { - throw * exception_; + auto exp = *exception_; + exception_.reset(); + throw exp; } else { continue; } diff --git 
a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h index d70bbd4ef0eb02d1b473bf88e526996819aec5f9..d089b79d91327e38408439a8019ec5189ff6d189 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h @@ -22,6 +22,7 @@ #include #include "ThreadPool.h" // ThreadPool in thrird party +#include "paddle/fluid/framework/blocking_queue.h" #include "paddle/fluid/framework/details/ssa_graph_executor.h" namespace paddle { @@ -30,46 +31,6 @@ class Scope; namespace details { -template -class BlockingQueue { - public: - void Push(const T &item) { - { - std::lock_guard g(mutex_); - q_.emplace_back(item); - } - cv_.notify_one(); - } - - template - void Extend(const U &items) { - { - std::lock_guard g(mutex_); - for (auto &item : items) { - q_.emplace_back(item); - } - } - cv_.notify_all(); - } - - std::deque PopAll(size_t ms, bool *timeout) { - auto time = - std::chrono::system_clock::now() + std::chrono::milliseconds(ms); - std::unique_lock lock(mutex_); - *timeout = !cv_.wait_until(lock, time, [this] { return !q_.empty(); }); - std::deque ret; - if (!*timeout) { - std::swap(ret, q_); - } - return ret; - } - - private: - std::mutex mutex_; - std::condition_variable cv_; - std::deque q_; -}; - class ThreadedSSAGraphExecutor : public SSAGraphExecutor { public: ThreadedSSAGraphExecutor(size_t num_threads, bool use_event, diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 513e720fd099bcd898a6c73afd1a3a16f6f53aab..766bf0ab0c1c50146ad3f6e048738209428707b9 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -226,15 +226,15 @@ static bool has_fetch_operators( } void Executor::Run(const ProgramDesc& program, Scope* scope, - std::map& feed_targets, - std::map& fetch_targets, + std::map* feed_targets, + std::map* fetch_targets, bool create_vars, const std::string& feed_holder_name, const std::string& fetch_holder_name) { platform::RecordBlock b(kProgramId); bool has_feed_ops = - has_feed_operators(program.Block(0), feed_targets, feed_holder_name); + has_feed_operators(program.Block(0), *feed_targets, feed_holder_name); bool has_fetch_ops = - has_fetch_operators(program.Block(0), fetch_targets, fetch_holder_name); + has_fetch_operators(program.Block(0), *fetch_targets, fetch_holder_name); ProgramDesc* copy_program = const_cast(&program); if (!has_feed_ops || !has_fetch_ops) { @@ -250,7 +250,7 @@ void Executor::Run(const ProgramDesc& program, Scope* scope, feed_holder->SetPersistable(true); int i = 0; - for (auto& feed_target : feed_targets) { + for (auto& feed_target : (*feed_targets)) { std::string var_name = feed_target.first; VLOG(3) << "feed target's name: " << var_name; @@ -273,7 +273,7 @@ void Executor::Run(const ProgramDesc& program, Scope* scope, fetch_holder->SetPersistable(true); int i = 0; - for (auto& fetch_target : fetch_targets) { + for (auto& fetch_target : (*fetch_targets)) { std::string var_name = fetch_target.first; VLOG(3) << "fetch target's name: " << var_name; @@ -361,16 +361,16 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, void Executor::RunPreparedContext( ExecutorPrepareContext* ctx, Scope* scope, - std::map& feed_targets, - std::map& fetch_targets, bool create_vars, + std::map* feed_targets, + std::map* fetch_targets, bool create_vars, const std::string& feed_holder_name, const std::string& fetch_holder_name) { auto& 
global_block = ctx->prog_.Block(ctx->block_id_); PADDLE_ENFORCE( - has_feed_operators(global_block, feed_targets, feed_holder_name), + has_feed_operators(global_block, *feed_targets, feed_holder_name), "Program in ExecutorPrepareContext should has feed_ops."); PADDLE_ENFORCE( - has_fetch_operators(global_block, fetch_targets, fetch_holder_name), + has_fetch_operators(global_block, *fetch_targets, fetch_holder_name), "Program in the prepared context should has fetch_ops."); // map the data of feed_targets to feed_holder @@ -378,8 +378,8 @@ void Executor::RunPreparedContext( if (op->Type() == kFeedOpType) { std::string feed_target_name = op->Output("Out")[0]; int idx = boost::get(op->GetAttr("col")); - SetFeedVariable(scope, *feed_targets[feed_target_name], feed_holder_name, - idx); + SetFeedVariable(scope, *(*feed_targets)[feed_target_name], + feed_holder_name, idx); } } @@ -390,7 +390,7 @@ void Executor::RunPreparedContext( if (op->Type() == kFetchOpType) { std::string fetch_target_name = op->Input("X")[0]; int idx = boost::get(op->GetAttr("col")); - *fetch_targets[fetch_target_name] = + *(*fetch_targets)[fetch_target_name] = GetFetchVariable(*scope, fetch_holder_name, idx); } } diff --git a/paddle/fluid/framework/executor.h b/paddle/fluid/framework/executor.h index 43defdacf2a1c2f59cf3af2461ae6cfc4c61f5be..4a3d637e2d79f8cbd83412eea2d73e4b497ef1e7 100644 --- a/paddle/fluid/framework/executor.h +++ b/paddle/fluid/framework/executor.h @@ -55,8 +55,8 @@ class Executor { bool create_local_scope = true, bool create_vars = true); void Run(const ProgramDesc& program, Scope* scope, - std::map& feed_targets, - std::map& fetch_targets, + std::map* feed_targets, + std::map* fetch_targets, bool create_vars = true, const std::string& feed_holder_name = "feed", const std::string& fetch_holder_name = "fetch"); @@ -74,8 +74,8 @@ class Executor { bool create_vars = true); void RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, - std::map& feed_targets, - std::map& fetch_targets, + std::map* feed_targets, + std::map* fetch_targets, bool create_vars = true, const std::string& feed_holder_name = "feed", const std::string& fetch_holder_name = "fetch"); diff --git a/paddle/fluid/framework/init.cc b/paddle/fluid/framework/init.cc index b30f276b4b7c61fda1b40273ce6ccfa19738da41..85beae775b96c3b7e08a2795bcd0ec79b24faeb4 100644 --- a/paddle/fluid/framework/init.cc +++ b/paddle/fluid/framework/init.cc @@ -15,7 +15,6 @@ limitations under the License. 
*/ #include #include #include -#include #include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/operator.h" @@ -31,6 +30,7 @@ std::once_flag p2p_init_flag; void InitGflags(std::vector argv) { std::call_once(gflags_init_flag, [&]() { + argv.insert(argv.begin(), "dummy"); int argc = argv.size(); char **arr = new char *[argv.size()]; std::string line; @@ -44,20 +44,23 @@ void InitGflags(std::vector argv) { }); } -void InitP2P(int count) { +void InitP2P(std::vector devices) { #ifdef PADDLE_WITH_CUDA std::call_once(p2p_init_flag, [&]() { + int count = devices.size(); for (int i = 0; i < count; ++i) { for (int j = 0; j < count; ++j) { - if (i == j) continue; + if (devices[i] == devices[j]) continue; int can_acess = -1; - PADDLE_ENFORCE(cudaDeviceCanAccessPeer(&can_acess, i, j), - "Failed to test P2P access."); + PADDLE_ENFORCE( + cudaDeviceCanAccessPeer(&can_acess, devices[i], devices[j]), + "Failed to test P2P access."); if (can_acess != 1) { - LOG(WARNING) << "Cannot enable P2P access from " << i << " to " << j; + LOG(WARNING) << "Cannot enable P2P access from " << devices[i] + << " to " << devices[j]; } else { - cudaSetDevice(i); - cudaDeviceEnablePeerAccess(j, 0); + cudaSetDevice(devices[i]); + cudaDeviceEnablePeerAccess(devices[j], 0); } } } @@ -67,11 +70,26 @@ void InitP2P(int count) { void InitDevices(bool init_p2p) { /*Init all available devices by default */ + std::vector devices; +#ifdef PADDLE_WITH_CUDA + try { + int count = platform::GetCUDADeviceCount(); + for (int i = 0; i < count; ++i) { + devices.push_back(i); + } + } catch (const std::exception &exp) { + LOG(WARNING) << "Compiled with WITH_GPU, but no GPU found in runtime."; + } +#else + LOG(WARNING) + << "'CUDA' is not supported, Please re-compile with WITH_GPU option"; +#endif + InitDevices(init_p2p, devices); +} +void InitDevices(bool init_p2p, const std::vector devices) { std::vector places; - places.emplace_back(platform::CPUPlace()); int count = 0; - #ifdef PADDLE_WITH_CUDA try { count = platform::GetCUDADeviceCount(); @@ -83,12 +101,17 @@ void InitDevices(bool init_p2p) { << "'CUDA' is not supported, Please re-compile with WITH_GPU option"; #endif - for (int i = 0; i < count; ++i) { - places.emplace_back(platform::CUDAPlace(i)); + for (size_t i = 0; i < devices.size(); ++i) { + if (devices[i] >= count || devices[i] < 0) { + LOG(WARNING) << "Invalid devices id."; + continue; + } + places.emplace_back(platform::CUDAPlace(devices[i])); } if (init_p2p) { - InitP2P(count); + InitP2P(devices); } + places.emplace_back(platform::CPUPlace()); platform::DeviceContextPool::Init(places); } diff --git a/paddle/fluid/framework/init.h b/paddle/fluid/framework/init.h index 1155ca36049dc66e7ee40e8eca87285d7a728299..0e30594672927253cc8083dcb88bb867d63ec729 100644 --- a/paddle/fluid/framework/init.h +++ b/paddle/fluid/framework/init.h @@ -28,5 +28,7 @@ void InitGLOG(const std::string &prog_name); void InitDevices(bool init_p2p); +void InitDevices(bool init_p2p, const std::vector devices); + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc index 46c834b38b758a2e050d990a464600154cbe51e5..076c45713015797f86a3611dd333132bae40044d 100644 --- a/paddle/fluid/framework/op_desc.cc +++ b/paddle/fluid/framework/op_desc.cc @@ -205,8 +205,8 @@ void OpDesc::SetAttr(const std::string &name, const Attribute &v) { need_update_ = true; } -void OpDesc::SetBlockAttr(const std::string &name, BlockDesc &block) { - this->attrs_[name] = █ +void 
OpDesc::SetBlockAttr(const std::string &name, BlockDesc *block) { + this->attrs_[name] = block; need_update_ = true; } diff --git a/paddle/fluid/framework/op_desc.h b/paddle/fluid/framework/op_desc.h index cd6777e60a8e354ac634ba1c1fe5db63539f6e93..3ee36a47c156da67a9ff70852665fbbd464bea17 100644 --- a/paddle/fluid/framework/op_desc.h +++ b/paddle/fluid/framework/op_desc.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include #include #include "paddle/fluid/framework/attribute.h" @@ -73,7 +74,7 @@ class OpDesc { void SetAttr(const std::string &name, const Attribute &v); - void SetBlockAttr(const std::string &name, BlockDesc &block); + void SetBlockAttr(const std::string &name, BlockDesc *block); Attribute GetAttr(const std::string &name) const; diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index f97bd0827428feeb590fcad16c48f3461517a646..32576423a62a1a12f085d565e7ff267145bf979c 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -171,17 +171,6 @@ std::string OperatorBase::DebugStringEx(const Scope* scope) const { return ss.str(); } -void OperatorBase::Rename(const std::string& old_name, - const std::string& new_name) { - for (auto& input : inputs_) { - std::replace(input.second.begin(), input.second.end(), old_name, new_name); - } - for (auto& output : outputs_) { - std::replace(output.second.begin(), output.second.end(), old_name, - new_name); - } -} - OperatorBase::OperatorBase(const std::string& type, const VariableNameMap& inputs, const VariableNameMap& outputs, @@ -327,7 +316,6 @@ bool OpSupportGPU(const std::string& op_type) { auto it = all_kernels.find(op_type); if (it == all_kernels.end()) { // All control operator must support GPU - return true; } for (auto& kern_pair : it->second) { @@ -554,7 +542,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope, std::shared_ptr out(new Tensor); DataTransform(expected_kernel_key, kernel_type_for_var, *tensor_in, out.get()); - CopyVariableWithTensor(*var, *(out.get()), *trans_var); + CopyVariableWithTensor(*var, *(out.get()), trans_var); } } } diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index b7a7c69b4c8493f945926c75797c49d327a3197e..826cc57b725ab4b52e5d67ab82e939cbd62a8460 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -79,31 +79,28 @@ class OperatorBase { virtual ~OperatorBase() {} - template - inline const T& Attr(const std::string& name) const { - PADDLE_ENFORCE(attrs_.count(name) != 0, "%s should be in AttributeMap", - name); - return boost::get(attrs_.at(name)); - } - - /// if scope is not null, also show dimensions of arguments - virtual std::string DebugStringEx(const Scope* scope) const; - - std::string DebugString() const { return DebugStringEx(nullptr); } - - /// Net will call this interface function to Run an op. + /// Executor will call this interface function to Run an op. // The implementation should be written at RunImpl void Run(const Scope& scope, const platform::Place& place); // FIXME(typhoonzero): this is only used for recv_op to stop event_loop. 
virtual void Stop() {} - virtual bool IsNetOp() const { return false; } + /// if scope is not null, also show dimensions of arguments + virtual std::string DebugStringEx(const Scope* scope) const; + std::string DebugString() const { return DebugStringEx(nullptr); } virtual bool SupportGPU() const { return false; } - /// rename inputs outputs name - void Rename(const std::string& old_name, const std::string& new_name); + const std::string& Type() const { return type_; } + + template + inline const T& Attr(const std::string& name) const { + PADDLE_ENFORCE(attrs_.count(name) != 0, "%s should be in AttributeMap", + name); + return boost::get(attrs_.at(name)); + } + const AttributeMap& Attrs() const { return attrs_; } const VariableNameMap& Inputs() const { return inputs_; } const VariableNameMap& Outputs() const { return outputs_; } @@ -112,7 +109,7 @@ class OperatorBase { std::string Input(const std::string& name) const; //! Get a input which has multiple variables. const std::vector& Inputs(const std::string& name) const; - + //! Get all inputs variable names std::vector InputVars() const; //! Get a output with argument's name described in `op_proto` @@ -120,13 +117,9 @@ class OperatorBase { //! Get an output which has multiple variables. //! TODO add a vector_view to prevent memory copy. const std::vector& Outputs(const std::string& name) const; - + //! Get all outputs variable names virtual std::vector OutputVars(bool has_intermediate) const; - const std::string& Type() const { return type_; } - void SetType(const std::string& type) { type_ = type; } - const AttributeMap& Attrs() const { return attrs_; } - // Return a new operator instance, which is as same as this. // Use unique_ptr to prevent caller forget to delete this pointer. virtual std::unique_ptr Clone() const = 0; @@ -278,20 +271,6 @@ class ExecutionContext { return res; } - void ShareLoD(const std::string& in, const std::string& out, size_t i = 0, - size_t j = 0) const { - PADDLE_ENFORCE_LT(i, InputSize(in)); - PADDLE_ENFORCE_LT(j, OutputSize(out)); - auto* in_var = MultiInputVar(in)[i]; - auto* out_var = MultiOutputVar(out)[j]; - if (!in_var->IsType()) return; - PADDLE_ENFORCE(out_var->IsType(), - "The %d-th output of Output(%s) must be LoDTensor.", j, out); - auto in_tensor = in_var->Get(); - auto* out_tensor = out_var->GetMutable(); - out_tensor->set_lod(in_tensor.lod()); - } - platform::Place GetPlace() const { return device_context_.GetPlace(); } template diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index a673fa52880f3f14cdf11a39d2272880a97be19c..de644e851999920251c762a75c050e8182e950c6 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -74,7 +74,7 @@ ParallelExecutor::ParallelExecutor( member_->own_local_scope = false; PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size()); for (size_t i = 0; i < member_->places_.size(); ++i) { - member_->local_scopes_.emplace_back(local_scopes[i]); + member_->local_scopes_.emplace_back(&local_scopes[i]->NewScope()); } } diff --git a/paddle/fluid/framework/program_desc.cc b/paddle/fluid/framework/program_desc.cc index 16694bcf76486a9603c41dc19a58dd0a7cb2b719..64fb028f83a539d17885186d5d8ee6ef26f095e9 100644 --- a/paddle/fluid/framework/program_desc.cc +++ b/paddle/fluid/framework/program_desc.cc @@ -56,7 +56,7 @@ ProgramDesc::ProgramDesc(const ProgramDesc &o) { for (const auto &attr : op->Proto()->attrs()) { if (attr.type() == proto::AttrType::BLOCK) { size_t blk_idx 
= attr.block_idx(); - op->SetBlockAttr(attr.name(), *this->MutableBlock(blk_idx)); + op->SetBlockAttr(attr.name(), this->MutableBlock(blk_idx)); } } } @@ -73,7 +73,7 @@ ProgramDesc::ProgramDesc(const proto::ProgramDesc &desc) { for (const auto &attr : op->Proto()->attrs()) { if (attr.type() == proto::AttrType::BLOCK) { size_t blk_idx = attr.block_idx(); - op->SetBlockAttr(attr.name(), *this->MutableBlock(blk_idx)); + op->SetBlockAttr(attr.name(), this->MutableBlock(blk_idx)); } } } diff --git a/paddle/fluid/framework/prune.cc b/paddle/fluid/framework/prune.cc index 107c5bf8ecbc3b46dd5fae87c73d0be4f74d1587..57c1b822d8d4f095f33cba2bfd5210f7ee19dd9f 100644 --- a/paddle/fluid/framework/prune.cc +++ b/paddle/fluid/framework/prune.cc @@ -14,19 +14,19 @@ limitations under the License. */ #include "paddle/fluid/framework/prune.h" +#include + #include #include #include #include #include -#include - namespace paddle { namespace framework { -const std::string kFeedOpType = "feed"; -const std::string kFetchOpType = "fetch"; +const char kFeedOpType[] = "feed"; +const char kFetchOpType[] = "fetch"; bool HasDependentVar(const proto::OpDesc& op_desc, const std::set& dependent_vars) { @@ -68,7 +68,7 @@ bool HasSubBlock(const proto::OpDesc& op_desc) { // the child block to help pruning void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, int block_id, int parent_block_id, - std::set& dependent_vars) { + std::set* dependent_vars) { auto& block = input.blocks(block_id); auto& ops = block.ops(); @@ -90,11 +90,11 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, std::vector should_run; for (auto op_iter = ops.rbegin(); op_iter != ops.rend(); ++op_iter) { auto& op_desc = *op_iter; - if (IsTarget(op_desc) || HasDependentVar(op_desc, dependent_vars)) { + if (IsTarget(op_desc) || HasDependentVar(op_desc, *dependent_vars)) { // insert its input to the dependency graph for (auto& var : op_desc.inputs()) { for (auto& argu : var.arguments()) { - dependent_vars.insert(argu); + dependent_vars->insert(argu); } } should_run.push_back(true); @@ -138,7 +138,7 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, // GetSubBlockIndex(*op) is the idx of the sub_block in the input desc // output_block_id is the idx of the current block in the output desc prune_impl(input, output, GetSubBlockIndex(*op), output_block_id, - sub_block_dependent_vars); + &sub_block_dependent_vars); } } } @@ -181,7 +181,7 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, void Prune(const proto::ProgramDesc& input, proto::ProgramDesc* output) { std::set dependent_vars; output->clear_blocks(); - prune_impl(input, output, 0, -1, dependent_vars); + prune_impl(input, output, 0, -1, &dependent_vars); } void inference_optimize_impl(proto::ProgramDesc* input, int block_id) { diff --git a/paddle/fluid/framework/tensor_util.cc b/paddle/fluid/framework/tensor_util.cc index d2e60ab1dd16758a91d22ef6872edc5053ef88b3..e5bc74755f46449296a153e8b330968e6d9f1e1d 100644 --- a/paddle/fluid/framework/tensor_util.cc +++ b/paddle/fluid/framework/tensor_util.cc @@ -20,7 +20,7 @@ namespace paddle { namespace framework { void TensorCopy(const Tensor& src, const platform::Place& dst_place, - const platform::DeviceContext& ctx, Tensor* dst, bool sync) { + const platform::DeviceContext& ctx, Tensor* dst) { VLOG(3) << "TensorCopy " << src.dims() << " from " << src.place() << " to " << dst_place; src.check_memory_size(); @@ -48,9 +48,7 @@ void TensorCopy(const Tensor& src, 
const platform::Place& dst_place, auto ctx_gpu_place = boost::get(ctx_place); PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place); auto stream = - sync ? nullptr - : reinterpret_cast(ctx) - .stream(); + reinterpret_cast(ctx).stream(); memory::Copy(dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream); } else if (platform::is_cpu_place(src_place) && platform::is_gpu_place(dst_place)) { @@ -61,9 +59,7 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place, auto ctx_gpu_place = boost::get(ctx_place); PADDLE_ENFORCE_EQ(dst_gpu_place, ctx_gpu_place); auto stream = - sync ? nullptr - : reinterpret_cast(ctx) - .stream(); + reinterpret_cast(ctx).stream(); memory::Copy(dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, stream); } else if (platform::is_gpu_place(src_place) && platform::is_gpu_place(dst_place)) { @@ -72,9 +68,7 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place, auto ctx_place = ctx.GetPlace(); PADDLE_ENFORCE(platform::is_gpu_place(ctx_place)); auto stream = - sync ? nullptr - : reinterpret_cast(ctx) - .stream(); + reinterpret_cast(ctx).stream(); memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream); } #endif @@ -92,6 +86,41 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place, TensorCopy(src, dst_place, *dev_ctx, dst); } +void TensorCopySync(const Tensor& src, const platform::Place& dst_place, + Tensor* dst) { + VLOG(3) << "TensorCopySync " << src.dims() << " from " << src.place() + << " to " << dst_place; + src.check_memory_size(); + dst->Resize(src.dims()); + dst->set_layout(src.layout()); + auto src_place = src.place(); + auto src_ptr = src.data(); + auto dst_ptr = dst->mutable_data(dst_place, src.type()); + auto size = src.numel() * SizeOfType(src.type()); + if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) { + memory::Copy(boost::get(dst_place), dst_ptr, + boost::get(src_place), src_ptr, size); + } +#ifdef PADDLE_WITH_CUDA + else if (platform::is_gpu_place(src_place) && // NOLINT + platform::is_cpu_place(dst_place)) { + auto src_gpu_place = boost::get(src_place); + auto dst_cpu_place = boost::get(dst_place); + memory::Copy(dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr); + } else if (platform::is_cpu_place(src_place) && + platform::is_gpu_place(dst_place)) { + auto src_cpu_place = boost::get(src_place); + auto dst_gpu_place = boost::get(dst_place); + memory::Copy(dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, nullptr); + } else if (platform::is_gpu_place(src_place) && + platform::is_gpu_place(dst_place)) { + auto src_gpu_place = boost::get(src_place); + auto dst_gpu_place = boost::get(dst_place); + memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr); + } +#endif +} + template struct AnyDTypeVisitor { Predicate predicate_; diff --git a/paddle/fluid/framework/tensor_util.h b/paddle/fluid/framework/tensor_util.h index 3af68402dc56230171e858bf8f8f8c89c2bfe760..dca279b69382b80e055f661cefe84b81326704b5 100644 --- a/paddle/fluid/framework/tensor_util.h +++ b/paddle/fluid/framework/tensor_util.h @@ -24,10 +24,11 @@ namespace paddle { namespace framework { void TensorCopy(const Tensor& src, const platform::Place& dst_place, - const platform::DeviceContext& ctx, Tensor* dst, - bool sync = false); + const platform::DeviceContext& ctx, Tensor* dst); void TensorCopy(const Tensor& src, const platform::Place& dst_place, Tensor* dst); +void TensorCopySync(const Tensor& src, const platform::Place& dst_place, + Tensor* dst); template void 
TensorFromVector(const std::vector& src, diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt index cc45bfe9b17d767be039cc0d8d83234b6994d6c1..50f635a41a99b2ae292d13afde5637a3bf4e6f8c 100644 --- a/paddle/fluid/inference/CMakeLists.txt +++ b/paddle/fluid/inference/CMakeLists.txt @@ -21,7 +21,8 @@ endif() if(WITH_TESTING) add_subdirectory(tests/book) - if (TENSORRT_FOUND) - add_subdirectory(tensorrt) - endif() +endif() + +if (TENSORRT_FOUND) + add_subdirectory(tensorrt) +endif() diff --git a/paddle/fluid/inference/engine.h b/paddle/fluid/inference/engine.h new file mode 100644 index 0000000000000000000000000000000000000000..6b0ac92fa908427a89a6a5fa74dacc3b24abd1c3 --- /dev/null +++ b/paddle/fluid/inference/engine.h @@ -0,0 +1,52 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/fluid/framework/framework.pb.h" + +namespace paddle { +namespace inference { + +/* + * EngineBase is the base class of all inference engines. An inference engine + * takes a paddle program as input, and outputs the result in fluid Tensor + * format. It can be used to optimize performance of computation sub-blocks, for + * example, breaking down the original block into sub-blocks and executing each + * sub-block in a different engine. + * + * For example: + * At inference time, the resnet50 model can put most of the model into a subgraph + * and run it on a TensorRT engine. + * + * There are several engines such as TensorRT and other frameworks, so an + * EngineBase is put forward to give a unified interface for all the + * different engine implementations. + */ +class EngineBase { + public: + using DescType = ::paddle::framework::proto::BlockDesc; + + // Build the model and do some preparation, for example, in TensorRT, run + // createInferBuilder, buildCudaEngine. + virtual void Build(const DescType& paddle_model) = 0; + + // Execute the engine; this will run the inference network. + virtual void Execute(int batch_size) = 0; + + virtual ~EngineBase() {} +}; // class EngineBase + +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/io.cc b/paddle/fluid/inference/io.cc index 78d2f16746cf478c4424df929bd1f62b08f8a67c..65db7c7b5008dcb301e741ec17c3623715e10bb8 100644 --- a/paddle/fluid/inference/io.cc +++ b/paddle/fluid/inference/io.cc @@ -16,17 +16,29 @@ limitations under the License. */ #include #include +#include #include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/framework/feed_fetch_type.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/pybind/pybind.h" +DEFINE_string(devices, "", "The devices to be used which is joined by comma."); +DEFINE_bool(init_p2p, false, "Whether to init p2p."); + namespace paddle { namespace inference { -// Temporarily add this function for exposing framework::InitDevices() when -// linking the inference shared library.
-void Init(bool init_p2p) { framework::InitDevices(init_p2p); } +void Init(const std::vector argv) { + framework::InitGflags(argv); + // init devices + std::vector devices; + std::string token; + std::istringstream tokenStream(FLAGS_devices); + while (std::getline(tokenStream, token, ',')) { + devices.push_back(std::stoi(token)); + } + framework::InitDevices(FLAGS_init_p2p, devices); +} void ReadBinaryFile(const std::string& filename, std::string* contents) { std::ifstream fin(filename, std::ios::in | std::ios::binary); diff --git a/paddle/fluid/inference/io.h b/paddle/fluid/inference/io.h index ba3e45099ae7c1626bf11d9527d4fa4c7f772fec..caf599b1a68783f155cd134c2a29e9ffa49a0895 100644 --- a/paddle/fluid/inference/io.h +++ b/paddle/fluid/inference/io.h @@ -25,7 +25,7 @@ limitations under the License. */ namespace paddle { namespace inference { -void Init(bool init_p2p); +void Init(const std::vector argv); void LoadPersistables(framework::Executor* executor, framework::Scope* scope, const framework::ProgramDesc& main_program, diff --git a/paddle/fluid/inference/tensorrt/CMakeLists.txt b/paddle/fluid/inference/tensorrt/CMakeLists.txt index e39c0daac76e0993382868289f66351da3d16f8f..4b5866ad5dd5c2e33830b61e575ab92954cecb39 100644 --- a/paddle/fluid/inference/tensorrt/CMakeLists.txt +++ b/paddle/fluid/inference/tensorrt/CMakeLists.txt @@ -1 +1,4 @@ -nv_test(test_tensorrt SRCS test_tensorrt.cc DEPS dynload_cuda device_context dynamic_loader) +if(WITH_TESTING) + nv_test(test_tensorrt SRCS test_tensorrt.cc DEPS dynload_cuda device_context dynamic_loader) + nv_test(test_tensorrt_engine SRCS test_engine.cc engine.cc DEPS dynload_cuda) +endif() diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc new file mode 100644 index 0000000000000000000000000000000000000000..03a25f8e8b52b452cb621aeddf3563cc21a033df --- /dev/null +++ b/paddle/fluid/inference/tensorrt/engine.cc @@ -0,0 +1,135 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/inference/tensorrt/engine.h" + +#include +#include +#include +#include +#include "paddle/fluid/inference/tensorrt/helper.h" +#include "paddle/fluid/platform/enforce.h" + +namespace paddle { +namespace inference { +namespace tensorrt { + +void TensorRTEngine::Build(const DescType& paddle_model) { + PADDLE_ENFORCE(false, "not implemented"); +} + +void TensorRTEngine::Execute(int batch_size) { + infer_context_->enqueue(batch_size, buffers_.data(), *stream_, nullptr); + cudaStreamSynchronize(*stream_); +} + +TensorRTEngine::~TensorRTEngine() { + // clean buffer + for (auto& buffer : buffers_) { + if (buffer != nullptr) { + PADDLE_ENFORCE_EQ(0, cudaFree(buffer)); + buffer = nullptr; + } + } +} + +void TensorRTEngine::FreezeNetwork() { + PADDLE_ENFORCE(infer_builder_ != nullptr, + "Call InitNetwork first to initialize network."); + PADDLE_ENFORCE(infer_network_ != nullptr, + "Call InitNetwork first to initialize network."); + // build engine. 
+ infer_builder_->setMaxBatchSize(max_batch_); + infer_builder_->setMaxWorkspaceSize(max_workspace_); + + infer_engine_.reset(infer_builder_->buildCudaEngine(*infer_network_)); + PADDLE_ENFORCE(infer_engine_ != nullptr, "build cuda engine failed!"); + + infer_context_.reset(infer_engine_->createExecutionContext()); + + // allocate GPU buffers. + buffers_.resize(buffer_sizes_.size(), nullptr); + for (auto& item : buffer_sizes_) { + if (item.second == 0) { + auto slot_offset = infer_engine_->getBindingIndex(item.first.c_str()); + item.second = kDataTypeSize[static_cast( + infer_engine_->getBindingDataType(slot_offset))] * + AccumDims(infer_engine_->getBindingDimensions(slot_offset)); + } + PADDLE_ENFORCE_EQ(0, cudaMalloc(&buffer(item.first), item.second)); + } +} + +nvinfer1::ITensor* TensorRTEngine::DeclareInput(const std::string& name, + nvinfer1::DataType dtype, + const nvinfer1::Dims& dim) { + PADDLE_ENFORCE_EQ(0, buffer_sizes_.count(name), "duplicate input name %s", + name); + + PADDLE_ENFORCE(infer_network_ != nullptr, "should initnetwork first"); + auto* input = infer_network_->addInput(name.c_str(), dtype, dim); + PADDLE_ENFORCE(input, "infer network add input %s failed", name); + + buffer_sizes_[name] = kDataTypeSize[static_cast(dtype)] * AccumDims(dim); + return input; +} + +void TensorRTEngine::DeclareOutput(const nvinfer1::ILayer* layer, int offset, + const std::string& name) { + PADDLE_ENFORCE_EQ(0, buffer_sizes_.count(name), "duplicate output name %s", + name); + + auto* output = layer->getOutput(offset); + PADDLE_ENFORCE(output != nullptr); + output->setName(name.c_str()); + infer_network_->markOutput(*output); + // output buffers' size can only be decided latter, set zero here to mark this + // and will reset latter. + buffer_sizes_[name] = 0; +} + +void* TensorRTEngine::GetOutputInGPU(const std::string& name) { + return buffer(name); +} + +void TensorRTEngine::GetOutputInCPU(const std::string& name, void* dst, + size_t max_size) { + // determine data size + auto it = buffer_sizes_.find(name); + PADDLE_ENFORCE(it != buffer_sizes_.end()); + PADDLE_ENFORCE_GT(it->second, 0); + PADDLE_ENFORCE_GE(max_size, it->second); + + PADDLE_ENFORCE_EQ(0, cudaMemcpyAsync(dst, buffer(name), it->second, + cudaMemcpyDeviceToHost, *stream_)); +} + +void*& TensorRTEngine::buffer(const std::string& name) { + PADDLE_ENFORCE(infer_engine_ != nullptr, "call FreezeNetwork first."); + auto it = buffer_sizes_.find(name); + PADDLE_ENFORCE(it != buffer_sizes_.end()); + auto slot_offset = infer_engine_->getBindingIndex(name.c_str()); + return buffers_[slot_offset]; +} + +void TensorRTEngine::SetInputFromCPU(const std::string& name, void* data, + size_t size) { + void* buf = buffer(name); + PADDLE_ENFORCE_EQ( + 0, cudaMemcpyAsync(buf, data, size, cudaMemcpyHostToDevice, *stream_)); +} + +} // namespace tensorrt +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/engine.h b/paddle/fluid/inference/tensorrt/engine.h new file mode 100644 index 0000000000000000000000000000000000000000..82d8c3df4ece7e56a72b650f8ea58f3953af3b64 --- /dev/null +++ b/paddle/fluid/inference/tensorrt/engine.h @@ -0,0 +1,146 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include +#include +#include +#include +#include "paddle/fluid/inference/engine.h" +#include "paddle/fluid/inference/tensorrt/helper.h" + +namespace paddle { +namespace inference { +namespace tensorrt { + +/* + * TensorRT Engine. + * + * There are two alternative ways to use it: one is to build it from a paddle + * protobuf model, the other is to manually construct the network. + */ +class TensorRTEngine : public EngineBase { + public: + // Weight is model parameter. + class Weight { + public: + Weight(nvinfer1::DataType dtype, void* value, int num_elem) { + w_.type = dtype; + w_.values = value; + w_.count = num_elem; + } + const nvinfer1::Weights& get() { return w_; } + + private: + nvinfer1::Weights w_; + }; + + TensorRTEngine(int max_batch, int max_workspace, cudaStream_t* stream, + nvinfer1::ILogger& logger = NaiveLogger::Global()) + : max_batch_(max_batch), + max_workspace_(max_workspace), + stream_(stream), + logger_(logger) {} + + virtual ~TensorRTEngine(); + + // TODO(Superjomn) implement it later when graph segmentation is supported. + void Build(const DescType& paddle_model) override; + + void Execute(int batch_size) override; + + // Initialize the inference network, so that TensorRT layers can be added to + // this network. + void InitNetwork() { + infer_builder_.reset(createInferBuilder(logger_)); + infer_network_.reset(infer_builder_->createNetwork()); + } + // After finishing adding ops, freeze this network and create the execution + // environment. + void FreezeNetwork(); + + // Add an input and set its name, data type and dimension. + nvinfer1::ITensor* DeclareInput(const std::string& name, + nvinfer1::DataType dtype, + const nvinfer1::Dims& dim); + // Set the offset-th output from a layer as the network's output, and set its + // name. + void DeclareOutput(const nvinfer1::ILayer* layer, int offset, + const std::string& name); + + // GPU memory address for an ITensor with a specific name. One can operate on + // this memory directly for acceleration, for example, output the converted + // data directly to the buffer to save data copy overhead. + // NOTE this should be used after calling `FreezeNetwork`. + void*& buffer(const std::string& name); + + // Fill an input from CPU memory with name and size. + void SetInputFromCPU(const std::string& name, void* data, size_t size); + // TODO(Superjomn) is this method necessary given that buffer(xxx) can be + // accessed directly. Fill an input from GPU memory with name and size. + void SetInputFromGPU(const std::string& name, void* data, size_t size); + // Get an output called name; the output of tensorrt is on the GPU, so this + // method just returns the output's GPU memory address. + void* GetOutputInGPU(const std::string& name); + // LOW EFFICIENCY! Get output to CPU; this will trigger a memory copy from GPU + // to CPU.
+ void GetOutputInCPU(const std::string& name, void* dst, size_t max_size); + + nvinfer1::ICudaEngine* engine() { return infer_engine_.get(); } + nvinfer1::INetworkDefinition* network() { return infer_network_.get(); } + + private: + // the max batch size + int max_batch_; + // the max memory size the engine uses + int max_workspace_; + cudaStream_t* stream_; + nvinfer1::ILogger& logger_; + + std::vector buffers_; + // max data size for the buffers. + std::unordered_map buffer_sizes_; + + // TensorRT related internal members + template + struct Destroyer { + void operator()(T* x) { x->destroy(); } + }; + template + using infer_ptr = std::unique_ptr>; + infer_ptr infer_builder_; + infer_ptr infer_network_; + infer_ptr infer_engine_; + infer_ptr infer_context_; +}; // class TensorRTEngine + +// Add a layer__ into engine__ with args ARGS. +// For example: +// TRT_ENGINE_ADD_LAYER(xxx, FullyConnected, input, dim, weights, bias) +// +// Reference +// https://docs.nvidia.com/deeplearning/sdk/tensorrt-developer-guide/index.html#charRNN_define_network +// +// will add a fully connected layer into the engine. +// TensorRT has too many layers, so it is not wise to add member functions for +// them, and a macro like this is more extensible when the underlying TensorRT +// library adds new layer support. +#define TRT_ENGINE_ADD_LAYER(engine__, layer__, ARGS...) \ + engine__->network()->add##layer__(ARGS); + +} // namespace tensorrt +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/helper.h b/paddle/fluid/inference/tensorrt/helper.h new file mode 100644 index 0000000000000000000000000000000000000000..796283d325ceb84c733eff5c119b808300bca069 --- /dev/null +++ b/paddle/fluid/inference/tensorrt/helper.h @@ -0,0 +1,88 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include +#include +#include +#include "paddle/fluid/platform/dynload/tensorrt.h" +#include "paddle/fluid/platform/enforce.h" + +namespace paddle { +namespace inference { +namespace tensorrt { + +namespace dy = paddle::platform::dynload; + +static size_t AccumDims(nvinfer1::Dims dims) { + size_t num = dims.nbDims == 0 ? 0 : 1; + for (int i = 0; i < dims.nbDims; i++) { + PADDLE_ENFORCE_GT(dims.d[i], 0); + num *= dims.d[i]; + } + return num; +} + +// TensorRT data type to size +const int kDataTypeSize[] = { + 4, // kFLOAT + 2, // kHALF + 1, // kINT8 + 4 // kINT32 +}; + +// The following two APIs are implemented in TensorRT's header file and cannot +// be loaded from the dynamic library, so we create our own implementations and +// directly call the methods from the dynamic library.
+static nvinfer1::IBuilder* createInferBuilder(nvinfer1::ILogger& logger) { + return static_cast( + dy::createInferBuilder_INTERNAL(&logger, NV_TENSORRT_VERSION)); +} +static nvinfer1::IRuntime* createInferRuntime(nvinfer1::ILogger& logger) { + return static_cast( + dy::createInferRuntime_INTERNAL(&logger, NV_TENSORRT_VERSION)); +} + +// A logger for create TensorRT infer builder. +class NaiveLogger : public nvinfer1::ILogger { + public: + void log(nvinfer1::ILogger::Severity severity, const char* msg) override { + switch (severity) { + case Severity::kINFO: + LOG(INFO) << msg; + break; + case Severity::kWARNING: + LOG(WARNING) << msg; + break; + case Severity::kINTERNAL_ERROR: + case Severity::kERROR: + LOG(ERROR) << msg; + break; + default: + break; + } + } + + static nvinfer1::ILogger& Global() { + static nvinfer1::ILogger* x = new NaiveLogger; + return *x; + } + + virtual ~NaiveLogger() override {} +}; + +} // namespace tensorrt +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/test_engine.cc b/paddle/fluid/inference/tensorrt/test_engine.cc new file mode 100644 index 0000000000000000000000000000000000000000..c6e1c71cdc8eea48ab6197057d1d389f03c9cf54 --- /dev/null +++ b/paddle/fluid/inference/tensorrt/test_engine.cc @@ -0,0 +1,83 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include +#include +#include + +#include "paddle/fluid/inference/tensorrt/engine.h" +#include "paddle/fluid/platform/enforce.h" + +namespace paddle { +namespace inference { +namespace tensorrt { + +class TensorRTEngineTest : public ::testing::Test { + protected: + void SetUp() override { + ASSERT_EQ(0, cudaStreamCreate(&stream_)); + engine_ = new TensorRTEngine(1, 1 << 10, &stream_); + engine_->InitNetwork(); + } + + void TearDown() override { + delete engine_; + cudaStreamDestroy(stream_); + } + + protected: + TensorRTEngine* engine_; + cudaStream_t stream_; +}; + +TEST_F(TensorRTEngineTest, add_layer) { + const int size = 1; + + float raw_weight[size] = {2.}; // Weight in CPU memory. 
+ float raw_bias[size] = {3.}; + + LOG(INFO) << "create weights"; + TensorRTEngine::Weight weight(nvinfer1::DataType::kFLOAT, raw_weight, size); + TensorRTEngine::Weight bias(nvinfer1::DataType::kFLOAT, raw_bias, size); + auto* x = engine_->DeclareInput("x", nvinfer1::DataType::kFLOAT, + nvinfer1::DimsCHW{1, 1, 1}); + auto* fc_layer = TRT_ENGINE_ADD_LAYER(engine_, FullyConnected, *x, size, + weight.get(), bias.get()); + PADDLE_ENFORCE(fc_layer != nullptr); + + engine_->DeclareOutput(fc_layer, 0, "y"); + LOG(INFO) << "freeze network"; + engine_->FreezeNetwork(); + ASSERT_EQ(engine_->engine()->getNbBindings(), 2); + + // fill in real data + float x_v = 1234; + engine_->SetInputFromCPU("x", reinterpret_cast(&x_v), + 1 * sizeof(float)); + LOG(INFO) << "to execute"; + engine_->Execute(1); + + LOG(INFO) << "to get output"; + // void* y_v = + float y_cpu; + engine_->GetOutputInCPU("y", &y_cpu, sizeof(float)); + + LOG(INFO) << "to checkout output"; + ASSERT_EQ(y_cpu, x_v * 2 + 3); +} + +} // namespace tensorrt +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/test_tensorrt.cc b/paddle/fluid/inference/tensorrt/test_tensorrt.cc index a81a708e7a79225fd52c4b8e081afdcd8fe7e9ad..aed5b5e1a22cbed1256d4f28d0a8a4c29c6cc744 100644 --- a/paddle/fluid/inference/tensorrt/test_tensorrt.cc +++ b/paddle/fluid/inference/tensorrt/test_tensorrt.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include #include diff --git a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc index 1e6555bb02033a28dedd2a1d1962981dfcc97cc2..1a685b9e2ebcd7d4b5a057b506bccf6adcd9952c 100644 --- a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc +++ b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc @@ -62,5 +62,21 @@ TEST(inference, image_classification) { LOG(INFO) << output2.dims(); CheckError(output1, output2); + + // float16 inference requires cuda GPUs with >= 5.3 compute capability + if (paddle::platform::GetCUDAComputeCapability(0) >= 53) { + paddle::framework::LoDTensor output3; + std::vector cpu_fetchs3; + cpu_fetchs3.push_back(&output3); + + LOG(INFO) << "--- GPU Runs in float16 mode: ---"; + std::string fp16_dirname = dirname; + fp16_dirname.replace(fp16_dirname.find("book/"), + std::string("book/").size(), "book/float16_"); + TestInference( + fp16_dirname, cpu_feeds, cpu_fetchs3, FLAGS_repeat); + + CheckError(output2, output3); + } #endif } diff --git a/paddle/fluid/inference/tests/test_helper.h b/paddle/fluid/inference/tests/test_helper.h index 117472599f7c4874ab05e29c6ecb46fd61d0db9c..af2a7a5620487a10c1df6152fc4e4bf67b150752 100644 --- a/paddle/fluid/inference/tests/test_helper.h +++ b/paddle/fluid/inference/tests/test_helper.h @@ -178,10 +178,10 @@ void TestInference(const std::string& dirname, std::unique_ptr ctx; if (PrepareContext) { ctx = executor.Prepare(*inference_program, 0); - executor.RunPreparedContext(ctx.get(), scope, feed_targets, fetch_targets, - CreateVars); + executor.RunPreparedContext(ctx.get(), scope, &feed_targets, + &fetch_targets, CreateVars); } else { - executor.Run(*inference_program, scope, feed_targets, fetch_targets, + executor.Run(*inference_program, scope, &feed_targets, &fetch_targets, CreateVars); } @@ -197,10 +197,10 @@ void TestInference(const std::string& dirname, if (PrepareContext) { // Note: if you change the inference_program, you need to call // executor.Prepare() again to get a new ExecutorPrepareContext. - executor.RunPreparedContext(ctx.get(), scope, feed_targets, - fetch_targets, CreateVars); + executor.RunPreparedContext(ctx.get(), scope, &feed_targets, + &fetch_targets, CreateVars); } else { - executor.Run(*inference_program, scope, feed_targets, fetch_targets, + executor.Run(*inference_program, scope, &feed_targets, &fetch_targets, CreateVars); } } diff --git a/paddle/fluid/operators/adam_op.h b/paddle/fluid/operators/adam_op.h index b332b6716369ca355454dc57fbd6cc2cbc71c658..f82ff47b52490c354f383515d430d14e24cbf6af 100644 --- a/paddle/fluid/operators/adam_op.h +++ b/paddle/fluid/operators/adam_op.h @@ -14,6 +14,7 @@ limitations under the License. 
*/ #pragma once #include // for sqrt in CPU and CUDA +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/detail/safe_ref.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" @@ -24,8 +25,14 @@ namespace operators { namespace scatter = paddle::operators::math::scatter; +struct GPUAdam; +struct CPUAdam; + +template +struct AdamFunctor; + template -struct AdamFunctor { +struct AdamFunctor { T beta1_; T beta2_; T epsilon_; @@ -71,6 +78,7 @@ struct AdamFunctor { // Calculation lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); + mom1 = beta1_ * mom1 + (1 - beta1_) * g; mom2 = beta2_ * mom2 + (1 - beta2_) * g * g; p -= lr * (mom1 / (sqrt(mom2) + epsilon_)); @@ -82,6 +90,71 @@ struct AdamFunctor { } }; +template +struct AdamFunctor { + T beta1_; + T beta2_; + T epsilon_; + + const T* beta1_pow_; + const T* beta2_pow_; + const T* moment1_; + T* moment1_out_; + const T* moment2_; + T* moment2_out_; + const T* lr_; + const T* grad_; + const T* param_; + T* param_out_; + + AdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow, + const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2, + T* mom2_out, const T* lr, const T* grad, const T* param, + T* param_out) + : beta1_(beta1), + beta2_(beta2), + epsilon_(epsilon), + beta1_pow_(beta1_pow), + beta2_pow_(beta2_pow), + moment1_(mom1), + moment1_out_(mom1_out), + moment2_(mom2), + moment2_out_(mom2_out), + lr_(lr), + grad_(grad), + param_(param), + param_out_(param_out) {} + + void operator()(size_t numel) const { + Eigen::Map> g{ + grad_, static_cast(numel)}; + Eigen::Map> mom1{ + moment1_, static_cast(numel)}; + Eigen::Map> mom2{ + moment2_, static_cast(numel)}; + Eigen::Map> param{ + param_, static_cast(numel)}; + + Eigen::Map> param_out{ + param_out_, static_cast(numel)}; + Eigen::Map> moment1_out{ + moment1_out_, static_cast(numel)}; + Eigen::Map> moment2_out{ + moment2_out_, static_cast(numel)}; + + T lr = *lr_; + T beta1_pow = *beta1_pow_; + T beta2_pow = *beta2_pow_; + + // Calculation + lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); + + moment1_out = beta1_ * mom1 + (1 - beta1_) * g; + moment2_out = beta2_ * mom2 + (1 - beta2_) * g * g; + param_out = param - lr * (moment1_out / (moment2_out.sqrt() + epsilon_)); + } +}; + template struct SparseAdamFunctor { T beta1_; @@ -134,6 +207,7 @@ struct SparseAdamFunctor { T p = param_[rows_[i] * row_numel_ + j]; lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); + mom1 = beta1_ * mom1 + (1 - beta1_) * g; mom2 = beta2_ * mom2 + (1 - beta2_) * g * g; p -= lr * (mom1 / (sqrt(mom2) + epsilon_)); @@ -177,19 +251,34 @@ class AdamOpKernel : public framework::OpKernel { if (grad_var->IsType()) { auto& grad = Ref(ctx.Input("Grad"), "Must set Grad"); - AdamFunctor functor( - beta1, beta2, epsilon, beta1_pow.template data(), - beta2_pow.template data(), mom1.template data(), - mom1_out.template mutable_data(ctx.GetPlace()), - mom2.template data(), - mom2_out.template mutable_data(ctx.GetPlace()), - lr.template data(), grad.template data(), - param.template data(), - param_out.template mutable_data(ctx.GetPlace())); - platform::ForRange for_range( - static_cast(ctx.device_context()), - param.numel()); - for_range(functor); + + if (platform::is_cpu_place(ctx.GetPlace())) { + AdamFunctor functor( + beta1, beta2, epsilon, beta1_pow.template data(), + beta2_pow.template data(), mom1.template data(), + mom1_out.template mutable_data(ctx.GetPlace()), + mom2.template data(), + mom2_out.template mutable_data(ctx.GetPlace()), + lr.template data(), grad.template data(), + 
param.template data(), + param_out.template mutable_data(ctx.GetPlace())); + functor(param.numel()); + } else if (platform::is_gpu_place(ctx.GetPlace())) { + AdamFunctor functor( + beta1, beta2, epsilon, beta1_pow.template data(), + beta2_pow.template data(), mom1.template data(), + mom1_out.template mutable_data(ctx.GetPlace()), + mom2.template data(), + mom2_out.template mutable_data(ctx.GetPlace()), + lr.template data(), grad.template data(), + param.template data(), + param_out.template mutable_data(ctx.GetPlace())); + + platform::ForRange for_range( + static_cast(ctx.device_context()), + param.numel()); + for_range(functor); + } } else if (grad_var->IsType()) { auto& grad = Ref(ctx.Input("Grad"), "Must set Grad"); diff --git a/paddle/fluid/operators/beam_search_decode_op.h b/paddle/fluid/operators/beam_search_decode_op.h index 4cb0457d9285e20d4b6a2f9987b7fdb1c6ac157f..3c01f81c83555b985bb6b7a9e3330ab594a62863 100644 --- a/paddle/fluid/operators/beam_search_decode_op.h +++ b/paddle/fluid/operators/beam_search_decode_op.h @@ -223,8 +223,9 @@ void BeamSearchDecoder::ConvertSentenceVectorToLodTensor( sentence_vector_list[src_idx].size()); } - auto cpu_place = new paddle::platform::CPUPlace(); - paddle::platform::CPUDeviceContext cpu_ctx(*cpu_place); + auto cpu_place = std::unique_ptr( + new paddle::platform::CPUPlace()); + paddle::platform::CPUDeviceContext cpu_ctx(*cpu_place.get()); framework::LoD lod; lod.push_back(source_level_lod); diff --git a/paddle/fluid/operators/bilinear_interp_op.cc b/paddle/fluid/operators/bilinear_interp_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..69f79bf93be8ac7df9cab43b84cf755f2f3dfeaa --- /dev/null +++ b/paddle/fluid/operators/bilinear_interp_op.cc @@ -0,0 +1,94 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#include "paddle/fluid/operators/bilinear_interp_op.h" +#include +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using framework::Tensor; + +class BilinearInterpOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of BilinearInterOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of BilinearInterOp should not be null."); + + auto dim_x = ctx->GetInputDim("X"); // NCHW format + int out_h = ctx->Attrs().Get("out_h"); + int out_w = ctx->Attrs().Get("out_w"); + PADDLE_ENFORCE_EQ(dim_x.size(), 4, "X's dimension must be 4"); + + std::vector dim_out({dim_x[0], dim_x[1], out_h, out_w}); + ctx->SetOutputDim("Out", framework::make_ddim(dim_out)); + } +}; + +class BilinearInterpOpMaker : public framework::OpProtoAndCheckerMaker { + public: + BilinearInterpOpMaker(OpProto* proto, OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", + "(Tensor) The input tensor of bilinear interpolation, " + "This is a 4-D tensor with shape of (N x C x h x w)"); + AddOutput("Out", + "(Tensor) The dimension of output is (N x C x out_h x out_w]"); + + AddAttr("out_h", "(int) output height of bilinear interpolation op."); + AddAttr("out_w", "(int) output width of bilinear interpolation op."); + AddComment(R"DOC( + Bilinear interpolation is an extension of linear interpolation for + interpolating functions of two variables (e.g. H-direction and + W-direction in this op) on a rectilinear 2D grid. + + The key idea is to perform linear interpolation first in one + direction, and then again in the other direction. + + For details, please refer to Wikipedia: + https://en.wikipedia.org/wiki/Bilinear_interpolation + )DOC"); + } +}; + +class BilinearInterpOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), + "Input(Out@GRAD) should not be null"); + auto dim_x = ctx->GetInputDim("X"); + if (ctx->HasOutput(framework::GradVarName("X"))) { + ctx->SetOutputDim(framework::GradVarName("X"), dim_x); + } + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(bilinear_interp, ops::BilinearInterpOp, + ops::BilinearInterpOpMaker, + paddle::framework::DefaultGradOpDescMaker); +REGISTER_OPERATOR(bilinear_interp_grad, ops::BilinearInterpOpGrad); +REGISTER_OP_CPU_KERNEL(bilinear_interp, ops::BilinearInterpKernel); +REGISTER_OP_CPU_KERNEL(bilinear_interp_grad, + ops::BilinearInterpGradKernel); diff --git a/paddle/fluid/operators/bilinear_interp_op.cu b/paddle/fluid/operators/bilinear_interp_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..82eb9e83bd84e6ec6881facbb2fac0aebce93d55 --- /dev/null +++ b/paddle/fluid/operators/bilinear_interp_op.cu @@ -0,0 +1,186 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/fluid/operators/bilinear_interp_op.h" +#include "paddle/fluid/platform/cuda_helper.h" + +namespace paddle { +namespace operators { + +using framework::Tensor; + +template +__global__ void KeBilinearInterpFw( + const T* in, const size_t in_img_h, const size_t in_img_w, + const size_t input_h, const size_t input_w, T* out, const size_t out_img_h, + const size_t out_img_w, const size_t output_h, const size_t output_w, + const size_t num_channels, const T ratio_h, const T ratioW) { + int nthreads = output_h * output_w; + int tid = blockIdx.x * blockDim.x + threadIdx.x; + if (tid < nthreads) { + int out_id_h = tid / output_w; + int out_id_w = tid % output_w; + int in_img_size = input_w / num_channels; + int out_img_size = output_w / num_channels; + int channel_id = out_id_w / out_img_size; + + int out_img_idy = (out_id_w % out_img_size) / out_img_w; + int in_img_idy = ratio_h * out_img_idy; + int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; + T h1lambda = ratio_h * out_img_idy - in_img_idy; + T h2lambda = 1.f - h1lambda; + + int out_img_idx = tid % out_img_w; + int in_img_idx = ratioW * out_img_idx; + int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; + T w1lambda = ratioW * out_img_idx - in_img_idx; + T w2lambda = 1.f - w1lambda; + + const T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size + + in_img_idy * in_img_w + in_img_idx]; + + // bilinear interpolation + out[out_id_h * output_w + out_id_w] = + h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id]) + + h1lambda * (w2lambda * in_pos[h_id * in_img_w] + + w1lambda * in_pos[h_id * in_img_w + w_id]); + } +} + +template +__global__ void KeBilinearInterpBw( + T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, + const size_t input_w, const T* out, const size_t out_img_h, + const size_t out_img_w, const size_t output_h, const size_t output_w, + const size_t num_channels, const T ratio_h, const T ratioW) { + int nthreads = output_h * output_w; + int tid = blockIdx.x * blockDim.x + threadIdx.x; + if (tid < nthreads) { + int out_id_h = tid / output_w; + int out_id_w = tid % output_w; + int in_img_size = input_w / num_channels; + int out_img_size = output_w / num_channels; + int channel_id = out_id_w / out_img_size; + + int out_img_idy = (out_id_w % out_img_size) / out_img_w; + int in_img_idy = ratio_h * out_img_idy; + int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; + T h1lambda = ratio_h * out_img_idy - in_img_idy; + T h2lambda = 1.f - h1lambda; + + int out_img_idx = tid % out_img_w; + int in_img_idx = ratioW * out_img_idx; + int w_id = (in_img_idx < in_img_w - 1) ? 
1 : 0; + T w1lambda = ratioW * out_img_idx - in_img_idx; + T w2lambda = 1.f - w1lambda; + + T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size + + in_img_idy * in_img_w + in_img_idx]; + const T* out_pos = &out[out_id_h * output_w + out_id_w]; + atomicAdd(&in_pos[0], h2lambda * w2lambda * out_pos[0]); + atomicAdd(&in_pos[w_id], h2lambda * w1lambda * out_pos[0]); + atomicAdd(&in_pos[h_id * in_img_w], h1lambda * w2lambda * out_pos[0]); + atomicAdd(&in_pos[h_id * in_img_w + w_id], + h1lambda * w1lambda * out_pos[0]); + } +} + +template +class BilinearInterpOpCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), + "This kernel only runs on GPU device."); + auto* input_t = ctx.Input("X"); // float tensor + auto* output_t = ctx.Output("Out"); // float tensor + auto* input = input_t->data(); + auto* output = output_t->mutable_data(ctx.GetPlace()); + + int out_h = ctx.Attr("out_h"); + int out_w = ctx.Attr("out_w"); + int batch_size = input_t->dims()[0]; + int channels = input_t->dims()[1]; + int in_h = input_t->dims()[2]; + int in_w = input_t->dims()[3]; + + int in_hw = in_h * in_w; + int out_hw = out_h * out_w; + int in_chw = channels * in_hw; + int out_chw = channels * out_hw; + + T ratio_h = (out_h > 1) ? static_cast(in_h - 1) / (out_h - 1) : 0.f; + T ratio_w = (out_w > 1) ? static_cast(in_w - 1) / (out_w - 1) : 0.f; + + if (in_h == out_h && in_w == out_w) { + memcpy(output, input, input_t->numel() * sizeof(T)); + } else { + int threadNum = batch_size * out_chw; + int blocks = (threadNum + 1024 - 1) / 1024; + + KeBilinearInterpFw< + T><<>>( + input, in_h, in_w, batch_size, in_chw, output, out_h, out_w, + batch_size, out_chw, channels, ratio_h, ratio_w); + } + } +}; + +template +class BilinearInterpGradOpCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* d_input_t = ctx.Output(framework::GradVarName("X")); + auto* d_output_t = ctx.Input(framework::GradVarName("Out")); + auto* d_input = d_input_t->mutable_data(ctx.GetPlace()); + auto* d_output = d_output_t->data(); + + auto& device_ctx = + ctx.template device_context(); + math::SetConstant zero; + zero(device_ctx, d_input_t, static_cast(0.0)); + + int out_h = ctx.Attr("out_h"); + int out_w = ctx.Attr("out_w"); + int batch_size = d_input_t->dims()[0]; + int channels = d_input_t->dims()[1]; + int in_h = d_input_t->dims()[2]; + int in_w = d_input_t->dims()[3]; + + int in_hw = in_h * in_w; + int out_hw = out_h * out_w; + int in_chw = channels * in_hw; + int out_chw = channels * out_hw; + + T ratio_h = (out_h > 1) ? static_cast(in_h - 1) / (out_h - 1) : 0.f; + T ratio_w = (out_w > 1) ? 
static_cast(in_w - 1) / (out_w - 1) : 0.f; + + if (in_h == out_h && in_w == out_w) { + memcpy(d_input, d_output, d_input_t->numel() * sizeof(T)); + } else { + int threadNum = batch_size * out_chw; + int blocks = (threadNum + 1024 - 1) / 1024; + + KeBilinearInterpBw< + T><<>>( + d_input, in_h, in_w, batch_size, in_chw, d_output, out_h, out_w, + batch_size, out_chw, channels, ratio_h, ratio_w); + } + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL(bilinear_interp, + ops::BilinearInterpOpCUDAKernel); +REGISTER_OP_CUDA_KERNEL(bilinear_interp_grad, + ops::BilinearInterpGradOpCUDAKernel); diff --git a/paddle/fluid/operators/bilinear_interp_op.h b/paddle/fluid/operators/bilinear_interp_op.h new file mode 100644 index 0000000000000000000000000000000000000000..f6cd77e4d49b53ecde6a84908cdffc7e1e02ac6a --- /dev/null +++ b/paddle/fluid/operators/bilinear_interp_op.h @@ -0,0 +1,143 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +template +class BilinearInterpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* input_t = ctx.Input("X"); // float tensor + auto* output_t = ctx.Output("Out"); // float tensor + auto* input = input_t->data(); + auto* output = output_t->mutable_data(ctx.GetPlace()); + + int out_h = ctx.Attr("out_h"); + int out_w = ctx.Attr("out_w"); + int batch_size = input_t->dims()[0]; + int channels = input_t->dims()[1]; + int in_h = input_t->dims()[2]; + int in_w = input_t->dims()[3]; + + int in_hw = in_h * in_w; + int out_hw = out_h * out_w; + int in_chw = channels * in_hw; + int out_chw = channels * out_hw; + + T ratio_h = (out_h > 1) ? static_cast(in_h - 1) / (out_h - 1) : 0.f; + T ratio_w = (out_w > 1) ? static_cast(in_w - 1) / (out_w - 1) : 0.f; + + if (in_h == out_h && in_w == out_w) { + memcpy(output, input, input_t->numel() * sizeof(T)); + } else { + for (int k = 0; k < batch_size; ++k) { // loop for batches + for (int i = 0; i < out_h; ++i) { // loop for images + int h = ratio_h * i; + int hid = (h < in_h - 1) ? 1 : 0; + T h1lambda = ratio_h * i - h; + T h2lambda = 1 - h1lambda; + + for (int j = 0; j < out_w; ++j) { + int w = ratio_w * j; + int wid = (w < in_w - 1) ? 
1 : 0; + T w1lambda = ratio_w * j - w; + T w2lambda = 1 - w1lambda; + // calculate four position for bilinear interpolation + const T* in_pos = &input[k * in_chw + h * in_w + w]; + T* out_pos = &output[k * out_chw + i * out_w + j]; + + for (int c = 0; c < channels; ++c) { // loop for channels + // bilinear interpolation + out_pos[0] = + h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[wid]) + + h1lambda * (w2lambda * in_pos[hid * in_w] + + w1lambda * in_pos[hid * in_w + wid]); + in_pos += in_hw; + out_pos += out_hw; + } + } + } + } + } + } +}; + +template +class BilinearInterpGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* d_input_t = ctx.Output(framework::GradVarName("X")); + auto* d_output_t = ctx.Input(framework::GradVarName("Out")); + auto* d_input = d_input_t->mutable_data(ctx.GetPlace()); + auto* d_output = d_output_t->data(); + + auto& device_ctx = + ctx.template device_context(); + math::SetConstant zero; + zero(device_ctx, d_input_t, static_cast(0.0)); + + int out_h = ctx.Attr("out_h"); + int out_w = ctx.Attr("out_w"); + int batch_size = d_input_t->dims()[0]; + int channels = d_input_t->dims()[1]; + int in_h = d_input_t->dims()[2]; + int in_w = d_input_t->dims()[3]; + + int in_hw = in_h * in_w; + int out_hw = out_h * out_w; + int in_chw = channels * in_hw; + int out_chw = channels * out_hw; + + T ratio_h = (out_h > 1) ? static_cast(in_h - 1) / (out_h - 1) : 0.f; + T ratio_w = (out_w > 1) ? static_cast(in_w - 1) / (out_w - 1) : 0.f; + + if (in_h == out_h && in_w == out_w) { + memcpy(d_input, d_output, d_input_t->numel() * sizeof(T)); + } else { + for (int k = 0; k < batch_size; ++k) { // loop for batches + for (int i = 0; i < out_h; ++i) { // loop for images + int h = ratio_h * i; + int hid = (h < in_h - 1) ? 1 : 0; + T h1lambda = ratio_h * i - h; + T h2lambda = 1 - h1lambda; + + for (int j = 0; j < out_w; ++j) { + int w = ratio_w * j; + int wid = (w < in_w - 1) ? 1 : 0; + T w1lambda = ratio_w * j - w; + T w2lambda = 1 - w1lambda; + T* in_pos = &d_input[k * in_chw + h * in_w + w]; + const T* out_pos = &d_output[k * out_chw + i * out_w + j]; + + for (int c = 0; c < channels; ++c) { // loop for channels + in_pos[0] += h2lambda * w2lambda * out_pos[0]; + in_pos[wid] += h2lambda * w1lambda * out_pos[0]; + in_pos[hid * in_w] += h1lambda * w2lambda * out_pos[0]; + in_pos[hid * in_w + wid] += h1lambda * w1lambda * out_pos[0]; + in_pos += in_hw; + out_pos += out_hw; + } + } + } + } + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/concurrency/channel_util.cc b/paddle/fluid/operators/concurrency/channel_util.cc index 246c99489c45efec16babb1d3980606318236605..fba4abf1897bceea615222b2438700085ed8e551 100644 --- a/paddle/fluid/operators/concurrency/channel_util.cc +++ b/paddle/fluid/operators/concurrency/channel_util.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "channel_util.h" +#include "paddle/fluid/operators/concurrency/channel_util.h" #include "paddle/fluid/framework/var_type.h" namespace poc = paddle::operators::concurrency; diff --git a/paddle/fluid/operators/conditional_block_op.cc b/paddle/fluid/operators/conditional_block_op.cc index 137fee99e82e5c7fad58a36ef49adb323f13f3a4..27f74a789beef02d31ebceb9b909e97ebd68232a 100644 --- a/paddle/fluid/operators/conditional_block_op.cc +++ b/paddle/fluid/operators/conditional_block_op.cc @@ -227,7 +227,7 @@ class ConditionalBlockGradMaker : public framework::SingleGradOpDescMaker { grad_op->SetOutput(framework::GradVarName("X"), InputGrad("X", false)); grad_op->SetOutput(framework::GradVarName("Params"), InputGrad("Params", false)); - grad_op->SetBlockAttr("sub_block", *this->grad_block_[0]); + grad_op->SetBlockAttr("sub_block", this->grad_block_[0]); grad_op->SetAttr("is_scalar_condition", GetAttr("is_scalar_condition")); return std::unique_ptr(grad_op); } diff --git a/paddle/fluid/operators/detail/grpc_client.h b/paddle/fluid/operators/detail/grpc_client.h index 4425b19328f503eb7f9022916ed6452cdfea4eeb..f6229b71bc01a6de51f50f5fe880ada6e15e74dd 100644 --- a/paddle/fluid/operators/detail/grpc_client.h +++ b/paddle/fluid/operators/detail/grpc_client.h @@ -29,12 +29,12 @@ limitations under the License. */ #include "grpc++/support/byte_buffer.h" #include "grpc++/support/slice.h" #include "grpc/support/log.h" +#include "paddle/fluid/framework/blocking_queue.h" #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/framework/selected_rows.h" #include "paddle/fluid/operators/detail/sendrecvop_utils.h" -#include "paddle/fluid/operators/detail/simple_block_queue.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc index 119e146e078e476b2768a8495ea63e468f952fd2..95f4738b4ff50852d9591719133ca650533bf848 100644 --- a/paddle/fluid/operators/detail/grpc_server.cc +++ b/paddle/fluid/operators/detail/grpc_server.cc @@ -30,9 +30,13 @@ enum CallStatus { PROCESS = 0, FINISH }; class RequestBase { public: explicit RequestBase(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, + ::grpc::ServerCompletionQueue* cq, bool sync_mode, const platform::DeviceContext* dev_ctx) - : service_(service), cq_(cq), status_(PROCESS), dev_ctx_(dev_ctx) { + : service_(service), + cq_(cq), + sync_mode_(sync_mode), + status_(PROCESS), + dev_ctx_(dev_ctx) { PADDLE_ENFORCE(cq_); } virtual ~RequestBase() {} @@ -49,6 +53,7 @@ class RequestBase { ::grpc::ServerContext ctx_; GrpcService::AsyncService* service_; ::grpc::ServerCompletionQueue* cq_; + const bool sync_mode_; CallStatus status_; const platform::DeviceContext* dev_ctx_; }; @@ -56,11 +61,17 @@ class RequestBase { class RequestSend final : public RequestBase { public: explicit RequestSend(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, + ::grpc::ServerCompletionQueue* cq, bool sync_mode, framework::Scope* scope, ReceivedQueue* queue, const platform::DeviceContext* dev_ctx) - : RequestBase(service, cq, dev_ctx), queue_(queue), responder_(&ctx_) { - request_.reset(new VariableResponse(scope, dev_ctx_)); + : RequestBase(service, cq, sync_mode, dev_ctx), + queue_(queue), + responder_(&ctx_) { + if (sync_mode_) { + request_.reset(new VariableResponse(scope, dev_ctx_, false)); + } else { + request_.reset(new VariableResponse(scope, 
dev_ctx_, true)); + } int method_id = static_cast(detail::GrpcMethod::kSendVariable); service_->RequestAsyncUnary(method_id, &ctx_, request_.get(), &responder_, cq_, cq_, this); @@ -87,11 +98,11 @@ class RequestSend final : public RequestBase { class RequestGet final : public RequestBase { public: explicit RequestGet(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, + ::grpc::ServerCompletionQueue* cq, bool sync_mode, framework::Scope* scope, const platform::DeviceContext* dev_ctx, - SimpleBlockQueue* queue) - : RequestBase(service, cq, dev_ctx), + framework::BlockingQueue* queue) + : RequestBase(service, cq, sync_mode, dev_ctx), responder_(&ctx_), scope_(scope), queue_(queue) { @@ -128,25 +139,29 @@ class RequestGet final : public RequestBase { sendrecv::VariableMessage request_; ServerAsyncResponseWriter<::grpc::ByteBuffer> responder_; framework::Scope* scope_; - SimpleBlockQueue* queue_; + framework::BlockingQueue* queue_; }; class RequestPrefetch final : public RequestBase { public: explicit RequestPrefetch(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, + ::grpc::ServerCompletionQueue* cq, bool sync_mode, framework::Scope* scope, const platform::DeviceContext* dev_ctx, framework::Executor* executor, framework::ProgramDesc* program, framework::ExecutorPrepareContext* prefetch_ctx) - : RequestBase(service, cq, dev_ctx), + : RequestBase(service, cq, sync_mode, dev_ctx), responder_(&ctx_), scope_(scope), executor_(executor), program_(program), prefetch_ctx_(prefetch_ctx) { - request_.reset(new VariableResponse(scope, dev_ctx_)); + if (sync_mode_) { + request_.reset(new VariableResponse(scope, dev_ctx_, false)); + } else { + request_.reset(new VariableResponse(scope, dev_ctx_, true)); + } int method_id = static_cast(detail::GrpcMethod::kPrefetchVariable); service_->RequestAsyncUnary(method_id, &ctx_, request_.get(), &responder_, cq_, cq_, this); @@ -181,7 +196,6 @@ class RequestPrefetch final : public RequestBase { framework::Executor* executor_; framework::ProgramDesc* program_; framework::ExecutorPrepareContext* prefetch_ctx_; - int blkid_; }; void AsyncGRPCServer::WaitClientGet(int count) { @@ -254,8 +268,8 @@ void AsyncGRPCServer::TryToRegisterNewSendOne() { VLOG(3) << "shutdown, do not TryToRegisterNewSendOne"; return; } - RequestSend* send = new RequestSend(&service_, cq_send_.get(), scope_, - &var_recv_queue_, dev_ctx_); + RequestSend* send = new RequestSend(&service_, cq_send_.get(), sync_mode_, + scope_, &var_recv_queue_, dev_ctx_); VLOG(4) << "Create RequestSend status:" << send->Status(); } @@ -265,8 +279,8 @@ void AsyncGRPCServer::TryToRegisterNewGetOne() { VLOG(3) << "shutdown, do not TryToRegisterNewGetOne"; return; } - RequestGet* get = new RequestGet(&service_, cq_get_.get(), scope_, dev_ctx_, - &var_get_queue_); + RequestGet* get = new RequestGet(&service_, cq_get_.get(), sync_mode_, scope_, + dev_ctx_, &var_get_queue_); VLOG(4) << "Create RequestGet status:" << get->Status(); } @@ -277,8 +291,8 @@ void AsyncGRPCServer::TryToRegisterNewPrefetchOne() { return; } RequestPrefetch* prefetch = - new RequestPrefetch(&service_, cq_prefetch_.get(), scope_, dev_ctx_, - executor_, program_, prefetch_ctx_); + new RequestPrefetch(&service_, cq_prefetch_.get(), sync_mode_, scope_, + dev_ctx_, executor_, program_, prefetch_ctx_); VLOG(4) << "Create RequestPrefetch status:" << prefetch->Status(); } @@ -301,9 +315,11 @@ void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq, VLOG(3) << "HandleRequest for " << cq_name << " 
while after Next"; PADDLE_ENFORCE(tag); - // FIXME(typhoonzero): de-couple the barriers with recv_op - if (!is_shut_down_ && cq_name == "cq_get") WaitCond(1); - if (!is_shut_down_ && cq_name == "cq_send") WaitCond(0); + if (sync_mode_) { + // FIXME(typhoonzero): de-couple the barriers with recv_op + if (!is_shut_down_ && cq_name == "cq_get") WaitCond(1); + if (!is_shut_down_ && cq_name == "cq_send") WaitCond(0); + } RequestBase* base = reinterpret_cast(tag); // reference: @@ -320,13 +336,13 @@ void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq, switch (base->Status()) { case PROCESS: { - VLOG(4) << cq_name << " status:" << base->Status(); + VLOG(4) << cq_name << " PROCESS status:" << base->Status(); TryToRegisterNewOne(); base->Process(); break; } case FINISH: { - VLOG(4) << cq_name << " status:" << base->Status(); + VLOG(4) << cq_name << " FINISH status:" << base->Status(); delete base; break; } diff --git a/paddle/fluid/operators/detail/grpc_server.h b/paddle/fluid/operators/detail/grpc_server.h index 452ff5e967c086340e065a1b6a4b8672c75a4a3d..99b87b8c6cb3e597778b88c395e4abf400d82c39 100644 --- a/paddle/fluid/operators/detail/grpc_server.h +++ b/paddle/fluid/operators/detail/grpc_server.h @@ -19,6 +19,7 @@ limitations under the License. */ #include #include "grpc++/grpc++.h" +#include "paddle/fluid/framework/blocking_queue.h" #include "paddle/fluid/framework/executor.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/program_desc.h" @@ -29,7 +30,6 @@ limitations under the License. */ #include "paddle/fluid/operators/detail/send_recv.grpc.pb.h" #include "paddle/fluid/operators/detail/send_recv.pb.h" #include "paddle/fluid/operators/detail/sendrecvop_utils.h" -#include "paddle/fluid/operators/detail/simple_block_queue.h" namespace paddle { namespace operators { @@ -37,14 +37,15 @@ namespace detail { typedef std::pair> ReceivedMessage; -typedef SimpleBlockQueue ReceivedQueue; +typedef framework::BlockingQueue ReceivedQueue; typedef std::pair MessageWithName; class RequestBase; class AsyncGRPCServer final { public: - explicit AsyncGRPCServer(const std::string &address) : address_(address) {} + explicit AsyncGRPCServer(const std::string &address, bool sync_mode) + : address_(address), sync_mode_(sync_mode) {} void RunSyncUpdate(); @@ -95,11 +96,12 @@ class AsyncGRPCServer final { std::unique_ptr<::grpc::Server> server_; std::string address_; + const bool sync_mode_; framework::Scope *scope_; const platform::DeviceContext *dev_ctx_; // received variable from RPC, operators fetch variable from this queue. - SimpleBlockQueue var_get_queue_; + framework::BlockingQueue var_get_queue_; // client send variable to this queue. 
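  // (in async mode, the server loop pops these messages one at a time via
  //  rpc_service_->Get() instead of using the cq_send/cq_get barriers;
  //  see RunAsyncLoop in listen_and_serv_op.cc)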
ReceivedQueue var_recv_queue_; diff --git a/paddle/fluid/operators/detail/grpc_server_test.cc b/paddle/fluid/operators/detail/grpc_server_test.cc index c51933718f4ca78e87c77e007c485642000d247d..25b95d608d10d6e456d5f563ce9fbe35d812cb0f 100644 --- a/paddle/fluid/operators/detail/grpc_server_test.cc +++ b/paddle/fluid/operators/detail/grpc_server_test.cc @@ -89,7 +89,7 @@ void InitTensorsOnServer(framework::Scope* scope, platform::CPUPlace* place, } void StartServer(const std::string& endpoint) { - rpc_service_.reset(new detail::AsyncGRPCServer(endpoint)); + rpc_service_.reset(new detail::AsyncGRPCServer(endpoint, true)); framework::ProgramDesc program; framework::Scope scope; platform::CPUPlace place; diff --git a/paddle/fluid/operators/detail/sendrecvop_utils.cc b/paddle/fluid/operators/detail/sendrecvop_utils.cc index 69fcffe9bc34006aef2e5a39227cf6d947e4615f..766bcf1ac5e06628638fcc8a305c00ab2795bbf2 100644 --- a/paddle/fluid/operators/detail/sendrecvop_utils.cc +++ b/paddle/fluid/operators/detail/sendrecvop_utils.cc @@ -39,7 +39,9 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, // parallelism execution, need to know when to free the tensor. DestroyCallback destroy_callback = [](void* backing) {}; - void* buf = malloc(1024); + auto buffer = std::unique_ptr(new char[1024]); + void* buf = buffer.get(); + void* payload = nullptr; size_t payload_size; ProtoEncodeHelper e(static_cast(buf), 1024); diff --git a/paddle/fluid/operators/detail/simple_block_queue.h b/paddle/fluid/operators/detail/simple_block_queue.h deleted file mode 100644 index 69773e05df7ed76f31c26f4304693fec2e9aac9c..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/detail/simple_block_queue.h +++ /dev/null @@ -1,52 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#pragma once - -#include // NOLINT -#include -#include // NOLINT - -namespace paddle { -namespace operators { -namespace detail { - -template -class SimpleBlockQueue { - private: - std::mutex mutex_; - std::condition_variable condition_; - std::deque queue_; - - public: - void Push(T const& value) { - { - std::unique_lock lock(this->mutex_); - queue_.push_front(value); - } - this->condition_.notify_one(); - } - - T Pop() { - std::unique_lock lock(this->mutex_); - this->condition_.wait(lock, [=] { return !this->queue_.empty(); }); - T rc(std::move(this->queue_.back())); - this->queue_.pop_back(); - return rc; - } -}; - -} // namespace detail -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/detail/variable_response.h b/paddle/fluid/operators/detail/variable_response.h index 3018a5c4af876828380ff4c1cbfdaafa8a2057e1..bf624da2a6c26472e47711b3c6409f78afba0a64 100644 --- a/paddle/fluid/operators/detail/variable_response.h +++ b/paddle/fluid/operators/detail/variable_response.h @@ -46,7 +46,9 @@ class VariableResponse { } virtual ~VariableResponse() { - if (create_scope_) scope_->DeleteScope(local_scope_); + if (create_scope_) { + scope_->DeleteScope(local_scope_); + } } // return: @@ -63,6 +65,8 @@ class VariableResponse { const framework::Scope& GetLocalScope() const { return *local_scope_; } + framework::Scope* GetMutableLocalScope() const { return local_scope_; } + inline std::string Varname() { return meta_.varname(); } inline std::string OutVarname() { return meta_.out_varname(); } diff --git a/paddle/fluid/operators/fetch_op.cc b/paddle/fluid/operators/fetch_op.cc index 7c7f3e9059fbb1e3f2cca4f04edfff55c9452761..18deec58137676a0b2c8d559e49d0f7a840cd5ba 100644 --- a/paddle/fluid/operators/fetch_op.cc +++ b/paddle/fluid/operators/fetch_op.cc @@ -57,10 +57,7 @@ class FetchOp : public framework::OperatorBase { // FIXME(yuyang18): Should we assume the fetch operator always generate // CPU outputs? 
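  // TensorCopySync copies src_item to CPUPlace and waits for the copy to
  // finish before returning, so the fetched tensor is complete when it is
  // handed back and no separate dev_ctx.Wait() is required.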
- auto &dev_ctx = *pool.Get(src_item.place()); - - TensorCopy(src_item, platform::CPUPlace(), dev_ctx, &dst_item); - dev_ctx.Wait(); + TensorCopySync(src_item, platform::CPUPlace(), &dst_item); dst_item.set_lod(src_item.lod()); VLOG(3) << "Fetch variable " << fetch_var_name << " to " << out_name; diff --git a/paddle/fluid/operators/gru_op.h b/paddle/fluid/operators/gru_op.h index 1d5c291495c0f0c0d8da9ff6949888b4cbb6036d..53f844a6607bd2e98c53b53c23422f6b48e2ced6 100644 --- a/paddle/fluid/operators/gru_op.h +++ b/paddle/fluid/operators/gru_op.h @@ -56,8 +56,6 @@ class GRUKernel : public framework::OpKernel { auto* hidden = context.Output("Hidden"); hidden->mutable_data(context.GetPlace()); - context.ShareLoD("Input", "Hidden"); - auto hidden_dims = hidden->dims(); bool is_reverse = context.Attr("is_reverse"); diff --git a/paddle/fluid/operators/iou_similarity_op.h b/paddle/fluid/operators/iou_similarity_op.h index c76448c736847a536be2021a8a5fef23bef23a50..9f193ebc59b7be44b987db7d068c209ef7f5a8da 100644 --- a/paddle/fluid/operators/iou_similarity_op.h +++ b/paddle/fluid/operators/iou_similarity_op.h @@ -41,22 +41,24 @@ struct IOUSimilarityFunctor { IOUSimilarityFunctor(const T* x, const T* y, T* z, int cols) : x_(x), y_(y), z_(z), cols_(static_cast(cols)) {} - inline HOSTDEVICE void operator()(size_t row_id) const { + inline HOSTDEVICE void operator()(size_t tid) const { + size_t row_id = tid / cols_; + size_t col_id = tid % cols_; + T x_min1 = x_[row_id * 4]; T y_min1 = x_[row_id * 4 + 1]; T x_max1 = x_[row_id * 4 + 2]; T y_max1 = x_[row_id * 4 + 3]; - for (size_t i = 0; i < cols_; ++i) { - T x_min2 = y_[i * 4]; - T y_min2 = y_[i * 4 + 1]; - T x_max2 = y_[i * 4 + 2]; - T y_max2 = y_[i * 4 + 3]; - T sim = IOUSimilarity(x_min1, y_min1, x_max1, y_max1, x_min2, y_min2, - x_max2, y_max2); + T x_min2 = y_[col_id * 4]; + T y_min2 = y_[col_id * 4 + 1]; + T x_max2 = y_[col_id * 4 + 2]; + T y_max2 = y_[col_id * 4 + 3]; + + T sim = IOUSimilarity(x_min1, y_min1, x_max1, y_max1, x_min2, y_min2, + x_max2, y_max2); - z_[row_id * cols_ + i] = sim; - } + z_[row_id * cols_ + col_id] = sim; } const T* x_; const T* y_; @@ -81,7 +83,7 @@ class IOUSimilarityKernel : public framework::OpKernel { out->mutable_data(ctx.GetPlace()), y_n); platform::ForRange for_range( - static_cast(ctx.device_context()), x_n); + static_cast(ctx.device_context()), x_n * y_n); for_range(functor); } }; // namespace operators diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index af235fb6a029a71ee275bebfbbd75aaa0b7d546d..57cff680ab89f2df7e71af4056ee06cdf330bbab 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -27,6 +27,38 @@ void RunServer(std::shared_ptr service) { VLOG(4) << "RunServer thread end"; } +static void split(const std::string &str, char sep, + std::vector *pieces) { + pieces->clear(); + if (str.empty()) { + return; + } + size_t pos = 0; + size_t next = str.find(sep, pos); + while (next != std::string::npos) { + pieces->push_back(str.substr(pos, next - pos)); + pos = next + 1; + next = str.find(sep, pos); + } + if (!str.substr(pos).empty()) { + pieces->push_back(str.substr(pos)); + } +} + +static void AsyncExecuteBlock(framework::Executor *executor, + framework::ExecutorPrepareContext *prepared, + framework::Scope *scope) { + std::future future = framework::Async([&executor, &prepared, &scope]() { + try { + executor->RunPreparedContext(prepared, scope, false, false); + } catch (std::exception &e) { + 
LOG(ERROR) << "run sub program error " << e.what(); + } + }); + // TODO(qiao) maybe we can remove this + future.wait(); +} + static void ParallelExecuteBlocks( const std::vector ¶llel_blkids, framework::Executor *executor, const std::vector> @@ -169,15 +201,82 @@ void ListenAndServOp::RunSyncLoop(framework::Executor *executor, } // while(true) } +void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, + framework::ProgramDesc *program, + framework::Scope *recv_scope, + framework::BlockDesc *prefetch_block) const { + VLOG(3) << "RunAsyncLoop in"; + // grad name to block id + std::unordered_map grad_to_block_id; + std::unordered_map id_to_grad; + + auto grad_to_block_id_str = + Attr>("grad_to_block_id"); + for (auto &grad_and_id : grad_to_block_id_str) { + std::vector pieces; + split(grad_and_id, ':', &pieces); + VLOG(3) << "after split, grad = " << pieces[0] << ", id=" << pieces[1]; + PADDLE_ENFORCE_EQ(pieces.size(), 2); + PADDLE_ENFORCE_EQ(grad_to_block_id.count(pieces[0]), 0); + int block_id = std::stoi(pieces[1]); + grad_to_block_id[pieces[0]] = block_id; + id_to_grad[block_id] = pieces[0]; + } + size_t num_blocks = program->Size(); + PADDLE_ENFORCE_GE(num_blocks, 2, + "server program should have at least 2 blocks"); + + std::vector block_list; + for (size_t blkid = 1; blkid < num_blocks; ++blkid) { + block_list.push_back(blkid); + } + auto optimize_prepared = executor->Prepare(*program, block_list); + std::unordered_map> + grad_to_prepared_ctx; + for (size_t i = 0; i < block_list.size(); ++i) { + grad_to_prepared_ctx[id_to_grad[block_list[i]]] = optimize_prepared[i]; + } + + VLOG(3) << "RunAsyncLoop into while"; + bool exit_flag = false; + while (!exit_flag) { + const detail::ReceivedMessage v = rpc_service_->Get(); + auto recv_var_name = v.first; + if (recv_var_name == LISTEN_TERMINATE_MESSAGE) { + LOG(INFO) << "received terminate message and exit"; + exit_flag = true; + break; + } else { + VLOG(3) << "received grad: " << recv_var_name; + auto var = v.second->GetVar(); + if (var == nullptr) { + LOG(ERROR) << "Can not find server side var: " << recv_var_name; + PADDLE_THROW("Can not find server side var"); + } + AsyncExecuteBlock(executor, grad_to_prepared_ctx[recv_var_name].get(), + v.second->GetMutableLocalScope()); + } + + if (exit_flag) { + rpc_service_->ShutDown(); + break; + } + } // while(true) +} + void ListenAndServOp::RunImpl(const framework::Scope &scope, const platform::Place &dev_place) const { platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(dev_place); framework::Scope &recv_scope = scope.NewScope(); + bool sync_mode = Attr("sync_mode"); + PADDLE_ENFORCE(!rpc_service_); std::string endpoint = Attr("endpoint"); - rpc_service_.reset(new detail::AsyncGRPCServer(endpoint)); + + rpc_service_.reset(new detail::AsyncGRPCServer(endpoint, sync_mode)); auto *optimize_block = Attr(kOptimizeBlock); auto *prefetch_block = Attr(kPrefetchBlock); @@ -202,7 +301,11 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, sleep(5); // Write to a file of server selected port for python use. SavePort(rpc_service_); - RunSyncLoop(&executor, program, &recv_scope, prefetch_block); + if (sync_mode) { + RunSyncLoop(&executor, program, &recv_scope, prefetch_block); + } else { + RunAsyncLoop(&executor, program, &recv_scope, prefetch_block); + } } class ListenAndServOpMaker : public framework::OpProtoAndCheckerMaker { @@ -221,6 +324,12 @@ from send_op and send back variables to recv_op. 
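For reference, each grad_to_block_id entry maps a gradient variable name to the id
of the optimize block that handles it. A minimal sketch of how one entry is parsed,
mirroring the split/stoi logic in RunAsyncLoop above (the entry string is the
illustrative example from the attribute comment; the map type is assumed to be
std::unordered_map<std::string, int32_t>):

    std::unordered_map<std::string, int32_t> grad_to_block_id;
    std::vector<std::string> pieces;
    split("param1@GRAD.block0:1", ':', &pieces);  // -> {"param1@GRAD.block0", "1"}
    PADDLE_ENFORCE_EQ(pieces.size(), 2);
    grad_to_block_id[pieces[0]] = std::stoi(pieces[1]);  // block 1 optimizes this grad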
"IP address to listen on.") .SetDefault("127.0.0.1:6164") .AddCustomChecker([](const std::string &ip) { return !ip.empty(); }); + AddAttr>( + "grad_to_block_id", + "['param1@GRAD.block0:1', 'param2@GRAD.blockn:2'] " + "a map from grad name to it's optimize block id") + .SetDefault({}); + AddAttr("sync_mode", "if works at sync_mode or not").SetDefault(true); AddAttr(kOptimizeBlock, "BlockID to run on server side."); AddAttr(kPrefetchBlock, diff --git a/paddle/fluid/operators/listen_and_serv_op.h b/paddle/fluid/operators/listen_and_serv_op.h index dfb7c77c8e36d9af79d8b1713d0c0c59c81b1ca6..3cc0f3047733bea94daa310cd39cb0a4f44bef85 100644 --- a/paddle/fluid/operators/listen_and_serv_op.h +++ b/paddle/fluid/operators/listen_and_serv_op.h @@ -46,6 +46,11 @@ class ListenAndServOp : public framework::OperatorBase { framework::Scope* recv_scope, framework::BlockDesc* prefetch_block) const; + void RunAsyncLoop(framework::Executor* executor, + framework::ProgramDesc* program, + framework::Scope* recv_scope, + framework::BlockDesc* prefetch_block) const; + void Stop() override; void RunImpl(const framework::Scope& scope, diff --git a/paddle/fluid/operators/math/concat_test.cc b/paddle/fluid/operators/math/concat_test.cc index 1741af8148bb90863f294ba4930006a58b5ddbf9..19d056fa54777eff2881a346da071ff95126173c 100644 --- a/paddle/fluid/operators/math/concat_test.cc +++ b/paddle/fluid/operators/math/concat_test.cc @@ -72,8 +72,8 @@ void testConcat() { } if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(input_a_cpu, Place(), *context, &input_a); - TensorCopy(input_b_cpu, Place(), *context, &input_b); + TensorCopySync(input_a_cpu, Place(), &input_a); + TensorCopySync(input_b_cpu, Place(), &input_b); } std::vector input; @@ -89,7 +89,7 @@ void testConcat() { int* out_ptr; if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(out, CPUPlace(), *context, &out_cpu); + TensorCopySync(out, CPUPlace(), &out_cpu); out_ptr = out_cpu.data(); } else { out_ptr = out.data(); @@ -144,8 +144,8 @@ void testConcat() { } if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(input_a_cpu, Place(), *context, &input_a); - TensorCopy(input_b_cpu, Place(), *context, &input_b); + TensorCopySync(input_a_cpu, Place(), &input_a); + TensorCopySync(input_b_cpu, Place(), &input_b); } input.clear(); @@ -159,7 +159,7 @@ void testConcat() { PADDLE_ENFORCE_EQ(input_b.dims(), dim_b); if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(out, CPUPlace(), *context, &out_cpu); + TensorCopySync(out, CPUPlace(), &out_cpu); out_ptr = out_cpu.data(); } else { out_ptr = out.data(); @@ -216,8 +216,8 @@ void testConcat() { } if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(input_a_cpu, Place(), *context, &input_a); - TensorCopy(input_b_cpu, Place(), *context, &input_b); + TensorCopySync(input_a_cpu, Place(), &input_a); + TensorCopySync(input_b_cpu, Place(), &input_b); } input.clear(); @@ -231,7 +231,7 @@ void testConcat() { PADDLE_ENFORCE_EQ(input_b.dims(), dim_b); if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(out, CPUPlace(), *context, &out_cpu); + TensorCopySync(out, CPUPlace(), &out_cpu); out_ptr = out_cpu.data(); } else { out_ptr = out.data(); @@ -290,8 +290,8 @@ void testConcat() { } if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(input_a_cpu, Place(), *context, &input_a); - TensorCopy(input_b_cpu, Place(), *context, &input_b); + TensorCopySync(input_a_cpu, Place(), &input_a); + TensorCopySync(input_b_cpu, Place(), &input_b); } input.clear(); @@ -305,7 +305,7 @@ void testConcat() { 
PADDLE_ENFORCE_EQ(input_b.dims(), dim_b); if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(out, CPUPlace(), *context, &out_cpu); + TensorCopySync(out, CPUPlace(), &out_cpu); out_ptr = out_cpu.data(); } else { out_ptr = out.data(); diff --git a/paddle/fluid/operators/math/context_project.h b/paddle/fluid/operators/math/context_project.h index 4da94383af6c927a2db3337be6e82ca95a8cb036..027a019a284cac097eea50553e7d0dad5b09a218 100644 --- a/paddle/fluid/operators/math/context_project.h +++ b/paddle/fluid/operators/math/context_project.h @@ -14,6 +14,8 @@ limitations under the License. */ #pragma once +#include +#include #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/operators/math/im2col.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/math/depthwise_conv.cu b/paddle/fluid/operators/math/depthwise_conv.cu index a5e6e4031bbaddc2d09c660d34a975b2a496bc63..d360728484a73ce844b4a36fbffd7dc631f8e786 100644 --- a/paddle/fluid/operators/math/depthwise_conv.cu +++ b/paddle/fluid/operators/math/depthwise_conv.cu @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include #include "paddle/fluid/operators/math/depthwise_conv.h" #include "paddle/fluid/platform/cuda_helper.h" diff --git a/paddle/fluid/operators/math/depthwise_conv.h b/paddle/fluid/operators/math/depthwise_conv.h index 081bda891d044041f6814964e7076e28e812039c..97aec401889a56d3fc9ac08e766d931bb3725b01 100644 --- a/paddle/fluid/operators/math/depthwise_conv.h +++ b/paddle/fluid/operators/math/depthwise_conv.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/hostdevice.h" diff --git a/paddle/fluid/operators/math/detail/activation_functions.h b/paddle/fluid/operators/math/detail/activation_functions.h index d205ebf210818b91c3cf6d4563fca1e56702bcf9..b127fbe8c8515e7fe57b07ea1d4291675ec4efca 100644 --- a/paddle/fluid/operators/math/detail/activation_functions.h +++ b/paddle/fluid/operators/math/detail/activation_functions.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once #include +#include #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/hostdevice.h" diff --git a/paddle/fluid/operators/math/im2col.cc b/paddle/fluid/operators/math/im2col.cc index 123e10586f60f65d6c31a7c1f837bf32610d4ea5..336d6febc2ce3a55e82ed613bbc1081101f822f0 100644 --- a/paddle/fluid/operators/math/im2col.cc +++ b/paddle/fluid/operators/math/im2col.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/im2col.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/im2col.cu b/paddle/fluid/operators/math/im2col.cu index f41c78140fb601cd27a4e9997a736f46e903efc7..1268e21e0608000c1a8c22104912b32a973a9737 100644 --- a/paddle/fluid/operators/math/im2col.cu +++ b/paddle/fluid/operators/math/im2col.cu @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#include +#include #include "paddle/fluid/operators/math/im2col.h" #include "paddle/fluid/platform/cuda_helper.h" diff --git a/paddle/fluid/operators/math/im2col.h b/paddle/fluid/operators/math/im2col.h index 451ec9d53498caa6d53a28dbfc764775d3ed3ecf..26d94e0f2e6163eb7452cf1fbea5966b4344ace1 100644 --- a/paddle/fluid/operators/math/im2col.h +++ b/paddle/fluid/operators/math/im2col.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/platform/device_context.h" diff --git a/paddle/fluid/operators/math/im2col_test.cc b/paddle/fluid/operators/math/im2col_test.cc index b3978536bca7dc276bf5417bed36761a2d496536..8e3f0f286823c383bb0c44d0e7887040ec9b20a0 100644 --- a/paddle/fluid/operators/math/im2col_test.cc +++ b/paddle/fluid/operators/math/im2col_test.cc @@ -14,6 +14,7 @@ limitations under the License. */ #include "paddle/fluid/operators/math/im2col.h" #include +#include template void testIm2col() { @@ -62,7 +63,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - TensorCopy(input_tmp, *place, *context, &input); + TensorCopySync(input_tmp, *place, &input); } output_cfo.mutable_data( {1, filter_size, filter_size, output_height, output_width}, *place); @@ -87,7 +88,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { out_cfo_ptr = output_cfo.data(); } else { - TensorCopy(output_cfo, paddle::platform::CPUPlace(), *context, &output_tmp); + TensorCopySync(output_cfo, paddle::platform::CPUPlace(), &output_tmp); out_cfo_ptr = output_tmp.data(); } for (int i = 0; i < 6; ++i) { @@ -98,7 +99,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { out_ocf_ptr = output_ocf.data(); } else { - TensorCopy(output_ocf, paddle::platform::CPUPlace(), *context, &output_tmp); + TensorCopySync(output_ocf, paddle::platform::CPUPlace(), &output_tmp); out_ocf_ptr = output_tmp.data(); } @@ -119,7 +120,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - TensorCopy(input_tmp, *place, *context, &input); + TensorCopySync(input_tmp, *place, &input); } col2im(*context, output_cfo, dilation, stride, padding, &input); @@ -128,7 +129,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { in_ptr = input.data(); } else { - TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp); + TensorCopySync(input, paddle::platform::CPUPlace(), &input_tmp); in_ptr = input_tmp.data(); } for (int i = 0; i < 6; ++i) { @@ -140,7 +141,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - TensorCopy(input_tmp, *place, *context, &input); + TensorCopySync(input_tmp, *place, &input); } col2im_ocf(*context, output_ocf, dilation, stride, padding, &input); @@ -148,7 +149,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { in_ptr = input.data(); } else { - TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp); + TensorCopySync(input, paddle::platform::CPUPlace(), &input_tmp); in_ptr = input_tmp.data(); } for (int i = 0; i < 6; ++i) { diff --git a/paddle/fluid/operators/math/math_function.cc b/paddle/fluid/operators/math/math_function.cc index 44fd739fb1d161c6c7d6ab1cc611c59220280a4e..b5ae41c8f9d7aeb8e410b795fb9fbbd57ec69d4b 100644 --- a/paddle/fluid/operators/math/math_function.cc +++ b/paddle/fluid/operators/math/math_function.cc @@ -13,6 +13,7 @@ See the License for the specific 
language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/math_function.h" +#include #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/operators/math/math_function_impl.h" #include "paddle/fluid/platform/float16.h" @@ -161,7 +162,8 @@ void batched_gemm( const platform::CPUDeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const float16 alpha, const float16* A, const float16* B, const float16 beta, - float16* C, const int batchCount, const int strideA, const int strideB) { + float16* C, const int batchCount, const int64_t strideA, + const int64_t strideB) { PADDLE_THROW("float16 batched_gemm not supported on CPU"); } @@ -172,7 +174,8 @@ void batched_gemm( const platform::CPUDeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, - float* C, const int batchCount, const int strideA, const int strideB) { + float* C, const int batchCount, const int64_t strideA, + const int64_t strideB) { int lda = (transA == CblasNoTrans) ? K : M; int ldb = (transB == CblasNoTrans) ? N : K; int ldc = N; @@ -194,7 +197,8 @@ void batched_gemm( const platform::CPUDeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, - double* C, const int batchCount, const int strideA, const int strideB) { + double* C, const int batchCount, const int64_t strideA, + const int64_t strideB) { int lda = (transA == CblasNoTrans) ? K : M; int ldb = (transB == CblasNoTrans) ? N : K; int ldc = N; @@ -220,7 +224,8 @@ void batched_gemm( const platform::CPUDeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, - float* C, const int batchCount, const int strideA, const int strideB) { + float* C, const int batchCount, const int64_t strideA, + const int64_t strideB) { for (int k = 0; k < batchCount; ++k) { const float* Ak = &A[k * strideA]; const float* Bk = &B[k * strideB]; @@ -235,7 +240,8 @@ void batched_gemm( const platform::CPUDeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, - double* C, const int batchCount, const int strideA, const int strideB) { + double* C, const int batchCount, const int64_t strideA, + const int64_t strideB) { for (int k = 0; k < batchCount; ++k) { const double* Ak = &A[k * strideA]; const double* Bk = &B[k * strideB]; diff --git a/paddle/fluid/operators/math/math_function.cu b/paddle/fluid/operators/math/math_function.cu index 9badf26c9bb80acad029be3d1b63377cef63d929..2aa819625e0f5213a6001908e715bcc73d4747c3 100644 --- a/paddle/fluid/operators/math/math_function.cu +++ b/paddle/fluid/operators/math/math_function.cu @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #define EIGEN_USE_GPU +#include #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/math_function_impl.h" @@ -267,7 +268,8 @@ void batched_gemm( const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const float16 alpha, const float16* A, const float16* B, const float16 beta, - float16* C, const int batchCount, const int strideA, const int strideB) { + float16* C, const int batchCount, const int64_t strideA, + const int64_t strideB) { #if CUDA_VERSION >= 8000 // Note that cublas follows fortran order, so the order is different from // the cblas convention. @@ -278,7 +280,7 @@ void batched_gemm( (transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; - const int strideC = M * N; + const int64_t strideC = M * N; const half h_alpha = static_cast(alpha); const half h_beta = static_cast(beta); @@ -303,7 +305,8 @@ void batched_gemm( const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, - float* C, const int batchCount, const int strideA, const int strideB) { + float* C, const int batchCount, const int64_t strideA, + const int64_t strideB) { #if CUDA_VERSION >= 8000 // Note that cublas follows fortran order, so the order is different from // the cblas convention. @@ -314,7 +317,7 @@ void batched_gemm( (transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; - const int strideC = M * N; + const int64_t strideC = M * N; PADDLE_ENFORCE(platform::dynload::cublasSgemmStridedBatched( context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, @@ -329,7 +332,8 @@ void batched_gemm( const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, - double* C, const int batchCount, const int strideA, const int strideB) { + double* C, const int batchCount, const int64_t strideA, + const int64_t strideB) { #if CUDA_VERSION >= 8000 // Note that cublas follows fortran order, so the order is different from // the cblas convention. @@ -340,7 +344,7 @@ void batched_gemm( (transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; - const int strideC = M * N; + const int64_t strideC = M * N; PADDLE_ENFORCE(platform::dynload::cublasDgemmStridedBatched( context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, diff --git a/paddle/fluid/operators/math/math_function.h b/paddle/fluid/operators/math/math_function.h index cdbc7bfb37e83c6c2b696ba010277c9eec49f2a8..cdd02974722045457aacdfa517c147751185f332 100644 --- a/paddle/fluid/operators/math/math_function.h +++ b/paddle/fluid/operators/math/math_function.h @@ -26,7 +26,7 @@ limitations under the License. 
*/ #ifndef LAPACK_FOUND extern "C" { -#include +#include // NOLINT int LAPACKE_sgetrf(int matrix_layout, int m, int n, float* a, int lda, int* ipiv); int LAPACKE_dgetrf(int matrix_layout, int m, int n, double* a, int lda, @@ -39,6 +39,7 @@ int LAPACKE_dgetri(int matrix_layout, int n, double* a, int lda, #endif #include +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/tensor.h" @@ -78,8 +79,8 @@ template void batched_gemm(const DeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const T alpha, const T* A, const T* B, - const T beta, T* C, const int batchCount, const int strideA, - const int strideB); + const T beta, T* C, const int batchCount, + const int64_t strideA, const int64_t strideB); template void gemv(const DeviceContext& context, const bool trans_a, const int M, diff --git a/paddle/fluid/operators/math/math_function_impl.h b/paddle/fluid/operators/math/math_function_impl.h index f9d4e4532428e3387ae39621d155434f05eb89d3..b9bd49d77d935e985705f78402ffe1ea90f24cb3 100644 --- a/paddle/fluid/operators/math/math_function_impl.h +++ b/paddle/fluid/operators/math/math_function_impl.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/math/math_function_test.cu b/paddle/fluid/operators/math/math_function_test.cu index 8982d9d066165a9da0461288685baa0c60e5f114..7986326e96b2bb05c0936d366bda581d49b87032 100644 --- a/paddle/fluid/operators/math/math_function_test.cu +++ b/paddle/fluid/operators/math/math_function_test.cu @@ -40,15 +40,15 @@ TEST(math_function, notrans_mul_trans_fp32) { float arr[6] = {0, 1, 2, 3, 4, 5}; memcpy(input1_ptr, arr, 6 * sizeof(float)); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input1, gpu_place, context, &input2_gpu); + TensorCopySync(input1, gpu_place, &input1_gpu); + TensorCopySync(input1, gpu_place, &input2_gpu); out_gpu.mutable_data({2, 2}, gpu_place); paddle::operators::math::matmul( context, input1_gpu, false, input2_gpu, true, 1, &out_gpu, 0); - TensorCopy(out_gpu, cpu_place, context, &out); + TensorCopySync(out_gpu, cpu_place, &out); float* out_ptr = out.data(); context.Wait(); @@ -80,8 +80,8 @@ TEST(math_function, notrans_mul_trans_fp16) { float16* input1_ptr = input1.mutable_data({2, 3}, cpu_place); fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5}); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input1, gpu_place, context, &input2_gpu); + TensorCopySync(input1, gpu_place, &input1_gpu); + TensorCopySync(input1, gpu_place, &input2_gpu); out_gpu.mutable_data({2, 2}, gpu_place); @@ -89,7 +89,7 @@ TEST(math_function, notrans_mul_trans_fp16) { context, input1_gpu, false, input2_gpu, true, float16(1), &out_gpu, float16(0)); - TensorCopy(out_gpu, cpu_place, context, &out); + TensorCopySync(out_gpu, cpu_place, &out); float16* out_ptr = out.data(); context.Wait(); @@ -117,15 +117,15 @@ TEST(math_function, trans_mul_notrans_fp32) { float arr[6] = {0, 1, 2, 3, 4, 5}; memcpy(input1_ptr, arr, 6 * sizeof(float)); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input1, gpu_place, context, &input2_gpu); + TensorCopySync(input1, gpu_place, &input1_gpu); + TensorCopySync(input1, gpu_place, &input2_gpu); out_gpu.mutable_data({3, 3}, gpu_place); 
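  // input1_gpu/input2_gpu were staged with TensorCopySync, which blocks until the
  // host-to-device copy finishes, so the matmul below reads fully transferred
  // data; the result is brought back the same way before the CPU-side checks.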
paddle::operators::math::matmul( context, input1_gpu, true, input2_gpu, false, 1, &out_gpu, 0); - TensorCopy(out_gpu, cpu_place, context, &out); + TensorCopySync(out_gpu, cpu_place, &out); float* out_ptr = out.data(); context.Wait(); @@ -162,8 +162,8 @@ TEST(math_function, trans_mul_notrans_fp16) { float16* input1_ptr = input1.mutable_data({2, 3}, cpu_place); fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5}); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input1, gpu_place, context, &input2_gpu); + TensorCopySync(input1, gpu_place, &input1_gpu); + TensorCopySync(input1, gpu_place, &input2_gpu); out_gpu.mutable_data({3, 3}, gpu_place); @@ -171,7 +171,7 @@ TEST(math_function, trans_mul_notrans_fp16) { context, input1_gpu, true, input2_gpu, false, float16(1), &out_gpu, float16(0)); - TensorCopy(out_gpu, cpu_place, context, &out); + TensorCopySync(out_gpu, cpu_place, &out); float16* out_ptr = out.data(); context.Wait(); @@ -214,9 +214,9 @@ TEST(math_function, gemm_notrans_cublas_fp32) { float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7}; memcpy(input3_ptr, arr3, 8 * sizeof(float)); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input2, gpu_place, context, &input2_gpu); - TensorCopy(input3, gpu_place, context, &input3_gpu); + TensorCopySync(input1, gpu_place, &input1_gpu); + TensorCopySync(input2, gpu_place, &input2_gpu); + TensorCopySync(input3, gpu_place, &input3_gpu); float* a = input1_gpu.data(); float* b = input2_gpu.data(); float* c = input3_gpu.mutable_data(gpu_place); @@ -224,7 +224,7 @@ TEST(math_function, gemm_notrans_cublas_fp32) { paddle::operators::math::gemm( context, false, false, m, n, k, 1, a, 3, b + 1, 4, 1, c + 1, 4); - TensorCopy(input3_gpu, cpu_place, context, &input3); + TensorCopySync(input3_gpu, cpu_place, &input3); // numpy code: // a = np.arange(6).reshape(2, 3) @@ -274,9 +274,9 @@ TEST(math_function, gemm_notrans_cublas_fp16) { float16* input3_ptr = input3.mutable_data({2, 4}, cpu_place); fill_fp16_data(input3_ptr, input3.numel(), {0, 1, 2, 3, 4, 5, 6, 7}); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input2, gpu_place, context, &input2_gpu); - TensorCopy(input3, gpu_place, context, &input3_gpu); + TensorCopySync(input1, gpu_place, &input1_gpu); + TensorCopySync(input2, gpu_place, &input2_gpu); + TensorCopySync(input3, gpu_place, &input3_gpu); float16* a = input1_gpu.data(); float16* b = input2_gpu.data(); float16* c = input3_gpu.mutable_data(gpu_place); @@ -285,7 +285,7 @@ TEST(math_function, gemm_notrans_cublas_fp16) { context, false, false, m, n, k, float16(1), a, 3, b + 1, 4, float16(1), c + 1, 4); - TensorCopy(input3_gpu, cpu_place, context, &input3); + TensorCopySync(input3_gpu, cpu_place, &input3); // numpy code: // a = np.arange(6).reshape(2, 3) @@ -332,9 +332,9 @@ TEST(math_function, gemm_trans_cublas_fp32) { float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7}; memcpy(input3_ptr, arr3, 8 * sizeof(float)); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input2, gpu_place, context, &input2_gpu); - TensorCopy(input3, gpu_place, context, &input3_gpu); + TensorCopySync(input1, gpu_place, &input1_gpu); + TensorCopySync(input2, gpu_place, &input2_gpu); + TensorCopySync(input3, gpu_place, &input3_gpu); float* a = input1_gpu.data(); float* b = input2_gpu.data(); float* c = input3_gpu.mutable_data(gpu_place); @@ -342,7 +342,7 @@ TEST(math_function, gemm_trans_cublas_fp32) { paddle::operators::math::gemm( context, false, true, m, n, k, 1, a, 3, b + 3, 3, 1, c + 1, 4); - TensorCopy(input3_gpu, 
cpu_place, context, &input3); + TensorCopySync(input3_gpu, cpu_place, &input3); context.Wait(); EXPECT_EQ(input3_ptr[0], 0); @@ -386,9 +386,9 @@ TEST(math_function, gemm_trans_cublas_fp16) { float16* input3_ptr = input3.mutable_data({2, 4}, cpu_place); fill_fp16_data(input3_ptr, input3.numel(), {0, 1, 2, 3, 4, 5, 6, 7}); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input2, gpu_place, context, &input2_gpu); - TensorCopy(input3, gpu_place, context, &input3_gpu); + TensorCopySync(input1, gpu_place, &input1_gpu); + TensorCopySync(input2, gpu_place, &input2_gpu); + TensorCopySync(input3, gpu_place, &input3_gpu); float16* a = input1_gpu.data(); float16* b = input2_gpu.data(); float16* c = input3_gpu.mutable_data(gpu_place); @@ -397,7 +397,7 @@ TEST(math_function, gemm_trans_cublas_fp16) { context, false, true, m, n, k, float16(1), a, 3, b + 3, 3, float16(1), c + 1, 4); - TensorCopy(input3_gpu, cpu_place, context, &input3); + TensorCopySync(input3_gpu, cpu_place, &input3); context.Wait(); EXPECT_EQ(static_cast(input3_ptr[0]), 0); @@ -441,14 +441,14 @@ void GemvTest(int m, int n, bool trans) { data_b[i] = static_cast(i); } - TensorCopy(mat_a, gpu_place, context, &g_mat_a); - TensorCopy(vec_b, gpu_place, context, &g_vec_b); + TensorCopySync(mat_a, gpu_place, &g_mat_a); + TensorCopySync(vec_b, gpu_place, &g_vec_b); paddle::operators::math::gemv( context, trans, static_cast(m), static_cast(n), 1., g_data_a, g_data_b, 0., g_data_c); - TensorCopy(g_vec_c, cpu_place, context, &vec_c); + TensorCopySync(g_vec_c, cpu_place, &vec_c); if (!trans) { for (int i = 0; i < m; ++i) { diff --git a/paddle/fluid/operators/math/matmul.h b/paddle/fluid/operators/math/matmul.h index 6e2d35cd0f3581c742197a66842696a8e3a936b1..0006c5062f3639da589eea44d47917d879933615 100644 --- a/paddle/fluid/operators/math/matmul.h +++ b/paddle/fluid/operators/math/matmul.h @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include +#include #include "paddle/fluid/operators/math/math_function.h" namespace paddle { diff --git a/paddle/fluid/operators/math/sampler.cc b/paddle/fluid/operators/math/sampler.cc index 3ec6538d7fb0a902f468aa848c3064bd0260fabb..3066dc0ba284611af89c4927f45089a570ab88bc 100644 --- a/paddle/fluid/operators/math/sampler.cc +++ b/paddle/fluid/operators/math/sampler.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "sampler.h" +#include "paddle/fluid/operators/math/sampler.h" namespace paddle { namespace random { diff --git a/paddle/fluid/operators/math/selected_rows_functor_test.cc b/paddle/fluid/operators/math/selected_rows_functor_test.cc index 679b6568ad09ce3a42a7ad4222310711e707d9a8..70bed820ee58885861fa8c5535c931f258625572 100644 --- a/paddle/fluid/operators/math/selected_rows_functor_test.cc +++ b/paddle/fluid/operators/math/selected_rows_functor_test.cc @@ -13,41 +13,50 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/math/selected_rows_functor.h" +#include #include "gtest/gtest.h" #include "paddle/fluid/operators/math/math_function.h" TEST(selected_rows_functor, cpu_add) { - using namespace paddle::framework; - using namespace paddle::platform; - using namespace paddle::operators::math; - - CPUPlace cpu_place; - CPUDeviceContext ctx(cpu_place); - SetConstant functor; + paddle::platform::CPUPlace cpu_place; + paddle::platform::CPUDeviceContext ctx(cpu_place); + paddle::operators::math::SetConstant + functor; int64_t height = 10; int64_t row_numel = 10; std::vector rows1{0, 4, 7}; - std::unique_ptr selected_rows1{new SelectedRows(rows1, height)}; + std::unique_ptr selected_rows1{ + new paddle::framework::SelectedRows(rows1, height)}; auto* in1_value = selected_rows1->mutable_value(); in1_value->mutable_data( - make_ddim({static_cast(rows1.size()), row_numel}), cpu_place); + paddle::framework::make_ddim( + {static_cast(rows1.size()), row_numel}), + cpu_place); functor(ctx, in1_value, 1.0); std::vector rows2{0, 5, 7, 9}; - std::unique_ptr selected_rows2{new SelectedRows(rows2, height)}; + std::unique_ptr selected_rows2{ + new paddle::framework::SelectedRows(rows2, height)}; auto* in2_value = selected_rows2->mutable_value(); in2_value->mutable_data( - make_ddim({static_cast(rows2.size()), row_numel}), cpu_place); + paddle::framework::make_ddim( + {static_cast(rows2.size()), row_numel}), + cpu_place); functor(ctx, in2_value, 2.0); - std::unique_ptr output{new SelectedRows()}; + std::unique_ptr output{ + new paddle::framework::SelectedRows()}; auto* out_value = output->mutable_value(); // simplely concat two SelectedRows - out_value->mutable_data(make_ddim({7, 10}), cpu_place); + out_value->mutable_data(paddle::framework::make_ddim({7, 10}), + cpu_place); - SelectedRowsAdd add_functor; + paddle::operators::math::SelectedRowsAdd + add_functor; add_functor(ctx, *selected_rows1, *selected_rows2, output.get()); auto out_height = output->height(); @@ -78,14 +87,20 @@ TEST(selected_rows_functor, cpu_add) { EXPECT_EQ(out_data[5 * row_numel + 7], 2.0); EXPECT_EQ(out_data[6 * row_numel + 9], 2.0); - std::unique_ptr tensor1{new Tensor()}; - tensor1->mutable_data(make_ddim({height, row_numel}), cpu_place); + std::unique_ptr tensor1{ + new paddle::framework::Tensor()}; + tensor1->mutable_data( + paddle::framework::make_ddim({height, row_numel}), cpu_place); functor(ctx, tensor1.get(), 3.0); - std::unique_ptr tensor2{new Tensor()}; - tensor2->mutable_data(make_ddim({height, row_numel}), cpu_place); + std::unique_ptr tensor2{ + new paddle::framework::Tensor()}; + tensor2->mutable_data( + paddle::framework::make_ddim({height, row_numel}), cpu_place); - SelectedRowsAddTensor add_tensor_functor; + paddle::operators::math::SelectedRowsAddTensor< + paddle::platform::CPUDeviceContext, float> + add_tensor_functor; add_tensor_functor(ctx, *output, *tensor1, tensor2.get()); auto* tensor2_data = tensor2->data(); @@ -106,38 +121,46 @@ TEST(selected_rows_functor, cpu_add) { } TEST(selected_rows_functor, cpu_add_to) { - using namespace paddle::framework; - using namespace paddle::platform; - using namespace paddle::operators::math; - - CPUPlace cpu_place; - CPUDeviceContext ctx(cpu_place); - SetConstant functor; + paddle::platform::CPUPlace cpu_place; + paddle::platform::CPUDeviceContext ctx(cpu_place); + paddle::operators::math::SetConstant + functor; int64_t height = 10; int64_t row_numel = 10; std::vector rows1{0, 4, 7}; - std::unique_ptr selected_rows1{new SelectedRows(rows1, height)}; + 
std::unique_ptr selected_rows1{ + new paddle::framework::SelectedRows(rows1, height)}; auto* in1_value = selected_rows1->mutable_value(); in1_value->mutable_data( - make_ddim({static_cast(rows1.size()), row_numel}), cpu_place); + paddle::framework::make_ddim( + {static_cast(rows1.size()), row_numel}), + cpu_place); functor(ctx, in1_value, 1.0); std::vector rows2{0, 5, 7, 9}; - std::unique_ptr selected_rows2{new SelectedRows(rows2, height)}; + std::unique_ptr selected_rows2{ + new paddle::framework::SelectedRows(rows2, height)}; auto* in2_value = selected_rows2->mutable_value(); in2_value->mutable_data( - make_ddim({static_cast(rows2.size()), row_numel}), cpu_place); + paddle::framework::make_ddim( + {static_cast(rows2.size()), row_numel}), + cpu_place); functor(ctx, in2_value, 2.0); - std::unique_ptr output{new SelectedRows()}; + std::unique_ptr output{ + new paddle::framework::SelectedRows()}; output->set_height(height); auto* out_value = output->mutable_value(); // simplely concat two SelectedRows - out_value->mutable_data(make_ddim({7, 10}), cpu_place); + out_value->mutable_data(paddle::framework::make_ddim({7, 10}), + cpu_place); - SelectedRowsAddTo add_to_functor; + paddle::operators::math::SelectedRowsAddTo + add_to_functor; add_to_functor(ctx, *selected_rows1, 0, output.get()); add_to_functor(ctx, *selected_rows2, in1_value->numel(), output.get()); @@ -169,11 +192,15 @@ TEST(selected_rows_functor, cpu_add_to) { EXPECT_EQ(out_data[5 * row_numel + 7], 2.0); EXPECT_EQ(out_data[6 * row_numel + 9], 2.0); - std::unique_ptr tensor1{new Tensor()}; - tensor1->mutable_data(make_ddim({height, row_numel}), cpu_place); + std::unique_ptr tensor1{ + new paddle::framework::Tensor()}; + tensor1->mutable_data( + paddle::framework::make_ddim({height, row_numel}), cpu_place); functor(ctx, tensor1.get(), 3.0); - SelectedRowsAddToTensor add_to_tensor_functor; + paddle::operators::math::SelectedRowsAddToTensor< + paddle::platform::CPUDeviceContext, float> + add_to_tensor_functor; add_to_tensor_functor(ctx, *output, tensor1.get()); auto* tensor1_data = tensor1->data(); diff --git a/paddle/fluid/operators/math/sequence_padding_test.cc b/paddle/fluid/operators/math/sequence_padding_test.cc index bece46e75374cc38512d77c9d6b2fc6584e39db2..e3d62144856eb30a64007b54c16719cc028c70f2 100644 --- a/paddle/fluid/operators/math/sequence_padding_test.cc +++ b/paddle/fluid/operators/math/sequence_padding_test.cc @@ -14,6 +14,7 @@ limitations under the License. */ #include "paddle/fluid/operators/math/sequence_padding.h" #include +#include template void TestSequencePadding(const paddle::framework::LoD& lod, @@ -75,7 +76,7 @@ void TestSequencePadding(const paddle::framework::LoD& lod, delete place; delete context; -}; +} TEST(Seq2BatchPadding, CPU) { paddle::framework::LoD lod1; diff --git a/paddle/fluid/operators/math/sequence_pooling.cc b/paddle/fluid/operators/math/sequence_pooling.cc index 5ae42ab973c81d3794fbbbe088e37ab02168c8dc..f25d3d3f1ee1f89d46b8e7c88ca68048f5203544 100644 --- a/paddle/fluid/operators/math/sequence_pooling.cc +++ b/paddle/fluid/operators/math/sequence_pooling.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/math/sequence_pooling.h" +#include #include "paddle/fluid/operators/math/math_function.h" namespace paddle { diff --git a/paddle/fluid/operators/math/sequence_pooling.cu b/paddle/fluid/operators/math/sequence_pooling.cu index 1935364da37e9a9881651455d2da4ecef1b1e266..36f6402396379ab79fcbc71fd43d380227adccc4 100644 --- a/paddle/fluid/operators/math/sequence_pooling.cu +++ b/paddle/fluid/operators/math/sequence_pooling.cu @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/sequence_pooling.h" #include "paddle/fluid/platform/cuda_helper.h" diff --git a/paddle/fluid/operators/math/sequence_pooling.h b/paddle/fluid/operators/math/sequence_pooling.h index 38e780222955644c14e5bbbf16dee720c7758f5c..8dcbee65d0b63a137e5f422ec8667cc950641b4a 100644 --- a/paddle/fluid/operators/math/sequence_pooling.h +++ b/paddle/fluid/operators/math/sequence_pooling.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/platform/device_context.h" diff --git a/paddle/fluid/operators/math/vol2col.cc b/paddle/fluid/operators/math/vol2col.cc index 09e9f85cca349be1dc46a3f3a0b2d919485d6fa1..e92adc09ba01b032aba8eba94bcb4ba96524c641 100644 --- a/paddle/fluid/operators/math/vol2col.cc +++ b/paddle/fluid/operators/math/vol2col.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/vol2col.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/vol2col.cu b/paddle/fluid/operators/math/vol2col.cu index 619730d394d075d05a016e421fd75c4549015216..e0f3ef36879327c0592bb955dd800b44b228e721 100644 --- a/paddle/fluid/operators/math/vol2col.cu +++ b/paddle/fluid/operators/math/vol2col.cu @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include +#include #include "paddle/fluid/operators/math/vol2col.h" #include "paddle/fluid/platform/cuda_helper.h" diff --git a/paddle/fluid/operators/math/vol2col.h b/paddle/fluid/operators/math/vol2col.h index dbc2ed7a6939cfb5dca61082decb8960e5f9ebda..5f59de8f02a52209a3901ca03680eb2d0dbc2658 100644 --- a/paddle/fluid/operators/math/vol2col.h +++ b/paddle/fluid/operators/math/vol2col.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/platform/device_context.h" diff --git a/paddle/fluid/operators/math/vol2col_test.cc b/paddle/fluid/operators/math/vol2col_test.cc index eb91f862e39d9f158200f69fd48e7245dde47171..aa979c4f10907e604758c3e2cfb776cb994c9ceb 100644 --- a/paddle/fluid/operators/math/vol2col_test.cc +++ b/paddle/fluid/operators/math/vol2col_test.cc @@ -15,6 +15,7 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/math/vol2col.h" #include #include +#include template void testVol2col() { @@ -71,7 +72,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - paddle::framework::TensorCopy(input_tmp, *place, *context, &input); + paddle::framework::TensorCopySync(input_tmp, *place, &input); } output.mutable_data({1, filter_size, filter_size, filter_size, output_depth, output_height, output_width}, @@ -85,7 +86,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { out_cfo_ptr = output.data(); } else { - TensorCopy(output, paddle::platform::CPUPlace(), *context, &output_tmp); + TensorCopySync(output, paddle::platform::CPUPlace(), &output_tmp); out_cfo_ptr = output_tmp.data(); } @@ -99,7 +100,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - TensorCopy(input_tmp, *place, *context, &input); + TensorCopySync(input_tmp, *place, &input); } paddle::operators::math::Col2VolFunctor col2vol; @@ -109,7 +110,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { in_ptr = input.data(); } else { - TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp); + TensorCopySync(input, paddle::platform::CPUPlace(), &input_tmp); in_ptr = input_tmp.data(); } diff --git a/paddle/fluid/operators/nccl_op_test.cu.cc b/paddle/fluid/operators/nccl_op_test.cu.cc index 20b8a5c98ab16ac8121cb2fd01deb8ecc1966d44..ef54d79fdf2becde98c68044d14bd4347773b975 100644 --- a/paddle/fluid/operators/nccl_op_test.cu.cc +++ b/paddle/fluid/operators/nccl_op_test.cu.cc @@ -228,10 +228,8 @@ TEST_F(NCCLTester, ncclReduceOp) { result_tensor->Resize(kDims); auto *ct = result_tensor->mutable_data(cpu_place); - paddle::memory::Copy( - cpu_place, ct, p::CUDAPlace(gpu_list_[kRoot]), rt, - recv_tensor.numel() * sizeof(float), - static_cast(dev_ctxs_[kRoot])->stream()); + paddle::memory::Copy(cpu_place, ct, p::CUDAPlace(gpu_list_[kRoot]), rt, + recv_tensor.numel() * sizeof(float), nullptr); for (int64_t j = 0; j < f::product(kDims); ++j) { ASSERT_NEAR(ct[j], expected_result, 1e-5); diff --git a/paddle/fluid/operators/parallel_do_op.cc b/paddle/fluid/operators/parallel_do_op.cc index b28c16b13fce30c6e9be9953009b53e722cf4885..ae34fe2184b43cc104c14672dec30efd3b0e9f3b 100644 --- a/paddle/fluid/operators/parallel_do_op.cc +++ b/paddle/fluid/operators/parallel_do_op.cc @@ -364,7 +364,7 @@ class ParallelDoGradOpDescMaker : public framework::SingleGradOpDescMaker { } } grad->SetAttrMap(this->Attrs()); - grad->SetBlockAttr(kParallelBlock, *grad_block_[0]); + grad->SetBlockAttr(kParallelBlock, grad_block_[0]); return std::unique_ptr(grad); } diff --git a/paddle/fluid/operators/reader/CMakeLists.txt b/paddle/fluid/operators/reader/CMakeLists.txt index 845528860f91d0b479bb3c4dbbe05e32c68dc16f..3106978eb0149b14849dfd1aaad8bbe76791f2f6 100644 --- a/paddle/fluid/operators/reader/CMakeLists.txt +++ b/paddle/fluid/operators/reader/CMakeLists.txt @@ -23,5 +23,7 @@ reader_library(create_recordio_file_reader_op SRCS create_recordio_file_reader_o reader_library(create_double_buffer_reader_op SRCS create_double_buffer_reader_op.cc) reader_library(create_multi_pass_reader_op SRCS create_multi_pass_reader_op.cc) reader_library(create_threaded_reader_op SRCS create_threaded_reader_op.cc) + +cc_test(reader_blocking_queue_test SRCS reader_blocking_queue_test.cc) # Export local libraries to parent set(READER_LIBRARY ${LOCAL_READER_LIBS} PARENT_SCOPE) diff --git a/paddle/fluid/operators/reader/blocking_queue.h 
b/paddle/fluid/operators/reader/blocking_queue.h new file mode 100644 index 0000000000000000000000000000000000000000..71684b14176edc8f71efbefa9a7decffc8f3011e --- /dev/null +++ b/paddle/fluid/operators/reader/blocking_queue.h @@ -0,0 +1,112 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include // NOLINT +#include + +#include "paddle/fluid/platform/enforce.h" + +namespace paddle { +namespace operators { +namespace reader { + +template +class BlockingQueue { + // BlockingQueue is for buffered reading and is supposed to use only the + // reader package. It is true that we could and we should have been using + // framework::Channel, but which has currently a deadlock bug. BlockingQueue + // is a workaround and a simplified version of framework::Channel as it + // doesn't support GPU and it implements on buffered blocking queue. + public: + explicit BlockingQueue(size_t capacity) + : capacity_(capacity), closed_(false) { + PADDLE_ENFORCE_GT( + capacity_, 0, + "The capacity of a reader::BlockingQueue must be greater than 0."); + } + + bool Send(const T& elem) { + std::unique_lock lock(mutex_); + send_cv_.wait(lock, [&] { return queue_.size() < capacity_ || closed_; }); + if (closed_) { + VLOG(5) + << "WARNING: Sending an element to a closed reader::BlokcingQueue."; + return false; + } + PADDLE_ENFORCE_LT(queue_.size(), capacity_); + queue_.push_back(elem); + receive_cv_.notify_one(); + return true; + } + + bool Send(T&& elem) { + std::unique_lock lock(mutex_); + send_cv_.wait(lock, [&] { return queue_.size() < capacity_ || closed_; }); + if (closed_) { + VLOG(5) + << "WARNING: Sending an element to a closed reader::BlokcingQueue."; + return false; + } + PADDLE_ENFORCE_LT(queue_.size(), capacity_); + queue_.emplace_back(std::move(elem)); + receive_cv_.notify_one(); + return true; + } + + bool Receive(T* elem) { + std::unique_lock lock(mutex_); + receive_cv_.wait(lock, [&] { return !queue_.empty() || closed_; }); + if (!queue_.empty()) { + PADDLE_ENFORCE_NOT_NULL(elem); + *elem = queue_.front(); + queue_.pop_front(); + send_cv_.notify_one(); + return true; + } else { + PADDLE_ENFORCE(closed_); + return false; + } + } + + void Close() { + std::lock_guard lock(mutex_); + closed_ = true; + send_cv_.notify_all(); + receive_cv_.notify_all(); + } + + bool IsClosed() { + std::lock_guard lock(mutex_); + return closed_; + } + + size_t Cap() { + std::lock_guard lock(mutex_); + return capacity_; + } + + private: + size_t capacity_; + bool closed_; + std::deque queue_; + + std::mutex mutex_; + std::condition_variable receive_cv_; + std::condition_variable send_cv_; +}; +} // namespace reader +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc index 4372f23fc1dbd85e43b04a9d644977392316c2e9..e5efac461512a9a1869318d6547233589ca45a77 100644 --- 
a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc +++ b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc @@ -14,7 +14,7 @@ #include // NOLINT -#include "paddle/fluid/framework/channel.h" +#include "paddle/fluid/operators/reader/blocking_queue.h" #include "paddle/fluid/operators/reader/reader_op_registry.h" namespace paddle { @@ -23,13 +23,13 @@ namespace reader { // 'Double buffer' means we shall maintain two batches of input data at the same // time. So the kCacheSize shoul be at least 2. -static constexpr size_t kCacheSize = 2; +static constexpr size_t kCacheSize = 3; // There will be two bacthes out of the channel during training: // 1. the one waiting to be sent to the channel // 2. the one just be received from the channel, which is also being used by // subsequent operators. // So the channel size should be kChacheSize - 2 -static constexpr size_t kChannelSize = 0; // kCacheSize - 2 +static constexpr size_t kChannelSize = 1; // kCacheSize - 2 class DoubleBufferReader : public framework::DecoratedReader { public: @@ -55,10 +55,8 @@ class DoubleBufferReader : public framework::DecoratedReader { ~DoubleBufferReader() { EndPrefetcher(); } private: - bool HasNext() const; - void StartPrefetcher() { - channel_ = framework::MakeChannel(kChannelSize); + channel_ = new reader::BlockingQueue(kChannelSize); prefetcher_ = std::thread([this] { PrefetchThreadFunc(); }); } @@ -74,7 +72,7 @@ class DoubleBufferReader : public framework::DecoratedReader { void PrefetchThreadFunc(); std::thread prefetcher_; - framework::Channel* channel_; + reader::BlockingQueue* channel_; platform::Place place_; std::vector> cpu_tensor_cache_; std::vector> gpu_tensor_cache_; @@ -139,17 +137,16 @@ class CreateDoubleBufferReaderOpMaker : public DecoratedReaderMakerBase { }; void DoubleBufferReader::ReadNext(std::vector* out) { - out->clear(); - if (HasNext()) { - size_t cached_tensor_id; - channel_->Receive(&cached_tensor_id); + size_t cached_tensor_id; + if (channel_->Receive(&cached_tensor_id)) { if (platform::is_gpu_place(place_)) { *out = gpu_tensor_cache_[cached_tensor_id]; - ctxs_[cached_tensor_id]->Wait(); } else { // CPU place *out = cpu_tensor_cache_[cached_tensor_id]; } + } else { + out->clear(); } } @@ -159,12 +156,6 @@ void DoubleBufferReader::ReInit() { StartPrefetcher(); } -bool DoubleBufferReader::HasNext() const { - while (!channel_->IsClosed() && !channel_->CanReceive()) { - } - return channel_->CanReceive(); -} - void DoubleBufferReader::PrefetchThreadFunc() { VLOG(5) << "A new prefetch thread starts."; size_t cached_tensor_id = 0; @@ -177,18 +168,14 @@ void DoubleBufferReader::PrefetchThreadFunc() { } if (platform::is_gpu_place(place_)) { auto& gpu_batch = gpu_tensor_cache_[cached_tensor_id]; - auto* gpu_ctx = ctxs_[cached_tensor_id].get(); gpu_batch.resize(cpu_batch.size()); for (size_t i = 0; i < cpu_batch.size(); ++i) { - framework::TensorCopy(cpu_batch[i], place_, *gpu_ctx, &gpu_batch[i], - true); + // TODO(fengjiayi): Use asynchronous TensorCopy instead + framework::TensorCopySync(cpu_batch[i], place_, &gpu_batch[i]); gpu_batch[i].set_lod(cpu_batch[i].lod()); } } - try { - size_t tmp = cached_tensor_id; - channel_->Send(&tmp); - } catch (paddle::platform::EnforceNotMet e) { + if (!channel_->Send(cached_tensor_id)) { VLOG(5) << "WARNING: The double buffer channel has been closed. 
The " "prefetch thread will terminate."; break; diff --git a/paddle/fluid/operators/reader/create_threaded_reader_op.cc b/paddle/fluid/operators/reader/create_threaded_reader_op.cc index cbf709d5e734c0f2adf3735dc28043c1340349da..1cb9bd36455a2287b8ba4fb4ca14a4c5338da098 100644 --- a/paddle/fluid/operators/reader/create_threaded_reader_op.cc +++ b/paddle/fluid/operators/reader/create_threaded_reader_op.cc @@ -21,26 +21,16 @@ namespace reader { class ThreadedReader : public framework::DecoratedReader { public: - ThreadedReader(ReaderBase* reader, bool safe_mode) - : DecoratedReader(reader), safe_mode_(safe_mode) {} + explicit ThreadedReader(ReaderBase* reader) : DecoratedReader(reader) {} void ReadNext(std::vector* out) override { std::lock_guard lock(mutex_); reader_->ReadNext(out); } - void ReInit() override { - if (safe_mode_) { - PADDLE_THROW( - "ThreadedReader::ReInit() is disabled when 'safe_mode' is true."); - } - VLOG(5) << "ThreadedReader::ReInit() is invoked! It might be buggy in " - "multi-thread environment."; - reader_->ReInit(); - } + void ReInit() override { reader_->ReInit(); } private: - bool safe_mode_; std::mutex mutex_; }; @@ -58,8 +48,7 @@ class CreateThreadedReaderOp : public framework::OperatorBase { } const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) ->Get(); - bool safe_mode = Attr("safe_mode"); - out->Reset(new ThreadedReader(underlying_reader.Get(), safe_mode)); + out->Reset(new ThreadedReader(underlying_reader.Get())); } }; @@ -67,10 +56,6 @@ class CreateThreadedReaderOpMaker : public DecoratedReaderMakerBase { public: CreateThreadedReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) : DecoratedReaderMakerBase(op_proto, op_checker) { - AddAttr("safe_mode", - "When 'safe_mode' is true, 'ReInit()' is disabled to avoid " - "unexpected bugs in multi-thread environment.") - .SetDefault(true); AddComment(R"DOC( CreateThreadedReader Operator diff --git a/paddle/fluid/operators/reader/open_files_op.cc b/paddle/fluid/operators/reader/open_files_op.cc index 779dc8a6a0deb7792e0540071e3a2588102fa708..91ad7d56583446ee4686e74187de166f387125df 100644 --- a/paddle/fluid/operators/reader/open_files_op.cc +++ b/paddle/fluid/operators/reader/open_files_op.cc @@ -14,7 +14,7 @@ #include // NOLINT -#include "paddle/fluid/framework/channel.h" +#include "paddle/fluid/operators/reader/blocking_queue.h" #include "paddle/fluid/operators/reader/reader_op_registry.h" namespace paddle { @@ -37,7 +37,6 @@ class MultiFileReader : public framework::ReaderBase { ~MultiFileReader() { EndScheduler(); } private: - bool HasNext(); void StartNewScheduler(); void EndScheduler(); void ScheduleThreadFunc(); @@ -48,15 +47,14 @@ class MultiFileReader : public framework::ReaderBase { std::thread scheduler_; std::vector prefetchers_; size_t buffer_size_; - framework::Channel* waiting_file_idx_; - framework::Channel* available_thread_idx_; - framework::Channel>* buffer_; + reader::BlockingQueue* waiting_file_idx_; + reader::BlockingQueue* available_thread_idx_; + reader::BlockingQueue>* buffer_; }; void MultiFileReader::ReadNext(std::vector* out) { - out->clear(); - if (HasNext()) { - buffer_->Receive(out); + if (!buffer_->Receive(out)) { + out->clear(); } } @@ -65,25 +63,19 @@ void MultiFileReader::ReInit() { StartNewScheduler(); } -bool MultiFileReader::HasNext() { - while (!buffer_->IsClosed() && !buffer_->CanReceive()) { - } - return buffer_->CanReceive(); -} - void MultiFileReader::StartNewScheduler() { size_t thread_num = prefetchers_.size(); - waiting_file_idx_ = 
framework::MakeChannel(file_names_.size()); - available_thread_idx_ = framework::MakeChannel(thread_num); - buffer_ = - framework::MakeChannel>(buffer_size_); + waiting_file_idx_ = new reader::BlockingQueue(file_names_.size()); + available_thread_idx_ = new reader::BlockingQueue(thread_num); + buffer_ = new reader::BlockingQueue>( + buffer_size_); for (size_t i = 0; i < file_names_.size(); ++i) { - waiting_file_idx_->Send(&i); + waiting_file_idx_->Send(i); } waiting_file_idx_->Close(); for (size_t i = 0; i < thread_num; ++i) { - available_thread_idx_->Send(&i); + available_thread_idx_->Send(i); } scheduler_ = std::thread([this] { ScheduleThreadFunc(); }); @@ -149,7 +141,7 @@ void MultiFileReader::PrefetchThreadFunc(std::string file_name, break; } try { - buffer_->Send(&ins); + buffer_->Send(std::move(ins)); } catch (paddle::platform::EnforceNotMet e) { VLOG(5) << "WARNING: The buffer channel has been closed. The prefetch " "thread of file '" @@ -158,9 +150,7 @@ void MultiFileReader::PrefetchThreadFunc(std::string file_name, } } - try { - available_thread_idx_->Send(&thread_idx); - } catch (paddle::platform::EnforceNotMet e) { + if (!available_thread_idx_->Send(thread_idx)) { VLOG(5) << "WARNING: The available_thread_idx_ channel has been closed. " "Fail to send thread_idx."; } diff --git a/paddle/fluid/operators/reader/reader_blocking_queue_test.cc b/paddle/fluid/operators/reader/reader_blocking_queue_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..7d1b381d56c8cdc1e79e594b18c1a1ed59ab5284 --- /dev/null +++ b/paddle/fluid/operators/reader/reader_blocking_queue_test.cc @@ -0,0 +1,219 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
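The reader operators above replace framework::Channel with the new reader::BlockingQueue, whose contract is: Send blocks while the queue is full and returns false once the queue has been closed, while Receive blocks while the queue is empty and returns false only when the queue is both closed and drained. The sketch below restates that producer/consumer protocol using only the standard library; it is a simplified stand-in for the header above, not a copy of it:

```cpp
#include <condition_variable>
#include <deque>
#include <iostream>
#include <mutex>
#include <thread>

// Simplified bounded blocking queue with the same Send/Receive/Close
// semantics as reader::BlockingQueue (no enforcement macros, no move overload).
template <typename T>
class BoundedQueue {
 public:
  explicit BoundedQueue(size_t cap) : cap_(cap) {}

  bool Send(const T& v) {
    std::unique_lock<std::mutex> lock(mu_);
    send_cv_.wait(lock, [&] { return q_.size() < cap_ || closed_; });
    if (closed_) return false;     // sending to a closed queue fails
    q_.push_back(v);
    recv_cv_.notify_one();
    return true;
  }

  bool Receive(T* v) {
    std::unique_lock<std::mutex> lock(mu_);
    recv_cv_.wait(lock, [&] { return !q_.empty() || closed_; });
    if (q_.empty()) return false;  // closed and fully drained
    *v = q_.front();
    q_.pop_front();
    send_cv_.notify_one();
    return true;
  }

  void Close() {
    std::lock_guard<std::mutex> lock(mu_);
    closed_ = true;
    send_cv_.notify_all();
    recv_cv_.notify_all();
  }

 private:
  size_t cap_;
  bool closed_ = false;
  std::deque<T> q_;
  std::mutex mu_;
  std::condition_variable send_cv_, recv_cv_;
};

int main() {
  BoundedQueue<int> q(2);
  // Producer: the role the prefetch threads play.
  std::thread producer([&] {
    for (int i = 0; i < 5; ++i) q.Send(i);
    q.Close();                     // signals "no more batches"
  });
  // Consumer: the role ReadNext plays; it stops when Receive returns false.
  int v;
  while (q.Receive(&v)) std::cout << v << " ";
  std::cout << std::endl;          // prints 0 1 2 3 4
  producer.join();
  return 0;
}
```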
+ +#include // NOLINT +#include +#include // NOLINT +#include +#include "gtest/gtest.h" + +#include "paddle/fluid/operators/reader/blocking_queue.h" + +using paddle::operators::reader::BlockingQueue; + +TEST(BlockingQueue, CapacityTest) { + size_t cap = 10; + BlockingQueue q(cap); + EXPECT_EQ(q.Cap(), cap); +} + +void FirstInFirstOut(size_t queue_cap, size_t elem_num, size_t send_time_gap, + size_t receive_time_gap) { + BlockingQueue q(queue_cap); + std::thread sender([&]() { + for (size_t i = 0; i < elem_num; ++i) { + std::this_thread::sleep_for(std::chrono::milliseconds(send_time_gap)); + EXPECT_TRUE(q.Send(i)); + } + q.Close(); + }); + size_t count = 0; + while (true) { + std::this_thread::sleep_for(std::chrono::milliseconds(receive_time_gap)); + size_t elem; + if (!q.Receive(&elem)) { + break; + } + EXPECT_EQ(elem, count++); + } + sender.join(); + EXPECT_EQ(count, elem_num); + EXPECT_TRUE(q.IsClosed()); +} + +TEST(BlockingQueue, FirstInFirstOutTest) { + FirstInFirstOut(2, 5, 2, 50); + FirstInFirstOut(2, 5, 50, 2); + FirstInFirstOut(10, 3, 50, 2); + FirstInFirstOut(10, 3, 2, 50); +} + +TEST(BlockingQueue, SenderBlockingTest) { + const size_t queue_cap = 2; + BlockingQueue q(queue_cap); + size_t send_count = 0; + std::thread sender([&]() { + for (size_t i = 0; i < 5; ++i) { + if (!q.Send(i)) { + break; + } + ++send_count; + } + }); + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + q.Close(); + sender.join(); + EXPECT_EQ(send_count, queue_cap); + std::vector res; + while (true) { + size_t elem; + if (!q.Receive(&elem)) { + break; + } + res.push_back(elem); + } + EXPECT_EQ(res.size(), queue_cap); + for (size_t i = 0; i < res.size(); ++i) { + EXPECT_EQ(res[i], i); + } +} + +TEST(BlockingQueue, ReceiverBlockingTest) { + const size_t queue_cap = 5; + BlockingQueue q(queue_cap); + std::vector receive_res; + std::thread receiver([&]() { + size_t elem; + while (true) { + if (!q.Receive(&elem)) { + break; + } + receive_res.push_back(elem); + } + }); + std::vector to_send{2, 1, 7}; + for (auto e : to_send) { + q.Send(e); + } + q.Close(); + receiver.join(); + EXPECT_EQ(receive_res.size(), to_send.size()); + for (size_t i = 0; i < to_send.size(); ++i) { + EXPECT_EQ(receive_res[i], to_send[i]); + } +} + +void CheckIsUnorderedSame(const std::vector>& v1, + const std::vector>& v2) { + std::set s1; + std::set s2; + for (auto vec : v1) { + for (size_t elem : vec) { + s1.insert(elem); + } + } + for (auto vec : v2) { + for (size_t elem : vec) { + s2.insert(elem); + } + } + EXPECT_EQ(s1.size(), s2.size()); + auto it1 = s1.begin(); + auto it2 = s2.begin(); + while (it1 != s1.end()) { + EXPECT_EQ(*it1, *it2); + ++it1; + ++it2; + } +} + +void MultiSenderMultiReceiver(const size_t queue_cap, + const std::vector>& to_send, + size_t receiver_num, size_t send_time_gap, + size_t receive_time_gap) { + BlockingQueue q(queue_cap); + size_t sender_num = to_send.size(); + std::vector senders; + for (size_t s_idx = 0; s_idx < sender_num; ++s_idx) { + senders.emplace_back(std::thread([&, s_idx] { + for (size_t elem : to_send[s_idx]) { + std::this_thread::sleep_for(std::chrono::milliseconds(send_time_gap)); + EXPECT_TRUE(q.Send(elem)); + } + })); + } + std::vector receivers; + std::mutex mu; + std::vector> res; + for (size_t r_idx = 0; r_idx < receiver_num; ++r_idx) { + receivers.emplace_back(std::thread([&] { + std::vector receiver_res; + while (true) { + std::this_thread::sleep_for( + std::chrono::milliseconds(receive_time_gap)); + size_t elem; + if (!q.Receive(&elem)) { + break; + } + 
receiver_res.push_back(elem); + } + std::lock_guard lock(mu); + res.push_back(receiver_res); + })); + } + for (auto& t : senders) { + t.join(); + } + q.Close(); + for (auto& t : receivers) { + t.join(); + } + CheckIsUnorderedSame(to_send, res); +} + +TEST(BlockingQueue, MultiSenderMultiReaderTest) { + std::vector> to_send_1{{2, 3, 4}, {9}, {0, 7, 15, 6}}; + MultiSenderMultiReceiver(2, to_send_1, 2, 0, 0); + MultiSenderMultiReceiver(10, to_send_1, 2, 0, 0); + MultiSenderMultiReceiver(2, to_send_1, 20, 0, 0); + MultiSenderMultiReceiver(2, to_send_1, 2, 50, 0); + MultiSenderMultiReceiver(2, to_send_1, 2, 0, 50); + + std::vector> to_send_2{ + {2, 3, 4}, {}, {0, 7, 15, 6, 9, 32}}; + MultiSenderMultiReceiver(2, to_send_2, 3, 0, 0); + MultiSenderMultiReceiver(20, to_send_2, 3, 0, 0); + MultiSenderMultiReceiver(2, to_send_2, 30, 0, 0); + MultiSenderMultiReceiver(2, to_send_2, 3, 50, 0); + MultiSenderMultiReceiver(2, to_send_2, 3, 0, 50); +} + +struct MyClass { + MyClass() : val_(0) {} + explicit MyClass(int val) : val_(val) {} + MyClass(const MyClass& b) { val_ = b.val_; } + MyClass(MyClass&& b) { val_ = b.val_; } + void operator=(const MyClass& b) { val_ = b.val_; } + + int val_; +}; + +TEST(BlockingQueue, MyClassTest) { + BlockingQueue q(2); + MyClass a(200); + q.Send(std::move(a)); + MyClass b; + q.Receive(&b); + EXPECT_EQ(a.val_, b.val_); +} diff --git a/paddle/fluid/operators/reader/reader_op_registry.cc b/paddle/fluid/operators/reader/reader_op_registry.cc index fc8dc747ff0c2286f4516d8350f75d9887361924..3ff4536819b128d9c593b97f4942a0292a3b6b36 100644 --- a/paddle/fluid/operators/reader/reader_op_registry.cc +++ b/paddle/fluid/operators/reader/reader_op_registry.cc @@ -12,7 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
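BlockingQueue also provides an rvalue Send(T&&) overload, which MultiFileReader uses as buffer_->Send(std::move(ins)) so that a whole batch of tensors is moved rather than copied into the queue. A minimal sketch of how the two overloads are selected (the payload type below is illustrative, not Paddle code):

```cpp
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Illustrative queue front-end that only demonstrates overload selection.
struct DemoQueue {
  std::vector<std::string> storage;
  void Send(const std::string& s) {    // lvalues: copied into the queue
    std::cout << "copy\n";
    storage.push_back(s);
  }
  void Send(std::string&& s) {         // rvalues: moved into the queue
    std::cout << "move\n";
    storage.push_back(std::move(s));
  }
};

int main() {
  DemoQueue q;
  std::string batch(1 << 20, 'x');     // pretend this is a large batch
  q.Send(batch);                       // prints "copy"; batch is still usable
  q.Send(std::move(batch));            // prints "move"; batch is now valid but unspecified
  // Note: the MyClass test above can still compare a.val_ after the move only
  // because its move constructor copies the field instead of clearing it.
  return 0;
}
```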
-#include "reader_op_registry.h" +#include "paddle/fluid/operators/reader/reader_op_registry.h" +#include +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/reader/reader_op_registry.h b/paddle/fluid/operators/reader/reader_op_registry.h index 929d32ad8b367865e33530f8517343c513ee9878..ec25f55ef5c3bb691b1213328b996c080656bb7b 100644 --- a/paddle/fluid/operators/reader/reader_op_registry.h +++ b/paddle/fluid/operators/reader/reader_op_registry.h @@ -14,6 +14,8 @@ #pragma once +#include +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/reader.h" diff --git a/paddle/fluid/operators/recurrent_op.cc b/paddle/fluid/operators/recurrent_op.cc index 00241e768217db0a611c00bbc72e2fb83ade73b4..72c2905872c528a7ed05820744f4031799ad9e46 100644 --- a/paddle/fluid/operators/recurrent_op.cc +++ b/paddle/fluid/operators/recurrent_op.cc @@ -596,7 +596,7 @@ class RecurrentGradOpDescMaker : public framework::SingleGradOpDescMaker { } } grad->SetAttrMap(this->Attrs()); - grad->SetBlockAttr(kStepBlock, *grad_block_[0]); + grad->SetBlockAttr(kStepBlock, grad_block_[0]); return std::unique_ptr(grad); } diff --git a/paddle/fluid/operators/reshape_op.h b/paddle/fluid/operators/reshape_op.h index 8320c257c9ab15efec29eabe99eca5b6f74c9e31..ccd7063fe69e0f21b4d2a821bb70902b39c9b9de 100644 --- a/paddle/fluid/operators/reshape_op.h +++ b/paddle/fluid/operators/reshape_op.h @@ -93,8 +93,14 @@ class ReshapeOp : public framework::OperatorWithKernel { if (unk_dim_idx != -1) { output_shape[unk_dim_idx] = -in_size / capacity; - PADDLE_ENFORCE_EQ(output_shape[unk_dim_idx] * capacity, -in_size, - "Invalid shape is given."); + // in_size < 0 and is un-determinate in compile time, skip the check, + // for example, in_dims = [-1, 8, 1, 1], shape = [-1, 3, 8], + // capacity = -24, in_size = -8, output_shape[0] = 0 + // the following check will fail. + if (in_size > 0) { + PADDLE_ENFORCE_EQ(output_shape[unk_dim_idx] * capacity, -in_size, + "Invalid shape is given."); + } } else { PADDLE_ENFORCE_EQ(capacity, in_size, "Invalid shape is given."); } @@ -124,10 +130,8 @@ class ReshapeKernel : public framework::OpKernel { auto *shape_data = shape_tensor->data(); framework::Tensor cpu_shape_tensor; if (platform::is_gpu_place(ctx.GetPlace())) { - TensorCopy(*shape_tensor, platform::CPUPlace(), ctx.device_context(), - &cpu_shape_tensor); + TensorCopySync(*shape_tensor, platform::CPUPlace(), &cpu_shape_tensor); shape_data = cpu_shape_tensor.data(); - ctx.device_context().Wait(); } auto shape = std::vector(shape_data, shape_data + shape_tensor->numel()); @@ -146,9 +150,7 @@ class ReshapeKernel : public framework::OpKernel { out->Resize(out_dims); if (!inplace) { out->mutable_data(ctx.GetPlace()); - framework::TensorCopy(*in, ctx.GetPlace(), ctx.device_context(), out); - ctx.device_context().Wait(); - // TensorCopy will resize to in_dims. 
+ framework::TensorCopySync(*in, ctx.GetPlace(), out); out->Resize(out_dims); } else { out->ShareDataWith(*in); diff --git a/paddle/fluid/operators/roi_pool_op.cc b/paddle/fluid/operators/roi_pool_op.cc index 224ec93d28ec75c52848d7c8400e684df0d69209..397e49ef20ac45515a852f466d693f358ef5461b 100644 --- a/paddle/fluid/operators/roi_pool_op.cc +++ b/paddle/fluid/operators/roi_pool_op.cc @@ -18,8 +18,7 @@ namespace paddle { namespace operators { using Tensor = framework::Tensor; - -static constexpr int kROISize = 5; +using LoDTensor = framework::LoDTensor; class ROIPoolOp : public framework::OperatorWithKernel { public: @@ -40,11 +39,11 @@ class ROIPoolOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(input_dims.size() == 4, "The format of input tensor is NCHW."); PADDLE_ENFORCE(rois_dims.size() == 2, - "ROIs should be a 2-D tensor of shape (num_rois, 5)" - "given as [[batch_id, x1, y1, x2, y2], …]."); + "ROIs should be a 2-D LoDTensor of shape (num_rois, 4)" + "given as [[x1, y1, x2, y2], …]."); PADDLE_ENFORCE(rois_dims[1] == kROISize, - "ROIs should be a 2-D tensor of shape (num_rois, 5)" - "given as [[batch_id, x1, y1, x2, y2], …]."); + "ROIs should be a 2-D LoDTensor of shape (num_rois, 4)" + "given as [[x1, y1, x2, y2], …]."); int pooled_height = ctx->Attrs().Get("pooled_height"); int pooled_width = ctx->Attrs().Get("pooled_width"); @@ -109,10 +108,10 @@ class ROIPoolOpMaker : public framework::OpProtoAndCheckerMaker { "H is the height of the feature, and " "W is the width of the feature."); AddInput("ROIs", - "(Tensor), " + "(LoDTensor), " "ROIs (Regions of Interest) to pool over. " - "should be a 2-D tensor of shape (num_rois, 5)" - "given as [[batch_id, x1, y1, x2, y2], …]. " + "should be a 2-D LoDTensor of shape (num_rois, 4)" + "given as [[x1, y1, x2, y2], …]. 
" "Where batch_id is the id of the data, " "(x1, y1) is the top left coordinates, and " "(x2, y2) is the bottom right coordinates."); diff --git a/paddle/fluid/operators/roi_pool_op.cu b/paddle/fluid/operators/roi_pool_op.cu index 1931629d1340758edb6664a5e3ffdba126b33717..0bdfee0434f6934b20083c42dd5da64f4cddf8e2 100644 --- a/paddle/fluid/operators/roi_pool_op.cu +++ b/paddle/fluid/operators/roi_pool_op.cu @@ -19,10 +19,10 @@ namespace paddle { namespace operators { using Tensor = framework::Tensor; +using LoDTensor = framework::LoDTensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; -static constexpr int kROISize = 5; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, @@ -30,13 +30,11 @@ static inline int NumBlocks(const int N) { } template -__global__ void GPUROIPoolForward(const int nthreads, const T* input_data, - const int64_t* input_rois, - const float spatial_scale, const int channels, - const int height, const int width, - const int pooled_height, - const int pooled_width, T* output_data, - int64_t* argmax_data) { +__global__ void GPUROIPoolForward( + const int nthreads, const T* input_data, const int64_t* input_rois, + const float spatial_scale, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + int* roi_batch_id_data, T* output_data, int64_t* argmax_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t i = index; i < nthreads; i += offset) { @@ -46,11 +44,11 @@ __global__ void GPUROIPoolForward(const int nthreads, const T* input_data, int n = index / pooled_width / pooled_height / channels; const int64_t* offset_input_rois = input_rois + n * kROISize; - int roi_batch_ind = offset_input_rois[0]; - int roi_start_w = round(offset_input_rois[1] * spatial_scale); - int roi_start_h = round(offset_input_rois[2] * spatial_scale); - int roi_end_w = round(offset_input_rois[3] * spatial_scale); - int roi_end_h = round(offset_input_rois[4] * spatial_scale); + int roi_batch_ind = roi_batch_id_data[n]; + int roi_start_w = round(offset_input_rois[0] * spatial_scale); + int roi_start_h = round(offset_input_rois[1] * spatial_scale); + int roi_end_w = round(offset_input_rois[2] * spatial_scale); + int roi_end_h = round(offset_input_rois[3] * spatial_scale); int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); @@ -93,7 +91,8 @@ __global__ void GPUROIPoolBackward( const int nthreads, const int64_t* input_rois, const T* output_grad, const int64_t* argmax_data, const int num_rois, const float spatial_scale, const int channels, const int height, const int width, - const int pooled_height, const int pooled_width, T* input_grad) { + const int pooled_height, const int pooled_width, int* roi_batch_id_data, + T* input_grad) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (int i = index; i < nthreads; i += offset) { @@ -102,8 +101,7 @@ __global__ void GPUROIPoolBackward( int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; - const int64_t* offset_input_rois = input_rois + n * kROISize; - int roi_batch_ind = offset_input_rois[0]; + int roi_batch_ind = roi_batch_id_data[n]; int input_offset = (roi_batch_ind * channels + c) * height * width; int output_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_output_grad = 
output_grad + output_offset; @@ -124,7 +122,7 @@ class GPUROIPoolOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input("X"); - auto* rois = ctx.Input("ROIs"); + auto* rois = ctx.Input("ROIs"); auto* out = ctx.Output("Out"); auto* argmax = ctx.Output("Argmax"); @@ -133,23 +131,46 @@ class GPUROIPoolOpKernel : public framework::OpKernel { auto spatial_scale = ctx.Attr("spatial_scale"); auto in_dims = in->dims(); + int batch_size = in_dims[0]; auto in_stride = framework::stride(in_dims); int channels = in_dims[1]; int height = in_dims[2]; int width = in_dims[3]; - size_t rois_num = rois->dims()[0]; + int rois_num = rois->dims()[0]; if (rois_num == 0) return; int output_size = out->numel(); int blocks = NumBlocks(output_size); int threads = kNumCUDAThreads; + framework::Tensor roi_batch_id_list; + roi_batch_id_list.Resize({rois_num}); + int* roi_batch_id_data = + roi_batch_id_list.mutable_data(platform::CPUPlace()); + auto rois_lod = rois->lod().back(); + int rois_batch_size = rois_lod.size() - 1; + PADDLE_ENFORCE_EQ( + rois_batch_size, batch_size, + "The rois_batch_size and imgs batch_size must be the same."); + int rois_num_with_lod = rois_lod[rois_batch_size]; + PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod, + "The rois_num from input and lod must be the same."); + for (int n = 0; n < rois_batch_size; ++n) { + for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { + roi_batch_id_data[i] = n; + } + } + + framework::Tensor roi_batch_id_list_gpu; + framework::TensorCopy(roi_batch_id_list, ctx.GetPlace(), + ctx.device_context(), &roi_batch_id_list_gpu); + GPUROIPoolForward< T><<>>( output_size, in->data(), rois->data(), spatial_scale, channels, height, width, pooled_height, pooled_width, - out->mutable_data(ctx.GetPlace()), + roi_batch_id_list_gpu.data(), out->mutable_data(ctx.GetPlace()), argmax->mutable_data(ctx.GetPlace())); } }; @@ -159,7 +180,7 @@ class GPUROIPoolGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input("X"); - auto* rois = ctx.Input("ROIs"); + auto* rois = ctx.Input("ROIs"); auto* argmax = ctx.Input("Argmax"); auto* out_grad = ctx.Input(framework::GradVarName("Out")); @@ -169,12 +190,27 @@ class GPUROIPoolGradOpKernel : public framework::OpKernel { auto pooled_width = ctx.Attr("pooled_width"); auto spatial_scale = ctx.Attr("spatial_scale"); - size_t rois_num = rois->dims()[0]; + int rois_num = rois->dims()[0]; int channels = in->dims()[1]; int height = in->dims()[2]; int width = in->dims()[3]; if (x_grad) { + framework::Tensor roi_batch_id_list; + roi_batch_id_list.Resize({rois_num}); + int* roi_batch_id_data = + roi_batch_id_list.mutable_data(platform::CPUPlace()); + auto rois_lod = rois->lod().back(); + int rois_batch_size = rois_lod.size() - 1; + for (int n = 0; n < rois_batch_size; ++n) { + for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { + roi_batch_id_data[i] = n; + } + } + framework::Tensor roi_batch_id_list_gpu; + framework::TensorCopy(roi_batch_id_list, ctx.GetPlace(), + ctx.device_context(), &roi_batch_id_list_gpu); + x_grad->mutable_data(ctx.GetPlace()); math::SetConstant set_zero; set_zero(ctx.cuda_device_context(), x_grad, static_cast(0)); @@ -189,6 +225,7 @@ class GPUROIPoolGradOpKernel : public framework::OpKernel { output_grad_size, rois->data(), out_grad->data(), argmax->data(), rois_num, spatial_scale, channels, height, width, pooled_height, pooled_width, + 
roi_batch_id_list_gpu.data(), x_grad->mutable_data(ctx.GetPlace())); } } diff --git a/paddle/fluid/operators/roi_pool_op.h b/paddle/fluid/operators/roi_pool_op.h index 54e07490319cf1da749bd33449a7b51efd6c3d65..c4f739b2c6b2d62ebebcc15fd627ebad040e7b3f 100644 --- a/paddle/fluid/operators/roi_pool_op.h +++ b/paddle/fluid/operators/roi_pool_op.h @@ -21,12 +21,14 @@ limitations under the License. */ namespace paddle { namespace operators { +static constexpr int kROISize = 4; + template class CPUROIPoolOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input("X"); - auto* rois = ctx.Input("ROIs"); + auto* rois = ctx.Input("ROIs"); auto* out = ctx.Output("Out"); auto* argmax = ctx.Output("Argmax"); @@ -47,24 +49,36 @@ class CPUROIPoolOpKernel : public framework::OpKernel { auto out_stride = framework::stride(out->dims()); const T* input_data = in->data(); - const int64_t* rois_data = rois->data(); - T* output_data = out->mutable_data(ctx.GetPlace()); - int64_t* argmax_data = argmax->mutable_data(ctx.GetPlace()); - for (int n = 0; n < rois_num; ++n) { - int roi_batch_id = rois_data[0]; - PADDLE_ENFORCE_GE(roi_batch_id, 0); - PADDLE_ENFORCE_LT(roi_batch_id, batch_size); - rois_data += roi_stride[0]; + framework::Tensor roi_batch_id_list; + roi_batch_id_list.Resize({rois_num}); + int* roi_batch_id_data = + roi_batch_id_list.mutable_data(ctx.GetPlace()); + + auto rois_lod = rois->lod().back(); + int rois_batch_size = rois_lod.size() - 1; + PADDLE_ENFORCE_EQ( + rois_batch_size, batch_size, + "The rois_batch_size and imgs batch_size must be the same."); + int rois_num_with_lod = rois_lod[rois_batch_size]; + PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod, + "The rois_num from input and lod must be the same."); + for (int n = 0; n < rois_batch_size; ++n) { + for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { + roi_batch_id_data[i] = n; + } } - rois_data = rois->data(); + T* output_data = out->mutable_data(ctx.GetPlace()); + int64_t* argmax_data = argmax->mutable_data(ctx.GetPlace()); + + const int64_t* rois_data = rois->data(); for (int n = 0; n < rois_num; ++n) { - int roi_batch_id = rois_data[0]; - int roi_start_w = round(rois_data[1] * spatial_scale); - int roi_start_h = round(rois_data[2] * spatial_scale); - int roi_end_w = round(rois_data[3] * spatial_scale); - int roi_end_h = round(rois_data[4] * spatial_scale); + int roi_batch_id = roi_batch_id_data[n]; + int roi_start_w = round(rois_data[0] * spatial_scale); + int roi_start_h = round(rois_data[1] * spatial_scale); + int roi_end_w = round(rois_data[2] * spatial_scale); + int roi_end_h = round(rois_data[3] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_height = std::max(roi_end_h - roi_start_h + 1, 1); @@ -133,7 +147,7 @@ class CPUROIPoolGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input("X"); - auto* rois = ctx.Input("ROIs"); + auto* rois = ctx.Input("ROIs"); auto* argmax = ctx.Input("Argmax"); auto* out_grad = ctx.Input(framework::GradVarName("Out")); @@ -143,6 +157,20 @@ class CPUROIPoolGradOpKernel : public framework::OpKernel { auto pooled_width = ctx.Attr("pooled_width"); if (in_grad) { + int rois_num = rois->dims()[0]; + framework::Tensor roi_batch_id_list; + roi_batch_id_list.Resize({rois_num}); + int* roi_batch_id_data = + roi_batch_id_list.mutable_data(ctx.GetPlace()); + + auto rois_lod = rois->lod().back(); + int rois_batch_size = rois_lod.size() - 
1; + for (int n = 0; n < rois_batch_size; ++n) { + for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { + roi_batch_id_data[i] = n; + } + } + const int64_t* rois_data = rois->data(); const T* out_grad_data = out_grad->data(); const int64_t* argmax_data = argmax->data(); @@ -156,11 +184,10 @@ class CPUROIPoolGradOpKernel : public framework::OpKernel { auto roi_stride = framework::stride(rois->dims()); auto out_stride = framework::stride(out_grad->dims()); - int rois_num = rois->dims()[0]; int channels = in->dims()[1]; for (int n = 0; n < rois_num; ++n) { - int roi_batch_idx = rois_data[0]; + int roi_batch_idx = roi_batch_id_data[n]; T* batch_grad_data = in_grad_data + roi_batch_idx * in_stride[0]; for (int c = 0; c < channels; ++c) { for (int ph = 0; ph < pooled_height; ++ph) { diff --git a/paddle/fluid/operators/send_op.cc b/paddle/fluid/operators/send_op.cc index 82ff087d0a7a4b482aef842e618f593b17dca171..e4386b640a298cd216bb60104653f20c4a96e7dc 100644 --- a/paddle/fluid/operators/send_op.cc +++ b/paddle/fluid/operators/send_op.cc @@ -41,6 +41,8 @@ class SendOp : public framework::OperatorBase { std::vector endpoints = Attr>("endpoints"); + bool sync_mode = Attr("sync_mode"); + platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto& ctx = *pool.Get(place); @@ -64,11 +66,13 @@ class SendOp : public framework::OperatorBase { } PADDLE_ENFORCE(rpc_client->Wait()); - for (auto& ep : endpoints) { - VLOG(3) << "batch barrier, ep: " << ep; - rpc_client->AsyncSendBatchBarrier(ep); + if (sync_mode) { + for (auto& ep : endpoints) { + VLOG(3) << "batch barrier, ep: " << ep; + rpc_client->AsyncSendBatchBarrier(ep); + } + PADDLE_ENFORCE(rpc_client->Wait()); } - PADDLE_ENFORCE(rpc_client->Wait()); if (outs.size() > 0) { for (size_t i = 0; i < outs.size(); i++) { @@ -112,6 +116,7 @@ This operator will send tensor to recv_op at the parameter server. 
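The roi_pool changes above drop the explicit batch_id column (ROIs go from a (num_rois, 5) Tensor to a (num_rois, 4) LoDTensor) and instead recover each ROI's image index from the LoD offsets. A standalone sketch of that mapping, assuming `lod` plays the role of rois->lod().back() (names are illustrative):

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Given LoD offsets {0, 2, 5, 6}, image 0 owns ROIs [0, 2), image 1 owns
// ROIs [2, 5) and image 2 owns ROI [5, 6); this is the same loop the CPU and
// GPU kernels above use to fill roi_batch_id_data.
std::vector<int> RoiBatchIds(const std::vector<size_t>& lod) {
  std::vector<int> batch_ids(lod.back());
  for (size_t n = 0; n + 1 < lod.size(); ++n) {
    for (size_t i = lod[n]; i < lod[n + 1]; ++i) {
      batch_ids[i] = static_cast<int>(n);
    }
  }
  return batch_ids;
}

int main() {
  std::vector<size_t> lod = {0, 2, 5, 6};  // 3 images, 6 ROIs in total
  for (int id : RoiBatchIds(lod)) std::cout << id << " ";
  std::cout << std::endl;                  // prints 0 0 1 1 1 2
  return 0;
}
```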
"Server endpoints in the order of input " "variables for mapping") .SetDefault({}); + AddAttr("sync_mode", "work in sync_mode or not").SetDefault(true); } }; diff --git a/paddle/fluid/operators/send_recv_op_test.cc b/paddle/fluid/operators/send_recv_op_test.cc index 81350fee38df058d1b63eb5a8cd0b770e0626ae4..d2e1f3cb2ff9c8254cd4815a0f8750966a6e161c 100644 --- a/paddle/fluid/operators/send_recv_op_test.cc +++ b/paddle/fluid/operators/send_recv_op_test.cc @@ -137,6 +137,8 @@ void StartServerNet(bool is_sparse) { attrs.insert({"GradList", std::vector({"x1"})}); attrs.insert({"OptimizeBlock", optimize_block}); attrs.insert({"PrefetchBlock", prefetch_block}); + attrs.insert({"grad_to_block_id", std::vector({""})}); + attrs.insert({"sync_mode", true}); listen_and_serv_op = f::OpRegistry::CreateOp("listen_and_serv", {{"X", {"x1"}}}, {}, attrs); listen_and_serv_op->Run(scope, place); diff --git a/paddle/fluid/operators/sequence_conv_op.h b/paddle/fluid/operators/sequence_conv_op.h index b59504bb9893b720247841bdad5aa577992b7fb6..3916cdbb6a69c5a18f7a21ec60bad2732b4c3e58 100644 --- a/paddle/fluid/operators/sequence_conv_op.h +++ b/paddle/fluid/operators/sequence_conv_op.h @@ -33,7 +33,6 @@ class SequenceConvKernel : public framework::OpKernel { auto filter = *context.Input("Filter"); out->mutable_data(context.GetPlace()); - context.ShareLoD("X", "Out"); int context_start = context.Attr("contextStart"); int context_length = context.Attr("contextLength"); diff --git a/paddle/fluid/operators/softmax_mkldnn_op.cc b/paddle/fluid/operators/softmax_mkldnn_op.cc index d00bd1447e6114b6000b65799abb566a2a510127..71b541d98f6e0d3e12601c9988ca6ffb8bb7554d 100644 --- a/paddle/fluid/operators/softmax_mkldnn_op.cc +++ b/paddle/fluid/operators/softmax_mkldnn_op.cc @@ -77,7 +77,7 @@ class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel { const bool is_test = ctx.Attr("is_test"); if (!is_test) { T threshold = exp(-64); - for (size_t i = 0; i < dst_tz[0] * dst_tz[1]; ++i) { + for (int i = 0; i < dst_tz[0] * dst_tz[1]; ++i) { output_data[i] = output_data[i] < threshold ? threshold : output_data[i]; } diff --git a/paddle/fluid/operators/while_op.cc b/paddle/fluid/operators/while_op.cc index 8b62b242cf8745378eb216db10605388b294ca75..710cc9fc2e716da2e4fd067562a34d312e48b1a1 100644 --- a/paddle/fluid/operators/while_op.cc +++ b/paddle/fluid/operators/while_op.cc @@ -288,7 +288,7 @@ class WhileGradOpDescMaker : public framework::SingleGradOpDescMaker { while_grad->SetInput(framework::GradVarName(kOutputs), output_grads_list); while_grad->SetAttrMap(this->Attrs()); - while_grad->SetBlockAttr(kStepBlock, *grad_block); + while_grad->SetBlockAttr(kStepBlock, grad_block); // record the original output gradient names, since the gradient name of // while operator could be renamed. 
while_grad->SetAttr("original_output_grad", output_grads_list); diff --git a/paddle/fluid/platform/CMakeLists.txt b/paddle/fluid/platform/CMakeLists.txt index 917bdc64abf608b8ade70c47f76a8adffb32046a..598fd4d419078a973647f2f8f20e8a12c8115a8b 100644 --- a/paddle/fluid/platform/CMakeLists.txt +++ b/paddle/fluid/platform/CMakeLists.txt @@ -12,7 +12,7 @@ add_custom_command(TARGET profiler_py_proto POST_BUILD WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) if(WITH_GPU) - cc_library(enforce SRCS enforce.cc DEPS) + nv_library(enforce SRCS enforce.cc) else() cc_library(enforce SRCS enforce.cc) endif() diff --git a/paddle/fluid/platform/dynload/cublas.h b/paddle/fluid/platform/dynload/cublas.h index 1ab55d6b9bf8fdbd14c9c2bd978e3e99dba3e73e..81acaff87d3c2025cf0d6185a1590b018bfbd83c 100644 --- a/paddle/fluid/platform/dynload/cublas.h +++ b/paddle/fluid/platform/dynload/cublas.h @@ -14,10 +14,12 @@ #pragma once +#include #include #include #include #include // NOLINT +#include #include "paddle/fluid/platform/dynload/dynamic_loader.h" namespace paddle { @@ -37,14 +39,14 @@ extern void *cublas_dso_handle; #ifdef PADDLE_USE_DSO #define DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP(__name) \ struct DynLoad__##__name { \ + using FUNC_TYPE = decltype(&::__name); \ template \ inline cublasStatus_t operator()(Args... args) { \ - typedef cublasStatus_t (*cublasFunc)(Args...); \ std::call_once(cublas_dso_flag, []() { \ cublas_dso_handle = paddle::platform::dynload::GetCublasDsoHandle(); \ }); \ void *p_##__name = dlsym(cublas_dso_handle, #__name); \ - return reinterpret_cast(p_##__name)(args...); \ + return reinterpret_cast(p_##__name)(args...); \ } \ }; \ extern DynLoad__##__name __name @@ -71,8 +73,8 @@ extern void *cublas_dso_handle; __macro(cublasDgemm_v2); \ __macro(cublasHgemm); \ __macro(cublasSgemmEx); \ - __macro(cublasSgeam_v2); \ - __macro(cublasDgeam_v2); \ + __macro(cublasSgeam); \ + __macro(cublasDgeam); \ __macro(cublasCreate_v2); \ __macro(cublasDestroy_v2); \ __macro(cublasSetStream_v2); \ diff --git a/paddle/fluid/platform/dynload/cudnn.h b/paddle/fluid/platform/dynload/cudnn.h index 24475b62ca2825c45ff7edb39328dece3b822b25..34d83e395694f55eafca74d63ebf363169ab30e8 100644 --- a/paddle/fluid/platform/dynload/cudnn.h +++ b/paddle/fluid/platform/dynload/cudnn.h @@ -34,7 +34,7 @@ extern void EnforceCUDNNLoaded(const char* fn_name); struct DynLoad__##__name { \ template \ auto operator()(Args... args) -> decltype(__name(args...)) { \ - using cudnn_func = decltype(__name(args...)) (*)(Args...); \ + using cudnn_func = decltype(&::__name); \ std::call_once(cudnn_dso_flag, []() { \ cudnn_dso_handle = paddle::platform::dynload::GetCUDNNDsoHandle(); \ }); \ diff --git a/paddle/fluid/platform/dynload/cupti.h b/paddle/fluid/platform/dynload/cupti.h index d0d676b9d8ac462900b48246bec43166d04ef97b..e64de7c20fc9d145e51cfc4528e321b3c4ec86c8 100644 --- a/paddle/fluid/platform/dynload/cupti.h +++ b/paddle/fluid/platform/dynload/cupti.h @@ -41,7 +41,7 @@ extern void *cupti_dso_handle; struct DynLoad__##__name { \ template \ inline CUptiResult CUPTIAPI operator()(Args... 
args) { \ - typedef CUptiResult CUPTIAPI (*cuptiFunc)(Args...); \ + using cuptiFunc = decltype(&::__name); \ std::call_once(cupti_dso_flag, []() { \ cupti_dso_handle = paddle::platform::dynload::GetCUPTIDsoHandle(); \ }); \ diff --git a/paddle/fluid/platform/dynload/curand.h b/paddle/fluid/platform/dynload/curand.h index 4697fb6cd96770127206bdabeea77e43eb09d1f5..46ad4379d5f9572d415ef1d747077217ae29391e 100644 --- a/paddle/fluid/platform/dynload/curand.h +++ b/paddle/fluid/platform/dynload/curand.h @@ -30,7 +30,7 @@ extern void *curand_dso_handle; struct DynLoad__##__name { \ template \ curandStatus_t operator()(Args... args) { \ - typedef curandStatus_t (*curandFunc)(Args...); \ + using curandFunc = decltype(&::__name); \ std::call_once(curand_dso_flag, []() { \ curand_dso_handle = paddle::platform::dynload::GetCurandDsoHandle(); \ }); \ diff --git a/paddle/fluid/platform/dynload/nccl.h b/paddle/fluid/platform/dynload/nccl.h index c5a10a78a4f432b431680c089f255fea777277cb..37902ae20c5d9d64486232bbd468375c4a50a615 100644 --- a/paddle/fluid/platform/dynload/nccl.h +++ b/paddle/fluid/platform/dynload/nccl.h @@ -33,7 +33,7 @@ extern void* nccl_dso_handle; struct DynLoad__##__name { \ template \ auto operator()(Args... args) -> decltype(__name(args...)) { \ - using nccl_func = decltype(__name(args...)) (*)(Args...); \ + using nccl_func = decltype(&::__name); \ std::call_once(nccl_dso_flag, []() { \ nccl_dso_handle = paddle::platform::dynload::GetNCCLDsoHandle(); \ }); \ diff --git a/paddle/fluid/platform/dynload/warpctc.h b/paddle/fluid/platform/dynload/warpctc.h index 7fa468370463a51c486b80317f401612930bc72e..7c70649d21c547beb824576d4a8ecf6219a9bddf 100644 --- a/paddle/fluid/platform/dynload/warpctc.h +++ b/paddle/fluid/platform/dynload/warpctc.h @@ -36,7 +36,7 @@ extern void* warpctc_dso_handle; struct DynLoad__##__name { \ template \ auto operator()(Args... args) -> decltype(__name(args...)) { \ - using warpctcFunc = decltype(__name(args...)) (*)(Args...); \ + using warpctcFunc = decltype(&::__name); \ std::call_once(warpctc_dso_flag, []() { \ warpctc_dso_handle = paddle::platform::dynload::GetWarpCTCDsoHandle(); \ }); \ diff --git a/paddle/scripts/docker/README.md b/paddle/scripts/README.md similarity index 66% rename from paddle/scripts/docker/README.md rename to paddle/scripts/README.md index 78c0cc378231f763597556cc5450f6f03ab2b291..9e8b135c1bc7fc05d88fe6f3bed17dd3b48e9615 100644 --- a/paddle/scripts/docker/README.md +++ b/paddle/scripts/README.md @@ -13,40 +13,49 @@ We want to make the building procedures: 1. Build docker images with PaddlePaddle pre-installed, so that we can run PaddlePaddle applications directly in docker or on Kubernetes clusters. -To achieve this, we created a repo: https://github.com/PaddlePaddle/buildtools -which gives several docker images that are `manylinux1` sufficient. Then we -can build PaddlePaddle using these images to generate corresponding `whl` -binaries. +To achieve this, we maintain a dockerhub repo:https://hub.docker.com/r/paddlepaddle/paddle +which provides pre-built environment images to build PaddlePaddle and generate corresponding `whl` +binaries.(**We strongly recommend building paddlepaddle in our pre-specified Docker environment.**) -## Run The Build +## Development Workflow + +Here we describe how the workflow goes on. We start from considering our daily development environment. 
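The dynload wrappers above swap the hand-written function-pointer typedefs for decltype(&::__name), so the cast applied to the dlsym result always matches the exact prototype declared in the vendor header. The sketch below shows the same pattern with a locally defined stand-in symbol so it runs without CUDA; only the decltype-based cast is the point, and the dlopen/dlsym plumbing is faked:

```cpp
#include <cstdio>

// Stand-in for a symbol declared in a vendor header (e.g. cublasSgemm). In
// the real wrappers only the declaration comes from the header and the
// definition lives in a shared library opened with dlopen().
extern "C" int fake_vendor_add(int a, int b) { return a + b; }

// The macro in the diff expands to roughly this: FUNC_TYPE is the exact
// function-pointer type of ::fake_vendor_add, so the reinterpret_cast of the
// looked-up address cannot silently drift out of sync with the prototype.
struct DynLoad_fake_vendor_add {
  using FUNC_TYPE = decltype(&::fake_vendor_add);  // int (*)(int, int)
  template <typename... Args>
  int operator()(Args... args) {
    // dlsym(handle, "fake_vendor_add") would produce this void* in the real
    // code; here we already know the address, so the lookup is simulated.
    void* p = reinterpret_cast<void*>(&fake_vendor_add);
    return reinterpret_cast<FUNC_TYPE>(p)(args...);
  }
};

int main() {
  DynLoad_fake_vendor_add fake_vendor_add_wrapper;
  std::printf("%d\n", fake_vendor_add_wrapper(2, 3));  // prints 5
  return 0;
}
```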
+ +Developers work on a computer, which is usually a laptop or desktop: + + + +or, they might rely on a more sophisticated box (like with GPUs): + + + +A principle here is that source code lies on the development computer (host) so that editors like Eclipse can parse the source code to support auto-completion. + +## Build With Docker ### Build Environments -The pre-built build environment images are: +The lastest pre-built build environment images are: | Image | Tag | | ----- | --- | -| paddlepaddle/paddle_manylinux_devel | cuda7.5_cudnn5 | -| paddlepaddle/paddle_manylinux_devel | cuda8.0_cudnn5 | -| paddlepaddle/paddle_manylinux_devel | cuda7.5_cudnn7 | -| paddlepaddle/paddle_manylinux_devel | cuda9.0_cudnn7 | +| paddlepaddle/paddle | latest-dev | +| paddlepaddle/paddle | latest-dev-android | ### Start Build -Choose one docker image that suit your environment and run the following -command to start a build: - ```bash git clone https://github.com/PaddlePaddle/Paddle.git cd Paddle -docker run --rm -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "WITH_TESTING=OFF" -e "RUN_TEST=OFF" -e "PYTHON_ABI=cp27-cp27mu" paddlepaddle/paddle_manylinux_devel /paddle/paddle/scripts/docker/build.sh +./paddle/scripts/paddle_docker_build.sh build ``` After the build finishes, you can get output `whl` package under `build/python/dist`. -This command mounts the source directory on the host into `/paddle` in the container, then run the build script `/paddle/paddle/scripts/docker/build.sh` -in the container. When it writes to `/paddle/build` in the container, it writes to `$PWD/build` on the host indeed. +This command will download the most recent dev image from docker hub, start a container in the backend and then run the build script `/paddle/paddle/scripts/paddle_build.sh build` in the container. +The container mounts the source directory on the host into `/paddle`. +When it writes to `/paddle/build` in the container, it writes to `$PWD/build` on the host indeed. ### Build Options @@ -68,7 +77,6 @@ Users can specify the following Docker build arguments with either "ON" or "OFF" | `WITH_DOC` | OFF | Build docs after build binaries. | | `WOBOQ` | OFF | Generate WOBOQ code viewer under `build/woboq_out` | - ## Docker Images You can get the latest PaddlePaddle docker images by @@ -144,59 +152,37 @@ docker push kubectl ... ``` -## Docker Images for Developers - -We have a special docker image for developers: -`paddlepaddle/paddle:-dev`. This image is also generated from -https://github.com/PaddlePaddle/buildtools - -This a development image contains only the -development tools and standardizes the building procedure. Users include: - -- developers -- no longer need to install development tools on the host, and can build their current work on the host (development computer). -- release engineers -- use this to build the official release from certain branch/tag on Github.com. -- document writers / Website developers -- Our documents are in the source repo in the form of .md/.rst files and comments in source code. We need tools to extract the information, typeset, and generate Web pages. - -Of course, developers can install building tools on their development computers. But different versions of PaddlePaddle might require different set or version of building tools. Also, it makes collaborative debugging easier if all developers use a unified development environment. 
- -The development image contains the following tools: - - - gcc/clang - - nvcc - - Python - - sphinx - - woboq - - sshd - -Many developers work on a remote computer with GPU; they could ssh into the computer and `docker exec` into the development container. However, running `sshd` in the container allows developers to ssh into the container directly. - - -### Development Workflow - -Here we describe how the workflow goes on. We start from considering our daily development environment. +### Reading source code with woboq codebrowser -Developers work on a computer, which is usually a laptop or desktop: +For developers who are interested in the C++ source code, you can build C++ source code into HTML pages using [Woboq codebrowser](https://github.com/woboq/woboq_codebrowser). - +- The following command builds PaddlePaddle, generates HTML pages from C++ source code, and writes HTML pages into `$HOME/woboq_out` on the host: -or, they might rely on a more sophisticated box (like with GPUs): +```bash +./paddle/scripts/paddle_docker_build.sh html +``` - +- You can open the generated HTML files in your Web browser. Or, if you want to run a Nginx container to serve them for a wider audience, you can run: -A principle here is that source code lies on the development computer (host) so that editors like Eclipse can parse the source code to support auto-completion. +``` +docker run -v $HOME/woboq_out:/usr/share/nginx/html -d -p 8080:80 nginx +``` -### Reading source code with woboq codebrowser +## More Options -For developers who are interested in the C++ source code, please use -e "WOBOQ=ON" to enable the building of C++ source code into HTML pages using [Woboq codebrowser](https://github.com/woboq/woboq_codebrowser). +### Build Without Docker -- The following command builds PaddlePaddle, generates HTML pages from C++ source code, and writes HTML pages into `$HOME/woboq_out` on the host: +Follow the *Dockerfile* in the paddlepaddle repo to set up your local dev environment and run: ```bash -docker run -v $PWD:/paddle -v $HOME/woboq_out:/woboq_out -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "WITH_TESTING=ON" -e "WOBOQ=ON" paddlepaddle/paddle:latest-dev +./paddle/scripts/paddle_build.sh build ``` -- You can open the generated HTML files in your Web browser. 
Or, if you want to run a Nginx container to serve them for a wider audience, you can run: +### Additional Tasks -``` -docker run -v $HOME/woboq_out:/usr/share/nginx/html -d -p 8080:80 nginx +You can get the help menu for the build scripts by running with no options: + +```bash +./paddle/scripts/paddle_build.sh +or ./paddle/scripts/paddle_docker_build.sh ``` diff --git a/paddle/scripts/docker/doc/paddle-development-environment-gpu.graffle b/paddle/scripts/doc/paddle-development-environment-gpu.graffle similarity index 100% rename from paddle/scripts/docker/doc/paddle-development-environment-gpu.graffle rename to paddle/scripts/doc/paddle-development-environment-gpu.graffle diff --git a/paddle/scripts/docker/doc/paddle-development-environment-gpu.png b/paddle/scripts/doc/paddle-development-environment-gpu.png similarity index 100% rename from paddle/scripts/docker/doc/paddle-development-environment-gpu.png rename to paddle/scripts/doc/paddle-development-environment-gpu.png diff --git a/paddle/scripts/docker/doc/paddle-development-environment.graffle b/paddle/scripts/doc/paddle-development-environment.graffle similarity index 100% rename from paddle/scripts/docker/doc/paddle-development-environment.graffle rename to paddle/scripts/doc/paddle-development-environment.graffle diff --git a/paddle/scripts/docker/doc/paddle-development-environment.png b/paddle/scripts/doc/paddle-development-environment.png similarity index 100% rename from paddle/scripts/docker/doc/paddle-development-environment.png rename to paddle/scripts/doc/paddle-development-environment.png diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh new file mode 100755 index 0000000000000000000000000000000000000000..654c8272a18e5adb01e75be94985a80502ba2c8d --- /dev/null +++ b/paddle/scripts/paddle_build.sh @@ -0,0 +1,508 @@ +#!/usr/bin/env bash + +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +#================================================= +# Utils +#================================================= + +function print_usage() { + RED='\033[0;31m' + BLUE='\033[0;34m' + BOLD='\033[1m' + NONE='\033[0m' + + echo -e "\n${RED}Usage${NONE}: + ${BOLD}$0${NONE} [OPTION]" + + echo -e "\n${RED}Options${NONE}: + ${BLUE}build${NONE}: run build for x86 platform + ${BLUE}build_android${NONE}: run build for android platform + ${BLUE}build_ios${NONE}: run build for ios platform + ${BLUE}test${NONE}: run all unit tests + ${BLUE}bind_test${NONE}: parallel tests bind to different GPU + ${BLUE}doc${NONE}: generate paddle documents + ${BLUE}html${NONE}: convert C++ source code into HTML + ${BLUE}dockerfile${NONE}: generate paddle release dockerfile + ${BLUE}capi${NONE}: generate paddle CAPI package + ${BLUE}fluid_inference_lib${NONE}: deploy fluid inference library + ${BLUE}check_style${NONE}: run code style check + " +} + +function init() { + PADDLE_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}")/../../" && pwd )" +} + +function cmake_gen() { + mkdir -p ${PADDLE_ROOT}/build + cd ${PADDLE_ROOT}/build + + # build script will not fail if *.deb does not exist + rm *.deb 2>/dev/null || true + # delete previous built whl packages + rm -rf python/dist 2>/dev/null || true + + # Support build for all python versions, currently + # including cp27-cp27m and cp27-cp27mu. + PYTHON_FLAGS="" + if [ "$1" != "" ]; then + echo "using python abi: $1" + if [ "$1" == "cp27-cp27m" ]; then + export LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs2/lib:${LD_LIBRARY_PATH#/opt/_internal/cpython-2.7.11-ucs4/lib:} + export PATH=/opt/python/cp27-cp27m/bin/:${PATH} + PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/python/cp27-cp27m/bin/python + -DPYTHON_INCLUDE_DIR:PATH=/opt/python/cp27-cp27m/include/python2.7 + -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-2.7.11-ucs2/lib/libpython2.7.so" + elif [ "$1" == "cp27-cp27mu" ]; then + export LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs4/lib:${LD_LIBRARY_PATH#/opt/_internal/cpython-2.7.11-ucs2/lib:} + export PATH=/opt/python/cp27-cp27mu/bin/:${PATH} + PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/python/cp27-cp27mu/bin/python + -DPYTHON_INCLUDE_DIR:PATH=/opt/python/cp27-cp27mu/include/python2.7 + -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-2.7.11-ucs4/lib/libpython2.7.so" + fi + fi + + cat <&2 + echo "Please use pre-commit to check what is wrong." 1>&2 + exit 1 +} + +function check_style() { + trap 'abort' 0 + set -e + + # install glide + curl https://glide.sh/get | bash + eval "$(GIMME_GO_VERSION=1.8.3 gimme)" + + # set up go environment for running gometalinter + mkdir -p $GOPATH/src/github.com/PaddlePaddle/ + ln -sf ${PADDLE_ROOT} $GOPATH/src/github.com/PaddlePaddle/Paddle + cd $GOPATH/src/github.com/PaddlePaddle/Paddle/go; glide install; cd - + + go get github.com/alecthomas/gometalinter + gometalinter --install + + cd ${PADDLE_ROOT} + export PATH=/usr/bin:$PATH + pre-commit install + clang-format --version + + if ! pre-commit run -a ; then + git diff + exit 1 + fi + + trap : 0 +} + +#================================================= +# Build +#================================================= + +function build() { + mkdir -p ${PADDLE_ROOT}/build + cd ${PADDLE_ROOT}/build + cat <= 21." 
+ ANDROID_API=21 + fi + else # armeabi, armeabi-v7a + ANDROID_ARCH=arm + fi + + ANDROID_STANDALONE_TOOLCHAIN=$ANDROID_TOOLCHAINS_DIR/$ANDROID_ARCH-android-$ANDROID_API + + cat < ${PADDLE_ROOT}/build/Dockerfile < + ENV HOME /root +EOF + + if [[ ${WITH_GPU} == "ON" ]]; then + NCCL_DEPS="apt-get install -y libnccl2=2.1.2-1+cuda8.0 libnccl-dev=2.1.2-1+cuda8.0 &&" + else + NCCL_DEPS="" + fi + + if [[ ${WITH_FLUID_ONLY:-OFF} == "OFF" ]]; then + PADDLE_VERSION="paddle version" + CMD='"paddle", "version"' + else + PADDLE_VERSION="true" + CMD='"true"' + fi + + cat >> /paddle/build/Dockerfile < /dev/null + return $? +} + +function start_build_docker() { + docker pull $IMG + + if container_running "${CONTAINER_ID}"; then + docker stop "${CONTAINER_ID}" 1>/dev/null + docker rm -f "${CONTAINER_ID}" 1>/dev/null + fi + + DOCKER_ENV=$(cat < 1: + if self.sync_mode and self.trainer_num > 1: for trainer_id in xrange(self.trainer_num): var = pserver_program.global_block().create_var( name="%s.trainer_%d" % (orig_var_name, trainer_id), @@ -402,13 +410,13 @@ class DistributeTranspiler: for op in self.optimize_ops: if op.type == "scale": for in_name in op.input_arg_names: - if in_name.startswith("beta1_pow_acc") or\ - in_name.startswith("beta2_pow_acc"): + if in_name.startswith("beta1_pow_acc") or \ + in_name.startswith("beta2_pow_acc"): global_ops.append(op) - def __append_optimize_op__(op, block): + def __append_optimize_op__(op, block, grad_to_block_id): if self._is_opt_op(op): - self._append_pserver_ops(block, op, endpoint, + self._append_pserver_ops(block, op, endpoint, grad_to_block_id, default_main_program()) else: self._append_pserver_non_opt_ops(block, op) @@ -422,21 +430,22 @@ class DistributeTranspiler: self._append_pserver_non_opt_ops(lr_decay_block, op) # append op to the current block + grad_to_block_id = [] pre_block_idx = pserver_program.num_blocks - 1 for idx, opt_op in enumerate(opt_op_on_pserver): per_opt_block = pserver_program.create_block(pre_block_idx) for _, op in enumerate(self.optimize_ops): # optimizer is connected to itself if ufind.is_connected(op, opt_op) and op not in global_ops: - __append_optimize_op__(op, per_opt_block) + __append_optimize_op__(op, per_opt_block, grad_to_block_id) # append global ops - opt_state_block = None if global_ops: opt_state_block = pserver_program.create_block( pserver_program.num_blocks - 1) for glb_op in global_ops: - __append_optimize_op__(glb_op, opt_state_block) + __append_optimize_op__(glb_op, opt_state_block, + grad_to_block_id) # NOT USED: single block version: # @@ -472,7 +481,9 @@ class DistributeTranspiler: "OptimizeBlock": pserver_program.block(1), "endpoint": endpoint, "Fanin": self.trainer_num, - "PrefetchBlock": prefetch_block + "PrefetchBlock": prefetch_block, + "sync_mode": self.sync_mode, + "grad_to_block_id": grad_to_block_id }) pserver_program.sync_with_cpp() @@ -683,17 +694,6 @@ class DistributeTranspiler: self.table_name)], persistable=False) - # create grad vars in pserver program - table_grad_var = self.table_param_grad[1] - table_grad_list = [ - pserver_program.global_block().create_var( - name="%s.trainer_%d.pserver_%d" % - (table_grad_var.name, index, pserver_index), - type=table_grad_var.type, - shape=table_grad_var.shape, - dtype=table_grad_var.dtype) for index in range(self.trainer_num) - ] - # create table optimize block in pserver program table_opt_op = [ op for op in self.optimize_ops @@ -703,11 +703,24 @@ class DistributeTranspiler: # only support sgd now assert table_opt_op.type == "sgd" - # append sum op for 
table_grad_list - table_opt_block.append_op( - type="sum", - inputs={"X": table_grad_list}, - outputs={"Out": [grad_var]}) + if self.sync_mode: + # create grad vars in pserver program + table_grad_var = self.table_param_grad[1] + table_grad_list = [ + pserver_program.global_block().create_var( + name="%s.trainer_%d.pserver_%d" % + (table_grad_var.name, index, pserver_index), + type=table_grad_var.type, + shape=table_grad_var.shape, + dtype=table_grad_var.dtype) + for index in range(self.trainer_num) + ] + + # append sum op for table_grad_list + table_opt_block.append_op( + type="sum", + inputs={"X": table_grad_list}, + outputs={"Out": [grad_var]}) lr_var = pserver_program.global_block().vars[table_opt_op.input( "LearningRate")[0]] @@ -746,7 +759,7 @@ class DistributeTranspiler: for varname, splited in block_map.iteritems(): orig_var = program.global_block().var(varname) if len(splited) == 1: - if add_trainer_suffix: + if self.sync_mode and add_trainer_suffix: new_var_name = "%s.trainer_%d" % \ (orig_var.name, self.trainer_id) program.global_block().rename_var(varname, new_var_name) @@ -770,7 +783,7 @@ class DistributeTranspiler: if len(orig_shape) >= 2: splited_shape.extend(orig_shape[1:]) new_var_name = "" - if add_trainer_suffix: + if self.sync_mode and add_trainer_suffix: new_var_name = "%s.block%d.trainer_%d" % \ (varname, i, self.trainer_id) else: @@ -879,7 +892,7 @@ class DistributeTranspiler: return orig_var_name def _append_pserver_ops(self, optimize_block, opt_op, endpoint, - origin_program): + grad_to_block_id, origin_program): program = optimize_block.program pserver_block = program.global_block() new_inputs = dict() @@ -900,7 +913,9 @@ class DistributeTranspiler: return merged_var = \ pserver_block.vars[self._orig_varname(grad_block.name)] - if self.trainer_num > 1: + grad_to_block_id.append(merged_var.name + ":" + str( + optimize_block.idx)) + if self.sync_mode and self.trainer_num > 1: vars2merge = [] for i in xrange(self.trainer_num): per_trainer_name = "%s.trainer_%d" % \ @@ -918,6 +933,7 @@ class DistributeTranspiler: inputs={"X": merged_var}, outputs={"Out": merged_var}, attrs={"scale": 1.0 / float(self.trainer_num)}) + new_inputs[key] = merged_var elif key == "Param": # param is already created on global program diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 340882ea9e7b0e2a0c52749c771308c6b860ed07..53486ecffc8dbdcbe93ae12c4f6ebb53c79bce47 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -1070,16 +1070,25 @@ class Program(object): for t in targets: if not isinstance(t, Operator): if isinstance(t, Variable): - if t.op is None: - global_block = self.global_block() - for op in global_block.ops: - if t.name in op.output_arg_names: - t.op = op - break + # After transpiler processing, the op that output this + # variable maybe has been changed, so t.op is not reliable + # and we need to find the current op that generate this + # variable here. 
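A brief aside on the `grad_to_block_id` list that the distribute transpiler change above now builds and passes to the parameter server as an op attribute: each entry records which optimize sub-block should run when a particular gradient arrives, which is what lets the non-sync_mode server apply an update per incoming gradient instead of first merging the copies from every trainer. The sketch below only illustrates the `"<grad var name>:<block idx>"` string format; the variable names are invented and the real dispatch happens inside the listen_and_serv operator, not in Python.

```python
# Hypothetical entries in the format produced by _append_pserver_ops above:
# "<merged grad var name>:<optimize block index>".
grad_to_block_id = ["fc_0.w_0@GRAD:2", "fc_0.b_0@GRAD:3"]

# Schematic consumer: map each gradient name to the sub-block that updates it.
dispatch = {}
for entry in grad_to_block_id:
    var_name, block_idx = entry.rsplit(":", 1)
    dispatch[var_name] = int(block_idx)

assert dispatch["fc_0.w_0@GRAD"] == 2
assert dispatch["fc_0.b_0@GRAD"] == 3
```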
+ t.op = None + global_block = self.global_block() + for idx, op in enumerate(global_block.ops): + if t.name in op.output_arg_names: + t.op = op + break + t = t.op + if t is None: + raise ValueError( + "The target variable must have an " + "associated operator that generates it.") else: - raise ValueError(("All targets of prune() can only be " - "Variable or Operator.")) + raise ValueError("All targets of prune() can only be " + "Variable or Operator.") targets_idx.append([t.block.idx, t.idx]) res = Program() diff --git a/python/paddle/fluid/inference_transpiler.py b/python/paddle/fluid/inference_transpiler.py index 39b01610f96018e1775405a30147e77006cecc16..f4ad717b9e72e281940fab0cfd06296306b587fc 100644 --- a/python/paddle/fluid/inference_transpiler.py +++ b/python/paddle/fluid/inference_transpiler.py @@ -121,7 +121,60 @@ class InferenceTranspiler: # And a better solution will be considered later. program = program.clone() + def float16_transpile(self, program, place, scope=None): + ''' + Transpile the program desc and cast the weights to float16 data type to + enable float16 inference. + + Since the operator in a program desc will automatically choose the + right compute kernel to run based on the data type of the input tensor. + We actually don't need to change the program desc to run in float16 mode. + + However, in this way, users who are used to feeding and fetching tensors + of float32 data type when running typical inference may find it confusing + and difficult to run inference in float16 mode as they need to convert + input data to float16 dtype and then convert the results back to float32 + dtype to match the rest of code. + + So this function appends cast ops to the program desc where necessary so + that users are able to run inference in float16 mode while providing input + tensor (feed_holder) of float data type and obtaining output tensor + (fetch_holder) of float data type. + + Moreover, it is desired that when we have the scope and program desc to run + inference in float32 mode, we can use a single API to do the necessary + modification and then user can run float16 inference on the fly. To make + this happen, this function also create new parameters in the scope to have the + converted float16 weights and change the operators in program desc to use + these new parameters. + + :param program: program to transpile + :type program: Program + :param place: inference place + :type place: Place + :param scope: inference scope + :type scope: Scope + ''' + if scope is None: + scope = global_scope() + + self.scope = scope + self.place = place + self.block = program.block(0) + self.input_map = {} # store the input names should be adjusted + + self._modify_feed_fetch() + self._convert_param_to_float16() + self._adjust_input(skip=True) + self._remove_unused_var() + + # TODO(luotao): use clone() method to flush the program.desc in force, + # since some large program.desc will not be flushed immediately. + # And a better solution will be considered later. + program = program.clone() + # ====================== private transpiler functions ===================== + def _insert_bias_op(self, index, current_op, bn_op): ''' Construct elementwise_add operator for adding bias @@ -216,9 +269,27 @@ class InferenceTranspiler: # collect the renamed input self.input_map[bn_op.output("Y")[0]] = bias_op.output("Out")[0] - def _adjust_input(self): + def _adjust_input(self, skip=False): + ''' + Change the input variable name in operators. 
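As a companion to the `float16_transpile` docstring above, a minimal usage sketch follows. The model directory and input shape are hypothetical, and it assumes the transpiler is exposed as `fluid.InferenceTranspiler`, which is how the updated image-classification test later in this diff drives `t.float16_transpile`.

```python
import numpy as np
import paddle.fluid as fluid

place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)

# "fp32_model_dir" is a hypothetical directory written earlier by
# fluid.io.save_inference_model with float32 weights.
inference_program, feed_target_names, fetch_targets = \
    fluid.io.load_inference_model("fp32_model_dir", exe)

# Clone so the float32 program stays usable, then cast the weights to float16
# and let the transpiler insert cast ops around the feed and fetch boundaries.
t = fluid.InferenceTranspiler()
fp16_program = inference_program.clone()
t.float16_transpile(fp16_program, place)

# Feed and fetch remain float32; the inserted cast ops convert to and from
# float16 internally.
img = np.random.rand(1, 3, 32, 32).astype("float32")
results = exe.run(fp16_program,
                  feed={feed_target_names[0]: img},
                  fetch_list=fetch_targets)
```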
+ + When we are in the process of modifying a program desc, we usually + replace some variables with some other variables, where we create + a dictionary input_map to record the one-to-one correspondence + between each old variable and the new one. + + After that, this function will search all the operators that use the + old variables and change the info in op to use the new variables. There + maybe some exceptions to this rule when we are using the float16 transpiler + and insert cast ops to cast float32 variable to float16 one. After we + insert the cast op to cast var_1 to var_1_fp16, we don't want to change + the input of cast op to var_1_fp16 after using this function. + ''' + skip_ops = {"cast"} for i in range(len(self.block.ops)): current_op = self.block.ops[i] + if skip and current_op.type in skip_ops: + continue for input_arg in current_op.input_arg_names: if input_arg in self.input_map: current_op.rename_input(input_arg, @@ -238,3 +309,138 @@ class InferenceTranspiler: for var in self.block.vars.keys(): if var not in args: self.block.remove_var(var) + + def _modify_feed_fetch(self): + ''' + Modify feed fetch op/vars for float16 inference. + + For each feed op: + feed_op->feed_target_var + + Change it to: + feed_op->feed_target_var->cast_op(from other dtype to float16)->tmp_var + + For each fetch op: + fetch_target_var->fetch_op + + Change it to: + tmp_var->cast_op(from float16 to other dtype)->fetch_target_var->fetch_op + + :return: None + ''' + + def find_op(var): + # It is possible that var.op is not up to date after some + # modifications to program desc. Here we force to make it up to date. + var.op = None + for op in self.block.ops: + if var.name in op.output_arg_names: + var.op = op + break + + if var.op is None: + raise ValueError("The target variable must have an " + "associated operator that generates it.") + + i = 0 + while i < len(self.block.ops): + cur_op = self.block.ops[i] + if cur_op.type == "feed": + var_name = cur_op.output("Out")[0] + tmp_var_name = var_name + ".fp16" + var = self.block.vars[var_name] + tmp_var = self.block.create_var( + name=tmp_var_name.encode('ascii'), + type=var.type, + dtype=core.VarDesc.VarType.FP16, + shape=var.shape, + persistable=var.persistable) + self.block.insert_op( + i + 1, + type="cast", + inputs={"X": var}, + outputs={"Out": tmp_var}, + attrs={ + 'in_dtype': int(var.dtype), + 'out_dtype': int(tmp_var.dtype) + }) + self.input_map[var_name] = tmp_var_name + i = i + 1 + elif cur_op.type == "fetch": + var_name = cur_op.input("X")[0] + tmp_var_name = var_name + ".fp16" + var = self.block.vars[var_name] + tmp_var = self.block.create_var( + name=tmp_var_name.encode('ascii'), + type=var.type, + dtype=core.VarDesc.VarType.FP16, + shape=var.shape, + persistable=var.persistable) + find_op(var) + var.op.rename_output(var_name, tmp_var_name) + self.block.insert_op( + i, + type="cast", + inputs={"X": tmp_var}, + outputs={"Out": var}, + attrs={ + 'in_dtype': int(tmp_var.dtype), + 'out_dtype': int(var.dtype) + }) + i = i + 1 + i = i + 1 + + def _convert_param_to_float16(self): + def _get_no_fp16_conversion_var_names(): + ''' + Get the set of input variable names that shouldn't be converted to float16. + + When we want to run inference in float16 mode, most parameters need to be + firstly converted to float16. However, there are some parameters that + shouldn't be converted to float16 because the corresponding operator + requires float32 parameters even in float16 mode (when the input data is + of float16 data type). 
Currently, the only operator that has this exclusion + is the batch norm op. + + :return: set of input variable names + :type var_names: set + ''' + op_names = {'batch_norm'} + var_names = [] + for op in self.block.ops: + if op.type in op_names: + var_names += op.input_arg_names + return set(var_names) + + def _should_be_converted(var): + return var.persistable and \ + var.name not in self.no_conversion_vars and \ + var.type != core.VarDesc.VarType.FEED_MINIBATCH and \ + var.type != core.VarDesc.VarType.FETCH_LIST + + self.no_conversion_vars = _get_no_fp16_conversion_var_names() + conversion_var_list = filter(_should_be_converted, + self.block.vars.values()) + for var in conversion_var_list: + fp16_var_name = var.name + ".fp16" + fp16_var = self.block.create_parameter( + name=fp16_var_name.encode('ascii'), + type=var.type, + dtype=core.VarDesc.VarType.FP16, + shape=var.shape) + + # cast the data in the tensor of the original var to float16 + # data type and store it in the tensor of the new float16 var + self.scope.var(fp16_var_name) + fp16_tensor = self.scope.find_var(fp16_var_name).get_tensor() + tensor = np.array(self.scope.find_var(var.name).get_tensor()) + # After the old tensor data is converted to np.float16, view(np.uint16) + # is used so that the internal memory of the numpy array will be + # reinterpreted to be of np.uint16 data type, which is binded to fluid + # float16 data type via the help of pybind in tensor_py.h. + fp16_tensor.set( + tensor.astype(np.float16).view(np.uint16), self.place) + + # old var will be replaced by the fp16 var in program desc + self.input_map[var.name] = fp16_var_name + self.block.remove_var(var.name) diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py index f7f1ca2598a3e679b24fa8d62c52e4f4de788fe2..08b8a878b6490bc989620085f3f9c06c7032d882 100644 --- a/python/paddle/fluid/io.py +++ b/python/paddle/fluid/io.py @@ -336,7 +336,7 @@ def save_inference_model(dirname, if main_program is None: main_program = default_main_program() - copy_program = main_program + copy_program = main_program.clone() if not os.path.isdir(dirname): os.makedirs(dirname) diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index 34382fb9fecdc256ae8fe3fcdaf1effd6e2597cb..cc71c2136a6756ff094f6e06b8e200c6a68db06a 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -457,8 +457,8 @@ def __create_shared_decorated_reader__(op_type, reader, attrs): return monkey_patch_reader_methods(main_prog_var) -def __create_unshared_decorated_reader__(op_type, reader, attrs): - new_reader_name = unique_name(op_type) +def __create_unshared_decorated_reader__(op_type, reader, attrs, name=None): + new_reader_name = name if name is not None else unique_name(op_type) main_blk = default_main_program().current_block() new_reader = main_blk.create_var(name=new_reader_name) main_blk.append_op( @@ -481,12 +481,12 @@ def batch(reader, batch_size): 'create_batch_reader', reader, {'batch_size': int(batch_size)}) -def double_buffer(reader, place=None): +def double_buffer(reader, place=None, name=None): attrs = dict() if place is not None: attrs['place'] = str(place).upper() - return __create_unshared_decorated_reader__('create_double_buffer_reader', - reader, attrs) + return __create_unshared_decorated_reader__( + 'create_double_buffer_reader', reader, attrs, name=name) def multi_pass(reader, pass_num): diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 
9a0c328033cdfdae39da050fc482abba17032dd9..7f16bf2a0c430213b2f52dafe8fa948b9e350f96 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -79,6 +79,7 @@ __all__ = [ 'lrn', 'pad', 'label_smooth', + 'roi_pool', ] @@ -3759,3 +3760,53 @@ def label_smooth(label, outputs={"Out": smooth_label}, attrs={"epsilon": float(epsilon)}) return smooth_label + + +def roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0): + """ + Region of interest pooling (also known as RoI pooling) is to perform + is to perform max pooling on inputs of nonuniform sizes to obtain + fixed-size feature maps (e.g. 7*7). + The operator has three steps: + 1. Dividing each region proposal into equal-sized sections with + the pooled_width and pooled_height + 2. Finding the largest value in each section + 3. Copying these max values to the output buffer + + Args: + input (Variable): The input for ROI pooling. + rois (Variable): ROIs (Regions of Interest) to pool over. It should + be a 2-D one level LoTensor of shape [num_rois, 4]. + The layout is [x1, y1, x2, y2], where (x1, y1) + is the top left coordinates, and (x2, y2) is the + bottom right coordinates. The num_rois is the + total number of ROIs in this batch data. + pooled_height (integer): The pooled output height. Default: 1 + pooled_width (integer): The pooled output width. Default: 1 + spatial_scale (float): Multiplicative spatial scale factor. To + translate ROI coords from their input scale + to the scale used when pooling. Default: 1.0 + + Returns: + pool_out (Variable): The output is a 4-D tensor of the shape + (num_rois, channels, pooled_h, pooled_w). + + Examples: + pool_out = fluid.layers.roi_pool(input=x, rois=rois, 7, 7, 1.0) + """ + helper = LayerHelper('roi_pool', **locals()) + dtype = helper.input_dtype() + pool_out = helper.create_tmp_variable(dtype) + argmaxes = helper.create_tmp_variable(dtype='int32') + helper.append_op( + type="roi_pool", + inputs={"X": input, + "ROIs": rois}, + outputs={"Out": pool_out, + "Argmax": argmaxes}, + attrs={ + "pooled_height": pooled_height, + "pooled_width": pooled_width, + "spatial_scale": spatial_scale + }) + return pool_out diff --git a/python/paddle/fluid/tests/book/test_image_classification.py b/python/paddle/fluid/tests/book/test_image_classification.py index d3c14b83fa74f3a4016ae13442846fad1f9e41fc..09f994c37020c4612223e4168be4cf535157f60b 100644 --- a/python/paddle/fluid/tests/book/test_image_classification.py +++ b/python/paddle/fluid/tests/book/test_image_classification.py @@ -244,7 +244,7 @@ def infer(use_cuda, save_dirname=None): assert len(results[0]) == len(transpiler_results[0]) for i in range(len(results[0])): np.testing.assert_almost_equal( - results[0][i], transpiler_results[0][i], decimal=6) + results[0][i], transpiler_results[0][i], decimal=5) print("infer results: ", results[0]) @@ -252,6 +252,26 @@ def infer(use_cuda, save_dirname=None): fetch_targets, exe, inference_transpiler_program) + if use_cuda and fluid.core.is_float16_supported(place): + # Use float16_transpiler to speedup + fp16_transpiler_program = inference_transpiler_program.clone() + t.float16_transpile(fp16_transpiler_program, place) + + fp16_results = exe.run(fp16_transpiler_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) + + assert len(results[0]) == len(fp16_results[0]) + for i in range(len(results[0])): + np.testing.assert_almost_equal( + results[0][i], fp16_results[0][i], decimal=2) + + print("float16 infer results: ", fp16_results[0]) + + 
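Stepping back to the `roi_pool` layer added to `nn.py` above: the example embedded in its docstring, `fluid.layers.roi_pool(input=x, rois=rois, 7, 7, 1.0)`, places positional arguments after keyword arguments and would not parse. A corrected usage sketch, mirroring the new `test_roi_pool` case added to `test_layers.py` later in this diff (the shapes are illustrative):

```python
import paddle.fluid.layers as layers

# A feature map and a set of RoIs, declared as in the new test_layers case.
x = layers.data(name="x", shape=[256, 30, 30], dtype="float32")
rois = layers.data(name="rois", shape=[4], dtype="float32", lod_level=1)

# Either pass every argument positionally ...
pool_out = layers.roi_pool(x, rois, 7, 7, 0.6)

# ... or keep the call fully keyword-based.
pool_out = layers.roi_pool(input=x, rois=rois, pooled_height=7,
                           pooled_width=7, spatial_scale=0.6)
```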
fluid.io.save_inference_model("float16_" + save_dirname, + feed_target_names, fetch_targets, exe, + fp16_transpiler_program) + def main(net_type, use_cuda, is_local=True): if use_cuda and not fluid.core.is_compiled_with_cuda(): diff --git a/python/paddle/fluid/tests/demo/text_classification/.gitignore b/python/paddle/fluid/tests/demo/text_classification/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..780d05b94667d3ea726e37bf9cf1b5b2baeff354 --- /dev/null +++ b/python/paddle/fluid/tests/demo/text_classification/.gitignore @@ -0,0 +1 @@ +*.recordio diff --git a/python/paddle/fluid/tests/demo/text_classification/convert_data_to_recordio.py b/python/paddle/fluid/tests/demo/text_classification/convert_data_to_recordio.py new file mode 100644 index 0000000000000000000000000000000000000000..9425d472a48056e71da5da364f659971ef6c2520 --- /dev/null +++ b/python/paddle/fluid/tests/demo/text_classification/convert_data_to_recordio.py @@ -0,0 +1,59 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import paddle.fluid as fluid +import paddle.v2 as paddle + + +def load_vocab(filename): + """ + load vocabulary + """ + vocab = {} + with open(filename) as f: + wid = 0 + for line in f: + vocab[line.strip()] = wid + wid += 1 + return vocab + + +# load word dict with paddle inner function +word_dict = load_vocab(sys.argv[1]) +word_dict[""] = len(word_dict) +print "Dict dim = ", len(word_dict) + +# input text data +data = fluid.layers.data(name="words", shape=[1], dtype="int64", lod_level=1) + +# label data +label = fluid.layers.data(name="label", shape=[1], dtype="int64") +# like placeholder +feeder = fluid.DataFeeder(feed_list=[data, label], place=fluid.CPUPlace()) + +# train data set +BATCH_SIZE = 128 +train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.imdb.train(word_dict), buf_size=10000), + batch_size=BATCH_SIZE) + +test_reader = paddle.batch( + paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE) + +fluid.recordio_writer.convert_reader_to_recordio_file( + "train.recordio", feeder=feeder, reader_creator=train_reader) +fluid.recordio_writer.convert_reader_to_recordio_file( + "test.recordio", feeder=feeder, reader_creator=test_reader) diff --git a/python/paddle/fluid/tests/demo/text_classification/train.py b/python/paddle/fluid/tests/demo/text_classification/train.py new file mode 100644 index 0000000000000000000000000000000000000000..e408684c6e0941a1b317ffeac66f071c1382836d --- /dev/null +++ b/python/paddle/fluid/tests/demo/text_classification/train.py @@ -0,0 +1,148 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle.fluid as fluid +import numpy +import sys + +TRAIN_FILES = ['train.recordio'] +TEST_FILES = ['test.recordio'] + +DICT_DIM = 89528 + +# embedding dim +emb_dim = 128 + +# hidden dim +hid_dim = 128 + +# hidden dim2 +hid_dim2 = 96 + +# class num +class_dim = 2 + + +def network_cfg(is_train, pass_num=100): + with fluid.unique_name.guard(): + train_file_obj = fluid.layers.open_files( + filenames=TRAIN_FILES, + pass_num=pass_num, + shapes=[[-1, 1], [-1, 1]], + lod_levels=[1, 0], + dtypes=['int64', 'int64'], + thread_num=1) + + test_file_obj = fluid.layers.open_files( + filenames=TEST_FILES, + pass_num=1, + shapes=[[-1, 1], [-1, 1]], + lod_levels=[1, 0], + dtypes=['int64', 'int64'], + thread_num=1) + + if is_train: + file_obj = fluid.layers.shuffle(train_file_obj, buffer_size=1000) + else: + file_obj = test_file_obj + + file_obj = fluid.layers.double_buffer( + file_obj, + name="train_double_buffer" if is_train else 'test_double_buffer') + + data, label = fluid.layers.read_file(file_obj) + + emb = fluid.layers.embedding(input=data, size=[DICT_DIM, emb_dim]) + + # sequence conv with window size = 3 + win_size = 3 + conv_3 = fluid.nets.sequence_conv_pool( + input=emb, + num_filters=hid_dim, + filter_size=win_size, + act="tanh", + pool_type="max") + + # fc layer after conv + fc_1 = fluid.layers.fc(input=[conv_3], size=hid_dim2) + + # probability of each class + prediction = fluid.layers.fc(input=[fc_1], + size=class_dim, + act="softmax") + # cross entropy loss + cost = fluid.layers.cross_entropy(input=prediction, label=label) + + # mean loss + avg_cost = fluid.layers.mean(x=cost) + acc = fluid.layers.accuracy(input=prediction, label=label) + + if is_train: + # SGD optimizer + sgd_optimizer = fluid.optimizer.Adagrad(learning_rate=0.01) + sgd_optimizer.minimize(avg_cost) + + return { + 'loss': avg_cost, + 'log': [avg_cost, acc], + 'file': train_file_obj if is_train else test_file_obj + } + + +def main(): + train = fluid.Program() + startup = fluid.Program() + + with fluid.program_guard(train, startup): + train_args = network_cfg(is_train=True) + + test = fluid.Program() + + with fluid.program_guard(test, fluid.Program()): + test_args = network_cfg(is_train=False) + + # startup + place = fluid.CUDAPlace(0) + exe = fluid.Executor(place=place) + exe.run(startup) + + train_exe = fluid.ParallelExecutor( + use_cuda=True, loss_name=train_args['loss'].name, main_program=train) + + fetch_var_list = [var.name for var in train_args['log']] + for i in xrange(sys.maxint): + result = map(numpy.array, + train_exe.run(fetch_list=fetch_var_list + if i % 1000 == 0 else [])) + if len(result) != 0: + print 'Train: ', result + + if i % 1000 == 0: + test_exe = fluid.ParallelExecutor( + use_cuda=True, main_program=test, share_vars_from=train_exe) + loss = [] + acc = [] + try: + while True: + loss_np, acc_np = map( + numpy.array, test_exe.run(fetch_list=fetch_var_list)) + loss.append(loss_np[0]) + acc.append(acc_np[0]) + except: + test_args['file'].reset() + print 'TEST: ', numpy.mean(loss), numpy.mean(acc) + + +if __name__ == '__main__': + main() diff --git 
a/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py new file mode 100644 index 0000000000000000000000000000000000000000..bffb4f3b666a7ddcc133b7c30fab132b49aa1d0e --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py @@ -0,0 +1,95 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +from op_test import OpTest + + +def bilinear_interp_np(input, out_h, out_w): + batch_size, channel, in_h, in_w = input.shape + if out_h > 1: + ratio_h = (in_h - 1.0) / (out_h - 1.0) + else: + ratio_h = 0.0 + if out_w > 1: + ratio_w = (in_w - 1.0) / (out_w - 1.0) + else: + ratio_w = 0.0 + + out = np.zeros((batch_size, channel, out_h, out_w)) + for i in range(out_h): + h = int(ratio_h * i) + hid = 1 if h < in_h - 1 else 0 + h1lambda = ratio_h * i - h + h2lambda = 1.0 - h1lambda + for j in range(out_w): + w = int(ratio_w * j) + wid = 1 if w < in_w - 1 else 0 + w1lambda = ratio_w * j - w + w2lambda = 1.0 - w1lambda + + out[:, :, i, j] = h2lambda*(w2lambda*input[:, :, h, w] + + w1lambda*input[:, :, h, w+wid]) + \ + h1lambda*(w2lambda*input[:, :, h+hid, w] + + w1lambda*input[:, :, h+hid, w+wid]) + return out.astype("float32") + + +class TestBilinearInterpOp(OpTest): + def setUp(self): + self.init_test_case() + self.op_type = "bilinear_interp" + input_np = np.random.random(self.input_shape).astype("float32") + output_np = bilinear_interp_np(input_np, self.out_h, self.out_w) + + self.inputs = {'X': input_np} + self.attrs = {'out_h': self.out_h, 'out_w': self.out_w} + self.outputs = {'Out': output_np} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out', in_place=True) + + def init_test_case(self): + self.input_shape = [2, 3, 4, 4] + self.out_h = 2 + self.out_w = 2 + + +class TestCase1(TestBilinearInterpOp): + def init_test_case(self): + self.input_shape = [4, 1, 7, 8] + self.out_h = 1 + self.out_w = 1 + + +class TestCase2(TestBilinearInterpOp): + def init_test_case(self): + self.input_shape = [3, 3, 9, 6] + self.out_h = 12 + self.out_w = 12 + + +class TestCase3(TestBilinearInterpOp): + def init_test_case(self): + self.input_shape = [1, 1, 128, 64] + self.out_h = 64 + self.out_w = 128 + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py b/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py index e33436b63c0e0b41f5e4a5bc6190517d7c648277..8f62ac20a5c13257a1519128292e2abc4962bf84 100644 --- a/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py +++ b/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py @@ -14,6 +14,7 @@ import unittest import numpy as np +import numpy.random as random import sys import math from op_test import OpTest @@ -25,14 +26,27 @@ class TestIOUSimilarityOp(OpTest): def setUp(self): self.op_type = "iou_similarity" - self.boxes1 = np.array( 
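The IoU-similarity test is rewritten here to compute the expected similarity directly from randomly generated box coordinates instead of hard-coding it. As a quick hand check of that intersection-over-union formula (the boxes below are made up for illustration, not taken from the test):

```python
# Two axis-aligned boxes in [xmin, ymin, xmax, ymax] form.
box1 = [0.0, 0.0, 2.0, 2.0]   # area 4
box2 = [1.0, 1.0, 3.0, 3.0]   # area 4

inter_w = max(0.0, min(box1[2], box2[2]) - max(box1[0], box2[0]))  # width  = 1
inter_h = max(0.0, min(box1[3], box2[3]) - max(box1[1], box2[1]))  # height = 1
inter_area = inter_w * inter_h                                     # 1
union_area = 4.0 + 4.0 - inter_area                                # 7

iou = inter_area / union_area
assert abs(iou - 1.0 / 7.0) < 1e-6
```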
- [[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]).astype('float32') - self.boxes2 = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], - [0.0, 0.0, 20.0, 20.0]]).astype('float32') - self.output = np.array( - [[2.0 / 16.0, 0, 6.0 / 400.0], - [1.0 / 16.0, 0.0, 5.0 / 400.0]]).astype('float32') - + self.boxes1 = random.rand(2, 4).astype('float32') + self.boxes2 = random.rand(3, 4).astype('float32') + self.output = random.rand(2, 3).astype('float32') + for row in range(self.boxes1.shape[0]): + for col in range(self.boxes2.shape[0]): + xmin1, ymin1, xmax1, ymax1 = self.boxes1[row] + xmin2, ymin2, xmax2, ymax2 = self.boxes2[col] + area1 = (ymax1 - ymin1) * (xmax1 - xmin1) + area2 = (ymax2 - ymin2) * (xmax2 - xmin2) + inter_xmax = min(xmax1, xmax2) + inter_ymax = min(ymax1, ymax2) + inter_xmin = max(xmin1, xmin2) + inter_ymin = max(ymin1, ymin2) + inter_height = inter_ymax - inter_ymin + inter_width = inter_xmax - inter_xmin + inter_height = max(inter_height, 0) + inter_width = max(inter_width, 0) + inter_area = inter_width * inter_height + union_area = area1 + area2 - inter_area + sim_score = inter_area / union_area + self.output[row, col] = sim_score self.inputs = {'X': self.boxes1, 'Y': self.boxes2} self.outputs = {'Out': self.output} diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 17d6afdee161426e5da398ffa2ec148a027c905e..c5414abf0fee6b686dccf7c97e9c6d5408ecf62a 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -359,6 +359,16 @@ class TestBook(unittest.TestCase): self.assertIsNotNone(indices) print(str(program)) + def test_roi_pool(self): + program = Program() + with program_guard(program): + x = layers.data(name="x", shape=[256, 30, 30], dtype="float32") + rois = layers.data( + name="rois", shape=[4], dtype="float32", lod_level=1) + output = layers.roi_pool(x, rois, 7, 7, 0.6) + self.assertIsNotNone(output) + print(str(program)) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_roi_pool_op.py b/python/paddle/fluid/tests/unittests/test_roi_pool_op.py index e556d51b021217063e23190e40bc0e8f9fdc816c..3d754aff3a73e7168e2123483b26e5e3a3585a4e 100644 --- a/python/paddle/fluid/tests/unittests/test_roi_pool_op.py +++ b/python/paddle/fluid/tests/unittests/test_roi_pool_op.py @@ -25,7 +25,7 @@ class TestROIPoolOp(OpTest): self.make_rois() self.calc_roi_pool() - self.inputs = {'X': self.x, 'ROIs': self.rois} + self.inputs = {'X': self.x, 'ROIs': (self.rois[:, 1:5], self.rois_lod)} self.attrs = { 'spatial_scale': self.spatial_scale, @@ -36,7 +36,7 @@ class TestROIPoolOp(OpTest): self.outputs = {'Out': self.outs, 'Argmax': self.argmaxes} def init_test_case(self): - self.batch_size = 5 + self.batch_size = 3 self.channels = 3 self.height = 6 self.width = 4 @@ -47,7 +47,6 @@ class TestROIPoolOp(OpTest): self.spatial_scale = 1.0 / 4.0 self.pooled_height = 2 self.pooled_width = 2 - self.rois_num = 2 self.x = np.random.random(self.x_dim).astype('float32') @@ -106,20 +105,24 @@ class TestROIPoolOp(OpTest): def make_rois(self): rois = [] - batch_ids = np.random.randint(0, self.batch_size, size=self.rois_num) - for i in range(self.rois_num): - x1 = np.random.random_integers( - 0, self.width / self.spatial_scale - self.pooled_width) - y1 = np.random.random_integers( - 0, self.height / self.spatial_scale - self.pooled_height) - - x2 = np.random.random_integers(x1 + self.pooled_width, - self.width / 
self.spatial_scale) - y2 = np.random.random_integers(y1 + self.pooled_height, - self.height / self.spatial_scale) - - roi = [batch_ids[i], x1, y1, x2, y2] - rois.append(roi) + self.rois_lod = [[]] + for bno in range(self.batch_size): + self.rois_lod[0].append(len(rois)) + for i in range(bno + 1): + x1 = np.random.random_integers( + 0, self.width / self.spatial_scale - self.pooled_width) + y1 = np.random.random_integers( + 0, self.height / self.spatial_scale - self.pooled_height) + + x2 = np.random.random_integers(x1 + self.pooled_width, + self.width / self.spatial_scale) + y2 = np.random.random_integers(y1 + self.pooled_height, + self.height / self.spatial_scale) + + roi = [bno, x1, y1, x2, y2] + rois.append(roi) + self.rois_lod[0].append(len(rois)) + self.rois_num = len(rois) self.rois = np.array(rois).astype("int64") def setUp(self): diff --git a/tools/aws_benchmarking/README.md b/tools/aws_benchmarking/README.md index 22a468466afbcbf7cc312e714e41a3b5adf1160c..4fdd4b0de44e779378091566d9d6056a6f9ee4b6 100644 --- a/tools/aws_benchmarking/README.md +++ b/tools/aws_benchmarking/README.md @@ -77,10 +77,10 @@ Training nodes will run your `ENTRYPOINT` script with the following environment Now let's start the training process: ```bash -docker run -i -v $HOME/.aws:/root/.aws -v :/root/.pem \ +docker run -i -v $HOME/.aws:/root/.aws -v :/root/.pem \ putcn/paddle_aws_client \ --action create \ ---key_name \ +--key_name \ --security_group_id \ --docker_image myreponame/paddle_benchmark \ --pserver_count 2 \ @@ -154,8 +154,31 @@ Master exposes 4 major services: ### Parameters -TBD, please refer to client/cluster_launcher.py for now + - key_name: required, aws key pair name + - security_group_id: required, the security group id associated with your VPC + - vpc_id: The VPC in which you wish to run test, if not provided, this tool will use your default VPC. + - subnet_id: The Subnet_id in which you wish to run test, if not provided, this tool will create a new sub net to run test. + - pserver_instance_type: your pserver instance type, c5.2xlarge by default, which is a memory optimized machine. + - trainer_instance_type: your trainer instance type, p2.8xlarge by default, which is a GPU machine with 8 cards. + - task_name: the name you want to identify your job, if not provided, this tool will generate one for you. + - pserver_image_id: ami id for system image. Please note, although the default one has nvidia-docker installed, pserver is always launched with `docker` instead of `nvidia-docker`, please DO NOT init your training program with GPU place. + - pserver_command: pserver start command, format example: python,vgg.py,batch_size:128,is_local:no, which will be translated as `python vgg.py --batch_size 128 --is_local no` when trying to start the training in pserver. "--device CPU" is passed as default. + - trainer_image_id: ami id for system image, default one has nvidia-docker ready. + - trainer_command: trainer start command. Format is the same as pserver's, "--device GPU" is passed as default. + - availability_zone: aws zone id to place ec2 instances, us-east-2a by default. + - trainer_count: Trainer count, 1 by default. + - pserver_count: Pserver count, 1 by default. + - action: create|cleanup|status, "create" by default. + - pserver_port: the port for pserver to open service, 5436 by default. + - docker_image: the training docker image id. + - master_service_port: the port for master to open service, 5436 by default. 
+ - master_server_public_ip: the master service ip; this is required when the action is not "create". + - master_docker_image: the master's docker image id, "putcn/paddle_aws_master:latest" by default. + - no_clean_up: no instance termination when training finishes or fails if this value is set to "yes". This is for debugging purposes, so that you can inspect the instances after the process ends. + ### Trouble shooting -TBD + 1. How to check logs + + The master log is served at `http://<master_server_public_ip>:<master_service_port>/status`, you can list all the log files at `http://<master_server_public_ip>:<master_service_port>/logs`, and you can access any one of them at `http://<master_server_public_ip>:<master_service_port>/log/<log file name>`
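One parameter format above deserves a small illustration: `pserver_command` and `trainer_command` take comma-separated tokens in which `key:value` pairs become command-line flags, so `python,vgg.py,batch_size:128,is_local:no` is run as `python vgg.py --batch_size 128 --is_local no`. The sketch below is a hypothetical helper showing that translation, not the tool's actual parser:

```python
def expand_command(command):
    """Expand the comma-separated pserver/trainer command format described above."""
    parts = []
    for token in command.split(","):
        if ":" in token:
            key, value = token.split(":", 1)
            parts.extend(["--" + key, value])
        else:
            parts.append(token)
    return " ".join(parts)


assert (expand_command("python,vgg.py,batch_size:128,is_local:no")
        == "python vgg.py --batch_size 128 --is_local no")
```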