diff --git a/.gitignore b/.gitignore index 2badc3bdaa52f2608183fa34393719be66630654..9e3a0b499f9f42856429f3a42bef313ea3df3699 100644 --- a/.gitignore +++ b/.gitignore @@ -25,12 +25,3 @@ third_party/ # clion workspace. cmake-build-* - -# generated while compiling -paddle/pybind/pybind.h -CMakeFiles -cmake_install.cmake -paddle/.timestamp -python/paddlepaddle.egg-info/ -paddle/fluid/pybind/pybind.h -python/paddle/version.py diff --git a/cmake/external/mklml.cmake b/cmake/external/mklml.cmake index df3f0c7f0c31efaa127515bb98e5668b8f9df199..796bcf28a1dfb308ccb7a2f839742c5c2fcf2002 100644 --- a/cmake/external/mklml.cmake +++ b/cmake/external/mklml.cmake @@ -28,7 +28,7 @@ INCLUDE(ExternalProject) SET(MKLML_PROJECT "extern_mklml") SET(MKLML_VER "mklml_lnx_2018.0.1.20171007") -SET(MKLML_URL "https://github.com/01org/mkl-dnn/releases/download/v0.11/${MKLML_VER}.tgz") +SET(MKLML_URL "http://paddlepaddledeps.bj.bcebos.com/${MKLML_VER}.tgz") SET(MKLML_SOURCE_DIR "${THIRD_PARTY_PATH}/mklml") SET(MKLML_DOWNLOAD_DIR "${MKLML_SOURCE_DIR}/src/${MKLML_PROJECT}") SET(MKLML_DST_DIR "mklml") diff --git a/cmake/external/snappystream.cmake b/cmake/external/snappystream.cmake index 5377a0b046a796cd6f0bb1fb466e1cd0b4b678bf..8f7a3bf8eeaef75c8840f4ea318b484d33249bb7 100644 --- a/cmake/external/snappystream.cmake +++ b/cmake/external/snappystream.cmake @@ -54,5 +54,7 @@ add_library(snappystream STATIC IMPORTED GLOBAL) set_property(TARGET snappystream PROPERTY IMPORTED_LOCATION "${SNAPPYSTREAM_INSTALL_DIR}/lib/libsnappystream.a") -include_directories(${SNAPPYSTREAM_INCLUDE_DIR}) +include_directories(${SNAPPYSTREAM_INCLUDE_DIR}) # For snappysteam to include its own headers. +include_directories(${THIRD_PARTY_PATH}/install) # For Paddle to include snappy stream headers. + add_dependencies(snappystream extern_snappystream) diff --git a/cmake/external/warpctc.cmake b/cmake/external/warpctc.cmake index 9a9a20f897e09b823dfb19ff841c3f2aeb3f9fe6..a631ad14b18310598f7eea3a51839d61a9e456ff 100644 --- a/cmake/external/warpctc.cmake +++ b/cmake/external/warpctc.cmake @@ -62,7 +62,8 @@ ExternalProject_Add( ) MESSAGE(STATUS "warp-ctc library: ${WARPCTC_LIBRARIES}") -INCLUDE_DIRECTORIES(${WARPCTC_INCLUDE_DIR}) +INCLUDE_DIRECTORIES(${WARPCTC_INCLUDE_DIR}) # For warpctc code to include its headers. +INCLUDE_DIRECTORIES(${THIRD_PARTY_PATH}/install) # For Paddle code to include warpctc headers. ADD_LIBRARY(warpctc SHARED IMPORTED GLOBAL) SET_PROPERTY(TARGET warpctc PROPERTY IMPORTED_LOCATION ${WARPCTC_LIBRARIES}) diff --git a/cmake/external/zlib.cmake b/cmake/external/zlib.cmake index 20b8506e678af4db6ccb65bef99d28e085a67bf2..c3d73235453c8c9fd2859c3ab142888e8bda2dbe 100644 --- a/cmake/external/zlib.cmake +++ b/cmake/external/zlib.cmake @@ -25,7 +25,8 @@ ELSE(WIN32) SET(ZLIB_LIBRARIES "${ZLIB_INSTALL_DIR}/lib/libz.a" CACHE FILEPATH "zlib library." FORCE) ENDIF(WIN32) -INCLUDE_DIRECTORIES(${ZLIB_INCLUDE_DIR}) +INCLUDE_DIRECTORIES(${ZLIB_INCLUDE_DIR}) # For zlib code to include its own headers. +INCLUDE_DIRECTORIES(${THIRD_PARTY_PATH}/install) # For Paddle code to include zlib.h. 
ExternalProject_Add( extern_zlib diff --git a/cmake/generic.cmake b/cmake/generic.cmake index 3fe750f47efc149bb1af6086841bffd5dd8e85fd..e8bc285bdc95e213b9da2ee388078349a46d2798 100644 --- a/cmake/generic.cmake +++ b/cmake/generic.cmake @@ -251,7 +251,7 @@ function(cc_test TARGET_NAME) add_dependencies(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main paddle_memory gtest gflags glog) add_test(NAME ${TARGET_NAME} COMMAND ${TARGET_NAME} ${cc_test_ARGS} - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) endif() endfunction(cc_test) @@ -561,9 +561,9 @@ function(py_test TARGET_NAME) set(multiValueArgs SRCS DEPS ARGS ENVS) cmake_parse_arguments(py_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) add_test(NAME ${TARGET_NAME} - COMMAND env PYTHONPATH=${PADDLE_PYTHON_BUILD_DIR}/lib-python ${py_test_ENVS} + COMMAND env PYTHONPATH=${PADDLE_BINARY_DIR}/python ${py_test_ENVS} ${PYTHON_EXECUTABLE} -u ${py_test_SRCS} ${py_test_ARGS} - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) endif() endfunction() diff --git a/doc/fluid/CMakeLists.txt b/doc/fluid/CMakeLists.txt index 9fe79323ef9377a459d8405cfa74c88c52ce9346..8086507bb4b7e870ad6d6091945ed07a00b5100b 100644 --- a/doc/fluid/CMakeLists.txt +++ b/doc/fluid/CMakeLists.txt @@ -27,7 +27,7 @@ sphinx_add_target(paddle_fluid_docs ${CMAKE_CURRENT_SOURCE_DIR} ${SPHINX_HTML_DIR_EN}) -add_dependencies(paddle_fluid_docs gen_proto_py) +add_dependencies(paddle_fluid_docs gen_proto_py paddle_python) # configured documentation tools and intermediate build results set(BINARY_BUILD_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/_build") @@ -50,6 +50,6 @@ sphinx_add_target(paddle_fluid_docs_cn ${CMAKE_CURRENT_SOURCE_DIR} ${SPHINX_HTML_DIR_CN}) -add_dependencies(paddle_fluid_docs_cn gen_proto_py) +add_dependencies(paddle_fluid_docs_cn gen_proto_py paddle_python) add_subdirectory(api) diff --git a/doc/fluid/api/CMakeLists.txt b/doc/fluid/api/CMakeLists.txt index ca40dfb9644cea69329be0ec231378506c138bc0..48b396f0786adad1ba6cd41f72497f853e54bc38 100644 --- a/doc/fluid/api/CMakeLists.txt +++ b/doc/fluid/api/CMakeLists.txt @@ -19,4 +19,4 @@ sphinx_add_target(paddle_fluid_apis ${CMAKE_CURRENT_SOURCE_DIR} ${SPHINX_HTML_DIR_EN}) -add_dependencies(paddle_fluid_apis gen_proto_py framework_py_proto copy_paddle_pybind) +add_dependencies(paddle_fluid_apis gen_proto_py framework_py_proto copy_paddle_pybind paddle_python) diff --git a/doc/fluid/design/algorithm/parameter_average.md b/doc/fluid/design/algorithm/parameter_average.md index 53d601d3a9a37e8adad519833bb6fa2dc48023a0..940d37fb31dcd0c50ea6c4c42b052d7cb23a9c47 100644 --- a/doc/fluid/design/algorithm/parameter_average.md +++ b/doc/fluid/design/algorithm/parameter_average.md @@ -5,9 +5,11 @@ In a large scale machine learning setup where the size of the training data is h Polyak and Juditsky (1992) showed that the test performance of simple average of parameters obtained by Stochastic Gradient Descent (SGD) is as good as that of parameter values that are obtained by training the model over and over again, over the training dataset. -Hence, to accelerate the speed of Stochastic Gradient Descent, Averaged Stochastic Gradient Descent (ASGD) was proposed in Polyak and Juditsky (1992). For ASGD, the running average of parameters obtained by SGD, is used as the estimator for
. The averaging is done as follows: +Hence, to accelerate the speed of Stochastic Gradient Descent, Averaged Stochastic Gradient Descent (ASGD) was proposed in Polyak and Juditsky (1992). For ASGD, the running average of parameters obtained by SGD is used as the estimator for
. The averaging is done as follows: -![](./images/asgd.gif) +

+
+

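To make the averaging rule above concrete, here is a minimal NumPy sketch of Polyak-style running averaging wrapped around a plain SGD loop. The toy objective, noise model, and every name below are illustrative assumptions, not part of this patch or the Fluid API:

```python
import numpy as np

# Toy objective: minimize ||w - target||^2, whose gradient is 2 * (w - target).
target = np.array([1.0, -2.0, 3.0])
w = np.zeros(3)       # SGD iterate
avg_w = np.zeros(3)   # running (Polyak) average of the iterates

rng = np.random.default_rng(0)
for t in range(1, 1001):
    grad = 2.0 * (w - target) + rng.normal(scale=0.5, size=3)  # noisy gradient
    w -= 0.05 * grad                                           # plain SGD step
    avg_w += (w - avg_w) / t  # incremental mean: avg_t = avg_{t-1} + (w_t - avg_{t-1}) / t

print("last iterate:", w)      # noisy
print("averaged    :", avg_w)  # lower-variance estimator, which is the point of ASGD
```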
We propose averaging for any optimizer similar to how ASGD performs it, as mentioned above. diff --git a/doc/fluid/design/concurrent/channel.md b/doc/fluid/design/concurrent/channel.md index a00a3325e7b49381f0f82ebbf32b74683f02de5f..df67438bcc741ac521b00ee962fc13c93db21182 100644 --- a/doc/fluid/design/concurrent/channel.md +++ b/doc/fluid/design/concurrent/channel.md @@ -2,7 +2,7 @@ ## Introduction -A Channel is a data structure that allows for synchronous interprocess +A Channel is a data structure that allows for synchronous interprocess communication via message passing. It is a fundemental component of CSP (communicating sequential processes), and allows for users to pass data between threads without having to worry about synchronization. @@ -18,7 +18,7 @@ Creates a new channel that takes in variables of a specific dtype. - **fluid.make_channel(dtype, capacity=0)** - **dtype**: The data type of variables being sent/received through channel - - **capacity**: The capacity of the channel. A capacity of 0 represents + - **capacity**: The capacity of the channel. A capacity of 0 represents an unbuffered channel. Capacity > 0 represents a buffered channel ``` @@ -40,8 +40,8 @@ fluid.channel_close(ch) ### Send data to a channel -Sends a variable to a channel. Currently, variables of dtype `LoDTensor`, -`LoDRankTable`, `LoDTensorArray`, `SelectedRows`, `ReaderHolder`, and +Sends a variable to a channel. Currently, variables of dtype `LoDTensor`, +`LoDRankTable`, `LoDTensorArray`, `SelectedRows`, `ReaderHolder`, and `ChannelHolder` are supported. By default, the data of the Variable is moved from the sender to the receiver, @@ -52,7 +52,7 @@ however the user can optionally copy the data before performing the send. - **variable**: The variable to send to the channel - **is_copy**: If set to True, channel_send will perform a variable assign to copy the source variable to a new variable to be sent. - + ``` ch = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR) var = fill_constant(shape=[1],dtype=core.VarDesc.VarType.INT32, value=100) @@ -68,7 +68,7 @@ receiving variable. - **channel**: The channel to receive the variable from - **return_variable**: The destination variable used to store the data of the variable received from the channel - + ``` ch = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR) var = fill_constant(shape=[1],dtype=core.VarDesc.VarType.INT32, value=-1) @@ -84,9 +84,9 @@ internal queues, locks, and conditional variables. ### QueueMessage QueueMessage encapsulates the state of the channel send/receive operation to be -put in the **sendq/recvq**. It contains a condition variable used to lock the +put in the **sendq/recvq**. It contains a condition variable used to lock the thread (when there are no available sends/receives). In addition, it contains -a callback function to notify a thread when the QueueMessage is being +a callback function to notify a thread when the QueueMessage is being processed by the channel. ### Queues @@ -108,21 +108,21 @@ channel_recv operation will put a new QueueMessage on the recvq and block the current thread under two conditions: 1. The channel is buffered and there is no data on the buff_ 2. The channel is unbuffered and does not have a sender - + ### State diagram #### Channel Send

-
+

- + #### Channel Receive

-
+

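As a rough illustration of the send/receive blocking rules diagrammed above, the following is a toy Python channel built on a condition variable. It is a sketch only: Fluid's actual channel is implemented in C++ with explicit sendq/recvq queues of QueueMessages, and true unbuffered (capacity 0) rendezvous semantics are simplified away here:

```python
import threading
from collections import deque

class ToyChannel:
    """Bounded channel: send blocks when full, recv blocks when empty."""

    def __init__(self, capacity=1):
        self.capacity = max(capacity, 1)  # capacity 0 (unbuffered) approximated as 1
        self.buf = deque()
        self.cond = threading.Condition()
        self.closed = False

    def send(self, item):
        with self.cond:
            while len(self.buf) >= self.capacity and not self.closed:
                self.cond.wait()       # analogous to parking a QueueMessage on sendq
            if self.closed:
                raise RuntimeError("send on a closed channel")
            self.buf.append(item)
            self.cond.notify_all()     # wake any blocked receiver

    def recv(self):
        with self.cond:
            while not self.buf and not self.closed:
                self.cond.wait()       # analogous to parking a QueueMessage on recvq
            if self.buf:
                item = self.buf.popleft()
                self.cond.notify_all() # wake any blocked sender
                return item
            return None                # channel closed and drained
```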
- + ## Limitations and Considerations ### Variable Copy @@ -135,5 +135,5 @@ be sent before it is sent. Please note that this is acheived by adding an **assign** operator and creating a temporary variable that is sent in place of the original variable. Please -note that **assign** operator has limited support for only certain variables +note that **assign** operator has limited support for only certain variables datatypes. diff --git a/doc/fluid/design/concurrent/concurrent_programming.md b/doc/fluid/design/concurrent/concurrent_programming.md index 64602166065af28309d7a01fdeb7076a9b0a081a..1859f983e9133674e69ecd506d7683ea926b2b8f 100644 --- a/doc/fluid/design/concurrent/concurrent_programming.md +++ b/doc/fluid/design/concurrent/concurrent_programming.md @@ -23,21 +23,25 @@ The following table compares concepts in Fluid and Go user-defined functions layers + control-flow and built-in functions intrinsics/operators + goroutines, channels class ThreadPool + runtime class Executor + diff --git a/doc/fluid/design/concurrent/select_op.md b/doc/fluid/design/concurrent/select_op.md index 52c226bc94a4e8bfc5588705d7f65328840e91cc..4fcae57cc7932cdaebe549486e7f7cebf0bd038a 100644 --- a/doc/fluid/design/concurrent/select_op.md +++ b/doc/fluid/design/concurrent/select_op.md @@ -2,13 +2,13 @@ ## Introduction -In golang, the [**select**](https://golang.org/ref/spec#Select_statements) -statement lets a goroutine wait on multiple communication operations at the -same time. The **select** blocks until one of its cases can run, then -executes the case. If multiple cases are ready to run, then one case is +In golang, the [**select**](https://golang.org/ref/spec#Select_statements) +statement lets a goroutine wait on multiple communication operations at the +same time. The **select** blocks until one of its cases can run, then +executes the case. If multiple cases are ready to run, then one case is choosen at random to be executed. -With the introduction of CSP for Paddle, we mimic this behavior by +With the introduction of CSP for Paddle, we mimic this behavior by creating a ***select_op***. ## How to use it @@ -17,11 +17,11 @@ The **select_op** is available as a c++ operator. However most users will prefer to use the much simplier Python API. - **fluid.Select()**: Creates a select operator and adds it to the current -block within the main program. Also creates a sub block and adds it to the -main program. This sub block is used to hold all variables and operators +block within the main program. Also creates a sub block and adds it to the +main program. This sub block is used to hold all variables and operators used by the case statements. - -Within the select block, users can add cases by + +Within the select block, users can add cases by calling **select.case** or **select.default** method. - **fluid.Select.case(channel_action, channel, result_variable)**: Represents @@ -37,13 +37,13 @@ execute. ``` ch1 = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR) quit_ch = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR) - + x = fill_constant(shape=[1], dtype=core.VarDesc.VarType.INT32, value=0) y = fill_constant(shape=[1], dtype=core.VarDesc.VarType.INT32, value=1) - + while_cond = fill_constant(shape=[1], dtype=core.VarDesc.VarType.BOOL, value=True) while_op = While(cond=while_cond) - + with while_op.block(): with fluid.Select() as select: with select.case(fluid.channel_send, channel, x): @@ -99,17 +99,17 @@ blocks { } } // Create "select" operator. 
- // inputs: + // inputs: // X: All input variables used by operators within the select block // case_to_execute: Variable filled in by select_op when it determines // which case to execute. // // outputs: - // Out: All output variables referenced by operators within select block. - // + // Out: All output variables referenced by operators within select block. + // // attrs: // sub_block: The block id containing the select "cases" - // cases: Serialized list of all cases in the select op. + // cases: Serialized list of all cases in the select op. // Each case is serialized as: ',,,' // where type is 0 for default, 1 for send, and 2 for receive. // No channel and values are needed for default cases. @@ -150,7 +150,7 @@ into **X**. It will also create a temp variable called **case_to_execute**. Th filled in by the select_op after it has completed processing the case statements. If there are no available cases to execute (ie: all cases are blocked on channel operations, and -there is no default statement), then the select_op will block the current thread. The thread will +there is no default statement), then the select_op will block the current thread. The thread will unblock once there is a channel operation affecting one of the case statements, at which point, the **select_op** will set the **case_to_execute** variable to the index of the case to execute. @@ -247,17 +247,17 @@ blocks { ``` -Cases are represented by a **conditional_block operator**, whose's condition is set as the output of -equal(**case_to_execute**, **case_index**). Since each case index is unique in this sub-block, +Cases are represented by a **conditional_block operator**, whose's condition is set as the output of +equal(**case_to_execute**, **case_index**). Since each case index is unique in this sub-block, only one case will be executed. ### select_op flow

-
+

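The dispatch rule sketched in the diagram — try every case, run one ready case at random, fall back to default, otherwise block — can be phrased in Python roughly as below. This is schematic pseudocode with hypothetical helpers, not the actual select_op kernel:

```python
import random

def select(cases, default=None):
    """cases: list of dicts, each with a non-blocking 'try_op' and a 'body' callback."""
    ready = [c for c in cases if c["try_op"]()]  # attempt each channel op without blocking
    if ready:
        chosen = random.choice(ready)            # several ready cases: pick one at random
        return chosen["body"]()
    if default is not None:
        return default()
    # The real select_op would enqueue a QueueMessage on every involved channel's
    # sendq/recvq, block the thread, and later fill case_to_execute with the index
    # of whichever case a channel operation unblocked first.
    raise NotImplementedError("no ready case and no default: the op would block here")
```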
-The select algorithm is inspired by golang's select routine. Please refer to +The select algorithm is inspired by golang's select routine. Please refer to http://www.tapirgames.com/blog/golang-concurrent-select-implementation for more information. ## Backward Pass diff --git a/doc/fluid/design/dist_train/distributed_architecture.md b/doc/fluid/design/dist_train/distributed_architecture.md index a405cb6aaf80b9d2e8a1a9c774ca85cc7e62bbab..229cb47c17d633be6848bb35e58d33ec9b47ec3b 100644 --- a/doc/fluid/design/dist_train/distributed_architecture.md +++ b/doc/fluid/design/dist_train/distributed_architecture.md @@ -40,11 +40,11 @@ computation is only specified in Python code which sits outside of PaddlePaddle, Similar to how a compiler uses an intermediate representation (IR) so that the programmer does not need to manually optimize their code for most of the cases, we can have an intermediate representation in PaddlePaddle as well. The compiler optimizes the IR as follows: - + PaddlePaddle can support model parallelism by converting the IR so that the user no longer needs to manually perform the computation and operations in the Python component: - + The IR for PaddlePaddle after refactoring is called a `Block`, it specifies the computation dependency graph and the variables used in the computation. @@ -60,7 +60,7 @@ For a detailed explanation, refer to this document - The revamped distributed training architecture can address the above discussed limitations. Below is the illustration of how it does so: - + The major components are: *Python API*, *Distribute Transpiler* and *Remote Executor*. @@ -152,7 +152,7 @@ for data in train_reader(): `JobDesc` object describe the distributed job resource specification to run on Cluster environment. - + `RemoteExecutor.run` sends the `ProgramDesc` and [TrainingJob](https://github.com/PaddlePaddle/cloud/blob/unreleased-tpr/doc/autoscale/README.md#training-job-resource) @@ -171,7 +171,7 @@ In the future, a more general placement algorithm should be implemented, which m The local training architecture will be the same as the distributed training architecture, the difference is that everything runs locally, and there is just one PaddlePaddle runtime: - + ### Training Data diff --git a/doc/fluid/design/dist_train/multi_cpu.md b/doc/fluid/design/dist_train/multi_cpu.md index a8d8ee0422acc84835170a44eb83f9b5f0c6bb40..38222d083084ebfca3099ce96b47868c42d55101 100644 --- a/doc/fluid/design/dist_train/multi_cpu.md +++ b/doc/fluid/design/dist_train/multi_cpu.md @@ -8,11 +8,11 @@ Op graph to a multi-CPU Op graph, and run `ParallelDo` Op to run the graph. ## Transpiler - + After converted: - + ## Implement diff --git a/doc/fluid/design/dist_train/parameter_server.md b/doc/fluid/design/dist_train/parameter_server.md index 6ce48dfbfce8b094684b412ebfda7e505ddc30ae..73c85da5e89eee0ac7857a0b808bc64ae673fdad 100644 --- a/doc/fluid/design/dist_train/parameter_server.md +++ b/doc/fluid/design/dist_train/parameter_server.md @@ -41,11 +41,11 @@ We will need these OPs: *Send*, *Recv*, *Enqueue*, *Dequeue*. Below is an example of converting the user defined graph to the subgraphs for the trainer and the parameter server: - + After converting: - + 1. The parameter variable W and its optimizer program are placed on the parameter server. 1. Operators are added to the program. @@ -69,8 +69,7 @@ In Fluid, we introduce [SelectedRows](../selected_rows.md) to represent a list o non-zero gradient data. 
So when we do parameter optimization both locally and remotely, we only need to send those non-zero rows to the optimizer operators: - - + ### Benefits - Model parallelism becomes easier to implement: it is an extension to diff --git a/doc/fluid/design/dynamic_rnn/rnn.md b/doc/fluid/design/dynamic_rnn/rnn.md index 6f414e5549b149bc88fb252085ff56dbb06730f8..7b61b050f640814d6949cf6847b431da53d59581 100644 --- a/doc/fluid/design/dynamic_rnn/rnn.md +++ b/doc/fluid/design/dynamic_rnn/rnn.md @@ -5,7 +5,7 @@ This document describes the RNN (Recurrent Neural Network) operator and how it i ## RNN Algorithm Implementation

- +

The above diagram shows an RNN unrolled into a full network. @@ -22,7 +22,7 @@ There are several important concepts here: There could be local variables defined in each step-net. PaddlePaddle runtime realizes these variables in *step-scopes* which are created for each step.

-
+
Figure 2 illustrates the RNN's data flow

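The step-scope mechanism described above can be mimicked in a few lines of Python — one child scope per time step, with variable lookup falling back to the enclosing scope. The class and the variable names (boot memory, pre-memory, step input/output, borrowed from the rnn.dot graph added later in this patch) are illustrative, not the runtime's real data structures:

```python
class Scope:
    """Toy scope: local variables first, then fall back to the parent scope."""
    def __init__(self, parent=None):
        self.vars, self.parent = {}, parent

    def find(self, name):
        if name in self.vars:
            return self.vars[name]
        return self.parent.find(name) if self.parent else None

def run_rnn(step_net, inputs, global_scope):
    """Run step_net once per time step, each step inside its own step-scope."""
    step_scopes = []
    for x_t in inputs:
        scope = Scope(parent=global_scope)  # fresh step-scope for this step
        scope.vars["step_input"] = x_t
        scope.vars["pre_memory"] = (step_scopes[-1].vars["memory"]
                                    if step_scopes else global_scope.find("boot_memory"))
        step_net(scope)  # expected to write scope.vars["memory"] and ["step_output"]
        step_scopes.append(scope)
    return [s.vars["step_output"] for s in step_scopes]
```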
@@ -93,7 +93,7 @@ For example, we could have a 2-level RNN, where the top level corresponds to par The following figure illustrates feeding in text into the lower level, one sentence at a step, and feeding the step outputs to the top level. The final top level output is about the whole text.

- +

```python @@ -149,5 +149,5 @@ If the `output_all_steps` is set to False, it will only output the final time st

- +

diff --git a/doc/fluid/design/modules/batch_norm_op.md b/doc/fluid/design/modules/batch_norm_op.md index d1392619c42d9206bf4bddcd33ad11b033e6cbdb..e451ffcc73b5de2b911e1c6de54b42a5d1d54c37 100644 --- a/doc/fluid/design/modules/batch_norm_op.md +++ b/doc/fluid/design/modules/batch_norm_op.md @@ -2,7 +2,7 @@ ## What is batch normalization -Batch normalization is a frequently-used method in deep network training. It adjusts the mean and variance of a layer's output, and make the data distribution easier for next layer's training. +Batch normalization is a frequently-used method in deep network training. It adjusts the mean and variance of a layer's output, and make the data distribution easier for next layer's training. The principle of batch normalization can be summarized into a simple function: @@ -66,7 +66,7 @@ As most C++ operators do, `batch_norm_op` is defined by inputs, outputs, attribu The following graph showes the training computational process of `batch_norm_op`: - + cudnn provides APIs to finish the whole series of computation, we can use them in our GPU kernel. @@ -74,13 +74,13 @@ cudnn provides APIs to finish the whole series of computation, we can use them i `batch_norm_op` is warpped as a layer in Python: -```python -def batch_norm_layer(net, +```python +def batch_norm_layer(net, input, - output, - scale, - bias, - use_global_est = False, + output, + scale, + bias, + use_global_est = False, epsilon = 1e-6, momentum = 0.99): mean_cache = scope.new_var(name = 'estimated_mean', trainable = False) @@ -119,15 +119,15 @@ for pass_id in range(PASS_NUM): if pass_id % 100 == 0: net.infer(test_image) # run inferencing model # ... -``` +``` `is_infer` is an attribute. Once an operator is created, its attributes can not be changed. It suggests us that we shall maintain two `batch_norm_op` in the model, one's `is_infer` is `True`(we call it `infer_batch_norm_op`) and the other one's is `False`(we call it `train_batch_norm_op`). They share all parameters and variables, but be placed in two different branches. That is to say, if a network contains a `batch_norm_op`, it will fork into two branches, one go through `train_batch_norm_op` and the other one go through `infer_batch_norm_op`:
- +
-Just like what is shown in the above graph, the net forks before `batch_norm_op` and will never merge again. All the operators after `batch_norm_op` will duplicate. +Just like what is shown in the above graph, the net forks before `batch_norm_op` and will never merge again. All the operators after `batch_norm_op` will duplicate. When the net runs in training mode, the end of the left branch will be set as the running target, so the dependency tracking process will ignore right branch automatically. When the net runs in inferencing mode, the process is reversed. diff --git a/doc/fluid/design/modules/regularization.md b/doc/fluid/design/modules/regularization.md index 21280ac898feb4dd5e5a5d9e88d121e856850f0b..8cd5ff71d193f03e1ac923724b52f28c6057d25d 100644 --- a/doc/fluid/design/modules/regularization.md +++ b/doc/fluid/design/modules/regularization.md @@ -6,23 +6,23 @@ A central problem in machine learning is how to design an algorithm that will pe ### Parameter Norm Penalties Most common regularization approaches in deep learning are based on limiting the capacity of the models by adding a parameter norm penalty to the objective function `J`. This is given as follows: -
+
The parameter `alpha` is a hyperparameter that weights the relative contribution of the norm penalty term, `omega`, relative to the standard objective function `J`. The most commonly used norm penalties are the L2 norm penalty and the L1 norm penalty. These are given as follows: ##### L2 Regularization: -
+
##### L1 Regularization -
+
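The three equations referenced above are rendered as images in the original document (the files loss_equation.png, l2_regularization.png, and l1_regularization.png added later in this patch appear to be those renderings); their standard textbook forms are, in LaTeX:

```latex
% Penalized objective: alpha weights the norm penalty Omega against J
\tilde{J}(\theta; X, y) = J(\theta; X, y) + \alpha \, \Omega(\theta)

% L2 norm penalty (weight decay)
\Omega(\theta) = \tfrac{1}{2} \lVert w \rVert_2^2

% L1 norm penalty
\Omega(\theta) = \lVert w \rVert_1
```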
A much more detailed mathematical background of regularization can be found [here](http://www.deeplearningbook.org/contents/regularization.html). ## Regularization Survey -A detailed survey of regularization in various deep learning frameworks can be found [here](https://github.com/PaddlePaddle/Paddle/wiki/Regularization-Survey). +A detailed survey of regularization in various deep learning frameworks can be found [here](https://github.com/PaddlePaddle/Paddle/wiki/Regularization-Survey). ## Proposal for Regularization in PaddlePaddle @@ -32,41 +32,35 @@ In the new design, we propose to create new operations for regularization. For n - L2_regularization_op - L1_regularization_op -These ops can be like any other ops with their own CPU/GPU implementations either using Eigen or separate CPU and GPU kernels. As the initial implementation, we can implement their kernels using Eigen following the abstraction pattern implemented for [Activation Ops](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/accuracy_op.h). This abstraction pattern can make it very easy to implement new regularization schemes other than L1 and L2 norm penalties. +These ops can be like any other ops with their own CPU/GPU implementations either using Eigen or separate CPU and GPU kernels. As the initial implementation, we can implement their kernels using Eigen following the abstraction pattern implemented for [Activation Ops](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/accuracy_op.h). This abstraction pattern can make it very easy to implement new regularization schemes other than L1 and L2 norm penalties. -The idea of building ops for regularization is in sync with the refactored Paddle philosophy of using operators to represent any computation unit. The way these ops will be added to the computation graph, will be decided by the [layer functions](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/python_api.md#layer-function) in Python API. +The idea of building ops for regularization is in sync with the refactored Paddle philosophy of using operators to represent any computation unit. The way these ops will be added to the computation graph, will be decided by the [layer functions](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/python_api.md#layer-function) in Python API. ### Computation Graph Below is an example of a really simple feed forward neural network. -
+
The Python API will modify this computation graph to add regularization operators. The modified computation graph will look as follows: -
+
    ### Python API implementation for Regularization -Using the low level ops, `L2_regularization_op` and `L1_regularization_op`, any user can add regularization to their computation graphs. However, this will require a lot of lines of code and we should design Python APIs that support regularization. An example of such an API can be seen in [Keras](https://keras.io/regularizers/). As per the PaddlePaddle [Python API design](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/python_api.md), the layer functions are responsible for creating operators, operator parameters and variables. Since regularization is a property of parameters, it makes sense to create these in the layer functions. +Using the low level ops, `L2_regularization_op` and `L1_regularization_op`, any user can add regularization to their computation graphs. However, this will require a lot of lines of code and we should design Python APIs that support regularization. An example of such an API can be seen in [Keras](https://keras.io/regularizers/). As per the PaddlePaddle [Python API design](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/python_api.md), the layer functions are responsible for creating operators, operator parameters and variables. Since regularization is a property of parameters, it makes sense to create these in the layer functions. #### Creation of Regularization ops There are two possibilities for creating the regularization ops: -1. We create these ops immediately while building the computation graph. -2. We add these ops in a lazy manner, just before the backward, similar to the way the optimization ops are added. +1. We create these ops immediately while building the computation graph. +2. We add these ops in a lazy manner, just before the backward, similar to the way the optimization ops are added. -The proposal is to add these ops in a lazy manner just before the backward pass. +The proposal is to add these ops in a lazy manner just before the backward pass. #### Storage of Regularization attributes -Since we want to create the regularization ops in a lazy manner, the regularization attributes (type of regularization and weight of regularization penalty) can be stored as attributes of the [`Parameter`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/framework.py#L421) class. This is because regularization is a property of the parameters and storing regularization properties with Parameters also allows for shared parameters. +Since we want to create the regularization ops in a lazy manner, the regularization attributes (type of regularization and weight of regularization penalty) can be stored as attributes of the [`Parameter`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/framework.py#L421) class. This is because regularization is a property of the parameters and storing regularization properties with Parameters also allows for shared parameters. #### High-level API In PaddlePaddle Python API, users will primarily rely on [layer functions](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/python_api.md#layer-function) to create neural network layers. Hence, we also need to provide regularization functionality in layer functions. The design of these APIs can be postponed for later right now. A good reference for these APIs can be found in [Keras](https://keras.io/regularizers/) and also by looking at Tensorflow in [`tf.contrib.layers`](https://www.tensorflow.org/api_guides/python/contrib.layers). 
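A minimal sketch of the lazy approach chosen above: walk the parameters just before the backward pass is constructed and emit one penalty op per parameter that carries a regularization attribute. The op names come from this document; `program.append_op`, `param.regularizer`, and the attribute fields are hypothetical stand-ins for whatever the framework API ends up being:

```python
def append_regularization_ops(program, parameters):
    """Lazily emit L1/L2 penalty ops right before backward construction."""
    penalty_vars = []
    for param in parameters:
        reg = getattr(param, "regularizer", None)  # stored on the Parameter, per above
        if reg is None:
            continue                               # this parameter is not regularized
        op_type = {"L1": "L1_regularization_op",
                   "L2": "L2_regularization_op"}[reg.kind]
        out = program.append_op(op_type,
                                inputs={"X": param},
                                attrs={"alpha": reg.alpha})  # penalty weight
        penalty_vars.append(out)
    return penalty_vars  # summed into the loss before the backward pass runs
```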
- - - - - - diff --git a/doc/fluid/design/network/deep_speech_2.md b/doc/fluid/design/network/deep_speech_2.md index 7f5dcf55f9f2a0fd27ffde100510dd8fee305381..f32a5b7e8a4d820319a666dab4c3129360e2c924 100644 --- a/doc/fluid/design/network/deep_speech_2.md +++ b/doc/fluid/design/network/deep_speech_2.md @@ -116,7 +116,7 @@ The classical DS2 network contains 15 layers (from bottom to top): - **One** CTC-loss layer
-
+
Figure 1. Architecture of Deep Speech 2 Network.
@@ -142,7 +142,7 @@ Key ingredients about the layers: - **Batch Normalization Layers**: - Added to all above layers (except for data and loss layer). - Sequence-wise normalization for RNNs: BatchNorm only performed on input-state projection and not state-state projection, for efficiency consideration. - + @@ -208,7 +208,7 @@ TODO by Assignees ### Beam Search with CTC and LM
-
+
Figure 2. Algorithm for CTC Beam Search Decoder.
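The decoder combines the CTC acoustic score with an external language model. A schematic of the usual DS2-style scoring, Q(y) = log p_ctc(y|x) + α·log p_lm(y) + β·|words(y)|, is below; it rescores a finished beam rather than implementing full prefix beam search, and α, β, and the toy LM are placeholder assumptions:

```python
import math

def rescore(beam, lm, alpha=0.5, beta=1.0):
    """beam: [(transcript, ctc_log_prob), ...]; lm(words) -> probability in (0, 1]."""
    best = None
    for transcript, log_p_ctc in beam:
        words = transcript.split()
        q = log_p_ctc + alpha * math.log(lm(words)) + beta * len(words)
        if best is None or q > best[0]:
            best = (q, transcript)
    return best

# Toy usage with a uniform stand-in language model.
beam = [("hello world", -4.2), ("hollow world", -4.0)]
print(rescore(beam, lm=lambda words: 1e-4))
```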
diff --git a/doc/fluid/design/network/sequence_decoder.md b/doc/fluid/design/network/sequence_decoder.md index c4a9bbeeefca0e05c335dd60233691e8bac33015..f13d30ca9fe09c9525c711436f605bb280e11000 100644 --- a/doc/fluid/design/network/sequence_decoder.md +++ b/doc/fluid/design/network/sequence_decoder.md @@ -199,7 +199,7 @@ Packing the `selected_generation_scores` will get a `LoDTensor`, and each tail i ## LoD and shape changes during decoding

- +

According to the image above, the only phase that changes the LoD is beam search. diff --git a/doc/fluid/design/others/gan_api.md index fb41df8615f73d9fd4c32995eab265833eac1a55..7167470088766985fa5ad31657410309330fd725 100644 --- a/doc/fluid/design/others/gan_api.md +++ b/doc/fluid/design/others/gan_api.md @@ -1,24 +1,24 @@ # Design for GAN -GAN (General Adversarial Net [https://arxiv.org/abs/1406.2661]) is an important model for unsupervised learning and widely used in many areas. +GAN (Generative Adversarial Net [https://arxiv.org/abs/1406.2661]) is an important model for unsupervised learning and widely used in many areas. It applies several important concepts in machine learning system design, including building and running subgraphs, dependency tracing, different optimizers in one executor and so forth. In our GAN design, we wrap it as a user-friendly, easily customized Python API to design different models. We take the conditional DC-GAN (Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks [https://arxiv.org/abs/1511.06434]) as an example due to its good performance on image generation.

-
+
Figure 1. The overall running logic of GAN. The black solid arrows indicate the forward pass; the green dashed arrows indicate the backward pass of generator training; the red dashed arrows indicate the backward pass of the discriminator training. The BP pass of the green (red) arrow should only update the parameters in the green (red) boxes. The diamonds indicate the data providers. d\_loss and g\_loss marked in red and green are the two targets we would like to run.

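The two targets named in the caption, d_loss and g_loss, are the two sides of the standard GAN minimax objective, reproduced here in LaTeX (the textbook formulation, not copied from this patch):

```latex
\min_G \max_D V(D, G) =
    \mathbb{E}_{x \sim p_{\mathrm{data}}(x)}\big[\log D(x)\big]
  + \mathbb{E}_{z \sim p_z(z)}\big[\log\big(1 - D(G(z))\big)\big]
```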
The operators, layers and functions required/optional to build a GAN demo are summarized in https://github.com/PaddlePaddle/Paddle/issues/4563.

-
+
Figure 2. Photo borrowed from the original DC-GAN paper.

-## The Conditional-GAN might be a class. +## The Conditional-GAN might be a class. This design we adopt the popular open source design in https://github.com/carpedm20/DCGAN-tensorflow and https://github.com/rajathkmp/DCGAN. It contains following data structure: - DCGAN(object): which contains everything required to build a GAN model. It provides following member functions methods as API: @@ -29,7 +29,7 @@ This design we adopt the popular open source design in https://github.com/carped Returns a generated image. - discriminator(image): -Given an image, decide if it is from a real source or a fake one. +Given an image, decide if it is from a real source or a fake one. Returns a 0/1 binary label. - build_model(self): @@ -47,7 +47,7 @@ To be more detailed, we introduce our design of DCGAN as following: ```python class DCGAN(object): def __init__(self, y_dim=None): - + # hyper parameters self.y_dim = y_dim # conditional gan or not self.batch_size = 100 @@ -82,18 +82,18 @@ class DCGAN(object): # input z: the random noise # input y: input data label (optional) # output G_im: generated fake images - + if not self.y_dim: z = pd.layer.concat(1, [z, y]) - + G_h0 = pd.layer.fc(z, self.G_w0, self.G_b0) G_h0_bn = pd.layer.batch_norm(G_h0) G_h0_relu = pd.layer.relu(G_h0_bn) - + G_h1 = pd.layer.deconv(G_h0_relu, self.G_w1, self.G_b1) G_h1_bn = pd.layer.batch_norm(G_h1) G_h1_relu = pd.layer.relu(G_h1_bn) - + G_h2 = pd.layer.deconv(G_h1_relu, self.G_W2, self.G_b2)) G_im = pd.layer.tanh(G_im) return G_im @@ -111,11 +111,11 @@ class DCGAN(object): D_h0 = pd.layer.conv2d(image, w=self.D_w0, b=self.D_b0) D_h0_bn = pd.layer.batchnorm(h0) D_h0_relu = pd.layer.lrelu(h0_bn) - + D_h1 = pd.layer.conv2d(D_h0_relu, w=self.D_w1, b=self.D_b1) D_h1_bn = pd.layer.batchnorm(D_h1) D_h1_relu = pd.layer.lrelu(D_h1_bn) - + D_h2 = pd.layer.fc(D_h1_relu, w=self.D_w2, b=self.D_b2) return D_h2 ``` @@ -123,7 +123,7 @@ class DCGAN(object): ### Class member function: Build the model - Define data readers as placeholders to hold the data; - Build generator and discriminators; -- Define two training losses for discriminator and generator, respectively. +- Define two training losses for discriminator and generator, respectively. 
If we have execution dependency engine to back-trace all tensors, the module building our GAN model will be like this: ```python class DCGAN(object): @@ -133,7 +133,7 @@ class DCGAN(object): self.images = pd.data(pd.float32, [self.batch_size, self.im_size, self.im_size]) self.faked_images = pd.data(pd.float32, [self.batch_size, self.im_size, self.im_size]) self.z = pd.data(tf.float32, [None, self.z_size]) - + # step 1: generate images by generator, classify real/fake images with discriminator if self.y_dim: # if conditional GAN, includes label self.G = self.generator(self.z, self.y) @@ -147,12 +147,12 @@ class DCGAN(object): # generate fake images self.sampled = self.sampler(self.z) self.D_f = self.discriminator(self.images) - + # step 2: define the two losses self.d_loss_real = pd.reduce_mean(pd.cross_entropy(self.D_t, np.ones(self.batch_size)) self.d_loss_fake = pd.reduce_mean(pd.cross_entropy(self.D_f, np.zeros(self.batch_size)) self.d_loss = self.d_loss_real + self.d_loss_fake - + self.g_loss = pd.reduce_mean(pd.cross_entropy(self.D_f, np.ones(self.batch_szie)) ``` @@ -176,7 +176,7 @@ class DCGAN(object): self.G = self.generator(self.z) self.D_g = self.discriminator(self.G, self.y) self.g_loss = pd.reduce_mean(pd.cross_entropy(self.D_g, np.ones(self.batch_szie)) - + with pd.default_block().d_block(): if self.y_dim: # if conditional GAN, includes label self.D_t = self.discriminator(self.images, self.y) @@ -217,7 +217,7 @@ if __name__ == "__main__": # load mnist data data_X, data_y = self.load_mnist() - + # Two subgraphs required!!! with pd.block().d_block(): d_optim = pd.train.Adam(lr = .001, beta= .1) @@ -228,7 +228,7 @@ if __name__ == "__main__": # executor sess = pd.executor() - + # training for epoch in xrange(10000): for batch_id in range(N / batch_size): @@ -239,7 +239,7 @@ if __name__ == "__main__": batch_z = np.random.uniform(-1., 1., [batch_size, z_dim]) if batch_id % 2 == 0: - sess.run(d_step, + sess.run(d_step, feed_dict = {dcgan.images: batch_im, dcgan.y: batch_label, dcgan.z: batch_z}) diff --git a/doc/fluid/dev/releasing_process.md b/doc/fluid/dev/releasing_process.md index 0810765b85f73d9dba876e66fb43bb1ad476d6d2..c5943ccd81c2ae2aaacd2676da12509db889f54a 100644 --- a/doc/fluid/dev/releasing_process.md +++ b/doc/fluid/dev/releasing_process.md @@ -37,7 +37,7 @@ PaddlePaddle每次发新的版本,遵循以下流程: 可以在此页面的"Artifacts"下拉框中找到生成的3个二进制文件,分别对应CAPI,`cp27m`和`cp27mu`的版本。然后按照上述的方法 使用`twine`工具上传即可。 - + * 注:CI环境使用 https://github.com/PaddlePaddle/buildtools 这里的DockerImage作为编译环境以支持更多的Linux 发型版,如果需要手动编译,也可以使用这些镜像。这些镜像也可以从 https://hub.docker.com/r/paddlepaddle/paddle_manylinux_devel/tags/ 下载得到。 diff --git a/doc/fluid/howto/performance/profiler.md b/doc/fluid/howto/performance/profiler.md index b20b5efdc1f1f10ce7cec835adcc6fb374ed4e20..ee96e7c74ce317caddb387cbb1d4998937bd5c81 100644 --- a/doc/fluid/howto/performance/profiler.md +++ b/doc/fluid/howto/performance/profiler.md @@ -23,7 +23,7 @@ But how to record the time for the mixed C++ and CUDA program? There many C++ A The overall flow is shown as the following figure. -
+
### Event @@ -36,10 +36,10 @@ enum EventKind { kPopRange}; ``` - kMark: only a marker without time range. -- kPushRange: mark the starting event for time range. +- kPushRange: mark the starting event for time range. - kPopRange: mark the ending event for time range. -For the CPU code, the events only need to record the current time. For the CUDA code, the [event management functions of CUDA](http://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__EVENT.html#group__CUDART__EVENT) are used. For many pieces of code, an event lists are used to record each piece. +For the CPU code, the events only need to record the current time. For the CUDA code, the [event management functions of CUDA](http://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__EVENT.html#group__CUDART__EVENT) are used. For many pieces of code, an event lists are used to record each piece. ```c++ class Event { @@ -66,11 +66,11 @@ struct EventList { }; ``` -As mentioned above, there is no need to record the timeline when disabling the profiler. So there is a global state to enable or disable the profiler. +As mentioned above, there is no need to record the timeline when disabling the profiler. So there is a global state to enable or disable the profiler. ```c++ enum ProfilerState { - kDisabled, + kDisabled, kCPU, kCUDA }; diff --git a/doc/fluid/images/2_level_rnn.dot b/doc/fluid/images/2_level_rnn.dot new file mode 100644 index 0000000000000000000000000000000000000000..5d77865061ca7bbbfcf254dd938f09aef5553505 --- /dev/null +++ b/doc/fluid/images/2_level_rnn.dot @@ -0,0 +1,56 @@ +digraph G { + + rnn [label="1st level RNN" shape=box] + + subgraph cluster0 { + label = "time step 0" + + sent0 [label="sentence"] + sent1 [label="sentence"] + + rnn1 [label="2nd level RNN" shape=box] + + sent0 -> rnn1 + sent1 -> rnn1 + } + + subgraph cluster1 { + label = "time step 1" + + sent2 [label="sentence"] + sent3 [label="sentence"] + + rnn2 [label="2nd level RNN" shape=box] + + sent2 -> rnn2 + sent3 -> rnn2 + } + + subgraph cluster2 { + label = "time step 2" + + sent4 [label="sentence"] + sent5 [label="sentence"] + + rnn3 [label="2nd level RNN" shape=box] + + sent4 -> rnn3 + sent5 -> rnn3 + } + + + para0 [label="paragraph info 0"] + para1 [label="paragraph info 1"] + para2 [label="paragraph info 2"] + + rnn1 -> para0 + rnn2 -> para1 + rnn3 -> para2 + + para0 -> rnn + para1 -> rnn + para2 -> rnn + + chapter [label="chapter info"] + rnn -> chapter +} diff --git a/doc/fluid/images/2_level_rnn.png b/doc/fluid/images/2_level_rnn.png new file mode 100644 index 0000000000000000000000000000000000000000..0537a75beb175c0c284717421f7aa908da2a5038 Binary files /dev/null and b/doc/fluid/images/2_level_rnn.png differ diff --git a/doc/fluid/images/LOD-and-shape-changes-during-decoding.jpg b/doc/fluid/images/LOD-and-shape-changes-during-decoding.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b0d90f7b9d8184b314b0ee4e521f53eb5f1b455 Binary files /dev/null and b/doc/fluid/images/LOD-and-shape-changes-during-decoding.jpg differ diff --git a/doc/fluid/images/asgd.gif b/doc/fluid/images/asgd.gif new file mode 100644 index 0000000000000000000000000000000000000000..4a0da7bf6df9326a2aab1638b77c5455c18b8c4e Binary files /dev/null and b/doc/fluid/images/asgd.gif differ diff --git a/doc/fluid/images/batch_norm_fork.dot b/doc/fluid/images/batch_norm_fork.dot new file mode 100644 index 0000000000000000000000000000000000000000..4bc47713cba2cb23f1b34fffe6426ef10ac3a9df --- /dev/null +++ b/doc/fluid/images/batch_norm_fork.dot @@ -0,0 +1,25 @@ 
+digraph ImageBatchNormForkGragh { + subgraph cluster_before { + Prev [label="...", shape=plaintext]; + Rnn [label="rnn_op", shape=box]; + BatchNorm [label="batch_norm_op", shape=box]; + Fc [label="fc_op", shape=box]; + After [label="...", shape=plaintext]; + Prev -> Rnn -> BatchNorm -> Fc -> After; + label="original"; + } + + subgraph cluster_after { + Prev2 [label="...", shape=plaintext]; + Rnn2 [label="rnn_op", shape=box]; + BatchNorm2_1 [label="train_batch_norm_op", shape=box]; + BatchNorm2_2 [label="infer_batch_norm_op", shape=box]; + Fc2_1 [label="fc_op", shape=box]; + Fc2_2 [label="fc_op", shape=box]; + After2_1 [label="...", shape=plaintext]; + After2_2 [label="...", shape=plaintext]; + Prev2 -> Rnn2 -> BatchNorm2_1 -> Fc2_1 -> After2_1; + Rnn2 -> BatchNorm2_2 ->Fc2_2 ->After2_2 + label="forked"; + } +} diff --git a/doc/fluid/images/batch_norm_fork.png b/doc/fluid/images/batch_norm_fork.png new file mode 100644 index 0000000000000000000000000000000000000000..aded62bce5bc268b7a3ef4dc96c89fe21d6ea955 Binary files /dev/null and b/doc/fluid/images/batch_norm_fork.png differ diff --git a/doc/fluid/images/batch_norm_op_kernel.png b/doc/fluid/images/batch_norm_op_kernel.png new file mode 100644 index 0000000000000000000000000000000000000000..a99ce81ff3bf42880ebbd6a1297de3bf038e09b2 Binary files /dev/null and b/doc/fluid/images/batch_norm_op_kernel.png differ diff --git a/doc/fluid/images/beam_search.png b/doc/fluid/images/beam_search.png new file mode 100644 index 0000000000000000000000000000000000000000..7f7e35f34223162d0f7f0ed97375909c43b830ae Binary files /dev/null and b/doc/fluid/images/beam_search.png differ diff --git a/doc/fluid/images/ci_build_whl.png b/doc/fluid/images/ci_build_whl.png new file mode 100644 index 0000000000000000000000000000000000000000..232762b82a9ae3e979a1f38a7beb715c87438f40 Binary files /dev/null and b/doc/fluid/images/ci_build_whl.png differ diff --git a/doc/fluid/images/compiler.graffle b/doc/fluid/images/compiler.graffle new file mode 100644 index 0000000000000000000000000000000000000000..8cc678fea3c820103e7ce81f7a5d625d6c1d92de Binary files /dev/null and b/doc/fluid/images/compiler.graffle differ diff --git a/doc/fluid/images/compiler.png b/doc/fluid/images/compiler.png new file mode 100644 index 0000000000000000000000000000000000000000..65d34f841afce9756def07dd8ecb9ca44e658bfe Binary files /dev/null and b/doc/fluid/images/compiler.png differ diff --git a/doc/fluid/images/control_flow_graph.png b/doc/fluid/images/control_flow_graph.png new file mode 100644 index 0000000000000000000000000000000000000000..3579998e58d07abc50bd3332128d4733a391cb3b Binary files /dev/null and b/doc/fluid/images/control_flow_graph.png differ diff --git a/doc/fluid/images/dataflow_equations.png b/doc/fluid/images/dataflow_equations.png new file mode 100644 index 0000000000000000000000000000000000000000..c10f7f69f4007952e5b0394edaa04efa1cfbb658 Binary files /dev/null and b/doc/fluid/images/dataflow_equations.png differ diff --git a/doc/fluid/images/dcgan.png b/doc/fluid/images/dcgan.png new file mode 100644 index 0000000000000000000000000000000000000000..15e8e290a111ff43900934341365cb4360d87d28 Binary files /dev/null and b/doc/fluid/images/dcgan.png differ diff --git a/doc/fluid/images/deep_learning.png b/doc/fluid/images/deep_learning.png new file mode 100644 index 0000000000000000000000000000000000000000..026becc4d94e01e407dacb2a5314a0e5723334ff Binary files /dev/null and b/doc/fluid/images/deep_learning.png differ diff --git a/doc/fluid/images/dist-graph.graffle 
b/doc/fluid/images/dist-graph.graffle new file mode 100644 index 0000000000000000000000000000000000000000..941399c6ced8d5f65b6c595522b770c88259df4b Binary files /dev/null and b/doc/fluid/images/dist-graph.graffle differ diff --git a/doc/fluid/images/dist-graph.png b/doc/fluid/images/dist-graph.png new file mode 100644 index 0000000000000000000000000000000000000000..3546b09f1c2ee3e4f60f519d5e47f823f08051a7 Binary files /dev/null and b/doc/fluid/images/dist-graph.png differ diff --git a/doc/fluid/images/distributed_architecture.graffle b/doc/fluid/images/distributed_architecture.graffle new file mode 100644 index 0000000000000000000000000000000000000000..d1b60141342232e06227c2d430ebc60ec349a907 Binary files /dev/null and b/doc/fluid/images/distributed_architecture.graffle differ diff --git a/doc/fluid/images/distributed_architecture.png b/doc/fluid/images/distributed_architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..29c7b0c0783f97c6d33b1db1ed484d6a2b9dd356 Binary files /dev/null and b/doc/fluid/images/distributed_architecture.png differ diff --git a/doc/fluid/images/ds2_network.png b/doc/fluid/images/ds2_network.png new file mode 100644 index 0000000000000000000000000000000000000000..1a5b2184d47928cc2849d5a7c8ea2d8cf5337e11 Binary files /dev/null and b/doc/fluid/images/ds2_network.png differ diff --git a/doc/fluid/images/feed_forward.png b/doc/fluid/images/feed_forward.png new file mode 100644 index 0000000000000000000000000000000000000000..d312371a04c26aa6cd196e0bd1f51becb425180b Binary files /dev/null and b/doc/fluid/images/feed_forward.png differ diff --git a/doc/fluid/images/feed_forward_regularized.png b/doc/fluid/images/feed_forward_regularized.png new file mode 100644 index 0000000000000000000000000000000000000000..677e99bfd9f8e72ed9fe4b27127af2ced202f447 Binary files /dev/null and b/doc/fluid/images/feed_forward_regularized.png differ diff --git a/doc/fluid/images/fluid-compiler.graffle b/doc/fluid/images/fluid-compiler.graffle new file mode 100644 index 0000000000000000000000000000000000000000..c933df2cb855462c52b2d25f7f9a99b95652961d Binary files /dev/null and b/doc/fluid/images/fluid-compiler.graffle differ diff --git a/doc/fluid/images/fluid-compiler.png b/doc/fluid/images/fluid-compiler.png new file mode 100644 index 0000000000000000000000000000000000000000..1b0ffed2039c91a3a00bbb719da08c91c3acf7bb Binary files /dev/null and b/doc/fluid/images/fluid-compiler.png differ diff --git a/doc/fluid/images/graph_construction_example.bash b/doc/fluid/images/graph_construction_example.bash new file mode 100755 index 0000000000000000000000000000000000000000..35e6997abd17588e17a82d448918fc1b3bd7220e --- /dev/null +++ b/doc/fluid/images/graph_construction_example.bash @@ -0,0 +1,11 @@ +cat ./graph_construction_example.dot | \ + sed 's/color=red/color=red, style=invis/g' | \ + sed 's/color=green/color=green, style=invis/g' | \ + dot -Tpng > graph_construction_example_forward_only.png + +cat ./graph_construction_example.dot | \ + sed 's/color=green/color=green, style=invis/g' | \ + dot -Tpng > graph_construction_example_forward_backward.png + +cat ./graph_construction_example.dot | \ + dot -Tpng > graph_construction_example_all.png diff --git a/doc/fluid/images/graph_construction_example.dot b/doc/fluid/images/graph_construction_example.dot new file mode 100644 index 0000000000000000000000000000000000000000..e115f9844bae6ad24f638c8ed4749cea8aff06a9 --- /dev/null +++ b/doc/fluid/images/graph_construction_example.dot @@ -0,0 +1,68 @@ +digraph 
ImageClassificationGraph { + ///////// The forward part ///////// + FeedX [label="Feed", color=blue, shape=box]; + FeedY [label="Feed", color=blue, shape=box]; + InitW [label="Init", color=blue, shape=diamond]; + Initb [label="Init", color=blue, shape=diamond]; + FC [label="FC", color=blue, shape=box]; + MSE [label="MSE", color=blue, shape=box]; + + x [label="x", color=blue, shape=oval]; + l [label="l", color=blue, shape=oval]; + y [label="y", color=blue, shape=oval]; + W [label="W", color=blue, shape=doublecircle]; + b [label="b", color=blue, shape=doublecircle]; + cost [label="cost", color=blue, shape=oval]; + + FeedX -> x -> FC -> y -> MSE -> cost [color=blue]; + FeedY -> l [color=blue]; + InitW -> W [color=blue]; + Initb -> b [color=blue]; + W -> FC [color=blue]; + b -> FC [color=blue]; + l -> MSE [color=blue]; + + ////////// The backward part ///////// + MSE_Grad [label="MSE_grad", color=red, shape=box]; + FC_Grad [label="FC_grad", color=red, shape=box]; + + d_cost [label="d cost", color=red, shape=oval]; + d_y [label="d y", color=red, shape=oval]; + d_b [label="d b", color=red, shape=oval]; + d_W [label="d W", color=red, shape=oval]; + + cost -> MSE_Grad [color=red]; + d_cost -> MSE_Grad [color=red]; + l -> MSE_Grad [color=red]; + y -> MSE_Grad -> d_y [color=red]; + + x -> FC_Grad [color=red]; + y -> FC_Grad [color=red]; + d_y -> FC_Grad [color=red]; + W -> FC_Grad -> d_W [color=red]; + b -> FC_Grad -> d_b [color=red]; + + ////////// The optimizaiton part ////////// + + OPT_W [label="SGD", color=green, shape=box]; + OPT_b [label="SGD", color=green, shape=box]; + + W -> OPT_W [color=green]; + b -> OPT_b [color=green]; + d_W -> OPT_W -> W [color=green]; + d_b -> OPT_b -> b [color=green]; + + ////////// Groupings ////////// + + subgraph clusterMSE { + style=invis; + MSE; + MSE_Grad; + } + + subgraph clusterFC { + style=invis; + FC; + FC_Grad; + } +} diff --git a/doc/fluid/images/graph_construction_example_all.png b/doc/fluid/images/graph_construction_example_all.png new file mode 100644 index 0000000000000000000000000000000000000000..261611a5721f9aa97874f7e6d897fe48cf667db2 Binary files /dev/null and b/doc/fluid/images/graph_construction_example_all.png differ diff --git a/doc/fluid/images/graph_construction_example_forward_backward.png b/doc/fluid/images/graph_construction_example_forward_backward.png new file mode 100644 index 0000000000000000000000000000000000000000..4c69687f4a6a181138f3df72ce5e8aa48487b5be Binary files /dev/null and b/doc/fluid/images/graph_construction_example_forward_backward.png differ diff --git a/doc/fluid/images/graph_construction_example_forward_only.png b/doc/fluid/images/graph_construction_example_forward_only.png new file mode 100644 index 0000000000000000000000000000000000000000..e668c16e0cac73acb4e5dc2b1827557ae77126b4 Binary files /dev/null and b/doc/fluid/images/graph_construction_example_forward_only.png differ diff --git a/doc/fluid/images/l1_regularization.png b/doc/fluid/images/l1_regularization.png new file mode 100644 index 0000000000000000000000000000000000000000..e1b9c7a44f94dc027598a98da93ddb8133190972 Binary files /dev/null and b/doc/fluid/images/l1_regularization.png differ diff --git a/doc/fluid/images/l2_regularization.png b/doc/fluid/images/l2_regularization.png new file mode 100644 index 0000000000000000000000000000000000000000..d5c2fcbc2ccae75ad083162e5a2dceb0210be298 Binary files /dev/null and b/doc/fluid/images/l2_regularization.png differ diff --git a/doc/fluid/images/local-graph.graffle b/doc/fluid/images/local-graph.graffle new 
file mode 100644 index 0000000000000000000000000000000000000000..19e509bd9af3c1e9a3f5e0f16ddd281457a339c5 Binary files /dev/null and b/doc/fluid/images/local-graph.graffle differ diff --git a/doc/fluid/images/local-graph.png b/doc/fluid/images/local-graph.png new file mode 100644 index 0000000000000000000000000000000000000000..ada51200f793a9bb18911e7d63cfdb3244b967d7 Binary files /dev/null and b/doc/fluid/images/local-graph.png differ diff --git a/doc/fluid/images/local_architecture.graffle b/doc/fluid/images/local_architecture.graffle new file mode 100644 index 0000000000000000000000000000000000000000..49fcc663ebe3824aa234e3a67aadf285cb417877 Binary files /dev/null and b/doc/fluid/images/local_architecture.graffle differ diff --git a/doc/fluid/images/local_architecture.png b/doc/fluid/images/local_architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..14adc9fd72b855bb9f74fbf2c84ac9ec0cf2b122 Binary files /dev/null and b/doc/fluid/images/local_architecture.png differ diff --git a/doc/fluid/images/lookup_table.png b/doc/fluid/images/lookup_table.png new file mode 100644 index 0000000000000000000000000000000000000000..72dfe3547f731d0d090338afb206b0549dff472e Binary files /dev/null and b/doc/fluid/images/lookup_table.png differ diff --git a/doc/fluid/images/lookup_table_training.png b/doc/fluid/images/lookup_table_training.png new file mode 100644 index 0000000000000000000000000000000000000000..cc7cc4aeb3b885850fe2f70f19fb84d5873bed1e Binary files /dev/null and b/doc/fluid/images/lookup_table_training.png differ diff --git a/doc/fluid/images/loss_equation.png b/doc/fluid/images/loss_equation.png new file mode 100644 index 0000000000000000000000000000000000000000..14212ec8d36c803de96bde8a9a4b5591bd20434e Binary files /dev/null and b/doc/fluid/images/loss_equation.png differ diff --git a/doc/fluid/images/multi-threads.graffle b/doc/fluid/images/multi-threads.graffle new file mode 100644 index 0000000000000000000000000000000000000000..e71173715fff92a0a933d0c7d83599ba948552c6 Binary files /dev/null and b/doc/fluid/images/multi-threads.graffle differ diff --git a/doc/fluid/images/multi-threads@3x.png b/doc/fluid/images/multi-threads@3x.png new file mode 100644 index 0000000000000000000000000000000000000000..e40a869987dbbf5019d4cb03c1dab55b74d6c9f9 Binary files /dev/null and b/doc/fluid/images/multi-threads@3x.png differ diff --git a/doc/fluid/images/multigpu_allreduce.graffle b/doc/fluid/images/multigpu_allreduce.graffle new file mode 100644 index 0000000000000000000000000000000000000000..cb5bc420ceafe8ba4c87694d44ee4e5e4ad06779 Binary files /dev/null and b/doc/fluid/images/multigpu_allreduce.graffle differ diff --git a/doc/fluid/images/multigpu_allreduce.png b/doc/fluid/images/multigpu_allreduce.png new file mode 100644 index 0000000000000000000000000000000000000000..87a1b3e8f6dd4a713ec9df9f0037d1da04e9178a Binary files /dev/null and b/doc/fluid/images/multigpu_allreduce.png differ diff --git a/doc/fluid/images/multigpu_before_convert.graffle b/doc/fluid/images/multigpu_before_convert.graffle new file mode 100644 index 0000000000000000000000000000000000000000..6c35ab1b21fb76ceae82d3693ed0d085b5bc0855 Binary files /dev/null and b/doc/fluid/images/multigpu_before_convert.graffle differ diff --git a/doc/fluid/images/multigpu_before_convert.png b/doc/fluid/images/multigpu_before_convert.png new file mode 100644 index 0000000000000000000000000000000000000000..9c8f7711165d80a2fa3911280fdee91855a401b1 Binary files /dev/null and 
b/doc/fluid/images/multigpu_before_convert.png differ diff --git a/doc/fluid/images/multiple_reader.png b/doc/fluid/images/multiple_reader.png new file mode 100644 index 0000000000000000000000000000000000000000..b22126b31db4982c13fc3a0827805e6aaf955046 Binary files /dev/null and b/doc/fluid/images/multiple_reader.png differ diff --git a/doc/fluid/images/paddle-compile.graffle b/doc/fluid/images/paddle-compile.graffle new file mode 100644 index 0000000000000000000000000000000000000000..a6348cc3dbcaca923c6e794681b2edb85cb9f8f6 Binary files /dev/null and b/doc/fluid/images/paddle-compile.graffle differ diff --git a/doc/fluid/images/paddle-compile.png b/doc/fluid/images/paddle-compile.png new file mode 100644 index 0000000000000000000000000000000000000000..e0f13d551ac41afaec627a57dea79356464bf0bf Binary files /dev/null and b/doc/fluid/images/paddle-compile.png differ diff --git a/doc/fluid/images/pprof_1.png b/doc/fluid/images/pprof_1.png new file mode 100644 index 0000000000000000000000000000000000000000..8e9edbf377672d0ef40f2fc7bd39e746923550cb Binary files /dev/null and b/doc/fluid/images/pprof_1.png differ diff --git a/doc/fluid/images/pprof_2.png b/doc/fluid/images/pprof_2.png new file mode 100644 index 0000000000000000000000000000000000000000..172ba20399ba974d27f4c072425277b69b02520b Binary files /dev/null and b/doc/fluid/images/pprof_2.png differ diff --git a/doc/fluid/images/profiler.png b/doc/fluid/images/profiler.png new file mode 100644 index 0000000000000000000000000000000000000000..d57b71ca88aaba5d05584a6219d84214e285a1e1 Binary files /dev/null and b/doc/fluid/images/profiler.png differ diff --git a/doc/fluid/images/readers.png b/doc/fluid/images/readers.png new file mode 100644 index 0000000000000000000000000000000000000000..fd59168ce16c9e2a0ef45303c28c997cfd7740be Binary files /dev/null and b/doc/fluid/images/readers.png differ diff --git a/doc/fluid/images/remote_executor.graffle b/doc/fluid/images/remote_executor.graffle new file mode 100644 index 0000000000000000000000000000000000000000..41b2067311694b56d211a4f32d1b76884eeffd2d Binary files /dev/null and b/doc/fluid/images/remote_executor.graffle differ diff --git a/doc/fluid/images/remote_executor.png b/doc/fluid/images/remote_executor.png new file mode 100644 index 0000000000000000000000000000000000000000..744e2fb2e0f1bbe058e991ba7b2a09000965ee79 Binary files /dev/null and b/doc/fluid/images/remote_executor.png differ diff --git a/doc/fluid/images/rnn.dot b/doc/fluid/images/rnn.dot new file mode 100644 index 0000000000000000000000000000000000000000..c1141cd9c981bb3cbf50d8bf7a6ed210280d79a5 --- /dev/null +++ b/doc/fluid/images/rnn.dot @@ -0,0 +1,87 @@ +digraph G { + label = "simple RNN implementation" + + ranksep=2; + + //graph [nodesep=1, ranksep=1]; + + node[nodesep=1] + + subgraph cluster0 { + label = "global scope" + rankdir = TB + W + boot_memory + input + output + } + + subgraph cluster1 { + label = "step-scope 0" + rankdir = TB + memory0[label="memory"] + prememory0[label="pre-memory"] + step_input0[label="step input"] + step_output0[label="step output"] + } + + subgraph cluster2 { + label = "step-scope 1" + rankdir = TB + memory1[label="memory"] + prememory1[label="pre-memory"] + step_input1[label="step input"] + step_output1[label="step output"] + } + + subgraph cluster3 { + label = "step-scope 2" + rankdir = TB + memory2[label="memory"] + prememory2[label="pre-memory"] + step_input2[label="step input"] + step_output2[label="step output"] + } + + stepnet [shape=box] + stepnet0 [shape=box, style=dashed] + stepnet1 
[shape=box, style=dashed] + stepnet2 [shape=box, style=dashed] + + + edge[color=blue] + boot_memory -> prememory0 [label="init" color="blue"] + memory0 -> prememory1 [label="copy/reference" color="blue"] + memory1 -> prememory2 [label="copy/reference" color="blue"] + + edge[color=black] + W -> stepnet0[constraint=false, style=dashed] + W -> stepnet1[constraint=false, style=dashed] + W -> stepnet2[constraint=false, style=dashed] + + memory0 -> stepnet0[style=dashed] + prememory0 -> stepnet0 -> step_output0[style=dashed] + + memory1 -> stepnet1[style=dashed] + prememory1 -> stepnet1 -> step_output1[style=dashed] + + memory2 -> stepnet2[style=dashed] + prememory2 -> stepnet2 -> step_output2[style=dashed] + + input -> step_input0 + input -> step_input1 + input -> step_input2 + + step_input0 -> stepnet0 [style=dashed] + step_input1 -> stepnet1[style=dashed] + step_input2 -> stepnet2[style=dashed] + + step_output0 -> output + step_output1 -> output + step_output2 -> output + + stepnet0 -> stepnet[style=dashed] + stepnet1 -> stepnet[style=dashed] + stepnet2 -> stepnet[style=dashed] + +} diff --git a/doc/fluid/images/rnn.jpg b/doc/fluid/images/rnn.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9867e404cf959df0dce6ded5222b466c788fb840 Binary files /dev/null and b/doc/fluid/images/rnn.jpg differ diff --git a/doc/fluid/images/rnn.png b/doc/fluid/images/rnn.png new file mode 100644 index 0000000000000000000000000000000000000000..e139e373fe8396782044cfd936fdde624f8c66fe Binary files /dev/null and b/doc/fluid/images/rnn.png differ diff --git a/doc/fluid/images/rnn_2level_data.dot b/doc/fluid/images/rnn_2level_data.dot new file mode 100644 index 0000000000000000000000000000000000000000..1d85ae2617a915ad0ad8288d848b607cc37ad297 --- /dev/null +++ b/doc/fluid/images/rnn_2level_data.dot @@ -0,0 +1,75 @@ +digraph G { + chapter [label="chapter"] + + subgraph cluster0 { + label = "paragraph 0" + + top_rnn0[label="top rnn step 0" shape=box] + + p0 [label="paragraph 0"] + p1 [label="paragraph 1"] + } + + subgraph cluster1{ + label = "paragraph 1" + + top_rnn1[label="top rnn step 1" shape=box] + + p2 [label="paragraph 0"] + p3 [label="paragraph 1"] + } + + subgraph cluster_p0 { + label = "sentence 0" + + low_rnn0 [label="low rnn step 0" shape=box] + s00 [label="sentence 0"] + s01 [label="sentence 1"] + + low_rnn0 -> s00 + low_rnn0 -> s01 + } + + subgraph cluster_p1 { + label = "sentence 1" + low_rnn1 [label="low rnn step 1" shape=box] + s10 [label="sentence 0"] + s11 [label="sentence 1"] + low_rnn1 -> s10 + low_rnn1 -> s11 + } + + subgraph cluster_p2 { + label = "sentence 1" + low_rnn2 [label="low rnn step 0" shape=box] + s20 [label="sentence 0"] + s21 [label="sentence 1"] + low_rnn2 -> s20 + low_rnn2 -> s21 + } + + subgraph cluster_p3 { + label = "sentence 1" + low_rnn3 [label="low rnn step 1" shape=box] + s30 [label="sentence 0"] + s31 [label="sentence 1"] + low_rnn3 -> s30 + low_rnn3 -> s31 + } + + + chapter -> top_rnn0 + chapter -> top_rnn1 + + top_rnn0 -> p0 + top_rnn0 -> p1 + top_rnn1 -> p2 + top_rnn1 -> p3 + + + p0 -> low_rnn0 + p1 -> low_rnn1 + p2 -> low_rnn2 + p3 -> low_rnn3 + +} diff --git a/doc/fluid/images/rnn_2level_data.png b/doc/fluid/images/rnn_2level_data.png new file mode 100644 index 0000000000000000000000000000000000000000..4be81b2430717a6a506342a09fc26899568574c6 Binary files /dev/null and b/doc/fluid/images/rnn_2level_data.png differ diff --git a/doc/fluid/images/single-thread@3x.png b/doc/fluid/images/single-thread@3x.png new file mode 100644 index 
0000000000000000000000000000000000000000..4083aebfdd45af5fbac25fa2c4176bc08c3cb44a Binary files /dev/null and b/doc/fluid/images/single-thread@3x.png differ diff --git a/doc/fluid/images/sparse_update.graffle b/doc/fluid/images/sparse_update.graffle new file mode 100644 index 0000000000000000000000000000000000000000..08d689a58f83698d8c1158ee3990ed8abf3a7a9a Binary files /dev/null and b/doc/fluid/images/sparse_update.graffle differ diff --git a/doc/fluid/images/sparse_update.png b/doc/fluid/images/sparse_update.png new file mode 100644 index 0000000000000000000000000000000000000000..8c872e6ac479f7d1b818a4a207956c43155d0ad7 Binary files /dev/null and b/doc/fluid/images/sparse_update.png differ diff --git a/doc/fluid/images/test.dot b/doc/fluid/images/test.dot new file mode 100644 index 0000000000000000000000000000000000000000..62c69b8fc8010a26a54a6ee8ef1488aad94d747a --- /dev/null +++ b/doc/fluid/images/test.dot @@ -0,0 +1,35 @@ + +digraph Test { + z -> generator -> G_img; + G_img -> discriminator -> D_f -> d_loss_f; + label0 -> d_loss_f -> d_loss; + + img -> discriminator -> D_t -> d_loss_t; + label1 -> d_loss_t -> d_loss; + + d_loss -> d_loss_t[color=red, style=dashed]; + d_loss -> d_loss_f[color=red, style=dashed]; + d_loss_t -> D_t[color=red, style=dashed]; + d_loss_f -> D_f[color=red, style=dashed]; + D_t -> discriminator[color=red, style=dashed]; + D_f -> discriminator[color=red, style=dashed]; + + D_f -> g_loss; + label2 -> g_loss; + + g_loss -> D_f[color=green, style=dashed]; + D_f -> discriminator[color=green, style=dashed]; + discriminator -> G_img[color=green, style=dashed]; + G_img -> generator[color=green, style=dashed]; + + discriminator [color=red, shape=box]; + generator [color=green, shape=box]; + z [shape=diamond]; + img [shape=diamond]; + label0 [shape=diamond]; + label1 [shape=diamond]; + label2 [shape=diamond]; + + d_loss [color=red]; + g_loss [color=green]; +} diff --git a/doc/fluid/images/test.dot.png b/doc/fluid/images/test.dot.png new file mode 100644 index 0000000000000000000000000000000000000000..4e121a40b9f7b2232d7cdda315bad15926446f55 Binary files /dev/null and b/doc/fluid/images/test.dot.png differ diff --git a/doc/fluid/images/theta_star.gif b/doc/fluid/images/theta_star.gif new file mode 100644 index 0000000000000000000000000000000000000000..dd24d33e124396be3fc410c9b12f33148f64efe2 Binary files /dev/null and b/doc/fluid/images/theta_star.gif differ diff --git a/doc/fluid/images/timeline.jpeg b/doc/fluid/images/timeline.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..38ec3f80c982857531f30a8bb0fa26ea5bf05385 Binary files /dev/null and b/doc/fluid/images/timeline.jpeg differ diff --git a/doc/fluid/images/tracing.jpeg b/doc/fluid/images/tracing.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..3a49fc4f8a401a9463b0157e2f38c164ca02dcc5 Binary files /dev/null and b/doc/fluid/images/tracing.jpeg differ diff --git a/doc/templates/conf.py.cn.in b/doc/templates/conf.py.cn.in index 260b6c9fd1b364433cae098bacea77aa7fe9e266..76b82fd97f1ed642696c4414676b694ebda9ad81 100644 --- a/doc/templates/conf.py.cn.in +++ b/doc/templates/conf.py.cn.in @@ -13,7 +13,7 @@ # serve to show the default. 
import sys import os, subprocess -sys.path.insert(0, os.path.abspath('@PADDLE_SOURCE_DIR@/python')) +sys.path.insert(0, os.path.abspath('@PADDLE_BINARY_DIR@/python')) import shlex from recommonmark import parser, transform import paddle diff --git a/doc/templates/conf.py.en.in b/doc/templates/conf.py.en.in index e5757b86b43001bc6090d8edd0aaa5ff4fc476ee..5aa5c1381fa3fad4ebc181c7868da03ae0138016 100644 --- a/doc/templates/conf.py.en.in +++ b/doc/templates/conf.py.en.in @@ -13,7 +13,7 @@ # serve to show the default. import sys import os, subprocess -sys.path.insert(0, os.path.abspath('@PADDLE_SOURCE_DIR@/python')) +sys.path.insert(0, os.path.abspath('@PADDLE_BINARY_DIR@/python')) import shlex from recommonmark import parser, transform import paddle diff --git a/doc/v2/CMakeLists.txt b/doc/v2/CMakeLists.txt index 82de7a3a3e1ca7724e1eda877d53454a4fa4129a..be957d37b14c618e9346251b3bd3dbaf1541773f 100644 --- a/doc/v2/CMakeLists.txt +++ b/doc/v2/CMakeLists.txt @@ -27,7 +27,7 @@ sphinx_add_target(paddle_v2_docs ${CMAKE_CURRENT_SOURCE_DIR} ${SPHINX_HTML_DIR_EN}) -add_dependencies(paddle_v2_docs gen_proto_py) +add_dependencies(paddle_v2_docs gen_proto_py paddle_python) # configured documentation tools and intermediate build results set(BINARY_BUILD_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/_build") @@ -50,6 +50,6 @@ sphinx_add_target(paddle_v2_docs_cn ${CMAKE_CURRENT_SOURCE_DIR} ${SPHINX_HTML_DIR_CN}) -add_dependencies(paddle_v2_docs_cn gen_proto_py) +add_dependencies(paddle_v2_docs_cn gen_proto_py paddle_python) add_subdirectory(api) diff --git a/doc/v2/api/CMakeLists.txt b/doc/v2/api/CMakeLists.txt index da1eafc02ed8cd155d4f0f1fbadcb7b237b6fcc1..2670a21a227546ffcee4f10f395feef3c58df9b4 100644 --- a/doc/v2/api/CMakeLists.txt +++ b/doc/v2/api/CMakeLists.txt @@ -19,4 +19,4 @@ sphinx_add_target(paddle_v2_apis ${CMAKE_CURRENT_SOURCE_DIR} ${SPHINX_HTML_DIR_EN}) -add_dependencies(paddle_v2_apis gen_proto_py framework_py_proto copy_paddle_pybind) +add_dependencies(paddle_v2_apis gen_proto_py framework_py_proto copy_paddle_pybind paddle_python) diff --git a/doc/v2/howto/rnn/recurrent_group_en.md b/doc/v2/howto/rnn/recurrent_group_en.md index d264b0a9f85faffd49c1982117cb5a3ac6ffc015..de6b60f29eb97029a54609cd2194bb7faf3ffec5 100644 --- a/doc/v2/howto/rnn/recurrent_group_en.md +++ b/doc/v2/howto/rnn/recurrent_group_en.md @@ -1,3 +1,96 @@ # Recurrent Group Tutorial -TBD +## Overview + +Sequential data is common in natural language processing. + +A sentence is a sequence of words and many sentences form a paragraph further. Therefore, a paragraph can be viewed as a nested sequence with two level, where each element of the sequence is another sequence. That is to say, sequential data could be recursive. An example of two-level recursive sequential data is that an article is composed of a sequence of sentences, and each sentence a sequence of words. + +PaddlePaddle and PaddlePaddle v2 support two-level recursive sequential data. The two-level sequence is a very flexible data, which helps us to better describe more complex language data such as discribing paragraphs and several rounds of dialogues. Based on two-level sequence input, we can design and build a flexible, hierarchical RNN model that encodes input data from the word and sentence level. For the support of arbitrary levels, please refer to PaddlePaddle Fluid. + +In PaddlePaddle, `recurrent_group` is an arbitrarily complex RNN unit. The user only needs to define the calculation that the RNN will complete in one time step. 
PaddlePaddle is responsible for the propagation of information and error in time series. + +Furthermore, `recurrent_group` can also be extended to handle two-level sequence. By defining two nested `recurrent_group` operations at the clause level and the word level respectively, a hierarchical and complex RNN is finally achieved. + +Currently, in the PaddlePaddle, there are `recurrent_group` and some Layers that can process bidirectional sequences. For details, refer to the document: Layers for supporting double-layer sequences as input. + +## Related Concepts + +### Basic Principle +`recurrent_group` is an arbitrarily complex RNN unit supported by PaddlePaddle. The user only needs to focus on the calculations that the RNN is designed to complete within a single time step. The PaddlePaddle is responsible for completing the propagation of information and gradients over time. + +In PaddlePaddle, a simple call to `recurrent_group` is as follows: + +``` python +recurrent_group(step, input, reverse) +``` +- step: A callable function that defines the calculations completed by the RNN unit within a time step +- input: The input must be a single-layer sequence or a double-layer sequence +- reverse: Whether to process the input sequence in reverse order + +The core of using `recurrent_group` is to design the logic of the step function. The step function can be freely combined with various layers supported by PaddlePaddle to complete arbitrary arithmetic logic. The input of `recurrent_group` (input) becomes the input of the step function. Since the step function only focuses on the calculation within one time step of RNN, here `recurrent_group` completes the splitting of the original input data for us. + +### Input +The input sequence processed by `recurrent_group` is mainly divided into the following three types: + +- **Input Data**: When putting a two-level sequence into `recurrent_group`, it will be disassembled into a single-level sequence. When putting a single-level sequence into `recurrent_group`, it will be disassembled into a non-sequence and then passed to the step function. This process is completely transparent to the user. There are two possible types: 1) User input via data_layer; 2) Output from other layers. + +- **Read-only Memory Input**: `StaticInput` defines a read-only Memory. The input specified by `StaticInput` will not be disassembled by `recurrent_group`, and each time step of the `recurrent_group` loop will always be able to reference all inputs. It may be a non-sequence or a single-layer sequence. + +- **Input of Sequence Generation Task**: `GeneratedInput` is only used to specify input data in a sequence generation task. + +### Input Example + +Sequence generation tasks mostly follow the encoder-decoer architecture. The encoder and decoder can be arbitrary neural network units capable of processing sequences and RNN is the most popular choice. + +Given the encoder output and the current word, the decoder predicts the next most likely word each time. In this structure, the decoder accepts two inputs: + +- Target sequence to be generated: a input of the decoder and the basis of the decoder loop. `recurrent_group` will disassemble this input type. + +- Encoder output, an non-sequencce or single-sequence: a unbounded memory. Each time step in the decoder loop will reference the entire result and should not be disassembled. This type of input must be specified via `StaticInput`. 
For more discussion on Unbounded Memory, please refer to the paper [Neural Turning Machine](https://arxiv.org/abs/1410.5401). + +In a sequence generation task, the decoder RNN always refers to the word vector of the word predicted at the previous moment as the current time input. `GeneratedInput` will automate this process. + +### Output +The `step` function must return the output of one or more Layers. The output of this Layer will be the final output of the entire `recurrent_group`. In the output process, `recurrent_group` will concatenate the output of each time step, which is also transparent to the user. + +### Memory +Memory can only be defined and used in `recurrent_group`. Memory cannot exist independently and must point to a layer defined by PaddlePaddle. Memory is referenced to get a momentary output from this layer, so memory can be interpreted as a delay operation. + +The user can explicitly specify the output of a layer to initialize the memory. When not specified, memory is initialized to 0 by default. + +## Sequence-level RNN Introduction + +`recurrent_group` helps us to split the input sequence, merge the output, and loop through the sequence of computational logic. + +Using this feature, the two nested `recurrent_group` can handle the nested two-level sequences, implementing sequence-level RNN structures at both the word and sentence levels. + +- Word-level RNN: each state corresponds to a word. +- Sequence-level RNN: a sequence-layer RNN consists of multiple word-layer RNNs. Each word-layer RNN (ie, each state of a sequence-layer RNN) has a subsequence. + +For convenience of description, the following takes the NLP task as an example. A paragraph containing a subsequence is defined as a two-level sequence, and a sentence containing a word is defined as a single-layer sequence. Then, the zero-level sequence is a word. + +## Usage of Sequence-level RNN + +### Usage of Training Process +Using `recurrent_group` requires the following conventions: + +- **Single-input Single-output**: Both input and output are single layer sequences. + - If there are multiple inputs, the number of words in different input sequences must be exactly equal. + - A single-layer sequence is output, and the number of words in the output sequence is the same as the input sequence. + - memory: define memory to point to a layer in the step function, get a moment output from this layer by referencing memory to form a recurrent connection. The is_seq parameter of memory must be false. If memory is not defined, the operations within each time step are independent. + - boot_layer: the initial state of memory, set 0 by default. is_seq in memory must be false. + +- **Double-input Double-output**: Both input and output are two-level sequence. + - If there are multiple input sequences, the number of subsequence contained in different inputs must be strictly equal, but the number of words in the subsequence may not be equal. + - output a two-level sequence. The number of subsequence and the number of words are the same as the specified input sequence and the first input is default. + - memory: defining memory in the step function, pointing to a layer, by referring to the memory to get the output of this layer at a time, forming a recurrent connection. The memory defined in the outer `recurrent_group` step function can record the state of the previous subsequence, either as a single-level sequence (only as read-only memory) or as a word. If memory is not defined, the operations between subsequence are independent. 
+ - boot_layer: the initial state of memory. It is either a single-level sequence (only as read-only memory) or a vector. The default is not set, that is, the initial state is 0. + +- **Double-input Single-output**: not support for now, and output the error with "In hierachical RNN, all out links should be from sequences now". + +### Usage of Generation Process +Using `beam_search` need follow those conventions: + +- Word-level RNN: generate the next word from a word. +- Sequence-level RNN: the single-layer RNN generated subsequence is concatenated into a new double-layer sequence. Semantically, there is no case where a subsequence generates the next subseq directly. diff --git a/paddle/api/CMakeLists.txt b/paddle/api/CMakeLists.txt index cf84568ecdf1227b0d0ed3606a4a9a6e5186af72..06e1f5d5f0884efabfcdf917ca5c35d94ad5dce9 100644 --- a/paddle/api/CMakeLists.txt +++ b/paddle/api/CMakeLists.txt @@ -89,16 +89,17 @@ SWIG_LINK_LIBRARIES(swig_paddle ${START_END} ) -add_custom_command(OUTPUT ${PADDLE_SOURCE_DIR}/paddle/py_paddle/_swig_paddle.so - COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/swig_paddle.py ${PADDLE_SOURCE_DIR}/paddle/py_paddle - COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/_swig_paddle.so ${PADDLE_SOURCE_DIR}/paddle/py_paddle - COMMAND ${CMAKE_COMMAND} -E touch .timestamp +add_custom_command(OUTPUT ${PADDLE_BINARY_DIR}/python/py_paddle/_swig_paddle.so + COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/py_paddle + COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/swig_paddle.py ${PADDLE_BINARY_DIR}/python/py_paddle + COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/_swig_paddle.so ${PADDLE_BINARY_DIR}/python/py_paddle + COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_BINARY_DIR}/.timestamp WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle DEPENDS _swig_paddle ) # TODO(yuyang18) : make wheel name calculated by cmake -add_custom_target(python_api_wheel ALL DEPENDS ${PADDLE_SOURCE_DIR}/paddle/py_paddle/_swig_paddle.so) +add_custom_target(python_api_wheel ALL DEPENDS ${PADDLE_BINARY_DIR}/python/py_paddle/_swig_paddle.so) if(WITH_TESTING) IF(NOT PY_PIP_FOUND) diff --git a/paddle/api/test/CMakeLists.txt b/paddle/api/test/CMakeLists.txt index 761aeb5b174105edece8880a9f5012c13a63fd11..13cb79129cc2272d215cdb475fb146b37266699e 100644 --- a/paddle/api/test/CMakeLists.txt +++ b/paddle/api/test/CMakeLists.txt @@ -1,3 +1,8 @@ +add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/testTrain.py + COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/*.py ${CMAKE_CURRENT_BINARY_DIR} +) +add_custom_target(copy_api_test ALL DEPENDS testTrain.py) + py_test(testTrain SRCS testTrain.py) py_test(testMatrix SRCS testMatrix.py) py_test(testVector SRCS testVector.py) diff --git a/paddle/cuda/include/hl_cnn.h b/paddle/cuda/include/hl_cnn.h index 63ec51564793ca2255032d0efbe2c47326f8b698..b790fa39fe863bbb00f6cd36d4c63481b7634fe1 100644 --- a/paddle/cuda/include/hl_cnn.h +++ b/paddle/cuda/include/hl_cnn.h @@ -370,4 +370,48 @@ extern void hl_maxout_backward(real* inGrad, size_t featLen, size_t groups); +/** + * @brief Upsample forward. + * @param[in] inputData input data. + * @param[out] maskData the mask data from MaxPoolWithMaskLayer. + * @param[out] batchSize the batch size of the input. + * @param[in] imgSizeH image height. + * @param[in] imgSizeW image width. + * @param[in] channels the input channels. + * @param[in] outputH the output height. + * @param[in] outputW the output widht. + * @param[out] outputData output data. 
+ */ +extern void hl_upsample_forward(real* inputData, + real* maskData, + size_t batchSize, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW, + real* outputData); + +/** + * @brief Upsample backward. + * @param[in] outputGradData the output grad data. + * @param[out] maskData the mask data from MaxPoolWithMaskLayer. + * @param[out] batchSize the batch size of the input. + * @param[in] imgSizeH image height. + * @param[in] imgSizeW image width. + * @param[in] channels the input channels. + * @param[in] outputH the output height. + * @param[in] outputW the output widht. + * @param[out] inputGradData the input grad data. + */ +extern void hl_upsample_backward(real* outputGradData, + real* maskData, + size_t batchSize, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW, + real* inputGradData); + #endif // HL_CNN_H_ diff --git a/paddle/cuda/include/stub/hl_cnn_stub.h b/paddle/cuda/include/stub/hl_cnn_stub.h index c39bd3228d3f2ea7495cd21f5ff60bdfbbd2b51d..997eed62e07827f375c7441554b397fdd0bd6a80 100644 --- a/paddle/cuda/include/stub/hl_cnn_stub.h +++ b/paddle/cuda/include/stub/hl_cnn_stub.h @@ -224,4 +224,24 @@ inline void hl_maxout_backward(real* inGrad, size_t featLen, size_t group) {} +inline void hl_upsample_forward(real* inputData, + real* maskData, + size_t batchSize, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW, + real* outputData) {} + +inline void hl_upsample_backward(real* outputGradData, + real* maskData, + size_t batchSize, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW, + real* inputGradData) {} + #endif // HL_CNN_STUB_H_ diff --git a/paddle/cuda/src/hl_cuda_cnn.cu b/paddle/cuda/src/hl_cuda_cnn.cu index a4459243e8a7c8be58be2255faf89e29817fbdf5..bac743a293cc97b114281e510d06367a86536452 100644 --- a/paddle/cuda/src/hl_cuda_cnn.cu +++ b/paddle/cuda/src/hl_cuda_cnn.cu @@ -1028,3 +1028,79 @@ void hl_maxout_backward(real* inGrad, num_kernels, inGrad, outGrad, idData, size, featLen, groups); CHECK_SYNC("hl_maxout_backward failed"); } + +__global__ void upsampleForwardCompute(real* input_data, + real* mask_data, + size_t nthreads, + size_t in_h, + size_t in_w, + size_t out_h, + size_t out_w, + real* output_data) { + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index < nthreads) { + int offset = index / (in_w * in_h) * out_h * out_w; + int upsample_idx = static_cast(mask_data[index]); + output_data[offset + upsample_idx] = input_data[index]; + } +} + +__global__ void upsampleBackwardCompute(real* out_grad, + real* mask_data, + size_t nthreads, + size_t in_h, + size_t in_w, + size_t out_h, + size_t out_w, + real* input_grad) { + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index < nthreads) { + int offset = index / (in_w * in_h) * out_h * out_w; + int upsample_idx = static_cast(mask_data[index]); + input_grad[index] = out_grad[offset + upsample_idx]; + } +} + +void hl_upsample_forward(real* inputData, + real* maskData, + size_t batchSize, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW, + real* outputData) { + int num_kernels = batchSize * imgSizeH * imgSizeW * channels; + int blocks = (num_kernels + 1024 - 1) / 1024; + upsampleForwardCompute<<>>(inputData, + maskData, + num_kernels, + imgSizeH, + imgSizeW, + outputH, + outputW, + outputData); + CHECK_SYNC("hl_upsample_forward failed"); +} + +void hl_upsample_backward(real* outputGradData, + real* 
maskData, + size_t batchSize, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW, + real* inputGradData) { + int num_kernels = batchSize * imgSizeH * imgSizeW * channels; + int blocks = (num_kernels + 1024 - 1) / 1024; + upsampleBackwardCompute<<>>(outputGradData, + maskData, + num_kernels, + imgSizeH, + imgSizeW, + outputH, + outputW, + inputGradData); + CHECK_SYNC("hl_upsample_backward failed"); +} diff --git a/paddle/fluid/framework/.clang-format b/paddle/fluid/.clang-format similarity index 100% rename from paddle/fluid/framework/.clang-format rename to paddle/fluid/.clang-format diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt index c425c71160a8fa3830a5fbdae1baaed850710877..a473ed7400012b7d0cbc5ab9bed263b3cca8c6ec 100644 --- a/paddle/fluid/framework/CMakeLists.txt +++ b/paddle/fluid/framework/CMakeLists.txt @@ -74,8 +74,8 @@ py_proto_compile(framework_py_proto SRCS framework.proto) add_custom_target(framework_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py) add_dependencies(framework_py_proto framework_py_proto_init) add_custom_command(TARGET framework_py_proto POST_BUILD - COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SOURCE_DIR}/python/paddle/fluid/proto - COMMAND cp *.py ${PADDLE_SOURCE_DIR}/python/paddle/fluid/proto/ + COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/fluid/proto + COMMAND cp *.py ${PADDLE_BINARY_DIR}/python/paddle/fluid/proto/ COMMENT "Copy generated python proto into directory paddle/fluid/proto." WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) diff --git a/paddle/fluid/framework/block_desc.h b/paddle/fluid/framework/block_desc.h index 468423e0e8e7b8c9ebc14b7568c9c3bd21645ea7..873969b2a884f6d9e133fe87bf72725c36ce8b98 100644 --- a/paddle/fluid/framework/block_desc.h +++ b/paddle/fluid/framework/block_desc.h @@ -17,6 +17,7 @@ limitations under the License. */ #include #include #include +#include #include #include @@ -96,6 +97,8 @@ class BlockDesc { */ void RemoveOp(size_t s, size_t e); + void RemoveVar(const std::string &name) { vars_.erase(name); } + std::vector AllOps() const; size_t OpSize() const { return ops_.size(); } diff --git a/paddle/fluid/framework/channel.h b/paddle/fluid/framework/channel.h index 019bea600f496a6b58579ad0aa8af836cd6134a9..722bf8e8ecba0c9cbc5e3ad737dbf73148d2873c 100644 --- a/paddle/fluid/framework/channel.h +++ b/paddle/fluid/framework/channel.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include // for size_t -#include +#include // for size_t +#include // NOLINT #include #include "paddle/fluid/platform/enforce.h" @@ -216,7 +216,8 @@ class ChannelHolder { template struct PlaceholderImpl : public Placeholder { - PlaceholderImpl(size_t buffer_size) : type_(std::type_index(typeid(T))) { + explicit PlaceholderImpl(size_t buffer_size) + : type_(std::type_index(typeid(T))) { channel_.reset(MakeChannel(buffer_size)); } diff --git a/paddle/fluid/framework/channel_impl.h b/paddle/fluid/framework/channel_impl.h index e056779ea0dd0a31191b628f82724298efaf50ff..26d454534e1ae38c4f83376c0836a45781ea9101 100644 --- a/paddle/fluid/framework/channel_impl.h +++ b/paddle/fluid/framework/channel_impl.h @@ -15,7 +15,7 @@ limitations under the License. 
*/ #pragma once #include // for size_t #include -#include +#include // NOLINT #include #include "paddle/fluid/framework/channel.h" #include "paddle/fluid/platform/enforce.h" @@ -38,7 +38,7 @@ class ChannelImpl : public paddle::framework::Channel { virtual void Unlock(); virtual bool IsClosed(); virtual void Close(); - ChannelImpl(size_t); + explicit ChannelImpl(size_t); virtual ~ChannelImpl(); virtual void AddToSendQ(const void *referrer, T *data, @@ -60,7 +60,7 @@ class ChannelImpl : public paddle::framework::Channel { const void *referrer; // TODO(thuan): figure out better way to do this std::function callback; - QueueMessage(T *item) + explicit QueueMessage(T *item) : data(item), cond(std::make_shared()) {} QueueMessage(T *item, std::shared_ptr cond) @@ -88,15 +88,15 @@ class ChannelImpl : public paddle::framework::Channel { } std::shared_ptr get_first_message( - std::deque> &queue, ChannelAction action) { - while (!queue.empty()) { + std::deque> *queue, ChannelAction action) { + while (!queue->empty()) { // Check whether this message was added by Select // If this was added by Select then execute the callback // to check if you can execute this message. The callback // can return false if some other case was executed in Select. // In that case just discard this QueueMessage and process next. - std::shared_ptr m = queue.front(); - queue.pop_front(); + std::shared_ptr m = queue->front(); + queue->pop_front(); if (m->callback == nullptr || m->callback(action)) return m; } return nullptr; @@ -147,7 +147,7 @@ void ChannelImpl::Send(T *item) { // to send to the receiver, bypassing the channel buffer if any if (!recvq.empty()) { std::shared_ptr m = - get_first_message(recvq, ChannelAction::SEND); + get_first_message(&recvq, ChannelAction::SEND); if (m != nullptr) { *(m->data) = std::move(*item); @@ -198,7 +198,7 @@ bool ChannelImpl::Receive(T *item) { // buffer and move front of send queue to the buffer if (!sendq.empty()) { std::shared_ptr m = - get_first_message(sendq, ChannelAction::RECEIVE); + get_first_message(&sendq, ChannelAction::RECEIVE); if (buf_.size() > 0) { // Case 1 : Channel is Buffered // Do Data transfer from front of buffer @@ -219,8 +219,9 @@ bool ChannelImpl::Receive(T *item) { if (m != nullptr) { *item = std::move(*(m->data)); m->Notify(); - } else + } else { return recv_return(Receive(item)); + } } return recv_return(true); } diff --git a/paddle/fluid/framework/channel_test.cc b/paddle/fluid/framework/channel_test.cc index 1184bfdae1940286fb72d9091ae4f23ff7f84a54..542d791f6bbdf7d68a4786998ccc0233fff6473d 100644 --- a/paddle/fluid/framework/channel_test.cc +++ b/paddle/fluid/framework/channel_test.cc @@ -14,8 +14,8 @@ limitations under the License. */ #include "paddle/fluid/framework/channel.h" -#include -#include +#include // NOLINT +#include // NOLINT #include "gtest/gtest.h" using paddle::framework::Channel; @@ -166,9 +166,9 @@ TEST(Channel, ConcurrentSendNonConcurrentReceiveWithSufficientBufferSize) { std::thread t([&]() { // Try to write more than buffer size. 
for (size_t i = 0; i < 2 * buffer_size; ++i) { - if (i < buffer_size) + if (i < buffer_size) { ch->Send(&i); // should block after 10 iterations - else { + } else { bool is_exception = false; try { ch->Send(&i); @@ -212,12 +212,12 @@ TEST(Channel, RecevingOrderEqualToSendingOrderWithBufferedChannel3) { } void ChannelCloseUnblocksReceiversTest(Channel *ch) { - size_t num_threads = 5; - std::thread t[num_threads]; - bool thread_ended[num_threads]; + const size_t kNumThreads = 5; + std::thread t[kNumThreads]; + bool thread_ended[kNumThreads]; // Launches threads that try to read and are blocked because of no writers - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { thread_ended[i] = false; t[i] = std::thread( [&](bool *p) { @@ -230,7 +230,7 @@ void ChannelCloseUnblocksReceiversTest(Channel *ch) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait 0.2 sec // Verify that all the threads are blocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], false); } @@ -241,21 +241,21 @@ void ChannelCloseUnblocksReceiversTest(Channel *ch) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait 0.2 sec // Verify that all threads got unblocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], true); } - for (size_t i = 0; i < num_threads; i++) t[i].join(); + for (size_t i = 0; i < kNumThreads; i++) t[i].join(); } void ChannelCloseUnblocksSendersTest(Channel *ch, bool isBuffered) { - size_t num_threads = 5; - std::thread t[num_threads]; - bool thread_ended[num_threads]; - bool send_success[num_threads]; + const size_t kNumThreads = 5; + std::thread t[kNumThreads]; + bool thread_ended[kNumThreads]; + bool send_success[kNumThreads]; // Launches threads that try to write and are blocked because of no readers - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { thread_ended[i] = false; send_success[i] = false; t[i] = std::thread( @@ -277,13 +277,13 @@ void ChannelCloseUnblocksSendersTest(Channel *ch, bool isBuffered) { if (isBuffered) { // If ch is Buffered, atleast 4 threads must be blocked. int ct = 0; - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { if (!thread_ended[i]) ct++; } EXPECT_GE(ct, 4); } else { // If ch is UnBuffered, all the threads should be blocked. 
- for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], false); } } @@ -294,21 +294,21 @@ void ChannelCloseUnblocksSendersTest(Channel *ch, bool isBuffered) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait // Verify that all threads got unblocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], true); } if (isBuffered) { // Verify that only 1 send was successful int ct = 0; - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { if (send_success[i]) ct++; } // Only 1 send must be successful EXPECT_EQ(ct, 1); } - for (size_t i = 0; i < num_threads; i++) t[i].join(); + for (size_t i = 0; i < kNumThreads; i++) t[i].join(); } // This tests that closing a buffered channel also unblocks @@ -409,13 +409,13 @@ TEST(Channel, UnbufferedMoreReceiveLessSendTest) { // This tests that destroying a channel unblocks // any senders waiting for channel to have write space void ChannelDestroyUnblockSenders(Channel *ch, bool isBuffered) { - size_t num_threads = 5; - std::thread t[num_threads]; - bool thread_ended[num_threads]; - bool send_success[num_threads]; + const size_t kNumThreads = 5; + std::thread t[kNumThreads]; + bool thread_ended[kNumThreads]; + bool send_success[kNumThreads]; // Launches threads that try to write and are blocked because of no readers - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { thread_ended[i] = false; send_success[i] = false; t[i] = std::thread( @@ -438,14 +438,14 @@ void ChannelDestroyUnblockSenders(Channel *ch, bool isBuffered) { if (isBuffered) { // If channel is buffered, verify that atleast 4 threads are blocked int ct = 0; - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { if (thread_ended[i] == false) ct++; } // Atleast 4 threads must be blocked EXPECT_GE(ct, 4); } else { // Verify that all the threads are blocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], false); } } @@ -454,13 +454,13 @@ void ChannelDestroyUnblockSenders(Channel *ch, bool isBuffered) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait // Verify that all threads got unblocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], true); } // Count number of successful sends int ct = 0; - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { if (send_success[i]) ct++; } @@ -473,18 +473,18 @@ void ChannelDestroyUnblockSenders(Channel *ch, bool isBuffered) { } // Join all threads - for (size_t i = 0; i < num_threads; i++) t[i].join(); + for (size_t i = 0; i < kNumThreads; i++) t[i].join(); } // This tests that destroying a channel also unblocks // any receivers waiting on the channel void ChannelDestroyUnblockReceivers(Channel *ch) { - size_t num_threads = 5; - std::thread t[num_threads]; - bool thread_ended[num_threads]; + const size_t kNumThreads = 5; + std::thread t[kNumThreads]; + bool thread_ended[kNumThreads]; // Launches threads that try to read and are blocked because of no writers - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { thread_ended[i] = false; t[i] = std::thread( [&](bool *p) { @@ -498,18 +498,18 @@ void ChannelDestroyUnblockReceivers(Channel *ch) { 
std::this_thread::sleep_for(std::chrono::milliseconds(100)); // wait // Verify that all threads are blocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], false); } // delete the channel delete ch; std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait // Verify that all threads got unblocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], true); } - for (size_t i = 0; i < num_threads; i++) t[i].join(); + for (size_t i = 0; i < kNumThreads; i++) t[i].join(); } TEST(Channel, BufferedChannelDestroyUnblocksReceiversTest) { @@ -679,12 +679,12 @@ TEST(ChannelHolder, TypeMismatchReceiveTest) { } void ChannelHolderCloseUnblocksReceiversTest(ChannelHolder *ch) { - size_t num_threads = 5; - std::thread t[num_threads]; - bool thread_ended[num_threads]; + const size_t kNumThreads = 5; + std::thread t[kNumThreads]; + bool thread_ended[kNumThreads]; // Launches threads that try to read and are blocked because of no writers - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { thread_ended[i] = false; t[i] = std::thread( [&](bool *p) { @@ -697,7 +697,7 @@ void ChannelHolderCloseUnblocksReceiversTest(ChannelHolder *ch) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait 0.2 sec // Verify that all the threads are blocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], false); } @@ -708,21 +708,21 @@ void ChannelHolderCloseUnblocksReceiversTest(ChannelHolder *ch) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait 0.2 sec // Verify that all threads got unblocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], true); } - for (size_t i = 0; i < num_threads; i++) t[i].join(); + for (size_t i = 0; i < kNumThreads; i++) t[i].join(); } void ChannelHolderCloseUnblocksSendersTest(ChannelHolder *ch, bool isBuffered) { - size_t num_threads = 5; - std::thread t[num_threads]; - bool thread_ended[num_threads]; - bool send_success[num_threads]; + const size_t kNumThreads = 5; + std::thread t[kNumThreads]; + bool thread_ended[kNumThreads]; + bool send_success[kNumThreads]; // Launches threads that try to write and are blocked because of no readers - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { thread_ended[i] = false; send_success[i] = false; t[i] = std::thread( @@ -744,13 +744,13 @@ void ChannelHolderCloseUnblocksSendersTest(ChannelHolder *ch, bool isBuffered) { if (isBuffered) { // If ch is Buffered, atleast 4 threads must be blocked. int ct = 0; - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { if (!thread_ended[i]) ct++; } EXPECT_GE(ct, 4); } else { // If ch is UnBuffered, all the threads should be blocked. 
- for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], false); } } @@ -761,21 +761,21 @@ void ChannelHolderCloseUnblocksSendersTest(ChannelHolder *ch, bool isBuffered) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait // Verify that all threads got unblocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], true); } if (isBuffered) { // Verify that only 1 send was successful int ct = 0; - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { if (send_success[i]) ct++; } // Only 1 send must be successful EXPECT_EQ(ct, 1); } - for (size_t i = 0; i < num_threads; i++) t[i].join(); + for (size_t i = 0; i < kNumThreads; i++) t[i].join(); } // This tests that closing a channelholder unblocks @@ -813,13 +813,13 @@ TEST(Channel, ChannelHolderCloseUnblocksSendersTest) { // This tests that destroying a channelholder unblocks // any senders waiting for channel void ChannelHolderDestroyUnblockSenders(ChannelHolder *ch, bool isBuffered) { - size_t num_threads = 5; - std::thread t[num_threads]; - bool thread_ended[num_threads]; - bool send_success[num_threads]; + const size_t kNumThreads = 5; + std::thread t[kNumThreads]; + bool thread_ended[kNumThreads]; + bool send_success[kNumThreads]; // Launches threads that try to write and are blocked because of no readers - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { thread_ended[i] = false; send_success[i] = false; t[i] = std::thread( @@ -841,14 +841,14 @@ void ChannelHolderDestroyUnblockSenders(ChannelHolder *ch, bool isBuffered) { if (isBuffered) { // If channel is buffered, verify that atleast 4 threads are blocked int ct = 0; - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { if (thread_ended[i] == false) ct++; } // Atleast 4 threads must be blocked EXPECT_GE(ct, 4); } else { // Verify that all the threads are blocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], false); } } @@ -857,13 +857,13 @@ void ChannelHolderDestroyUnblockSenders(ChannelHolder *ch, bool isBuffered) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait // Verify that all threads got unblocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], true); } // Count number of successfuld sends int ct = 0; - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { if (send_success[i]) ct++; } @@ -876,18 +876,18 @@ void ChannelHolderDestroyUnblockSenders(ChannelHolder *ch, bool isBuffered) { } // Join all threads - for (size_t i = 0; i < num_threads; i++) t[i].join(); + for (size_t i = 0; i < kNumThreads; i++) t[i].join(); } // This tests that destroying a channelholder also unblocks // any receivers waiting on the channel void ChannelHolderDestroyUnblockReceivers(ChannelHolder *ch) { - size_t num_threads = 5; - std::thread t[num_threads]; - bool thread_ended[num_threads]; + const size_t kNumThreads = 5; + std::thread t[kNumThreads]; + bool thread_ended[kNumThreads]; // Launches threads that try to read and are blocked because of no writers - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { thread_ended[i] = false; t[i] = std::thread( [&](bool *p) { @@ -901,18 +901,18 @@ void 
ChannelHolderDestroyUnblockReceivers(ChannelHolder *ch) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait // Verify that all threads are blocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], false); } // delete the channel delete ch; std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait // Verify that all threads got unblocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], true); } - for (size_t i = 0; i < num_threads; i++) t[i].join(); + for (size_t i = 0; i < kNumThreads; i++) t[i].join(); } TEST(ChannelHolder, ChannelHolderDestroyUnblocksReceiversTest) { @@ -945,12 +945,12 @@ TEST(ChannelHolder, ChannelHolderDestroyUnblocksSendersTest) { // This tests that closing a channelholder many times. void ChannelHolderManyTimesClose(ChannelHolder *ch) { - const int num_threads = 15; - std::thread t[num_threads]; - bool thread_ended[num_threads]; + const int kNumThreads = 15; + std::thread t[kNumThreads]; + bool thread_ended[kNumThreads]; // Launches threads that try to send data to channel. - for (size_t i = 0; i < num_threads / 3; i++) { + for (size_t i = 0; i < kNumThreads / 3; i++) { thread_ended[i] = false; t[i] = std::thread( [&](bool *ended) { @@ -962,7 +962,7 @@ void ChannelHolderManyTimesClose(ChannelHolder *ch) { } // Launches threads that try to receive data to channel. - for (size_t i = num_threads / 3; i < 2 * num_threads / 3; i++) { + for (size_t i = kNumThreads / 3; i < 2 * kNumThreads / 3; i++) { thread_ended[i] = false; t[i] = std::thread( [&](bool *p) { @@ -976,7 +976,7 @@ void ChannelHolderManyTimesClose(ChannelHolder *ch) { } // Launches threads that try to close the channel. 
- for (size_t i = 2 * num_threads / 3; i < num_threads; i++) { + for (size_t i = 2 * kNumThreads / 3; i < kNumThreads; i++) { thread_ended[i] = false; t[i] = std::thread( [&](bool *p) { @@ -991,13 +991,13 @@ void ChannelHolderManyTimesClose(ChannelHolder *ch) { std::this_thread::sleep_for(std::chrono::milliseconds(100)); // wait // Verify that all threads are unblocked - for (size_t i = 0; i < num_threads; i++) { + for (size_t i = 0; i < kNumThreads; i++) { EXPECT_EQ(thread_ended[i], true); } EXPECT_TRUE(ch->IsClosed()); // delete the channel delete ch; - for (size_t i = 0; i < num_threads; i++) t[i].join(); + for (size_t i = 0; i < kNumThreads; i++) t[i].join(); } TEST(ChannelHolder, ChannelHolderManyTimesCloseTest) { diff --git a/paddle/fluid/framework/details/CMakeLists.txt b/paddle/fluid/framework/details/CMakeLists.txt index bf1a705ef50b663efa53393ead1f81fd6bcf8c48..89b5c6847f15b3f2a270fe1e7db9e590549e8982 100644 --- a/paddle/fluid/framework/details/CMakeLists.txt +++ b/paddle/fluid/framework/details/CMakeLists.txt @@ -16,6 +16,6 @@ else() endif() cc_library(multi_devices_graph_builder SRCS multi_devices_graph_builder.cc DEPS ssa_graph_builder computation_op_handle scale_loss_grad_op_handle ${multi_devices_graph_builder_deps}) -cc_library(ssa_graph_executor SRCS ssa_graph_executor.cc DEPS ssa_graph) +cc_library(ssa_graph_executor SRCS ssa_graph_executor.cc DEPS ssa_graph framework_proto) cc_library(threaded_ssa_graph_executor SRCS threaded_ssa_graph_executor.cc DEPS fetch_op_handle ssa_graph_executor scope simple_threadpool device_context) diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.cc b/paddle/fluid/framework/details/multi_devices_graph_builder.cc index c277bd7cb69bba899296efe64107ee538c4aa847..128a5344fbb8c64c36ade24475bd0d99bdb3e0f5 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_builder.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_builder.cc @@ -21,6 +21,9 @@ #include "paddle/fluid/framework/details/nccl_all_reduce_op_handle.h" #endif +#include +#include + namespace paddle { namespace framework { namespace details { @@ -168,6 +171,11 @@ std::unique_ptr MultiDevSSAGraphBuilder::Build( */ PolishGraphToSupportDataHazards(&result); + /* + * Only variables should be the leaves of graph. 
+ */ + AddOutputToLeafOps(&result); + if (VLOG_IS_ON(10)) { std::ostringstream sout; PrintGraphviz(*graph, sout); diff --git a/paddle/fluid/framework/details/ssa_graph_builder.cc b/paddle/fluid/framework/details/ssa_graph_builder.cc index 361ba6d39721eed406a30fea325b3b4508ec45d0..0a4febd22f3feefdcac99cafc2cb58269380d192 100644 --- a/paddle/fluid/framework/details/ssa_graph_builder.cc +++ b/paddle/fluid/framework/details/ssa_graph_builder.cc @@ -136,6 +136,17 @@ void SSAGraphBuilder::PrintGraphviz(const SSAGraph &graph, std::ostream &sout) { sout << "}\n"; } + +void SSAGraphBuilder::AddOutputToLeafOps(SSAGraph *graph) { + for (auto &op : graph->ops_) { + if (!op->outputs_.empty()) { + continue; + } + auto *dummy_leaf = new DummyVarHandle(); + graph->dep_vars_.emplace(dummy_leaf); + op->AddOutput(dummy_leaf); + } +} } // namespace details } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/ssa_graph_builder.h b/paddle/fluid/framework/details/ssa_graph_builder.h index bf20e7164a100718c1dcfe3ef971cfff60bbbaa2..be1f0460e45402806b18835f054a7195df1374cc 100644 --- a/paddle/fluid/framework/details/ssa_graph_builder.h +++ b/paddle/fluid/framework/details/ssa_graph_builder.h @@ -14,13 +14,13 @@ #pragma once +#include +#include + #include "paddle/fluid/framework/details/ssa_graph.h" #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/platform/place.h" -#include -#include - namespace paddle { namespace framework { namespace details { @@ -52,6 +52,8 @@ class SSAGraphBuilder { const std::string &each_var_name, const platform::Place &place, size_t place_offset); + static void AddOutputToLeafOps(SSAGraph *graph); + static void PrintGraphviz(const SSAGraph &graph, std::ostream &sout); }; } // namespace details diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc index 1f96b9dc6235a18f7566c98cca60baa964e6aa56..596e5731868630cebc3cf51b2e78d4deb39a9b33 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc @@ -87,7 +87,6 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( // Step 2. 
Insert FetchOps std::vector> fetch_ops; - std::vector dummy_vars; FeedFetchList fetch_data(fetch_tensors.size()); std::unordered_map> fetched_vars; @@ -101,13 +100,13 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( } } + std::unordered_set> fetch_dependencies; for (size_t i = 0; i < fetch_tensors.size(); ++i) { auto &var_name = fetch_tensors[i]; auto &vars = fetched_vars.at(var_name); auto *op = new FetchOpHandle(&fetch_data, i, &local_scopes_); fetch_ops.emplace_back(op); - // FIXME: Use new device context for (auto &p : places_) { op->dev_ctxes_[p] = fetch_ctxs_.Get(p); } @@ -115,6 +114,11 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( for (auto *var : vars) { op->AddInput(var); } + + auto *fetch_dummy = new DummyVarHandle(); + op->AddOutput(fetch_dummy); + fetch_dependencies.emplace(fetch_dummy); + InsertPendingVar(*fetch_dummy); InsertPendingOp(*op); } diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 64c06687b6b905186d4efcc8441d3abef6323d53..16a118090ba9cfd50b4b03484983f9fc73cf7973 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -279,6 +279,21 @@ std::unique_ptr Executor::Prepare( return std::unique_ptr(ctx); } +std::vector> Executor::Prepare( + const ProgramDesc& program, const std::vector& block_ids) { + std::vector> result; + for (auto& bid : block_ids) { + auto* ctx = new ExecutorPrepareContext(program, bid); + PADDLE_ENFORCE_LT(static_cast(bid), program.Size()); + auto& block = program.Block(bid); + for (auto& op_desc : block.AllOps()) { + ctx->ops_.push_back(OpRegistry::CreateOp(*op_desc)); + } + result.push_back(std::shared_ptr(ctx)); + } + return result; +} + void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, bool create_local_scope, bool create_vars) { auto& block = ctx->prog_.Block(ctx->block_id_); diff --git a/paddle/fluid/framework/executor.h b/paddle/fluid/framework/executor.h index 7173c51c95e04ad3095f01bb24923a7a3341c517..d7c99165f0c9d3b1ae11a3b4753a61e8118f7b52 100644 --- a/paddle/fluid/framework/executor.h +++ b/paddle/fluid/framework/executor.h @@ -61,6 +61,9 @@ class Executor { static std::unique_ptr Prepare( const ProgramDesc& program, int block_id); + static std::vector> Prepare( + const ProgramDesc& program, const std::vector& block_ids); + void RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, bool create_local_scope = true, bool create_vars = true); diff --git a/paddle/fluid/framework/lod_tensor.h b/paddle/fluid/framework/lod_tensor.h index dee505fee0dccd8d60bb290a8bec4df243e504a2..4f130d265900483ec7a7c541f2610d17a352913f 100644 --- a/paddle/fluid/framework/lod_tensor.h +++ b/paddle/fluid/framework/lod_tensor.h @@ -142,6 +142,7 @@ class LoDTensor : public Tensor { return (lod_)[level].size() - 1; } + // Split LoDTensor and copy to each place specified in places. 
std::vector SplitLoDTensor( const std::vector places) const; diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index f6a43804ef2fd73c4a2c2c3b3dfbb90bff1c451b..a3b4a8c0829ae3324e933309b2eaea35fe571997 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -35,6 +35,17 @@ std::vector> kKernelPriority = { std::make_tuple(platform::CPUPlace(), LibraryType::kPlain), }; +proto::VarType::Type GetDataTypeOfVar(const Variable* var) { + if (var->IsType()) { + return framework::ToDataType(var->Get().type()); + } else if (var->IsType()) { + return framework::ToDataType( + var->Get().value().type()); + } else { + PADDLE_THROW("Var should be LoDTensor or SelectedRows"); + } +} + static DDim GetDims(const Scope& scope, const std::string& name) { Variable* var = scope.FindVar(name); if (var == nullptr) { diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index 41214b41cb68cbd7049552f39195ae5257e0d06f..b7a7c69b4c8493f945926c75797c49d327a3197e 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -61,6 +61,8 @@ inline std::string GradVarName(const std::string& var_name) { return var_name + kGradVarSuffix; } +proto::VarType::Type GetDataTypeOfVar(const Variable* var); + class OperatorBase; class ExecutionContext; diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index 17885143247f0e0db8f12931e3c3412e7114ef3d..7be93fa6002ae93c3e1b75c8f7fe5ca5f40b271f 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -150,13 +150,30 @@ void ParallelExecutor::BCastParamsToGPUs( #endif } -void ParallelExecutor::Run(const std::vector &fetch_tensors, - const std::string &fetched_var_name) { +void ParallelExecutor::Run( + const std::vector &fetch_tensors, + const std::string &fetched_var_name, + const std::unordered_map &feed_tensors) { platform::RecordBlock b(0); + SplitTensorToPlaces(feed_tensors); auto fetch_data = member_->executor_->Run(fetch_tensors); *member_->global_scope_->Var(fetched_var_name)->GetMutable() = fetch_data; } +void ParallelExecutor::SplitTensorToPlaces( + const std::unordered_map &feed_tensors) { + for (auto it : feed_tensors) { + auto lod_tensors = it.second.SplitLoDTensor(member_->places_); + for (size_t j = 0; j < member_->places_.size(); ++j) { + // TODO(panxy0718): Do I need to delete this var? 
+ member_->local_scopes_[j] + ->Var(it.first) + ->GetMutable() + ->ShareDataWith(lod_tensors[j]); + } + } +} + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/parallel_executor.h b/paddle/fluid/framework/parallel_executor.h index 964b476234e622cae934d41bc3793bc3114a5f1a..c7c58b2b808383621a6d492f9188b0d36bfa6858 100644 --- a/paddle/fluid/framework/parallel_executor.h +++ b/paddle/fluid/framework/parallel_executor.h @@ -42,9 +42,13 @@ class ParallelExecutor { bool allow_op_delay); void Run(const std::vector& fetch_tensors, - const std::string& fetched_var_name = "fetched_var"); + const std::string& fetched_var_name, + const std::unordered_map& feed_tensors); private: + void SplitTensorToPlaces( + const std::unordered_map& feed_tensors); + ParallelExecutorPrivate* member_; void BCastParamsToGPUs(const ProgramDesc& startup_program) const; diff --git a/paddle/fluid/framework/selected_rows.cc b/paddle/fluid/framework/selected_rows.cc index 504344e937dfdc362cdc22298a5f963d87011e9d..d9d6b7dd67f1c6e4bbd6a4e1a8f0843d4cb93c05 100644 --- a/paddle/fluid/framework/selected_rows.cc +++ b/paddle/fluid/framework/selected_rows.cc @@ -1,8 +1,11 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -13,6 +16,7 @@ limitations under the License. */ namespace paddle { namespace framework { + void SerializeToStream(std::ostream& os, const SelectedRows& selected_rows, const platform::DeviceContext& dev_ctx) { { // the 1st field, uint32_t version diff --git a/paddle/fluid/framework/selected_rows.h b/paddle/fluid/framework/selected_rows.h index 9458d56a01df432aea573d796456b9be31350038..8e2d9470d3954e0f66c74828a8d8292c2875a8f4 100644 --- a/paddle/fluid/framework/selected_rows.h +++ b/paddle/fluid/framework/selected_rows.h @@ -1,8 +1,11 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -47,6 +50,15 @@ class SelectedRows { void set_rows(const Vector& rows) { rows_ = rows; } + /** + * get the index of id in rows + */ + int64_t index(int64_t id) const { + auto it = std::find(rows_.begin(), rows_.end(), id); + PADDLE_ENFORCE(it != rows_.end(), "id should be in rows"); + return static_cast(std::distance(rows_.begin(), it)); + } + DDim GetCompleteDims() const { std::vector dims = vectorize(value_->dims()); dims[0] = height_; diff --git a/paddle/fluid/framework/tensor.h b/paddle/fluid/framework/tensor.h index f7a6b5ba84ca1762bd903790aa3c0346b22ed035..6f878541e6de1deec1829145b1b325ecd176a034 100644 --- a/paddle/fluid/framework/tensor.h +++ b/paddle/fluid/framework/tensor.h @@ -45,11 +45,10 @@ class Tensor { friend struct EigenVector; public: - Tensor() : offset_(0), is_pinned_(false) {} + Tensor() : offset_(0) {} /*! Constructor with place should only be used in pybind. */ - explicit Tensor(const platform::Place& place) - : offset_(0), is_pinned_(false) { + explicit Tensor(const platform::Place& place) : offset_(0) { holder_->set_place(place); } @@ -70,12 +69,11 @@ class Tensor { * @note If not exist, then allocation. */ template - inline T* mutable_data(platform::Place place, bool is_pinned = false); + inline T* mutable_data(platform::Place place); - inline void* mutable_data(platform::Place place, std::type_index type, - bool is_pinned = false); + inline void* mutable_data(platform::Place place, std::type_index type); - inline void* mutable_data(platform::Place place, bool is_pinned = false); + inline void* mutable_data(platform::Place place); /** * @brief Return a pointer to mutable memory block. @@ -86,8 +84,7 @@ class Tensor { * @note If not exist, then allocation. */ template - inline T* mutable_data(DDim dims, platform::Place place, - bool is_pinned = false); + inline T* mutable_data(DDim dims, platform::Place place); /*! Return the dimensions of the memory block. */ inline const DDim& dims() const; @@ -95,9 +92,6 @@ class Tensor { /*! Return the numel of the memory block. */ inline int64_t numel() const; - /*! Return the numel of the memory block. */ - inline bool isPinned() const; - /*! Resize the dimensions of the memory block. */ inline Tensor& Resize(const DDim& dims); @@ -152,14 +146,12 @@ class Tensor { template struct PlaceholderImpl : public Placeholder { - PlaceholderImpl(Place place, size_t size, std::type_index type, - bool is_pinned = false) - : ptr_(static_cast(memory::Alloc(place, size, is_pinned)), - memory::PODDeleter(place, is_pinned)), + PlaceholderImpl(Place place, size_t size, std::type_index type) + : ptr_(static_cast(memory::Alloc(place, size)), + memory::PODDeleter(place)), place_(place), size_(size), - type_(type), - is_pinned_(is_pinned) { + type_(type) { PADDLE_ENFORCE_NOT_NULL(ptr_, "Insufficient %s memory to allocation.", (is_cpu_place(place_) ? "CPU" : "GPU")); } @@ -182,9 +174,6 @@ class Tensor { /* the current type of memory */ std::type_index type_; - - /*! use pinned memory or not. */ - bool is_pinned_; }; /*! holds the memory block if allocated. */ @@ -219,7 +208,6 @@ class Tensor { * PlaceHolder::ptr_ and where the tensor data really begins. 
*/ size_t offset_; - bool is_pinned_; }; inline void Tensor::switch_place(platform::Place new_place) { diff --git a/paddle/fluid/framework/tensor_impl.h b/paddle/fluid/framework/tensor_impl.h index 113814971e115fa88bd0ded34017fa26a9dd5803..f49d1a47a325b2aac6185073203df124be18b54d 100644 --- a/paddle/fluid/framework/tensor_impl.h +++ b/paddle/fluid/framework/tensor_impl.h @@ -101,21 +101,19 @@ inline T* Tensor::data() { } template -inline T* Tensor::mutable_data(DDim dims, platform::Place place, - bool is_pinned) { +inline T* Tensor::mutable_data(DDim dims, platform::Place place) { static_assert(std::is_pod::value, "T must be POD"); Resize(dims); - return mutable_data(place, is_pinned); + return mutable_data(place); } template -inline T* Tensor::mutable_data(platform::Place place, bool is_pinned) { +inline T* Tensor::mutable_data(platform::Place place) { static_assert(std::is_pod::value, "T must be POD"); - return reinterpret_cast(mutable_data(place, typeid(T), is_pinned)); + return reinterpret_cast(mutable_data(place, typeid(T))); } -inline void* Tensor::mutable_data(platform::Place place, std::type_index type, - bool is_pinned) { +inline void* Tensor::mutable_data(platform::Place place, std::type_index type) { if (holder_ != nullptr) { holder_->set_type(type); } @@ -129,27 +127,33 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type, holder_->size() < size + offset_) { if (platform::is_cpu_place(place)) { holder_.reset(new PlaceholderImpl( - boost::get(place), size, type, is_pinned)); - } else if (platform::is_gpu_place(place)) { + boost::get(place), size, type)); + } else if (platform::is_gpu_place(place) || + platform::is_cuda_pinned_place(place)) { #ifndef PADDLE_WITH_CUDA - PADDLE_THROW("'CUDAPlace' is not supported in CPU only device."); + PADDLE_THROW( + "CUDAPlace or CUDAPinnedPlace is not supported in CPU-only mode."); } #else - holder_.reset(new PlaceholderImpl( - boost::get(place), size, type, is_pinned)); + if (platform::is_gpu_place(place)) { + holder_.reset(new PlaceholderImpl( + boost::get(place), size, type)); + } else if (platform::is_cuda_pinned_place(place)) { + holder_.reset(new PlaceholderImpl( + boost::get(place), size, type)); + } } #endif offset_ = 0; - is_pinned_ = is_pinned; } return reinterpret_cast(reinterpret_cast(holder_->ptr()) + offset_); } -inline void* Tensor::mutable_data(platform::Place place, bool is_pinned) { +inline void* Tensor::mutable_data(platform::Place place) { PADDLE_ENFORCE(this->holder_ != nullptr, - "Cannot invoke mutable data if current hold nothing"); - return mutable_data(place, holder_->type(), is_pinned); + "Cannot invoke mutable data if current hold nothing."); + return mutable_data(place, holder_->type()); } inline Tensor& Tensor::ShareDataWith(const Tensor& src) { @@ -191,8 +195,6 @@ inline const DDim& Tensor::dims() const { return dims_; } inline int64_t Tensor::numel() const { return product(dims_); } -inline bool Tensor::isPinned() const { return is_pinned_; } - inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) { Tensor res; res.ShareDataWith(src); diff --git a/paddle/fluid/framework/tensor_util.cc b/paddle/fluid/framework/tensor_util.cc index 8b7533ce712b0a01060842b6f71449ed6bd23e2c..1d864af011bced9df188147ec436b8de12947ba9 100644 --- a/paddle/fluid/framework/tensor_util.cc +++ b/paddle/fluid/framework/tensor_util.cc @@ -148,6 +148,11 @@ struct AnyVisitor : public boost::static_visitor { const platform::CPUPlace& cpu) const { return *out.data(); } + + bool GetResult(const 
framework::Tensor& out, + const platform::CUDAPinnedPlace& cpu) const { + return *out.data(); + } }; template diff --git a/paddle/fluid/framework/tuple.h b/paddle/fluid/framework/tuple.h index 78996908b18a5a0935d8de9920e8ccef9069e74b..f6c6a1fec13d8b12efd1fa71a7a93316e484d045 100644 --- a/paddle/fluid/framework/tuple.h +++ b/paddle/fluid/framework/tuple.h @@ -35,24 +35,25 @@ class Tuple { public: using ElementVars = std::vector; - Tuple(std::vector& var, std::vector& var_desc) + Tuple(const std::vector& var, + const std::vector& var_desc) : var_(var), var_desc_(var_desc) {} - Tuple(std::vector& var) : var_(var) {} + explicit Tuple(std::vector& var) : var_(var) {} - ElementVar get(int idx) const { return var_[idx]; }; + ElementVar get(int idx) const { return var_[idx]; } - ElementVar& get(int idx) { return var_[idx]; }; + ElementVar& get(int idx) { return var_[idx]; } - bool isSameType(Tuple& t) const; + bool isSameType(const Tuple& t) const; - size_t getSize() const { return var_.size(); }; + size_t getSize() const { return var_.size(); } private: ElementVars var_; std::vector var_desc_; }; -bool Tuple::isSameType(Tuple& t) const { +bool Tuple::isSameType(const Tuple& t) const { size_t tuple_size = getSize(); if (tuple_size != t.getSize()) { return false; diff --git a/paddle/fluid/inference/io.cc b/paddle/fluid/inference/io.cc index 52e9c0baa64508f82d0a86a88c8c5f8d23f9f7f2..a5b62ef322bfad0fc956d7d722797bd5add6aea6 100644 --- a/paddle/fluid/inference/io.cc +++ b/paddle/fluid/inference/io.cc @@ -41,8 +41,7 @@ bool IsPersistable(const framework::VarDesc* var) { return false; } -void LoadPersistables(framework::Executor& executor, - framework::Scope& scope, +void LoadPersistables(framework::Executor& executor, framework::Scope& scope, const framework::ProgramDesc& main_program, const std::string& dirname, const std::string& param_filename) { @@ -108,10 +107,8 @@ std::unique_ptr Load(framework::Executor& executor, } std::unique_ptr Load( - framework::Executor& executor, - framework::Scope& scope, - const std::string& prog_filename, - const std::string& param_filename) { + framework::Executor& executor, framework::Scope& scope, + const std::string& prog_filename, const std::string& param_filename) { std::string model_filename = prog_filename; std::string program_desc_str; ReadBinaryFile(model_filename, program_desc_str); diff --git a/paddle/fluid/inference/io.h b/paddle/fluid/inference/io.h index 6817a6fca047c9336233697a7bee4e5e16eedd5e..d07d315b93ef10a464080899b1cb9920abe83be3 100644 --- a/paddle/fluid/inference/io.h +++ b/paddle/fluid/inference/io.h @@ -24,8 +24,7 @@ limitations under the License. 
*/ namespace paddle { namespace inference { -void LoadPersistables(framework::Executor& executor, - framework::Scope& scope, +void LoadPersistables(framework::Executor& executor, framework::Scope& scope, const framework::ProgramDesc& main_program, const std::string& dirname, const std::string& param_filename); diff --git a/paddle/fluid/inference/tests/book/CMakeLists.txt b/paddle/fluid/inference/tests/book/CMakeLists.txt index e7ffb00ec8d8926193fe510ebdb7185f75c90906..6ed77adb9d891c75e7de358d0d7a0c06c9af96dd 100644 --- a/paddle/fluid/inference/tests/book/CMakeLists.txt +++ b/paddle/fluid/inference/tests/book/CMakeLists.txt @@ -4,7 +4,7 @@ function(inference_test TARGET_NAME) set(multiValueArgs ARGS) cmake_parse_arguments(inference_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - set(PYTHON_TESTS_DIR ${PADDLE_SOURCE_DIR}/python/paddle/fluid/tests) + set(PYTHON_TESTS_DIR ${PADDLE_BINARY_DIR}/python/paddle/fluid/tests) set(arg_list "") if(inference_test_ARGS) foreach(arg ${inference_test_ARGS}) diff --git a/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc b/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc index 9ab808efec3abdb86724fb16725962958c5cf55c..3e77dc166c355bc141563eda4705ca8d75226ac4 100644 --- a/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc +++ b/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc @@ -9,8 +9,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include "gflags/gflags.h" +#include "gtest/gtest.h" #include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); @@ -30,8 +30,8 @@ TEST(inference, fit_a_line) { // The second dim of the input tensor should be 13 // The input data should be >= 0 int64_t batch_size = 10; - SetupTensor( - input, {batch_size, 13}, static_cast(0), static_cast(10)); + SetupTensor(&input, {batch_size, 13}, static_cast(0), + static_cast(10)); std::vector cpu_feeds; cpu_feeds.push_back(&input); diff --git a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc index e9a27171f1cd68e7b10c860fb4a1417b930ed565..a6b6c3f828f4c6f59fca42e4c3d9580d6c136524 100644 --- a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc +++ b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include "gflags/gflags.h" +#include "gtest/gtest.h" #include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); @@ -35,10 +35,8 @@ TEST(inference, image_classification) { paddle::framework::LoDTensor input; // Use normilized image pixels as input data, // which should be in the range [0.0, 1.0]. 
- SetupTensor(input, - {FLAGS_batch_size, 3, 32, 32}, - static_cast(0), - static_cast(1)); + SetupTensor(&input, {FLAGS_batch_size, 3, 32, 32}, + static_cast(0), static_cast(1)); std::vector cpu_feeds; cpu_feeds.push_back(&input); @@ -48,8 +46,8 @@ TEST(inference, image_classification) { // Run inference on CPU LOG(INFO) << "--- CPU Runs: ---"; - TestInference( - dirname, cpu_feeds, cpu_fetchs1, FLAGS_repeat); + TestInference(dirname, cpu_feeds, cpu_fetchs1, + FLAGS_repeat); LOG(INFO) << output1.dims(); #ifdef PADDLE_WITH_CUDA @@ -59,8 +57,8 @@ TEST(inference, image_classification) { // Run inference on CUDA GPU LOG(INFO) << "--- GPU Runs: ---"; - TestInference( - dirname, cpu_feeds, cpu_fetchs2, FLAGS_repeat); + TestInference(dirname, cpu_feeds, cpu_fetchs2, + FLAGS_repeat); LOG(INFO) << output2.dims(); CheckError(output1, output2); diff --git a/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc b/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc index 184924016634bba26204d937744ca5fa87cd443c..84bb855fea5fa397ff71e2c922fea3302951b7ca 100644 --- a/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc +++ b/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include "gflags/gflags.h" +#include "gtest/gtest.h" #include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); @@ -36,37 +36,21 @@ TEST(inference, label_semantic_roles) { int64_t predicate_dict_len = 3162; int64_t mark_dict_len = 2; - SetupLoDTensor(word, - lod, - static_cast(0), + SetupLoDTensor(&word, lod, static_cast(0), static_cast(word_dict_len - 1)); - SetupLoDTensor(predicate, - lod, - static_cast(0), + SetupLoDTensor(&predicate, lod, static_cast(0), static_cast(predicate_dict_len - 1)); - SetupLoDTensor(ctx_n2, - lod, - static_cast(0), + SetupLoDTensor(&ctx_n2, lod, static_cast(0), static_cast(word_dict_len - 1)); - SetupLoDTensor(ctx_n1, - lod, - static_cast(0), + SetupLoDTensor(&ctx_n1, lod, static_cast(0), static_cast(word_dict_len - 1)); - SetupLoDTensor(ctx_0, - lod, - static_cast(0), + SetupLoDTensor(&ctx_0, lod, static_cast(0), static_cast(word_dict_len - 1)); - SetupLoDTensor(ctx_p1, - lod, - static_cast(0), + SetupLoDTensor(&ctx_p1, lod, static_cast(0), static_cast(word_dict_len - 1)); - SetupLoDTensor(ctx_p2, - lod, - static_cast(0), + SetupLoDTensor(&ctx_p2, lod, static_cast(0), static_cast(word_dict_len - 1)); - SetupLoDTensor(mark, - lod, - static_cast(0), + SetupLoDTensor(&mark, lod, static_cast(0), static_cast(mark_dict_len - 1)); std::vector cpu_feeds; diff --git a/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc b/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc index 1fb0f9e77797cf6e61e918700763ee33a495cb96..f12828a2685305c20d26492dbf04fa9ddacf9317 100644 --- a/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc +++ b/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include #include "gflags/gflags.h" +#include "gtest/gtest.h" #include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); @@ -35,10 +35,8 @@ TEST(inference, recognize_digits) { paddle::framework::LoDTensor input; // Use normilized image pixels as input data, // which should be in the range [-1.0, 1.0]. - SetupTensor(input, - {FLAGS_batch_size, 1, 28, 28}, - static_cast(-1), - static_cast(1)); + SetupTensor(&input, {FLAGS_batch_size, 1, 28, 28}, + static_cast(-1), static_cast(1)); std::vector cpu_feeds; cpu_feeds.push_back(&input); @@ -49,8 +47,8 @@ TEST(inference, recognize_digits) { // Run inference on CPU LOG(INFO) << "--- CPU Runs: is_combined=" << is_combined << " ---"; - TestInference( - dirname, cpu_feeds, cpu_fetchs1, FLAGS_repeat, is_combined); + TestInference(dirname, cpu_feeds, cpu_fetchs1, + FLAGS_repeat, is_combined); LOG(INFO) << output1.dims(); #ifdef PADDLE_WITH_CUDA @@ -60,8 +58,8 @@ TEST(inference, recognize_digits) { // Run inference on CUDA GPU LOG(INFO) << "--- GPU Runs: is_combined=" << is_combined << " ---"; - TestInference( - dirname, cpu_feeds, cpu_fetchs2, FLAGS_repeat, is_combined); + TestInference(dirname, cpu_feeds, cpu_fetchs2, + FLAGS_repeat, is_combined); LOG(INFO) << output2.dims(); CheckError(output1, output2); diff --git a/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc b/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc index b42a33c9a90b5feafaed343a197da0e4db11b7ea..70aa6b194d4417fc85384cc3f615089f024f928e 100644 --- a/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc +++ b/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include #include "gflags/gflags.h" +#include "gtest/gtest.h" #include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); @@ -36,25 +36,25 @@ TEST(inference, recommender_system) { // Use the first data from paddle.dataset.movielens.test() as input std::vector user_id_data = {1}; - SetupTensor(user_id, {batch_size, 1}, user_id_data); + SetupTensor(&user_id, {batch_size, 1}, user_id_data); std::vector gender_id_data = {1}; - SetupTensor(gender_id, {batch_size, 1}, gender_id_data); + SetupTensor(&gender_id, {batch_size, 1}, gender_id_data); std::vector age_id_data = {0}; - SetupTensor(age_id, {batch_size, 1}, age_id_data); + SetupTensor(&age_id, {batch_size, 1}, age_id_data); std::vector job_id_data = {10}; - SetupTensor(job_id, {batch_size, 1}, job_id_data); + SetupTensor(&job_id, {batch_size, 1}, job_id_data); std::vector movie_id_data = {783}; - SetupTensor(movie_id, {batch_size, 1}, movie_id_data); + SetupTensor(&movie_id, {batch_size, 1}, movie_id_data); std::vector category_id_data = {10, 8, 9}; - SetupLoDTensor(category_id, {3, 1}, {{0, 3}}, category_id_data); + SetupLoDTensor(&category_id, {3, 1}, {{0, 3}}, category_id_data); std::vector movie_title_data = {1069, 4140, 2923, 710, 988}; - SetupLoDTensor(movie_title, {5, 1}, {{0, 5}}, movie_title_data); + SetupLoDTensor(&movie_title, {5, 1}, {{0, 5}}, movie_title_data); std::vector cpu_feeds; cpu_feeds.push_back(&user_id); diff --git a/paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc b/paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc index a0523905bd1631cd8768b1601e459cb9d110a84d..e15c3f59acb1eac535120554a3799c37e9d4e951 100644 --- a/paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc +++ b/paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include "gflags/gflags.h" +#include "gtest/gtest.h" #include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); @@ -32,10 +32,10 @@ TEST(inference, rnn_encoder_decoder) { paddle::framework::LoDTensor word_data, trg_word; paddle::framework::LoD lod{{0, 4, 10}}; - SetupLoDTensor( - word_data, lod, static_cast(0), static_cast(1)); - SetupLoDTensor( - trg_word, lod, static_cast(0), static_cast(1)); + SetupLoDTensor(&word_data, lod, static_cast(0), + static_cast(1)); + SetupLoDTensor(&trg_word, lod, static_cast(0), + static_cast(1)); std::vector cpu_feeds; cpu_feeds.push_back(&word_data); diff --git a/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc b/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc index 824b3274ebc7ba046e61798b3f61ef9924a75679..0dbb6a30405eb64133613052ad57b1f705a9e7b4 100644 --- a/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc +++ b/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include #include "gflags/gflags.h" +#include "gtest/gtest.h" #include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); @@ -33,9 +33,7 @@ TEST(inference, understand_sentiment) { paddle::framework::LoD lod{{0, 4, 10}}; int64_t word_dict_len = 5147; - SetupLoDTensor(words, - lod, - static_cast(0), + SetupLoDTensor(&words, lod, static_cast(0), static_cast(word_dict_len - 1)); std::vector cpu_feeds; diff --git a/paddle/fluid/inference/tests/book/test_inference_word2vec.cc b/paddle/fluid/inference/tests/book/test_inference_word2vec.cc index 1481760c529c29a7290f476e2a22e1ded5ab7787..c9328eb21b4fdb06c5f65ba0f7337b1e79fa1927 100644 --- a/paddle/fluid/inference/tests/book/test_inference_word2vec.cc +++ b/paddle/fluid/inference/tests/book/test_inference_word2vec.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include "gflags/gflags.h" +#include "gtest/gtest.h" #include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); @@ -33,10 +33,10 @@ TEST(inference, word2vec) { paddle::framework::LoD lod{{0, 1}}; int64_t dict_size = 2073; // The size of dictionary - SetupLoDTensor(first_word, lod, static_cast(0), dict_size - 1); - SetupLoDTensor(second_word, lod, static_cast(0), dict_size - 1); - SetupLoDTensor(third_word, lod, static_cast(0), dict_size - 1); - SetupLoDTensor(fourth_word, lod, static_cast(0), dict_size - 1); + SetupLoDTensor(&first_word, lod, static_cast(0), dict_size - 1); + SetupLoDTensor(&second_word, lod, static_cast(0), dict_size - 1); + SetupLoDTensor(&third_word, lod, static_cast(0), dict_size - 1); + SetupLoDTensor(&fourth_word, lod, static_cast(0), dict_size - 1); std::vector cpu_feeds; cpu_feeds.push_back(&first_word); diff --git a/paddle/fluid/inference/tests/test_helper.h b/paddle/fluid/inference/tests/test_helper.h index dce541c0971a6ff9a3728e915fe8c3d009c23550..5118e66f1e7a36333ff4425361e54ec59e6ba05b 100644 --- a/paddle/fluid/inference/tests/test_helper.h +++ b/paddle/fluid/inference/tests/test_helper.h @@ -11,59 +11,59 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#pragma once + +#include +#include +#include +#include -#include #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/inference/io.h" #include "paddle/fluid/platform/profiler.h" template -void SetupTensor(paddle::framework::LoDTensor& input, - paddle::framework::DDim dims, - T lower, - T upper) { - srand(time(0)); - T* input_ptr = input.mutable_data(dims, paddle::platform::CPUPlace()); - for (int i = 0; i < input.numel(); ++i) { - input_ptr[i] = - (static_cast(rand()) / static_cast(RAND_MAX)) * (upper - lower) + - lower; +void SetupTensor(paddle::framework::LoDTensor* input, + paddle::framework::DDim dims, T lower, T upper) { + std::mt19937 rng(100); // An arbitrarily chosen but fixed seed. 
+ std::uniform_real_distribution uniform_dist(0, 1); + + T* input_ptr = input->mutable_data(dims, paddle::platform::CPUPlace()); + for (int i = 0; i < input->numel(); ++i) { + input_ptr[i] = static_cast(uniform_dist(rng) * (upper - lower) + lower); } } template -void SetupTensor(paddle::framework::LoDTensor& input, - paddle::framework::DDim dims, - std::vector& data) { +void SetupTensor(paddle::framework::LoDTensor* input, + paddle::framework::DDim dims, const std::vector& data) { CHECK_EQ(paddle::framework::product(dims), static_cast(data.size())); - T* input_ptr = input.mutable_data(dims, paddle::platform::CPUPlace()); - memcpy(input_ptr, data.data(), input.numel() * sizeof(T)); + T* input_ptr = input->mutable_data(dims, paddle::platform::CPUPlace()); + memcpy(input_ptr, data.data(), input->numel() * sizeof(T)); } template -void SetupLoDTensor(paddle::framework::LoDTensor& input, - paddle::framework::LoD& lod, - T lower, - T upper) { - input.set_lod(lod); +void SetupLoDTensor(paddle::framework::LoDTensor* input, + const paddle::framework::LoD& lod, T lower, T upper) { + input->set_lod(lod); int dim = lod[0][lod[0].size() - 1]; SetupTensor(input, {dim, 1}, lower, upper); } template -void SetupLoDTensor(paddle::framework::LoDTensor& input, +void SetupLoDTensor(paddle::framework::LoDTensor* input, paddle::framework::DDim dims, - paddle::framework::LoD lod, - std::vector& data) { + const paddle::framework::LoD lod, + const std::vector& data) { const size_t level = lod.size() - 1; CHECK_EQ(dims[0], static_cast((lod[level]).back())); - input.set_lod(lod); + input->set_lod(lod); SetupTensor(input, dims, data); } template -void CheckError(paddle::framework::LoDTensor& output1, - paddle::framework::LoDTensor& output2) { +void CheckError(const paddle::framework::LoDTensor& output1, + const paddle::framework::LoDTensor& output2) { // Check lod information EXPECT_EQ(output1.lod(), output2.lod()); @@ -91,9 +91,8 @@ void CheckError(paddle::framework::LoDTensor& output1, template void TestInference(const std::string& dirname, const std::vector& cpu_feeds, - std::vector& cpu_fetchs, - const int repeat = 1, - const bool is_combined = false) { + const std::vector& cpu_fetchs, + const int repeat = 1, const bool is_combined = false) { // 1. Define place, executor, scope auto place = Place(); auto executor = paddle::framework::Executor(place); @@ -132,11 +131,9 @@ void TestInference(const std::string& dirname, // `fluid.io.save_inference_model`. std::string prog_filename = "__model_combined__"; std::string param_filename = "__params_combined__"; - inference_program = - paddle::inference::Load(executor, - *scope, - dirname + "/" + prog_filename, - dirname + "/" + param_filename); + inference_program = paddle::inference::Load( + executor, *scope, dirname + "/" + prog_filename, + dirname + "/" + param_filename); } else { // Parameters are saved in separate files sited in the specified // `dirname`. diff --git a/paddle/fluid/memory/.clang-format b/paddle/fluid/memory/.clang-format deleted file mode 100644 index 29282dc87e2c499988c17d90d47d44cd5cf7f115..0000000000000000000000000000000000000000 --- a/paddle/fluid/memory/.clang-format +++ /dev/null @@ -1,5 +0,0 @@ ---- -Language: Cpp -BasedOnStyle: Google -Standard: Cpp11 -... 
diff --git a/paddle/fluid/memory/CMakeLists.txt b/paddle/fluid/memory/CMakeLists.txt index 1a61c484823b292234d4758cdc1959d7a21510e6..8b3043af7a18787a08583d47b76da679ccb63740 100644 --- a/paddle/fluid/memory/CMakeLists.txt +++ b/paddle/fluid/memory/CMakeLists.txt @@ -4,13 +4,17 @@ cc_library(memory SRCS memory.cc DEPS place enforce) cc_library(memcpy SRCS memcpy.cc DEPS place) cc_library(paddle_memory - DEPS - memory - memcpy - meta_data - meta_cache - memory_block - buddy_allocator - system_allocator) + DEPS + memory + memcpy + meta_data + meta_cache + memory_block + buddy_allocator + system_allocator) cc_test(memory_test SRCS memory_test.cc DEPS place paddle_memory) + +#if (WITH_GPU) +# nv_test(pinned_memory_test SRCS pinned_memory_test.cu DEPS place paddle_memory) +#endif() diff --git a/paddle/fluid/memory/detail/system_allocator.cc b/paddle/fluid/memory/detail/system_allocator.cc index 22f6f506748735d1a0fe75375aeea22bd92b8b7e..a45f8c33ee5956f3409ee1b7c43628aa0acafb98 100644 --- a/paddle/fluid/memory/detail/system_allocator.cc +++ b/paddle/fluid/memory/detail/system_allocator.cc @@ -14,6 +14,7 @@ limitations under the License. */ #include "paddle/fluid/memory/detail/system_allocator.h" #include "paddle/fluid/platform/assert.h" +#include "paddle/fluid/platform/cpu_info.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/gpu_info.h" @@ -134,21 +135,31 @@ bool GPUAllocator::UseGpu() const { return true; } // memory. It’s locked to a physical address. void* CUDAPinnedAllocator::Alloc(size_t& index, size_t size) { if (size <= 0) return nullptr; - void* p; - // NOTE: here, we use GpuMaxAllocSize() as the maximum memory size + + // NOTE: here, we use CUDAPinnedMaxAllocSize as the maximum memory size // of host pinned allocation. Allocates too much would reduce // the amount of memory available to the underlying system for paging. + size_t usable = + paddle::platform::CUDAPinnedMaxAllocSize() - cuda_pinnd_alloc_size_; - size_t usable = paddle::platform::GpuMaxAllocSize() - fallback_alloc_size_; - - if (size > usable) return nullptr; + if (size > usable) { + LOG(WARNING) << "Cannot malloc " << size / 1024.0 / 1024.0 + << " MB pinned memory." + << ", available " << usable / 1024.0 / 1024.0 << " MB"; + return nullptr; + } + void* p; // PINNED memory is visible to all CUDA contexts. 
cudaError_t result = cudaMallocHost(&p, size); + if (result == cudaSuccess) { - index = 1; - fallback_alloc_size_ += size; + index = 1; // PINNED memory + cuda_pinnd_alloc_size_ += size; return p; + } else { + LOG(WARNING) << "cudaMallocHost failed."; + return nullptr; } return nullptr; @@ -158,8 +169,8 @@ void CUDAPinnedAllocator::Free(void* p, size_t size, size_t index) { cudaError_t err; PADDLE_ASSERT(index == 1); - PADDLE_ASSERT(fallback_alloc_size_ >= size); - fallback_alloc_size_ -= size; + PADDLE_ASSERT(cuda_pinnd_alloc_size_ >= size); + cuda_pinnd_alloc_size_ -= size; err = cudaFreeHost(p); // Purposefully allow cudaErrorCudartUnloading, because @@ -172,7 +183,7 @@ void CUDAPinnedAllocator::Free(void* p, size_t size, size_t index) { } } -bool CUDAPinnedAllocator::UseGpu() const { return true; } +bool CUDAPinnedAllocator::UseGpu() const { return false; } #endif diff --git a/paddle/fluid/memory/detail/system_allocator.h b/paddle/fluid/memory/detail/system_allocator.h index e8479e73f433f1d741b2933da4843c0ba80276d5..e3c50ef6483c61e2016bbd967a4100057c87dca3 100644 --- a/paddle/fluid/memory/detail/system_allocator.h +++ b/paddle/fluid/memory/detail/system_allocator.h @@ -21,8 +21,9 @@ namespace memory { namespace detail { /** - * \brief SystemAllocator is the parent class of CPUAllocator and GPUAllocator. - * A BuddyAllocator object uses a SystemAllocator* pointing to the + * \brief SystemAllocator is the parent class of CPUAllocator, + * CUDAPinnedAllocator and GPUAllocator. A BuddyAllocator + * object uses a SystemAllocator* pointing to the * underlying system allocator. */ class SystemAllocator { @@ -62,9 +63,7 @@ class CUDAPinnedAllocator : public SystemAllocator { virtual bool UseGpu() const; private: - size_t gpu_alloc_size_ = - 0; // TODO(zcd): how to define the upper limit of CUDAPinnedMemory? 
- size_t fallback_alloc_size_ = 0; + size_t cuda_pinnd_alloc_size_ = 0; }; #endif diff --git a/paddle/fluid/memory/memcpy.cc b/paddle/fluid/memory/memcpy.cc index b991360d0442ec2d258443a931a9dcf10b332f1e..eddcaab8befda84dd14ed46c31ac025dfbcc7ca9 100644 --- a/paddle/fluid/memory/memcpy.cc +++ b/paddle/fluid/memory/memcpy.cc @@ -56,6 +56,45 @@ void Copy( } } +template <> +void Copy( + platform::CPUPlace dst_place, void* dst, + platform::CUDAPinnedPlace src_place, const void* src, size_t num) { + std::memcpy(dst, src, num); +} + +template <> +void Copy( + platform::CUDAPinnedPlace dst_place, void* dst, + platform::CPUPlace src_place, const void* src, size_t num) { + std::memcpy(dst, src, num); +} + +template <> +void Copy( + platform::CUDAPinnedPlace dst_place, void* dst, + platform::CUDAPinnedPlace src_place, const void* src, size_t num) { + std::memcpy(dst, src, num); +} + +template <> +void Copy( + platform::CUDAPinnedPlace dst_place, void* dst, + platform::CUDAPlace src_place, const void* src, size_t num, + cudaStream_t stream) { + platform::SetDeviceId(src_place.device); + platform::GpuMemcpyAsync(dst, src, num, cudaMemcpyDeviceToHost, stream); +} + +template <> +void Copy( + platform::CUDAPlace dst_place, void* dst, + platform::CUDAPinnedPlace src_place, const void* src, size_t num, + cudaStream_t stream) { + platform::SetDeviceId(dst_place.device); + platform::GpuMemcpyAsync(dst, src, num, cudaMemcpyHostToDevice, stream); +} + #endif } // namespace memory diff --git a/paddle/fluid/memory/memory.cc b/paddle/fluid/memory/memory.cc index 56593653a622bce323306d86156d140c46f58d18..2c13dbc6d51bfa3853cec5270e8115c899f522ea 100644 --- a/paddle/fluid/memory/memory.cc +++ b/paddle/fluid/memory/memory.cc @@ -38,8 +38,7 @@ BuddyAllocator* GetCPUBuddyAllocator() { } template <> -void* Alloc(platform::CPUPlace place, size_t size, - bool is_pinned) { +void* Alloc(platform::CPUPlace place, size_t size) { VLOG(10) << "Allocate " << size << " bytes on " << platform::Place(place); void* p = GetCPUBuddyAllocator()->Alloc(size); VLOG(10) << " pointer=" << p; @@ -47,8 +46,7 @@ void* Alloc(platform::CPUPlace place, size_t size, } template <> -void Free(platform::CPUPlace place, void* p, - bool is_pinned) { +void Free(platform::CPUPlace place, void* p) { VLOG(10) << "Free pointer=" << p << " on " << platform::Place(place); GetCPUBuddyAllocator()->Free(p); } @@ -84,52 +82,20 @@ BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) { return as[gpu_id]; } -BuddyAllocator* GetCUDAPinnedBuddyAllocator(int gpu_id) { - static BuddyAllocator** as = NULL; - if (as == NULL) { - int gpu_num = platform::GetCUDADeviceCount(); - as = new BuddyAllocator*[gpu_num]; - for (int gpu = 0; gpu < gpu_num; gpu++) { - as[gpu] = nullptr; - } - } - platform::SetDeviceId(gpu_id); - if (!as[gpu_id]) { - as[gpu_id] = new BuddyAllocator(new detail::CUDAPinnedAllocator, - platform::GpuMinChunkSize(), - platform::GpuMaxChunkSize()); - VLOG(10) << "\n\nNOTE: each GPU device use " - << FLAGS_fraction_of_gpu_memory_to_use * 100 - << "% of GPU memory.\n" - << "You can set GFlags environment variable '" - << "FLAGS_fraction_of_gpu_memory_to_use" - << "' to change the fraction of GPU usage.\n\n"; - } - return as[gpu_id]; -} - template <> size_t Used(platform::CUDAPlace place) { return GetGPUBuddyAllocator(place.device)->Used(); } template <> -void* Alloc(platform::CUDAPlace place, size_t size, - bool is_pinned) { - void* ptr; - if (is_pinned) { - auto* buddy_allocator = GetCUDAPinnedBuddyAllocator(place.device); - ptr = buddy_allocator->Alloc(size); - 
} else { - auto* buddy_allocator = GetGPUBuddyAllocator(place.device); - ptr = buddy_allocator->Alloc(size); - } - +void* Alloc(platform::CUDAPlace place, size_t size) { + auto* buddy_allocator = GetGPUBuddyAllocator(place.device); + auto* ptr = buddy_allocator->Alloc(size); if (ptr == nullptr) { int cur_dev = platform::GetCurrentDeviceId(); platform::SetDeviceId(place.device); size_t avail, total; - platform::GpuMemoryUsage(avail, total); + platform::GpuMemoryUsage(&avail, &total); LOG(WARNING) << "Cannot allocate " << size << " bytes in GPU " << place.device << ", available " << avail << " bytes"; LOG(WARNING) << "total " << total; @@ -142,15 +108,42 @@ void* Alloc(platform::CUDAPlace place, size_t size, } template <> -void Free(platform::CUDAPlace place, void* p, - bool is_pinned) { - if (is_pinned) { - GetCUDAPinnedBuddyAllocator(place.device)->Free(p); - } else { - GetGPUBuddyAllocator(place.device)->Free(p); +void Free(platform::CUDAPlace place, void* p) { + GetGPUBuddyAllocator(place.device)->Free(p); +} + +BuddyAllocator* GetCUDAPinnedBuddyAllocator() { + static BuddyAllocator* ba = NULL; + if (ba == NULL) { + ba = new BuddyAllocator(new detail::CUDAPinnedAllocator, + platform::CUDAPinnedMinChunkSize(), + platform::CUDAPinnedMaxChunkSize()); } + return ba; } +template <> +size_t Used(platform::CUDAPinnedPlace place) { + return GetCUDAPinnedBuddyAllocator()->Used(); +} + +template <> +void* Alloc(platform::CUDAPinnedPlace place, + size_t size) { + auto* buddy_allocator = GetCUDAPinnedBuddyAllocator(); + void* ptr = buddy_allocator->Alloc(size); + + if (ptr == nullptr) { + LOG(WARNING) << "cudaMallocHost Cannot allocate " << size + << " bytes in CUDAPinnedPlace"; + } + return ptr; +} + +template <> +void Free(platform::CUDAPinnedPlace place, void* p) { + GetCUDAPinnedBuddyAllocator()->Free(p); +} #endif size_t Usage::operator()(const platform::CPUPlace& cpu) const { @@ -165,6 +158,14 @@ size_t Usage::operator()(const platform::CUDAPlace& gpu) const { #endif } +size_t Usage::operator()(const platform::CUDAPinnedPlace& cuda_pinned) const { +#ifdef PADDLE_WITH_CUDA + return Used(cuda_pinned); +#else + PADDLE_THROW("'CUDAPinnedPlace' is not supported in CPU only device."); +#endif +} + size_t memory_usage(const platform::Place& p) { return boost::apply_visitor(Usage(), p); } diff --git a/paddle/fluid/memory/memory.h b/paddle/fluid/memory/memory.h index 062bfc880e78dc5d90c567ffe5c4e521704c9ca6..3e6bfddd69cb16edf323d040ea5369cd551f299e 100644 --- a/paddle/fluid/memory/memory.h +++ b/paddle/fluid/memory/memory.h @@ -33,7 +33,7 @@ namespace memory { * address is valid or not. */ template -void* Alloc(Place place, size_t size, bool is_pinned = false); +void* Alloc(Place place, size_t size); /** * \brief Free memory block in one place. @@ -43,7 +43,7 @@ void* Alloc(Place place, size_t size, bool is_pinned = false); * */ template -void Free(Place place, void* ptr, bool is_pinned = false); +void Free(Place place, void* ptr); /** * \brief Total size of used memory in one place. 
@@ -57,6 +57,7 @@ size_t Used(Place place); struct Usage : public boost::static_visitor { size_t operator()(const platform::CPUPlace& cpu) const; size_t operator()(const platform::CUDAPlace& gpu) const; + size_t operator()(const platform::CUDAPinnedPlace& cuda_pinned) const; }; size_t memory_usage(const platform::Place& p); @@ -74,13 +75,11 @@ class PODDeleter { static_assert(std::is_pod::value, "T must be POD"); public: - explicit PODDeleter(Place place, bool is_pinned = false) - : place_(place), is_pinned_(is_pinned) {} - void operator()(T* ptr) { Free(place_, static_cast(ptr), is_pinned_); } + explicit PODDeleter(Place place) : place_(place) {} + void operator()(T* ptr) { Free(place_, static_cast(ptr)); } private: Place place_; - bool is_pinned_; }; /** diff --git a/paddle/fluid/memory/memory_test.cc b/paddle/fluid/memory/memory_test.cc index eb27a52b254c1cda065197746eb179bbd1d7f2f1..9fbbe62559b1e29d6942a1ada62558b20830489b 100644 --- a/paddle/fluid/memory/memory_test.cc +++ b/paddle/fluid/memory/memory_test.cc @@ -13,16 +13,16 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/memory/memory.h" + +#include + +#include "gtest/gtest.h" #include "paddle/fluid/memory/detail/memory_block.h" #include "paddle/fluid/memory/detail/meta_data.h" - #include "paddle/fluid/platform/cpu_info.h" #include "paddle/fluid/platform/gpu_info.h" #include "paddle/fluid/platform/place.h" -#include -#include - inline bool is_aligned(void const *p) { return 0 == (reinterpret_cast(p) & 0x3); } @@ -141,4 +141,59 @@ TEST(BuddyAllocator, GPUMultAlloc) { } } +size_t align(size_t size, paddle::platform::CUDAPinnedPlace place) { + size += sizeof(paddle::memory::detail::Metadata); + size_t alignment = paddle::platform::CUDAPinnedMinChunkSize(); + size_t remaining = size % alignment; + return remaining == 0 ? 
size : size + (alignment - remaining); +} + +TEST(BuddyAllocator, CUDAPinnedAllocator) { + void *p = nullptr; + + EXPECT_EQ(p, nullptr); + + paddle::platform::CUDAPinnedPlace cpu; + p = paddle::memory::Alloc(cpu, 4096); + + EXPECT_NE(p, nullptr); + + paddle::platform::Place place = cpu; + EXPECT_EQ(paddle::memory::Used(cpu), paddle::memory::memory_usage(place)); + + paddle::memory::Free(cpu, p); +} + +TEST(BuddyAllocator, CUDAPinnedMultAllocator) { + paddle::platform::CUDAPinnedPlace cpu; + + std::unordered_map ps; + + size_t total_size = paddle::memory::Used(cpu); + EXPECT_EQ(total_size, 0UL); + + for (auto size : + {0, 128, 256, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304}) { + ps[paddle::memory::Alloc(cpu, size)] = size; + + // Buddy Allocator doesn't manage too large memory chunk + if (paddle::memory::Used(cpu) == total_size) continue; + + size_t aligned_size = align(size, cpu); + total_size += aligned_size; + EXPECT_EQ(total_size, paddle::memory::Used(cpu)); + } + + for (auto p : ps) { + EXPECT_EQ(is_aligned(p.first), true); + paddle::memory::Free(cpu, p.first); + + // Buddy Allocator doesn't manage too large memory chunk + if (paddle::memory::Used(cpu) == total_size) continue; + + size_t aligned_size = align(p.second, cpu); + total_size -= aligned_size; + EXPECT_EQ(total_size, paddle::memory::Used(cpu)); + } +} #endif diff --git a/paddle/fluid/memory/pinned_memory_test.cu b/paddle/fluid/memory/pinned_memory_test.cu new file mode 100644 index 0000000000000000000000000000000000000000..a000001f41788fb16ac075426f06357cbe42d642 --- /dev/null +++ b/paddle/fluid/memory/pinned_memory_test.cu @@ -0,0 +1,147 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ +#include +#include + +#include "paddle/fluid/memory/detail/memory_block.h" +#include "paddle/fluid/memory/detail/meta_data.h" +#include "paddle/fluid/memory/memcpy.h" +#include "paddle/fluid/memory/memory.h" + +#include "paddle/fluid/platform/cpu_info.h" +#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/place.h" + +// This unit test is an example comparing the performance between using pinned +// memory and not. In general, using pinned memory will be faster. 
+template +__global__ void Kernel(T* output, int dim) { + int tid = blockIdx.x * blockDim.x + threadIdx.x; + if (tid < dim) { + output[tid] = output[tid] * output[tid] / 100; + } +} + +template +float test_pinned_memory() { + Place cpu_place; + paddle::platform::CUDAPlace cuda_place; + + const int data_size = 4096; + const int iteration = 10; + + // create event start and end + cudaEvent_t start_e, stop_e, copying_e; + float elapsedTime = 0; + cudaEventCreate(&start_e); + cudaEventCreate(&stop_e); + cudaEventCreate(©ing_e); + + // create computation stream, data copying stream + cudaStream_t computation_stream, copying_stream; + cudaStreamCreate(&computation_stream); + cudaStreamCreate(©ing_stream); + + // create record event, pinned memory, gpu memory + std::vector record_event(iteration); + std::vector input_pinned_mem(iteration); + std::vector gpu_mem(iteration); + std::vector output_pinned_mem(iteration); + + // initial data + for (int j = 0; j < iteration; ++j) { + cudaEventCreateWithFlags(&record_event[j], cudaEventDisableTiming); + cudaEventCreate(&(record_event[j])); + input_pinned_mem[j] = static_cast( + paddle::memory::Alloc(cpu_place, data_size * sizeof(float))); + output_pinned_mem[j] = static_cast( + paddle::memory::Alloc(cpu_place, data_size * sizeof(float))); + gpu_mem[j] = static_cast( + paddle::memory::Alloc(cuda_place, data_size * sizeof(float))); + + for (int k = 0; k < data_size; ++k) { + input_pinned_mem[j][k] = k; + } + } + + cudaEventRecord(start_e, computation_stream); + + // computation + for (int m = 0; m < 30; ++m) { + for (int i = 0; i < iteration; ++i) { + // cpu -> GPU on computation stream. + // note: this operation is async for pinned memory. + paddle::memory::Copy(cuda_place, gpu_mem[i], cpu_place, + input_pinned_mem[i], data_size * sizeof(float), + computation_stream); + + // call kernel on computation stream. + Kernel<<<4, 1024, 0, computation_stream>>>(gpu_mem[i], data_size); + + // record event_computation on computation stream + cudaEventRecord(record_event[i], computation_stream); + + // wait event_computation on copy stream. + // note: this operation is async. + cudaStreamWaitEvent(copying_stream, record_event[i], 0); + + // copy data GPU->CPU, on copy stream. + // note: this operation is async for pinned memory. 
+ paddle::memory::Copy(cpu_place, output_pinned_mem[i], cuda_place, + gpu_mem[i], data_size * sizeof(float), + copying_stream); + } + } + + cudaEventRecord(copying_e, copying_stream); + cudaStreamWaitEvent(computation_stream, copying_e, 0); + + cudaEventRecord(stop_e, computation_stream); + + cudaEventSynchronize(start_e); + cudaEventSynchronize(stop_e); + cudaEventElapsedTime(&elapsedTime, start_e, stop_e); + + // std::cout << cpu_place << " " + // << "time consume:" << elapsedTime / 30 << std::endl; + + for (int l = 0; l < iteration; ++l) { + for (int k = 0; k < data_size; ++k) { + float temp = input_pinned_mem[l][k]; + temp = temp * temp / 100; + EXPECT_FLOAT_EQ(temp, output_pinned_mem[l][k]); + } + } + + // destroy resource + cudaEventDestroy(copying_e); + cudaEventDestroy(start_e); + cudaEventDestroy(stop_e); + for (int j = 0; j < 10; ++j) { + cudaEventDestroy((record_event[j])); + paddle::memory::Free(cpu_place, input_pinned_mem[j]); + paddle::memory::Free(cpu_place, output_pinned_mem[j]); + paddle::memory::Free(cuda_place, gpu_mem[j]); + } + return elapsedTime / 30; +} + +TEST(CPUANDCUDAPinned, CPUAllocatorAndCUDAPinnedAllocator) { + // Generally speaking, operation on pinned_memory is faster than that on + // unpinned-memory, but if this unit test fails frequently, please close this + // test for the time being. + float time1 = test_pinned_memory(); + float time2 = test_pinned_memory(); + EXPECT_GT(time1, time2); +} diff --git a/paddle/fluid/operators/.clang-format b/paddle/fluid/operators/.clang-format deleted file mode 100644 index 29282dc87e2c499988c17d90d47d44cd5cf7f115..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/.clang-format +++ /dev/null @@ -1,5 +0,0 @@ ---- -Language: Cpp -BasedOnStyle: Google -Standard: Cpp11 -... diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index 9ed79453b962b8702a88cea888a860cd5d8d64d1..84eabab563e3404ad2a28bf76116c592db04742e 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -3,8 +3,8 @@ string(REPLACE "_mkldnn" "" GENERAL_OPS "${GENERAL_OPS}") string(REPLACE ".cc" "" GENERAL_OPS "${GENERAL_OPS}") list(REMOVE_DUPLICATES GENERAL_OPS) set(DEPS_OPS "") -set(pybind_file ${PADDLE_SOURCE_DIR}/paddle/fluid/pybind/pybind.h) -file(WRITE ${pybind_file} "// Generated by the paddle/operator/CMakeLists.txt. DO NOT EDIT!\n\n") +set(pybind_file ${PADDLE_BINARY_DIR}/paddle/fluid/pybind/pybind.h) +file(WRITE ${pybind_file} "// Generated by the paddle/fluid/operator/CMakeLists.txt. DO NOT EDIT!\n\n") function(op_library TARGET) # op_library is a function to create op library. The interface is same as # cc_library. 
But it handle split GPU/CPU code and link some common library @@ -193,6 +193,7 @@ if(WITH_DISTRIBUTE) set_source_files_properties(send_vars_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) op_library(send_barrier_op DEPS ${DISTRIBUTE_DEPS}) set_source_files_properties(send_barrier_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) + set_source_files_properties(send_recv_op_test.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) cc_test(test_send_recv SRCS send_recv_op_test.cc DEPS prefetch_op send_op listen_and_serv_op sum_op executor) else() set(DEPS_OPS ${DEPS_OPS} send_op prefetch_op recv_op listen_and_serv_op send_vars_op send_barrier_op) diff --git a/paddle/fluid/operators/conv_cudnn_op.cu.cc b/paddle/fluid/operators/conv_cudnn_op.cu.cc index a32aba4c1ff2f5e775aeb41f25b02322dbc6a64a..c70e3cc3c9198008d9eca5f462000aa67ff7e5ba 100644 --- a/paddle/fluid/operators/conv_cudnn_op.cu.cc +++ b/paddle/fluid/operators/conv_cudnn_op.cu.cc @@ -128,10 +128,32 @@ class CUDNNConvOpKernel : public framework::OpKernel { handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc, cudnn_output_desc, CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT, workspace_size_limit, &algo)); + +#if CUDA_VERSION >= 9000 && CUDNN_VERSION_MIN(7, 0, 1) + // Tensor core is supported since the volta GPU and + // is only enabled when input and filter data are float16 + if (dev_ctx.GetComputeCapability() >= 70 && + std::type_index(typeid(T)) == + std::type_index(typeid(platform::float16))) { + PADDLE_ENFORCE(platform::dynload::cudnnSetConvolutionMathType( + cudnn_conv_desc, CUDNN_TENSOR_OP_MATH)); + // Currently tensor core is only enabled using this algo + algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM; + } else { + PADDLE_ENFORCE(platform::dynload::cudnnSetConvolutionMathType( + cudnn_conv_desc, CUDNN_DEFAULT_MATH)); + } +#endif + // get workspace size able to allocate PADDLE_ENFORCE(platform::dynload::cudnnGetConvolutionForwardWorkspaceSize( handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc, cudnn_output_desc, algo, &workspace_size_in_bytes)); + // It is possible for float16 on Volta GPU to allocate more memory than + // the limit because the algo is overrided to use tensor core. + PADDLE_ENFORCE_LE(workspace_size_in_bytes, workspace_size_limit, + "workspace_size to be allocated exceeds the limit"); + // Allocate on GPU memory platform::CUDAPlace gpu = boost::get(ctx.GetPlace()); cudnn_workspace = paddle::memory::Alloc(gpu, workspace_size_in_bytes); diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc index 650bc92be22af9ea8afcacf590a11190109e8811..695db841a4ec666b2c8783dfc7df959711341d85 100644 --- a/paddle/fluid/operators/conv_op.cc +++ b/paddle/fluid/operators/conv_op.cc @@ -13,6 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/conv_op.h" + +#include +#include + #ifdef PADDLE_WITH_CUDA #include "paddle/fluid/platform/cudnn_helper.h" #endif diff --git a/paddle/fluid/operators/detail/bytebuffer_stream.cc b/paddle/fluid/operators/detail/bytebuffer_stream.cc index 741dd51de9e75feb608161579e56cb160b058ebb..a14171563edb0ac9a22b7ae493c965de3efb7823 100644 --- a/paddle/fluid/operators/detail/bytebuffer_stream.cc +++ b/paddle/fluid/operators/detail/bytebuffer_stream.cc @@ -17,7 +17,7 @@ limitations under the License. */ // file and did some modifications so that we can send gRPC // requests without too much copying of the tensor data. 
-#include "bytebuffer_stream.h" +#include "paddle/fluid/operators/detail/bytebuffer_stream.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/detail/bytebuffer_stream.h b/paddle/fluid/operators/detail/bytebuffer_stream.h index 1791a48aab1b66147f645c90757b35ef5f6e001b..054dd4ff294414cca55d7e033f2c5403bbb85526 100644 --- a/paddle/fluid/operators/detail/bytebuffer_stream.h +++ b/paddle/fluid/operators/detail/bytebuffer_stream.h @@ -19,9 +19,11 @@ limitations under the License. */ #pragma once -#include +#include + #include "google/protobuf/io/coded_stream.h" #include "google/protobuf/io/zero_copy_stream.h" +#include "grpc++/grpc++.h" namespace grpc { // A ZeroCopyInputStream that reads from grpc_byte_buffer @@ -56,7 +58,7 @@ class GrpcBufferReader final *data = GRPC_SLICE_START_PTR(slice_) + GRPC_SLICE_LENGTH(slice_) - backup_count_; GPR_CODEGEN_ASSERT(backup_count_ <= INT_MAX); - *size = (int)backup_count_; + *size = static_cast(backup_count_); backup_count_ = 0; return true; } @@ -68,7 +70,7 @@ class GrpcBufferReader final *data = GRPC_SLICE_START_PTR(slice_); // On win x64, int is only 32bit GPR_CODEGEN_ASSERT(GRPC_SLICE_LENGTH(slice_) <= INT_MAX); - byte_count_ += * size = (int)GRPC_SLICE_LENGTH(slice_); + byte_count_ += * size = static_cast(GRPC_SLICE_LENGTH(slice_)); return true; } diff --git a/paddle/fluid/operators/detail/grpc_client.cc b/paddle/fluid/operators/detail/grpc_client.cc index d79ba6d291950e1f089eb11713bd1c3e4d154b27..ef987d07f08525bff5267cdc2076ae767417e4f1 100644 --- a/paddle/fluid/operators/detail/grpc_client.cc +++ b/paddle/fluid/operators/detail/grpc_client.cc @@ -14,6 +14,8 @@ limitations under the License. */ #include "paddle/fluid/operators/detail/grpc_client.h" +#include + #include #include "paddle/fluid/framework/threadpool.h" @@ -54,7 +56,7 @@ bool RPCClient::AsyncSendVariable(const std::string& ep, auto call = s->stub_g_.PrepareUnaryCall( s->context_.get(), "/sendrecv.SendRecvService/SendVariable", req, &cq_); call->StartCall(); - call->Finish(&s->reply_, &s->status_, static_cast(s)); + call->Finish(&s->reply_, &s->status_, reinterpret_cast(s)); }); req_count_++; @@ -66,7 +68,7 @@ void ProcGetResponse(const VarHandle& var_h, // const sendrecv::VariableMessage& ret_msg) { const ::grpc::ByteBuffer& ret_msg) { framework::Variable* outvar = NULL; - DeserializeFromByteBuffer(ret_msg, *var_h.ctx, var_h.scope, outvar); + DeserializeFromByteBuffer(ret_msg, *var_h.ctx, var_h.scope, &outvar); } template @@ -110,7 +112,7 @@ bool RPCClient::AsyncGetVariable(const std::string& ep, auto call = s->stub_g_.PrepareUnaryCall( s->context_.get(), "/sendrecv.SendRecvService/GetVariable", buf, &cq_); call->StartCall(); - call->Finish(&s->reply_, &s->status_, static_cast(s)); + call->Finish(&s->reply_, &s->status_, reinterpret_cast(s)); }); req_count_++; @@ -170,7 +172,7 @@ void RPCClient::AsyncSendBatchBarrier(const std::string& ep, int64_t time_out) { sendrecv::VariableMessage req; req.set_varname(BATCH_BARRIER_MESSAGE); auto rpc = s->stub_->AsyncSendVariable(s->context_.get(), req, &cq_); - rpc->Finish(&s->reply_, &s->status_, static_cast(s)); + rpc->Finish(&s->reply_, &s->status_, reinterpret_cast(s)); req_count_++; } @@ -182,7 +184,7 @@ void RPCClient::AsyncSendFetchBarrier(const std::string& ep, int64_t time_out) { sendrecv::VariableMessage req; req.set_varname(FETCH_BARRIER_MESSAGE); auto rpc = s->stub_->AsyncGetVariable(s->context_.get(), req, &cq_); - rpc->Finish(&s->reply_, &s->status_, static_cast(s)); + rpc->Finish(&s->reply_, &s->status_, 
reinterpret_cast(s)); req_count_++; } diff --git a/paddle/fluid/operators/detail/grpc_client.h b/paddle/fluid/operators/detail/grpc_client.h index fe237e54ef61fb5b6e9bfa46fbe6b3df3dd40265..4425b19328f503eb7f9022916ed6452cdfea4eeb 100644 --- a/paddle/fluid/operators/detail/grpc_client.h +++ b/paddle/fluid/operators/detail/grpc_client.h @@ -14,10 +14,9 @@ limitations under the License. */ #pragma once -#include -#include #include -#include + +#include // NOLINT #include #include #include @@ -25,11 +24,11 @@ limitations under the License. */ #include #include -#include -#include -#include -#include - +#include "grpc++/generic/generic_stub.h" +#include "grpc++/grpc++.h" +#include "grpc++/support/byte_buffer.h" +#include "grpc++/support/slice.h" +#include "grpc/support/log.h" #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/scope.h" diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc index 7c978b28b6873d05afb435de4caf7f4ce5d33193..2e7bf1921a26fc88d854e4db2c501548695a136a 100644 --- a/paddle/fluid/operators/detail/grpc_server.cc +++ b/paddle/fluid/operators/detail/grpc_server.cc @@ -186,7 +186,8 @@ void AsyncGRPCServer::WaitClientGet(int count) { void AsyncGRPCServer::RunSyncUpdate() { ::grpc::ServerBuilder builder; - builder.AddListeningPort(address_, ::grpc::InsecureServerCredentials()); + builder.AddListeningPort(address_, ::grpc::InsecureServerCredentials(), + &selected_port_); builder.SetMaxSendMessageSize(std::numeric_limits::max()); builder.SetMaxReceiveMessageSize(std::numeric_limits::max()); builder.RegisterService(&service_); @@ -196,7 +197,8 @@ void AsyncGRPCServer::RunSyncUpdate() { cq_prefetch_ = builder.AddCompletionQueue(); server_ = builder.BuildAndStart(); - LOG(INFO) << "Server listening on " << address_ << std::endl; + LOG(INFO) << "Server listening on " << address_ + << " selected port: " << selected_port_; std::function send_register = std::bind(&AsyncGRPCServer::TryToRegisterNewSendOne, this); @@ -273,7 +275,7 @@ void AsyncGRPCServer::TryToRegisterNewPrefetchOne() { // FIXME(typhoonzero): change cq_name to enum. void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq, - std::string cq_name, + const std::string& cq_name, std::function TryToRegisterNewOne) { TryToRegisterNewOne(); diff --git a/paddle/fluid/operators/detail/grpc_server.h b/paddle/fluid/operators/detail/grpc_server.h index b0596d3cd1e108f28e8f1485d6b5c989c55be7e9..380447f47c142bdc16e60f78c4b2d94235ec5060 100644 --- a/paddle/fluid/operators/detail/grpc_server.h +++ b/paddle/fluid/operators/detail/grpc_server.h @@ -14,10 +14,11 @@ limitations under the License. 
*/ #pragma once -#include #include +#include // NOLINT #include +#include "grpc++/grpc++.h" #include "paddle/fluid/framework/executor.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/program_desc.h" @@ -62,6 +63,8 @@ class AsyncGRPCServer final { void SetExecutor(framework::Executor *executor) { executor_ = executor; } + int GetSelectedPort() { return selected_port_; } + const ReceivedMessage Get() { return this->var_recv_queue_.Pop(); } void Push(const std::string &msg_name) { @@ -71,7 +74,8 @@ class AsyncGRPCServer final { void ShutDown(); protected: - void HandleRequest(::grpc::ServerCompletionQueue *cq, std::string cq_name, + void HandleRequest(::grpc::ServerCompletionQueue *cq, + const std::string &cq_name, std::function TryToRegisterNewOne); void TryToRegisterNewSendOne(); void TryToRegisterNewGetOne(); @@ -109,6 +113,7 @@ class AsyncGRPCServer final { int prefetch_blk_id_; framework::ProgramDesc *program_; framework::Executor *executor_; + int selected_port_; }; }; // namespace detail diff --git a/paddle/fluid/operators/detail/grpc_server_test.cc b/paddle/fluid/operators/detail/grpc_server_test.cc index 1ad62863a1a98c28cb08f47dfa8a5bfae463ba91..b89aed0157de8e95564015b3e7f42316a39537f5 100644 --- a/paddle/fluid/operators/detail/grpc_server_test.cc +++ b/paddle/fluid/operators/detail/grpc_server_test.cc @@ -14,7 +14,7 @@ limitations under the License. */ #include #include -#include +#include // NOLINT #include "gtest/gtest.h" #include "paddle/fluid/operators/detail/grpc_client.h" diff --git a/paddle/fluid/operators/detail/proto_encoder_helper.h b/paddle/fluid/operators/detail/proto_encoder_helper.h index 4a7bfb8bd586fe84c9243bc64117d146c4386674..d91d054b2507f32d1e948dde33da06a70cabe775 100644 --- a/paddle/fluid/operators/detail/proto_encoder_helper.h +++ b/paddle/fluid/operators/detail/proto_encoder_helper.h @@ -19,7 +19,9 @@ limitations under the License. */ #pragma once -#include +#include + +#include "grpc++/grpc++.h" #include "paddle/fluid/platform/enforce.h" namespace paddle { @@ -142,6 +144,6 @@ class ProtoEncodeHelper { char* limit_; // Just for CHECKs }; -} // detail -} // operators -} // paddle +} // namespace detail +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/detail/sendrecvop_utils.cc b/paddle/fluid/operators/detail/sendrecvop_utils.cc index 7e3f015dabdb3fd6190d1ca2f422aa526e8889cd..f8576d01b10f4c0fda4d12d371b2966739acfc21 100644 --- a/paddle/fluid/operators/detail/sendrecvop_utils.cc +++ b/paddle/fluid/operators/detail/sendrecvop_utils.cc @@ -13,8 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/detail/sendrecvop_utils.h" + #include -#include +#include // NOLINT + #include "google/protobuf/io/coded_stream.h" #include "google/protobuf/io/zero_copy_stream.h" #include "paddle/fluid/framework/data_type.h" @@ -42,7 +44,7 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, void* buf = malloc(1024); void* payload = nullptr; size_t payload_size; - ProtoEncodeHelper e((char*)buf, 1024); + ProtoEncodeHelper e(static_cast(buf), 1024); e.WriteString(VarMsg::kVarnameFieldNumber, name); if (var->IsType()) { e.WriteUint64(VarMsg::kTypeFieldNumber, 0); @@ -152,7 +154,7 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, framework::proto::VarType_Type_SELECTED_ROWS) { auto* slr = var->GetMutable(); - ProtoEncodeHelper e2((char*)buf, 128); + ProtoEncodeHelper e2(static_cast(buf), 128); // NOTE: rows is of type int64_t size_t rows_memory_size = slr->rows().size() * framework::SizeOfType(typeid(int64_t)); @@ -181,10 +183,10 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, void DeserializeFromByteBuffer(const ::grpc::ByteBuffer& msg, const platform::DeviceContext& ctx, const framework::Scope* scope, - framework::Variable*& var) { + framework::Variable** var) { operators::detail::VariableResponse resp(scope, &ctx); PADDLE_ENFORCE(resp.Parse(msg) == 0, "parse bytebuffer to tensor error!"); - var = resp.GetVar(); + *var = resp.GetVar(); } } // namespace detail diff --git a/paddle/fluid/operators/detail/sendrecvop_utils.h b/paddle/fluid/operators/detail/sendrecvop_utils.h index b3b2b8469c8f19313038f2551ab04708a05656d5..d7954440846b8db9a9add0110fb9a546a762774d 100644 --- a/paddle/fluid/operators/detail/sendrecvop_utils.h +++ b/paddle/fluid/operators/detail/sendrecvop_utils.h @@ -51,7 +51,7 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, void DeserializeFromByteBuffer(const ::grpc::ByteBuffer& msg, const platform::DeviceContext& ctx, const framework::Scope* scope, - framework::Variable*& var); + framework::Variable** var); inline std::type_index ToTypeIndex(sendrecv::VariableMessage::Type type) { switch (type) { diff --git a/paddle/fluid/operators/detail/serde_test.cc b/paddle/fluid/operators/detail/serde_test.cc index ea1670e56f3c2fedc2617db1425472e52c6519f5..f8cae6b26acf9d37ca286487065d70ede4c03120 100644 --- a/paddle/fluid/operators/detail/serde_test.cc +++ b/paddle/fluid/operators/detail/serde_test.cc @@ -14,9 +14,9 @@ limitations under the License. */ #include #include -#include +#include // NOLINT -#include +#include "google/protobuf/text_format.h" #include "gtest/gtest.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/tensor_util.h" @@ -107,7 +107,7 @@ void RunSerdeTestSelectedRows(platform::Place place) { for (int i = 0; i < tensor_numel; ++i) { EXPECT_FLOAT_EQ(tensor_data2[i], 32.7); } - for (int i = 0; i < rows2->size(); ++i) { + for (int64_t i = 0; i < rows2->size(); ++i) { EXPECT_EQ(rows_data2[i], i); } EXPECT_EQ(slr2->height(), 1000); diff --git a/paddle/fluid/operators/detail/simple_block_queue.h b/paddle/fluid/operators/detail/simple_block_queue.h index 36b58b0c6700b5af7eaea92d2b0c32adaba35bb8..69773e05df7ed76f31c26f4304693fec2e9aac9c 100644 --- a/paddle/fluid/operators/detail/simple_block_queue.h +++ b/paddle/fluid/operators/detail/simple_block_queue.h @@ -14,9 +14,9 @@ limitations under the License. 
*/ #pragma once -#include +#include // NOLINT #include -#include +#include // NOLINT namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/detail/variable_response.cc b/paddle/fluid/operators/detail/variable_response.cc index f59c9b50bb36c12c9abc0a52e0d11c6a73217047..78e1d274a92241b5f2093beb63acdc8c497dfb83 100644 --- a/paddle/fluid/operators/detail/variable_response.cc +++ b/paddle/fluid/operators/detail/variable_response.cc @@ -13,7 +13,11 @@ // limitations under the License. #include "paddle/fluid/operators/detail/variable_response.h" -#include + +#include +#include +#include + #include "paddle/fluid/operators/detail/send_recv.pb.h" #include "paddle/fluid/operators/detail/sendrecvop_utils.h" @@ -108,7 +112,8 @@ bool ReadRaw(::google::protobuf::io::CodedInputStream* input, bool VariableResponse::CopyLodTensorData( ::google::protobuf::io::CodedInputStream* input, - const platform::DeviceContext& ctx, framework::DDim& dims, int length) { + const platform::DeviceContext& ctx, const framework::DDim& dims, + int length) { auto var = scope_->FindVar(meta_.varname()); auto* tensor = var->GetMutable(); tensor->Resize(dims); @@ -144,14 +149,15 @@ inline framework::DDim GetDims( bool VariableResponse::CopySelectRowsTensorData( ::google::protobuf::io::CodedInputStream* input, - const platform::DeviceContext& ctx, framework::DDim& dims, int length) { + const platform::DeviceContext& ctx, const framework::DDim& dims, + int length) { auto var = scope_->FindVar(meta_.varname()); auto* slr = var->GetMutable(); slr->set_height(meta_.slr_height()); auto* tensor = slr->mutable_value(); tensor->Resize(dims); PADDLE_ENFORCE_EQ( - tensor->numel(), + static_cast(tensor->numel()), length / framework::SizeOfType( paddle::operators::detail::ToTypeIndex(meta_.data_type()))); void* tensor_data = tensor->mutable_data( diff --git a/paddle/fluid/operators/detail/variable_response.h b/paddle/fluid/operators/detail/variable_response.h index e121ed7bce966d7dea94f71087f2187dcaa17cec..050b6b84010b4f3e95bc88e5bb738ff18b7fe423 100644 --- a/paddle/fluid/operators/detail/variable_response.h +++ b/paddle/fluid/operators/detail/variable_response.h @@ -14,6 +14,8 @@ #pragma once +#include + #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/scope.h" @@ -60,14 +62,14 @@ class VariableResponse { private: bool CopySelectRowsTensorData(::google::protobuf::io::CodedInputStream* input, const platform::DeviceContext& ctx, - framework::DDim& dims, int length); + const framework::DDim& dims, int length); bool CopySelectRowsData(::google::protobuf::io::CodedInputStream* input, const platform::DeviceContext& ctx, int length); bool CopyLodTensorData(::google::protobuf::io::CodedInputStream* input, const platform::DeviceContext& ctx, - framework::DDim& dims, int length); + const framework::DDim& dims, int length); private: const framework::Scope* scope_; diff --git a/paddle/fluid/operators/fc_mkldnn_op.cc b/paddle/fluid/operators/fc_mkldnn_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..847b7b0c12e1679501dbe83d578b23ca2aef3e9e --- /dev/null +++ b/paddle/fluid/operators/fc_mkldnn_op.cc @@ -0,0 +1,303 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/operators/fc_op.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/mkldnn_helper.h" + +namespace paddle { +namespace operators { + +using paddle::framework::Tensor; +using paddle::platform::MKLDNNDeviceContext; + +template +class MKLDNNMD { + public: + explicit MKLDNNMD(const T* in, const T* w, bool bias) + : in(paddle::framework::vectorize2int(in->dims())), + w(paddle::framework::vectorize2int(w->dims())) { + with_bias_ = bias; + } + + mkldnn::memory::desc dst() const { + return platform::MKLDNNMemDesc({in[0], w[1]}, + mkldnn::memory::data_type::f32, + mkldnn::memory::format::nc); + } + + mkldnn::memory::desc src() const { + return is_spatial() + ? platform::MKLDNNMemDesc({in[0], in[1], in[2], in[3]}, + mkldnn::memory::data_type::f32, + mkldnn::memory::format::nchw) + : platform::MKLDNNMemDesc({in[0], in[1]}, + mkldnn::memory::data_type::f32, + mkldnn::memory::format::nc); + } + + mkldnn::memory::desc weights() const { + return is_spatial() + ? platform::MKLDNNMemDesc({w[1], in[1], in[2], in[3]}, + mkldnn::memory::data_type::f32, + mkldnn::memory::format::oihw) + : platform::MKLDNNMemDesc({w[1], in[1]}, + mkldnn::memory::data_type::f32, + mkldnn::memory::format::oi); + } + + mkldnn::memory::desc bias() const { + return with_bias_ + ? platform::MKLDNNMemDesc({w[1]}, mkldnn::memory::data_type::f32, + mkldnn::memory::format::format_undef) + : platform::MKLDNNMemDesc({}, mkldnn::memory::data_type::f32, + mkldnn::memory::format::format_undef); + } + + private: + bool is_spatial() const { return in.size() > 1 && w.size() > 1; } + + std::vector in; + std::vector w; + bool with_bias_; + bool is_spatial_; +}; + +class MKLDNNMemory { + public: + MKLDNNMemory(MKLDNNMD* t, const mkldnn::engine& e) + : md_(t), engine_(e) {} + virtual ~MKLDNNMemory() = default; + + template + mkldnn::memory dst(const Output* out) { + return mkldnn::memory({md_->dst(), engine_}, + static_cast(const_cast(out))); + } + + template + mkldnn::memory dst(Output* out) { + return mkldnn::memory({md_->dst(), engine_}, out); + } + + template + mkldnn::memory src(const Input* in) { + return mkldnn::memory({md_->src(), engine_}, + static_cast(const_cast(in))); + } + + template + mkldnn::memory weights(const Weight* w) { + return mkldnn::memory({md_->weights(), engine_}, + static_cast(const_cast(w))); + } + + mkldnn::memory bias() { + return mkldnn::memory(mkldnn::memory::primitive_desc(md_->bias(), engine_)); + } + + private: + MKLDNNMD* md_; + const mkldnn::engine& engine_; +}; + +template +class FCMKLDNNOpKernel : public paddle::framework::OpKernel { + void Compute(const paddle::framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()), + "It must use CPUPlace."); + + auto& dev_ctx = ctx.template device_context(); + const auto& mkldnn_engine = dev_ctx.GetEngine(); + + auto input = ctx.Input("Input"); + auto w = ctx.Input("W"); + + PADDLE_ENFORCE(input->dims().size() == 2 || input->dims().size() == 4, + "Input must be with 2 or 4 dimensions, i.e. 
NCHW"); + PADDLE_ENFORCE(w->dims().size() == 2 || w->dims().size() == 4, + "Weights must be with 2 or 4 dimensions, i.e. OI or OIHW"); + + bool with_bias = ctx.Attr("bias_attr"); + MKLDNNMD md(input, w, with_bias); + + std::shared_ptr pd = + FcFwdPrimitiveDesc(md.src(), md.weights(), md.dst(), md.bias(), + with_bias, mkldnn_engine); + + const std::string key = ctx.op().Output("Out"); + const std::string key_fc_pd = key + "@fc_pd"; + + dev_ctx.SetBlob(key_fc_pd, pd); + + MKLDNNMemory mem(&md, mkldnn_engine); + + const T* input_data = input->data(); + const T* w_data = w->data(); + + auto output = ctx.Output("Out"); + T* output_data = output->mutable_data(ctx.GetPlace()); + + auto dst_memory = mem.dst(output_data); + auto src_memory = mem.src(input_data); + auto weights_memory = mem.weights(w_data); + auto bias_memory = mem.bias(); + + auto forward = with_bias ? mkldnn::inner_product_forward( + *pd, src_memory, weights_memory, bias_memory, + dst_memory) + : mkldnn::inner_product_forward( + *pd, src_memory, weights_memory, dst_memory); + + std::vector pipeline = {forward}; + mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); + } + + private: + std::unique_ptr + FcFwdPrimitiveDesc(const mkldnn::memory::desc& src, + const mkldnn::memory::desc& weights, + const mkldnn::memory::desc& dst, + const mkldnn::memory::desc& bias, const bool with_bias, + const mkldnn::engine& engine) const { + auto desc = with_bias + ? mkldnn::inner_product_forward::desc( + mkldnn::prop_kind::forward, src, weights, bias, dst) + : mkldnn::inner_product_forward::desc( + mkldnn::prop_kind::forward, src, weights, dst); + + auto pd = new mkldnn::inner_product_forward::primitive_desc(desc, engine); + return std::unique_ptr(pd); + } +}; + +template +class FCMKLDNNGradOpKernel : public paddle::framework::OpKernel { + public: + void Compute(const paddle::framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()), + "It must use CPUPlace."); + + auto& dev_ctx = ctx.template device_context(); + const auto& mkldnn_engine = dev_ctx.GetEngine(); + + T* input_grad_data = nullptr; + T* w_grad_data = nullptr; + + Tensor* input_grad = ctx.Output(framework::GradVarName("Input")); + Tensor* w_grad = ctx.Output(framework::GradVarName("W")); + + if (input_grad) { + input_grad_data = input_grad->mutable_data(ctx.GetPlace()); + } + if (w_grad) { + w_grad_data = w_grad->mutable_data(ctx.GetPlace()); + } + + const Tensor* input = ctx.Input("Input"); + const T* input_data = input->data(); + + const Tensor* w = ctx.Input("W"); + const T* w_data = w->data(); + + const Tensor* out_grad = ctx.Input(framework::GradVarName("Out")); + const T* out_grad_data = out_grad->data(); + + bool with_bias = ctx.Attr("bias_attr"); + + MKLDNNMD md(input, w, with_bias); + MKLDNNMemory mem(&md, mkldnn_engine); + + auto dst_memory = mem.dst(out_grad_data); + auto src_memory = mem.src(input_data); + auto weights_memory = mem.weights(w_data); + auto bias_memory = mem.bias(); + + const std::string key = ctx.op().Input("Out"); + const std::string key_fc_pd = key + "@fc_pd"; + + auto pd = + std::static_pointer_cast( + dev_ctx.GetBlob(key_fc_pd)); + + PADDLE_ENFORCE(pd != nullptr, "Fail to find key_fc_pd in device context"); + + if (w_grad) { + auto weights_grad_memory = mem.weights(w_grad_data); + + mkldnn::inner_product_backward_weights::primitive_desc bwd_weight_pd = + FcBwdWeightsPrimitiveDesc(md.src(), md.weights(), md.dst(), md.bias(), + with_bias, *pd, mkldnn_engine); + + auto bwd_weights_prim 
= mkldnn::inner_product_backward_weights( + bwd_weight_pd, src_memory, dst_memory, weights_grad_memory, + bias_memory); + + std::vector pipeline{bwd_weights_prim}; + mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); + } + + if (input_grad) { + auto src_grad_memory = mem.src(input_grad_data); + + mkldnn::inner_product_backward_data::primitive_desc bwd_data_pd = + FcBwdDataPrimitiveDesc(md.src(), md.weights(), md.dst(), *pd, + mkldnn_engine); + + auto bwd_data_prim = mkldnn::inner_product_backward_data( + bwd_data_pd, dst_memory, weights_memory, src_grad_memory); + + std::vector pipeline{bwd_data_prim}; + mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); + } + } + + private: + mkldnn::inner_product_backward_weights::primitive_desc + FcBwdWeightsPrimitiveDesc( + const mkldnn::memory::desc& src, const mkldnn::memory::desc& diff_weights, + const mkldnn::memory::desc& diff_dst, const mkldnn::memory::desc& bias, + const bool with_bias, + const mkldnn::inner_product_forward::primitive_desc& pd, + const mkldnn::engine& engine) const { + auto bwd_weight_desc = with_bias + ? mkldnn::inner_product_backward_weights::desc( + src, diff_weights, bias, diff_dst) + : mkldnn::inner_product_backward_weights::desc( + src, diff_weights, bias, diff_dst); + + return mkldnn::inner_product_backward_weights::primitive_desc( + bwd_weight_desc, engine, pd); + } + + mkldnn::inner_product_backward_data::primitive_desc FcBwdDataPrimitiveDesc( + const mkldnn::memory::desc& diff_src, const mkldnn::memory::desc& weights, + const mkldnn::memory::desc& diff_dst, + const mkldnn::inner_product_forward::primitive_desc& pd, + const mkldnn::engine& engine) const { + auto bwd_data_desc = + mkldnn::inner_product_backward_data::desc(diff_src, weights, diff_dst); + return mkldnn::inner_product_backward_data::primitive_desc(bwd_data_desc, + engine, pd); + } +}; +} // namespace operators +} // namespace paddle + +REGISTER_OP_KERNEL(fc, MKLDNN, ::paddle::platform::CPUPlace, + paddle::operators::FCMKLDNNOpKernel); + +REGISTER_OP_KERNEL(fc_grad, MKLDNN, ::paddle::platform::CPUPlace, + paddle::operators::FCMKLDNNGradOpKernel); diff --git a/paddle/fluid/operators/fc_op.cc b/paddle/fluid/operators/fc_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..381771f157d78fb04e54f0a07c40e4df2c91441a --- /dev/null +++ b/paddle/fluid/operators/fc_op.cc @@ -0,0 +1,102 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/operators/fc_op.h" +#include + +namespace paddle { +namespace operators { + +void FCOp::InferShape(framework::InferShapeContext* ctx) const { + PADDLE_ENFORCE(ctx->HasInput("Input"), + "X(Input) of Fully Connected should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Out(Output) of Fully Connected should not be null."); + PADDLE_ENFORCE(ctx->HasInput("W"), + "W(Input) of Fully Connected should not be null."); + + auto in_dims = ctx->GetInputDim("Input"); + auto w_dims = ctx->GetInputDim("W"); + std::vector output_shape({in_dims[0], w_dims[1]}); + + PADDLE_ENFORCE(in_dims.size() == 2 || in_dims.size() == 4, + "Fully Connected input should be 2-D or 4-D tensor."); + + PADDLE_ENFORCE(w_dims.size() == 2 || w_dims.size() == 4, + "Fully Connected input should be 2-D or 4-D tensor."); + + ctx->SetOutputDim("Out", framework::make_ddim(output_shape)); + ctx->ShareLoD("Input", "Out"); +} + +framework::OpKernelType FCOp::GetExpectedKernelType( + const framework::ExecutionContext& ctx) const { + framework::LibraryType library{framework::LibraryType::kMKLDNN}; + framework::DataLayout layout{framework::DataLayout::kAnyLayout}; + + return framework::OpKernelType( + framework::ToDataType(ctx.Input("Input")->type()), ctx.GetPlace(), + layout, library); +} + +void FCOpGrad::InferShape(framework::InferShapeContext* ctx) const { + auto in_dims = ctx->GetInputDim("Input"); + auto w_dims = ctx->GetInputDim("W"); + + if (ctx->HasOutput(framework::GradVarName("Input"))) { + ctx->SetOutputDim(framework::GradVarName("Input"), in_dims); + } + if (ctx->HasOutput(framework::GradVarName("W"))) { + ctx->SetOutputDim(framework::GradVarName("W"), w_dims); + } +} + +framework::OpKernelType FCOpGrad::GetExpectedKernelType( + const framework::ExecutionContext& ctx) const { + framework::LibraryType library{framework::LibraryType::kMKLDNN}; + framework::DataLayout layout{framework::DataLayout::kAnyLayout}; + + return framework::OpKernelType( + framework::ToDataType(ctx.Input("Input")->type()), ctx.GetPlace(), + layout, library); +} + +FCOpMaker::FCOpMaker(OpProto* proto, OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("Input", "(Tensor) The input tensor of fully connected operator. "); + AddInput("W", "(Tensor), The second input tensor of fc op."); + AddOutput("Out", "(Tensor) The output tensor of fully connected operator. "); + AddAttr("use_mkldnn", + "(bool, default false) Only used in mkldnn kernel") + .SetDefault(false); + AddAttr("bias_attr", "(bool, default false) Only used in mkldnn kernel") + .SetDefault(false); + AddComment(R"DOC( + Fully Connected Operator. + + The fully connected operation calculates the output based on the input, weights and bias attribute. + The size of each dimension of the parameters checked in the infer-shape. + The matrix of bias is generated by the mkldnn framework, when the bias_attr is True. + Additional parametrs are use_mkldnn and bias_attr. + The input(X) size and output(Out) size may be diffrent. 
+ + The fully connected layer only supports MKLDNN version +)DOC"); +} + +} // namespace operators +} // namespace paddle + +REGISTER_OP(fc, paddle::operators::FCOp, paddle::operators::FCOpMaker, fc_grad, + paddle::operators::FCOpGrad); diff --git a/paddle/fluid/operators/fc_op.h b/paddle/fluid/operators/fc_op.h new file mode 100644 index 0000000000000000000000000000000000000000..70fa96440d344397a7427c1338afee85bde923d4 --- /dev/null +++ b/paddle/fluid/operators/fc_op.h @@ -0,0 +1,52 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +class FCOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override; + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override; +}; + +class FCOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override; + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override; +}; + +class FCOpMaker : public framework::OpProtoAndCheckerMaker { + public: + FCOpMaker(OpProto* proto, OpAttrChecker* op_checker); +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index b19add24e2bd325896a96be53d3d9762abfe217c..9188f2d989e601b7a97dedaf71f7080829cdb7c3 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -12,20 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include #include +#include -#include "paddle/fluid/framework/executor.h" -#include "paddle/fluid/framework/lod_tensor.h" -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/framework/threadpool.h" -#include "paddle/fluid/operators/detail/grpc_server.h" +#include "paddle/fluid/operators/listen_and_serv_op.h" namespace paddle { namespace operators { -constexpr char kOptimizeBlock[] = "OptimizeBlock"; - void RunServer(std::shared_ptr service) { service->RunSyncUpdate(); VLOG(4) << "RunServer thread end"; @@ -45,152 +39,159 @@ static void CreateTensorFromMessageType(framework::Variable *var, } } -static void ParallelExecuteBlocks(const std::vector ¶llel_blkids, - framework::Executor *executor, - framework::ProgramDesc *program, - framework::Scope *scope) { +static void ParallelExecuteBlocks( + const std::vector ¶llel_blkids, framework::Executor *executor, + const std::vector> + &prepared, + framework::ProgramDesc *program, framework::Scope *scope) { std::vector> fs; for (size_t idx : parallel_blkids) { - fs.push_back(framework::Async([&executor, &program, &scope, idx]() { - int run_block = idx; // thread local - try { - executor->Run(*program, scope, run_block, false, false); - } catch (std::exception &e) { - LOG(ERROR) << "run sub program error " << e.what(); - } - })); + fs.push_back( + framework::Async([&executor, &prepared, &program, &scope, idx]() { + int run_block = idx; // thread local + try { + executor->RunPreparedContext(prepared[run_block].get(), scope, + false, false); + } catch (std::exception &e) { + LOG(ERROR) << "run sub program error " << e.what(); + } + })); } for (size_t i = 0; i < fs.size(); ++i) fs[i].wait(); } -class ListenAndServOp : public framework::OperatorBase { - public: - ListenAndServOp(const std::string &type, - const framework::VariableNameMap &inputs, - const framework::VariableNameMap &outputs, - const framework::AttributeMap &attrs) - : OperatorBase(type, inputs, outputs, attrs) { - if (!rpc_service_) { - std::string endpoint = Attr("endpoint"); - rpc_service_.reset(new detail::AsyncGRPCServer(endpoint)); - server_thread_.reset(new std::thread(RunServer, rpc_service_)); - } - } +ListenAndServOp::ListenAndServOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + +int ListenAndServOp::GetSelectedPort() { + return rpc_service_->GetSelectedPort(); +} + +void ListenAndServOp::Stop() { + rpc_service_->Push(LISTEN_TERMINATE_MESSAGE); + server_thread_->join(); +} + +void ListenAndServOp::RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const { + platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); + auto &dev_ctx = *pool.Get(dev_place); + framework::Scope &recv_scope = scope.NewScope(); - void Stop() override { - rpc_service_->Push(LISTEN_TERMINATE_MESSAGE); - server_thread_->join(); + if (!rpc_service_) { + std::string endpoint = Attr("endpoint"); + rpc_service_.reset(new detail::AsyncGRPCServer(endpoint)); } - void RunImpl(const framework::Scope &scope, - const platform::Place &dev_place) const override { - platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); - auto &dev_ctx = *pool.Get(dev_place); - framework::Scope &recv_scope = scope.NewScope(); - - // FIXME(Yancey1989): initialize rpc server with lazy mode. 
- rpc_service_->SetScope(&recv_scope); - rpc_service_->SetDevCtx(&dev_ctx); - auto ins = Inputs("X"); - auto fan_in = Attr("Fanin"); - - auto *block = Attr(kOptimizeBlock); - auto *program = block->Program(); - int num_blocks = program->Size(); - PADDLE_ENFORCE_GE(num_blocks, 2, - "server program should have at least 2 blocks"); - - framework::Executor executor(dev_place); - - // TODO(qiao) set proper fields for table lookup and update - rpc_service_->SetExecutor(&executor); - rpc_service_->SetPrefetchBlkdId(0); - rpc_service_->SetProgram(program); - - // TODO(typhoonzero): change this to a while_op for every cluster-batch. - bool exit_flag = false; - // Record received sparse variables, so that - // we could reset those after execute optimize program - std::vector sparse_vars; - while (!exit_flag) { - // Get from multiple trainers, we don't care about the order in which - // the gradients arrives, just add suffix 0~n and merge the gradient. - rpc_service_->SetCond(0); - size_t recv_var_cnt = 0; - int batch_barrier = 0; - while (batch_barrier != fan_in) { - const detail::ReceivedMessage v = rpc_service_->Get(); - auto recv_var_name = v.first; - if (recv_var_name == LISTEN_TERMINATE_MESSAGE) { - LOG(INFO) << "received terminate message and exit"; - exit_flag = true; - break; - } else if (recv_var_name == BATCH_BARRIER_MESSAGE) { - VLOG(3) << "recv batch barrier message"; - batch_barrier++; - continue; - } else { - VLOG(3) << "received grad: " << recv_var_name; - recv_var_cnt++; - auto var = v.second->GetVar(); - if (var == nullptr) { - LOG(ERROR) << "Can not find server side var: " << recv_var_name; - PADDLE_THROW("Can not find server side var"); - } - if (var->IsType()) { - sparse_vars.push_back(var); - } - } - } - if (exit_flag) { - rpc_service_->SetCond(1); - rpc_service_->ShutDown(); + auto ins = Inputs("X"); + auto fan_in = Attr("Fanin"); + auto *block = Attr(kOptimizeBlock); + auto *program = block->Program(); + size_t num_blocks = program->Size(); + PADDLE_ENFORCE_GE(num_blocks, 2, + "server program should have at least 2 blocks"); + + framework::Executor executor(dev_place); + std::vector block_list; + for (size_t blkid = 1; blkid < num_blocks; ++blkid) { + block_list.push_back(blkid); + } + auto prepared = executor.Prepare(*program, block_list); + // Insert placeholder for block0 which holds current op itself. + prepared.insert(prepared.begin(), + std::shared_ptr(nullptr)); + + rpc_service_->SetScope(&recv_scope); + rpc_service_->SetDevCtx(&dev_ctx); + // TODO(qiao) set proper fields for table lookup and update + rpc_service_->SetExecutor(&executor); + rpc_service_->SetPrefetchBlkdId(0); + rpc_service_->SetProgram(program); + // start the server listening after all member initialized. + server_thread_.reset(new std::thread(RunServer, rpc_service_)); + // FIXME(typhoonzero): do we need to wait until the server port is ready? + sleep(5); + + // TODO(typhoonzero): change this to a while_op for every cluster-batch. + bool exit_flag = false; + // Record received sparse variables, so that + // we could reset those after execute optimize program + std::vector sparse_vars; + while (!exit_flag) { + // Get from multiple trainers, we don't care about the order in which + // the gradients arrives, just add suffix 0~n and merge the gradient. 
+ rpc_service_->SetCond(0); + size_t recv_var_cnt = 0; + int batch_barrier = 0; + while (batch_barrier != fan_in) { + const detail::ReceivedMessage v = rpc_service_->Get(); + auto recv_var_name = v.first; + if (recv_var_name == LISTEN_TERMINATE_MESSAGE) { + LOG(INFO) << "received terminate message and exit"; + exit_flag = true; break; - } - - // NOTE: if is_gpu_place, CUDA kernels are laugched by multiple threads - // and this will still work. - - // The optimize blocks which have the same parent ID would run parallel - // TODO(Yancey1989): need to use ParallelExecutor for future - size_t last_parent_blkid = program->Block(1).Parent(); - std::vector parallel_blkids; - parallel_blkids.push_back(1); - double ts = detail::GetTimestamp(); - for (size_t blkid = 2; blkid < num_blocks; ++blkid) { - if (program->Block(blkid).Parent() != last_parent_blkid) { - for (size_t idx : parallel_blkids) VLOG(3) << idx; - ParallelExecuteBlocks(parallel_blkids, &executor, program, - &recv_scope); - parallel_blkids.clear(); - last_parent_blkid = program->Block(blkid).Parent(); + } else if (recv_var_name == BATCH_BARRIER_MESSAGE) { + VLOG(3) << "recv batch barrier message"; + batch_barrier++; + continue; + } else { + VLOG(3) << "received grad: " << recv_var_name; + recv_var_cnt++; + auto var = v.second->GetVar(); + if (var == nullptr) { + LOG(ERROR) << "Can not find server side var: " << recv_var_name; + PADDLE_THROW("Can not find server side var"); + } + if (var->IsType()) { + sparse_vars.push_back(var); } - parallel_blkids.push_back(blkid); - } - ParallelExecuteBlocks(parallel_blkids, &executor, program, &recv_scope); - - VLOG(3) << "run all blocks spent " << detail::GetTimestamp() - ts - << "(ms)"; - - // Reset the received sparse variables, the sum operator would not - // sum the input sparse variables which rows is empty at the next - // mini-batch. - // TODO(Yancey1989): move the reset action into an operator, we couldn't - // have any hide logic in the operator. - for (auto &var : sparse_vars) { - var->GetMutable()->mutable_rows()->clear(); } + } + if (exit_flag) { rpc_service_->SetCond(1); - // FIXME(typhoonzero): use another condition to sync wait clients get. - rpc_service_->WaitClientGet(fan_in); - sparse_vars.clear(); - } // while(true) - } + rpc_service_->ShutDown(); + break; + } - protected: - std::shared_ptr rpc_service_; - std::shared_ptr server_thread_; -}; + // NOTE: if is_gpu_place, CUDA kernels are laugched by multiple threads + // and this will still work. + + // The optimize blocks which have the same parent ID would run parallel + // TODO(Yancey1989): need to use ParallelExecutor for future + int32_t last_parent_blkid = program->Block(1).Parent(); + std::vector parallel_blkids; + parallel_blkids.push_back(1); + double ts = detail::GetTimestamp(); + for (size_t blkid = 2; blkid < num_blocks; ++blkid) { + if (program->Block(blkid).Parent() != last_parent_blkid) { + ParallelExecuteBlocks(parallel_blkids, &executor, prepared, program, + &recv_scope); + parallel_blkids.clear(); + last_parent_blkid = program->Block(blkid).Parent(); + } + parallel_blkids.push_back(blkid); + } + ParallelExecuteBlocks(parallel_blkids, &executor, prepared, program, + &recv_scope); + VLOG(2) << "run all blocks spent " << detail::GetTimestamp() - ts << "(ms)"; + + // Reset the received sparse variables, the sum operator would not + // sum the input sparse variables which rows is empty at the next + // mini-batch. 
+ // TODO(Yancey1989): move the reset action into an operator, we couldn't + // have any hide logic in the operator. + for (auto &var : sparse_vars) { + var->GetMutable()->mutable_rows()->clear(); + } + rpc_service_->SetCond(1); + // FIXME(typhoonzero): use another condition to sync wait clients get. + rpc_service_->WaitClientGet(fan_in); + sparse_vars.clear(); + } // while(true) +} class ListenAndServOpMaker : public framework::OpProtoAndCheckerMaker { public: diff --git a/paddle/fluid/operators/listen_and_serv_op.h b/paddle/fluid/operators/listen_and_serv_op.h new file mode 100644 index 0000000000000000000000000000000000000000..0da87afc961e896f04b4f0028bf9b17d5e992548 --- /dev/null +++ b/paddle/fluid/operators/listen_and_serv_op.h @@ -0,0 +1,53 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include + +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/threadpool.h" +#include "paddle/fluid/operators/detail/grpc_server.h" + +namespace paddle { +namespace operators { + +constexpr char kOptimizeBlock[] = "OptimizeBlock"; + +void RunServer(std::shared_ptr service); + +class ListenAndServOp : public framework::OperatorBase { + public: + ListenAndServOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs); + + int GetSelectedPort(); + + void Stop() override; + + void RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const override; + + protected: + mutable std::shared_ptr rpc_service_; + mutable std::shared_ptr server_thread_; +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/lookup_table_op.cc b/paddle/fluid/operators/lookup_table_op.cc index deabcdc99f819851b2df9bb0c7b05a5b339568f3..bf33be310686640fa187a07cf46a157b7f433340 100644 --- a/paddle/fluid/operators/lookup_table_op.cc +++ b/paddle/fluid/operators/lookup_table_op.cc @@ -18,22 +18,6 @@ limitations under the License. 
*/ namespace paddle { namespace operators { -static inline framework::OpKernelType ExpectedKernelType( - const framework::ExecutionContext& ctx) { - auto* table_var = ctx.InputVar("W"); - if (table_var->IsType()) { - return framework::OpKernelType( - framework::ToDataType(table_var->Get().type()), - ctx.device_context()); - } else if (table_var->IsType()) { - return framework::OpKernelType( - framework::ToDataType(table_var->Get().value().type()), - ctx.device_context()); - } else { - PADDLE_THROW("W should be LoDTensor or SelectedRows"); - } -} - class LookupTableOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; @@ -67,7 +51,8 @@ class LookupTableOp : public framework::OperatorWithKernel { protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { - return ExpectedKernelType(ctx); + auto data_type = framework::GetDataTypeOfVar(ctx.InputVar("W")); + return framework::OpKernelType(data_type, ctx.device_context()); } }; @@ -138,7 +123,8 @@ class LookupTableOpGrad : public framework::OperatorWithKernel { protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { - return ExpectedKernelType(ctx); + auto data_type = framework::GetDataTypeOfVar(ctx.InputVar("W")); + return framework::OpKernelType(data_type, ctx.device_context()); } }; diff --git a/paddle/fluid/operators/lookup_table_op.h b/paddle/fluid/operators/lookup_table_op.h index fff5edda62d4b115605a4cab35ed5457b4db5f21..cb088c267bcc028ff11583cd73de5ca1722a9b69 100644 --- a/paddle/fluid/operators/lookup_table_op.h +++ b/paddle/fluid/operators/lookup_table_op.h @@ -30,13 +30,7 @@ using LoDTensor = framework::LoDTensor; using SelectedRows = framework::SelectedRows; using DDim = framework::DDim; -static constexpr int64_t kNoPadding = -1; - -inline size_t getIndex(const std::vector &rows, int64_t value) { - auto it = std::find(rows.begin(), rows.end(), value); - PADDLE_ENFORCE(it != rows.end(), "id should be in rows"); - return static_cast(std::distance(rows.begin(), it)); -} +constexpr int64_t kNoPadding = -1; template class LookupTableKernel : public framework::OpKernel { @@ -55,7 +49,9 @@ class LookupTableKernel : public framework::OpKernel { auto *table_t = context.Input("W"); table_dim = table_t->value().dims(); } else { - PADDLE_THROW("table only support LoDTensor and SelectedRows"); + PADDLE_THROW( + "The parameter W of a LookupTable " + "must be either LoDTensor or SelectedRows"); } int64_t *ids; @@ -107,7 +103,7 @@ class LookupTableKernel : public framework::OpKernel { memset(output + i * row_width, 0, row_width * sizeof(T)); } else { PADDLE_ENFORCE_GE(ids[i], 0); - auto id_index = getIndex(table_t.rows(), ids[i]); + auto id_index = table_t.index(ids[i]); memcpy(output + i * row_width, table + id_index * row_width, row_width * sizeof(T)); } @@ -128,7 +124,9 @@ class LookupTableGradKernel : public framework::OpKernel { auto *table_t = context.Input("W"); table_dim = table_t->value().dims(); } else { - PADDLE_THROW("table only support LoDTensor and SelectedRows"); + PADDLE_THROW( + "The parameter W of a LookupTable " + "must be either LoDTensor or SelectedRows"); } bool is_sparse = context.Attr("is_sparse"); diff --git a/paddle/fluid/operators/math/math_function.cc b/paddle/fluid/operators/math/math_function.cc index 299a0aed01dfe0448d896738d9fd33319b1b2887..44fd739fb1d161c6c7d6ab1cc611c59220280a4e 100644 --- a/paddle/fluid/operators/math/math_function.cc +++ 
b/paddle/fluid/operators/math/math_function.cc @@ -322,6 +322,14 @@ void set_constant_with_place( TensorSetConstantCPU(tensor, value)); } +template <> +void set_constant_with_place( + const platform::DeviceContext& context, framework::Tensor* tensor, + float value) { + framework::VisitDataType(framework::ToDataType(tensor->type()), + TensorSetConstantCPU(tensor, value)); +} + struct TensorSetConstantWithPlace : public boost::static_visitor { TensorSetConstantWithPlace(const platform::DeviceContext& context, framework::Tensor* tensor, float value) diff --git a/paddle/fluid/operators/math/math_function.cu b/paddle/fluid/operators/math/math_function.cu index 1e909db5288afccb9dd0be08a45cf3c27048ae6f..82e12943148a806bae719c722944d6a9d5236b7c 100644 --- a/paddle/fluid/operators/math/math_function.cu +++ b/paddle/fluid/operators/math/math_function.cu @@ -39,18 +39,33 @@ void gemm( cublasOperation_t cuTransB = (transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; - const half h_alpha = static_cast(alpha); - const half h_beta = static_cast(beta); - const half* h_A = reinterpret_cast(A); - const half* h_B = reinterpret_cast(B); - half* h_C = reinterpret_cast(C); + float h_alpha = static_cast(alpha); + float h_beta = static_cast(beta); // TODO(kexinzhao): add processing code for compute capability < 53 case PADDLE_ENFORCE_GE(context.GetComputeCapability(), 53, - "cublas Hgemm requires GPU compute capability >= 53"); - PADDLE_ENFORCE(platform::dynload::cublasHgemm( - context.cublas_handle(), cuTransB, cuTransA, N, M, K, &h_alpha, h_B, ldb, - h_A, lda, &h_beta, h_C, N)); + "cublas fp16 gemm requires GPU compute capability >= 53"); + + cublasGemmAlgo_t algo = CUBLAS_GEMM_DFALT; +#if CUDA_VERSION >= 9000 + if (context.GetComputeCapability() >= 70) { + PADDLE_ENFORCE(platform::dynload::cublasSetMathMode(context.cublas_handle(), + CUBLAS_TENSOR_OP_MATH)); + algo = CUBLAS_GEMM_DFALT_TENSOR_OP; + } else { + PADDLE_ENFORCE(platform::dynload::cublasSetMathMode(context.cublas_handle(), + CUBLAS_DEFAULT_MATH)); + } +#endif + + // cublasHgemm does true FP16 computation which is slow for non-Volta + // GPUs. So use cublasGemmEx instead which does pesudo FP16 computation: + // input/output in fp16, computation in fp32, which can also be accelerated + // using tensor cores in volta GPUs. + PADDLE_ENFORCE(platform::dynload::cublasGemmEx( + context.cublas_handle(), cuTransB, cuTransA, N, M, K, &h_alpha, B, + CUDA_R_16F, ldb, A, CUDA_R_16F, lda, &h_beta, C, CUDA_R_16F, N, + CUDA_R_32F, algo)); } template <> diff --git a/paddle/fluid/operators/math/softmax.cu b/paddle/fluid/operators/math/softmax.cu index 5518ebed3f792a5acdfbb27976bc2c6dbd78069a..a579182ec1bd5d10d95bbf8c6f5a0e70ceaaaf4b 100644 --- a/paddle/fluid/operators/math/softmax.cu +++ b/paddle/fluid/operators/math/softmax.cu @@ -14,6 +14,8 @@ limitations under the License. 
*/ #define EIGEN_USE_GPU +#include + #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/softmax.h" #include "paddle/fluid/operators/math/softmax_impl.h" @@ -95,6 +97,7 @@ template class SoftmaxCUDNNFunctor; template class SoftmaxGradCUDNNFunctor; template class SoftmaxGradCUDNNFunctor; +template class SoftmaxFunctor; template class SoftmaxFunctor; template class SoftmaxFunctor; template class SoftmaxGradFunctor; diff --git a/paddle/fluid/operators/math/softmax_impl.h b/paddle/fluid/operators/math/softmax_impl.h index 3e123f7bf5512618538fd35aa7e74b82586a5448..dd9971ba091cc3ece86654f65c335b98087f45ed 100644 --- a/paddle/fluid/operators/math/softmax_impl.h +++ b/paddle/fluid/operators/math/softmax_impl.h @@ -27,7 +27,7 @@ using EigenMatrix = framework::EigenMatrix; template struct ValueClip { HOSTDEVICE T operator()(const T& x) const { - const T kThreshold = -64.; + const T kThreshold = static_cast(-64.); return x < kThreshold ? kThreshold : x; } }; diff --git a/paddle/fluid/operators/nccl_op_test.cu.cc b/paddle/fluid/operators/nccl_op_test.cu.cc index a31d64e899df33f16f707e96d7ff7b85eca8d6ea..20b8a5c98ab16ac8121cb2fd01deb8ecc1966d44 100644 --- a/paddle/fluid/operators/nccl_op_test.cu.cc +++ b/paddle/fluid/operators/nccl_op_test.cu.cc @@ -15,8 +15,8 @@ limitations under the License. */ #include #include #include -#include -#include +#include // NOLINT +#include // NOLINT #include #include "paddle/fluid/framework/init.h" @@ -43,7 +43,7 @@ const f::DDim kDims = {20, 20}; // nccl op common tester, init communicator. class NCCLTester : public ::testing::Test { public: - virtual void SetUp() override { + void SetUp() override { int count = p::GetCUDADeviceCount(); if (count <= 1) { LOG(WARNING) @@ -64,7 +64,7 @@ class NCCLTester : public ::testing::Test { NCCLInitOp(); } - virtual void TearDown() override { + void TearDown() override { for (auto &device_context : dev_ctxs_) { delete device_context; } diff --git a/paddle/fluid/operators/prior_box_op.cc b/paddle/fluid/operators/prior_box_op.cc index c22a55bce263423d5c17fffdb06b7ece02ae26da..82e54139c8c1f42b1d8f74811a6793ec5c66473e 100644 --- a/paddle/fluid/operators/prior_box_op.cc +++ b/paddle/fluid/operators/prior_box_op.cc @@ -73,7 +73,7 @@ class PriorBoxOp : public framework::OperatorWithKernel { const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("Input")->type()), - platform::CPUPlace()); + ctx.device_context()); } }; @@ -171,6 +171,5 @@ namespace ops = paddle::operators; REGISTER_OPERATOR(prior_box, ops::PriorBoxOp, ops::PriorBoxOpMaker, paddle::framework::EmptyGradOpMaker); -REGISTER_OP_CPU_KERNEL( - prior_box, ops::PriorBoxOpKernel, - ops::PriorBoxOpKernel); +REGISTER_OP_CPU_KERNEL(prior_box, ops::PriorBoxOpKernel, + ops::PriorBoxOpKernel); diff --git a/paddle/fluid/operators/prior_box_op.cu b/paddle/fluid/operators/prior_box_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..76bf2b3b7de7a24c80e927c16199f89c5b7fb794 --- /dev/null +++ b/paddle/fluid/operators/prior_box_op.cu @@ -0,0 +1,167 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/prior_box_op.h" + +namespace paddle { +namespace operators { + +template +__device__ inline T clip(T in) { + return min(max(in, 0.), 1.); +} + +template +__global__ void GenPriorBox(T* out, const T* aspect_ratios, const int height, + const int width, const int im_height, + const int im_width, const int as_num, + const T offset, const T step_width, + const T step_height, const T* min_sizes, + const T* max_sizes, const int min_num, + bool is_clip) { + int num_priors = max_sizes ? as_num * min_num + min_num : as_num * min_num; + int box_num = height * width * num_priors; + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < box_num; + i += blockDim.x * gridDim.x) { + int h = i / (num_priors * width); + int w = (i / num_priors) % width; + int p = i % num_priors; + int m = max_sizes ? p / (as_num + 1) : p / as_num; + T cx = (w + offset) * step_width; + T cy = (h + offset) * step_height; + T bw, bh; + T min_size = min_sizes[m]; + if (max_sizes) { + int s = p % (as_num + 1); + if (s < as_num) { + T ar = aspect_ratios[s]; + bw = min_size * sqrt(ar) / 2.; + bh = min_size / sqrt(ar) / 2.; + } else { + T max_size = max_sizes[m]; + bw = sqrt(min_size * max_size) / 2.; + bh = bw; + } + } else { + int s = p % as_num; + T ar = aspect_ratios[s]; + bw = min_size * sqrt(ar) / 2.; + bh = min_size / sqrt(ar) / 2.; + } + T xmin = (cx - bw) / im_width; + T ymin = (cy - bh) / im_height; + T xmax = (cx + bw) / im_width; + T ymax = (cy + bh) / im_height; + out[i * 4] = is_clip ? clip(xmin) : xmin; + out[i * 4 + 1] = is_clip ? clip(ymin) : ymin; + out[i * 4 + 2] = is_clip ? clip(xmax) : xmax; + out[i * 4 + 3] = is_clip ? 
clip(ymax) : ymax; + } +} + +template +__global__ void SetVariance(T* out, const T* var, const int vnum, + const int num) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num; + i += blockDim.x * gridDim.x) { + out[i] = var[i % vnum]; + } +} + +template +class PriorBoxOpCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* input = ctx.Input("Input"); + auto* image = ctx.Input("Image"); + auto* boxes = ctx.Output("Boxes"); + auto* vars = ctx.Output("Variances"); + + auto min_sizes = ctx.Attr>("min_sizes"); + auto max_sizes = ctx.Attr>("max_sizes"); + auto input_aspect_ratio = ctx.Attr>("aspect_ratios"); + auto variances = ctx.Attr>("variances"); + auto flip = ctx.Attr("flip"); + auto clip = ctx.Attr("clip"); + + std::vector aspect_ratios; + ExpandAspectRatios(input_aspect_ratio, flip, aspect_ratios); + + T step_w = static_cast(ctx.Attr("step_w")); + T step_h = static_cast(ctx.Attr("step_h")); + T offset = static_cast(ctx.Attr("offset")); + + auto im_width = image->dims()[3]; + auto im_height = image->dims()[2]; + + auto width = input->dims()[3]; + auto height = input->dims()[2]; + + T step_width, step_height; + if (step_w == 0 || step_h == 0) { + step_width = static_cast(im_width) / width; + step_height = static_cast(im_height) / height; + } else { + step_width = step_w; + step_height = step_h; + } + + int num_priors = aspect_ratios.size() * min_sizes.size(); + if (max_sizes.size() > 0) { + num_priors += max_sizes.size(); + } + int min_num = static_cast(min_sizes.size()); + int box_num = width * height * num_priors; + + int block = 512; + int grid = (box_num + block - 1) / block; + + auto stream = + ctx.template device_context().stream(); + + boxes->mutable_data(ctx.GetPlace()); + vars->mutable_data(ctx.GetPlace()); + + framework::Tensor r; + framework::TensorFromVector(aspect_ratios, ctx.device_context(), &r); + + framework::Tensor min; + framework::TensorFromVector(min_sizes, ctx.device_context(), &min); + + T* max_data = nullptr; + framework::Tensor max; + if (max_sizes.size() > 0) { + framework::TensorFromVector(max_sizes, ctx.device_context(), &max); + max_data = max.data(); + } + + GenPriorBox<<>>( + boxes->data(), r.data(), height, width, im_height, im_width, + aspect_ratios.size(), offset, step_width, step_height, min.data(), + max_data, min_num, clip); + + framework::Tensor v; + framework::TensorFromVector(variances, ctx.device_context(), &v); + grid = (box_num * 4 + block - 1) / block; + SetVariance<<>>(vars->data(), v.data(), + variances.size(), box_num * 4); + } +}; // namespace operators + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL(prior_box, ops::PriorBoxOpCUDAKernel, + ops::PriorBoxOpCUDAKernel); diff --git a/paddle/fluid/operators/prior_box_op.h b/paddle/fluid/operators/prior_box_op.h index 18bb2deb6b5acf626dfb2883a5771d9d195d45c0..1e4a12aac1c5f1c3b7e2e1bc83170de9ad590fc3 100644 --- a/paddle/fluid/operators/prior_box_op.h +++ b/paddle/fluid/operators/prior_box_op.h @@ -51,7 +51,7 @@ struct ClipFunctor { } }; -template +template class PriorBoxOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -106,49 +106,24 @@ class PriorBoxOpKernel : public framework::OpKernel { int idx = 0; for (size_t s = 0; s < min_sizes.size(); ++s) { auto min_size = min_sizes[s]; - // first prior: aspect_ratio = 1, size = min_size - box_width = box_height = min_size / 2.; - // 
xmin - e_boxes(h, w, idx, 0) = (center_x - box_width) / img_width; - // ymin - e_boxes(h, w, idx, 1) = (center_y - box_height) / img_height; - // xmax - e_boxes(h, w, idx, 2) = (center_x + box_width) / img_width; - // ymax - e_boxes(h, w, idx, 3) = (center_y + box_height) / img_height; - - idx++; - if (max_sizes.size() > 0) { - auto max_size = max_sizes[s]; - // second prior: aspect_ratio = 1, - // size = sqrt(min_size * max_size) - box_width = box_height = sqrt(min_size * max_size) / 2.; - // xmin + // priors with different aspect ratios + for (size_t r = 0; r < aspect_ratios.size(); ++r) { + float ar = aspect_ratios[r]; + box_width = min_size * sqrt(ar) / 2.; + box_height = min_size / sqrt(ar) / 2.; e_boxes(h, w, idx, 0) = (center_x - box_width) / img_width; - // ymin e_boxes(h, w, idx, 1) = (center_y - box_height) / img_height; - // xmax e_boxes(h, w, idx, 2) = (center_x + box_width) / img_width; - // ymax e_boxes(h, w, idx, 3) = (center_y + box_height) / img_height; idx++; } - - // rest of priors - for (size_t r = 0; r < aspect_ratios.size(); ++r) { - float ar = aspect_ratios[r]; - if (fabs(ar - 1.) < 1e-6) { - continue; - } - box_width = min_size * sqrt(ar) / 2.; - box_height = min_size / sqrt(ar) / 2.; - // xmin + if (max_sizes.size() > 0) { + auto max_size = max_sizes[s]; + // square prior with size sqrt(minSize * maxSize) + box_width = box_height = sqrt(min_size * max_size) / 2.; e_boxes(h, w, idx, 0) = (center_x - box_width) / img_width; - // ymin e_boxes(h, w, idx, 1) = (center_y - box_height) / img_height; - // xmax e_boxes(h, w, idx, 2) = (center_x + box_width) / img_width; - // ymax e_boxes(h, w, idx, 3) = (center_y + box_height) / img_height; idx++; } diff --git a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc index f9a8058f2a32b6736d6513b017b761a31ddc2e37..96c0c1cbe6d588364416925a7ab1bc8f90ac6fd7 100644 --- a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc +++ b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc @@ -12,7 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include +#include // NOLINT + #include "paddle/fluid/framework/channel.h" #include "paddle/fluid/operators/reader/reader_op_registry.h" diff --git a/paddle/fluid/operators/reader/open_files_op.cc b/paddle/fluid/operators/reader/open_files_op.cc index b6ac7b21d56f7760b3f4814581c90b0ff2cc4a6a..eacedeea8835d27b712b287824b9d30b03ebebbf 100644 --- a/paddle/fluid/operators/reader/open_files_op.cc +++ b/paddle/fluid/operators/reader/open_files_op.cc @@ -21,6 +21,22 @@ namespace reader { class MultipleReader : public framework::ReaderBase { public: + class ThreadBufferMap { + public: + std::vector& operator[]( + const std::thread::id& thread_id) { + std::lock_guard lock(mutex_); + return buffer_[thread_id]; + } + + void Clear() { buffer_.clear(); } + + private: + std::mutex mutex_; + std::unordered_map> + buffer_; + }; + MultipleReader(const std::vector& file_names, const std::vector& dims, size_t thread_num) : file_names_(file_names), dims_(dims) { @@ -47,28 +63,27 @@ class MultipleReader : public framework::ReaderBase { framework::Channel* waiting_file_idx_; framework::Channel* available_thread_idx_; framework::Channel>* buffer_; - mutable std::vector local_buffer_; + mutable ThreadBufferMap thread_buffer_map_; }; void MultipleReader::ReadNext(std::vector* out) { if (!HasNext()) { PADDLE_THROW("There is no next data!"); } - - if (local_buffer_.empty()) { - buffer_->Receive(&local_buffer_); - } - *out = local_buffer_; - local_buffer_.clear(); + auto& thread_local_buffer = thread_buffer_map_[std::this_thread::get_id()]; + *out = thread_local_buffer; + thread_local_buffer.clear(); } bool MultipleReader::HasNext() const { - return local_buffer_.empty() ? buffer_->Receive(&local_buffer_) : true; + auto& thread_local_buffer = thread_buffer_map_[std::this_thread::get_id()]; + return thread_local_buffer.empty() ? buffer_->Receive(&thread_local_buffer) + : true; } void MultipleReader::ReInit() { EndScheduler(); - local_buffer_.clear(); + thread_buffer_map_.Clear(); StartNewScheduler(); } @@ -176,7 +191,7 @@ class OpenFilesOp : public framework::OperatorBase { const auto& ranks = Attr>("ranks"); PADDLE_ENFORCE(!shape_concat.empty() && !ranks.empty()); PADDLE_ENFORCE_EQ(std::accumulate(ranks.begin(), ranks.end(), 0), - int(shape_concat.size()), + static_cast(shape_concat.size()), "The accumulate of all ranks should be equal to the " "shape concat's length."); const auto& file_names = Attr>("file_names"); diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc index b87b8e6b26cdeb017e700870998a53c1b295988c..93f9c74b809770136d3d3300e0e0700b1bc0459e 100644 --- a/paddle/fluid/operators/reshape_op.cc +++ b/paddle/fluid/operators/reshape_op.cc @@ -14,6 +14,9 @@ limitations under the License. */ #include "paddle/fluid/operators/reshape_op.h" +#include +#include + namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/reshape_op.h b/paddle/fluid/operators/reshape_op.h index 871b4d38d56f10f3c0c178caa566508ab75f316c..807e5ad951b893a4c027a96d743f0606b70cf160 100644 --- a/paddle/fluid/operators/reshape_op.h +++ b/paddle/fluid/operators/reshape_op.h @@ -14,6 +14,9 @@ limitations under the License. 
*/ #pragma once +#include +#include + #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" diff --git a/paddle/fluid/operators/send_recv_op_test.cc b/paddle/fluid/operators/send_recv_op_test.cc index 04392b3e05fa2d8b602946ba03672bf2491dcfbc..542bc3fde2a3616807eea560be85fb42026d5825 100644 --- a/paddle/fluid/operators/send_recv_op_test.cc +++ b/paddle/fluid/operators/send_recv_op_test.cc @@ -20,6 +20,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/operators/listen_and_serv_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/string/printf.h" @@ -34,6 +35,7 @@ namespace m = paddle::operators::math; // global for simplicity. std::unique_ptr listen_and_serv_op; +int selected_port; void InitTensorsInScope(f::Scope &scope, p::CPUPlace &place) { p::CPUDeviceContext ctx(place); @@ -128,14 +130,16 @@ void StartServerNet(bool is_sparse) { AddOp("sum", {{"X", {"x0", "x1"}}}, {{"Out", {"Out"}}}, {}, optimize_block); f::AttributeMap attrs; - attrs.insert({"endpoint", std::string("127.0.0.1:6174")}); + attrs.insert({"endpoint", std::string("127.0.0.1:0")}); attrs.insert({"Fanin", 1}); attrs.insert({"ParamList", std::vector({"Out"})}); attrs.insert({"GradList", std::vector({"x1"})}); attrs.insert({"OptimizeBlock", optimize_block}); listen_and_serv_op = f::OpRegistry::CreateOp("listen_and_serv", {{"X", {"x1"}}}, {}, attrs); + LOG(INFO) << "selected port before run " << selected_port; listen_and_serv_op->Run(scope, place); + LOG(INFO) << "server exit"; } TEST(SendRecvOp, CPUDense) { @@ -149,12 +153,19 @@ TEST(SendRecvOp, CPUDense) { scope.Var("RPC_CLIENT_VAR"); f::AttributeMap attrs; - attrs.insert({"endpoints", std::vector({"127.0.0.1:6174"})}); - attrs.insert({"epmap", std::vector({"127.0.0.1:6174"})}); + selected_port = static_cast( + listen_and_serv_op.get()) + ->GetSelectedPort(); + LOG(INFO) << "selected port " << selected_port; + std::string endpoint = paddle::string::Sprintf("127.0.0.1:%d", selected_port); + attrs.insert({"endpoints", std::vector({endpoint})}); + attrs.insert({"epmap", std::vector({endpoint})}); auto send_op = f::OpRegistry::CreateOp( "send", {{"X", {"x1"}}}, {{"Out", {"Out"}}, {"RPCClient", {"RPC_CLIENT_VAR"}}}, attrs); + LOG(INFO) << "before run " << endpoint; send_op->Run(scope, place); + LOG(INFO) << "end run"; auto in_var = scope.Var("x1"); auto tensor = in_var->GetMutable(); @@ -167,6 +178,7 @@ TEST(SendRecvOp, CPUDense) { for (int64_t i = 0; i < target->numel(); ++i) { EXPECT_EQ(expected[i] * 2, actual[i]); } + LOG(INFO) << "before stop"; listen_and_serv_op->Stop(); server_thread.join(); listen_and_serv_op.reset(nullptr); @@ -182,8 +194,13 @@ TEST(SendRecvOp, CPUSparse) { InitSelectedRowsInScope(scope, place); scope.Var("RPC_CLIENT_VAR"); f::AttributeMap attrs; - attrs.insert({"endpoints", std::vector({"127.0.0.1:6174"})}); - attrs.insert({"epmap", std::vector({"127.0.0.1:6174"})}); + selected_port = static_cast( + listen_and_serv_op.get()) + ->GetSelectedPort(); + LOG(INFO) << "selected port " << selected_port; + std::string endpoint = paddle::string::Sprintf("127.0.0.1:%d", selected_port); + attrs.insert({"endpoints", std::vector({endpoint})}); + attrs.insert({"epmap", std::vector({endpoint})}); auto send_op = f::OpRegistry::CreateOp( "send", {{"X", {"x1"}}}, {{"Out", {"Out"}}, 
{"RPCClient", {"RPC_CLIENT_VAR"}}}, attrs); diff --git a/paddle/fluid/operators/sgd_op.cc b/paddle/fluid/operators/sgd_op.cc index d0aa2f9cbadaadf4e7e625628d9db5677d50d277..074fa9e00f2ec531f324ff10113d95144687d500 100644 --- a/paddle/fluid/operators/sgd_op.cc +++ b/paddle/fluid/operators/sgd_op.cc @@ -43,9 +43,8 @@ class SGDOp : public framework::OperatorWithKernel { protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { - return framework::OpKernelType( - framework::ToDataType(ctx.Input("Param")->type()), - ctx.GetPlace()); + auto data_type = framework::GetDataTypeOfVar(ctx.InputVar("Param")); + return framework::OpKernelType(data_type, ctx.device_context()); } }; @@ -53,10 +52,12 @@ class SGDOpMaker : public framework::OpProtoAndCheckerMaker { public: SGDOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("Param", "(Tensor) Input parameter"); + AddInput("Param", "(Tensor or SelectedRows) Input parameter"); AddInput("LearningRate", "(Tensor) Learning rate of SGD"); - AddInput("Grad", "(Tensor) Input gradient"); - AddOutput("ParamOut", "(Tensor) Output parameter"); + AddInput("Grad", "(Tensor or SelectedRows) Input gradient"); + AddOutput("ParamOut", + "(Tensor or SelectedRows, same with Param) " + "Output parameter, should share the same memory with Param"); AddComment(R"DOC( SGD operator diff --git a/paddle/fluid/operators/sgd_op.h b/paddle/fluid/operators/sgd_op.h index 0ad801079400f1830d85a945e57a434a86adeb00..8d2bdf75903b4958e14605781f65c5a214cb5300 100644 --- a/paddle/fluid/operators/sgd_op.h +++ b/paddle/fluid/operators/sgd_op.h @@ -23,60 +23,97 @@ namespace operators { template class SGDOpKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& ctx) const override { - auto* param = ctx.Input("Param"); - auto* param_out = ctx.Output("ParamOut"); - auto* learning_rate = ctx.Input("LearningRate"); - - auto* grad_var = ctx.InputVar("Grad"); - // Actually, all tensors are LoDTensor except SelectedRows. - if (grad_var->IsType()) { - param_out->mutable_data(ctx.GetPlace()); - auto* grad = ctx.Input("Grad"); - - auto p = framework::EigenVector::Flatten(*param); - auto g = framework::EigenVector::Flatten(*grad); - auto o = framework::EigenVector::Flatten(*param_out); - auto* lr = learning_rate->data(); - - o = p - lr[0] * g; - } else if (grad_var->IsType()) { - // TODO(qijun): In Sparse SGD operator, in-place update is enforced. - // This manual optimization brings difficulty to track data dependency. - // It's better to find a more elegant solution. - PADDLE_ENFORCE_EQ(param, param_out); - auto* grad = ctx.Input("Grad"); + void Compute(const framework::ExecutionContext &ctx) const override { + const auto *learning_rate = ctx.Input("LearningRate"); + + const auto *param_var = ctx.InputVar("Param"); + const auto *grad_var = ctx.InputVar("Grad"); + + if (param_var->IsType()) { + const auto *param = ctx.Input("Param"); + auto *param_out = ctx.Output("ParamOut"); + + // Actually, all tensors are LoDTensor except SelectedRows. 
+ if (grad_var->IsType<framework::LoDTensor>()) { + param_out->mutable_data<T>(ctx.GetPlace()); + const auto *grad = ctx.Input<framework::Tensor>("Grad"); + + auto p = framework::EigenVector<T>::Flatten(*param); + auto g = framework::EigenVector<T>::Flatten(*grad); + auto o = framework::EigenVector<T>::Flatten(*param_out); + auto *lr = learning_rate->data<T>(); + + o = p - lr[0] * g; + } else if (grad_var->IsType<framework::SelectedRows>()) { + // TODO(qijun): In Sparse SGD operator, in-place update is enforced. + // This manual optimization brings difficulty to track data dependency. + // It's better to find a more elegant solution. + PADDLE_ENFORCE_EQ(param, param_out); + const auto *grad = ctx.Input<framework::SelectedRows>("Grad"); + + // for distributed training, a sparse var may be empty, + // just skip updating. + if (grad->rows().size() == 0) { + return; + } + + auto grad_height = grad->height(); + auto out_dims = param_out->dims(); + PADDLE_ENFORCE_EQ(grad_height, out_dims[0]); + + auto &grad_value = grad->value(); + auto &grad_rows = grad->rows(); + + size_t grad_row_numel = grad_value.numel() / grad_rows.size(); + PADDLE_ENFORCE_EQ(grad_row_numel, param_out->numel() / grad_height); + + auto *grad_data = grad_value.data<T>(); + auto *out_data = param_out->data<T>(); + auto *lr = learning_rate->data<T>(); + for (size_t i = 0; i < grad_rows.size(); i++) { + PADDLE_ENFORCE(grad_rows[i] < grad_height, + "Input rows index should less than height"); + for (int64_t j = 0; j < grad_row_numel; j++) { + out_data[grad_rows[i] * grad_row_numel + j] -= + lr[0] * grad_data[i * grad_row_numel + j]; + } + } + } else { + PADDLE_THROW("Unsupported Variable Type of Grad"); + } + } else if (param_var->IsType<framework::SelectedRows>()) { + PADDLE_ENFORCE(grad_var->IsType<framework::SelectedRows>(), + "when param " + "is SelectedRows, gradient should also be SelectedRows"); + const auto &param = param_var->Get<framework::SelectedRows>(); + auto *param_out = ctx.Output<framework::SelectedRows>("ParamOut"); + const auto &grad = grad_var->Get<framework::SelectedRows>(); // for distributed training, a sparse var may be empty, // just skip updating.
- if (grad->rows().size() == 0) { + if (grad.rows().size() == 0) { return; } - auto in_height = grad->height(); - auto out_dims = param_out->dims(); - PADDLE_ENFORCE_EQ(in_height, out_dims[0]); - - auto& in_value = grad->value(); - auto& in_rows = grad->rows(); + size_t param_row_width = param.value().numel() / param.rows().size(); + size_t grad_row_width = grad.value().numel() / grad.rows().size(); + PADDLE_ENFORCE_EQ(param_row_width, grad_row_width, + "param_row should have the same size with grad_row"); - int64_t in_row_numel = in_value.numel() / in_rows.size(); - PADDLE_ENFORCE_EQ(in_row_numel, param_out->numel() / in_height); - - auto* in_data = in_value.data<T>(); - auto* out_data = param_out->data<T>(); - auto* lr = learning_rate->data<T>(); - for (size_t i = 0; i < in_rows.size(); i++) { - PADDLE_ENFORCE(in_rows[i] < in_height, + const auto *lr = learning_rate->data<T>(); + const auto *grad_data = grad.value().data<T>(); + auto *out_data = param_out->mutable_value()->data<T>(); + for (size_t i = 0; i < grad.rows().size(); i++) { + PADDLE_ENFORCE(grad.rows()[i] < grad.height(), "Input rows index should less than height"); - for (int64_t j = 0; j < in_row_numel; j++) { - out_data[in_rows[i] * in_row_numel + j] -= - lr[0] * in_data[i * in_row_numel + j]; + int64_t id_index = param.index(grad.rows()[i]); + for (int64_t j = 0; j < grad_row_width; j++) { + out_data[id_index * grad_row_width + j] -= + lr[0] * grad_data[i * grad_row_width + j]; } } - } else { - PADDLE_THROW("Unsupported Variable Type of Grad"); + PADDLE_THROW("Unsupported Variable Type of Parameter"); } } }; diff --git a/paddle/fluid/operators/softmax_op.cc b/paddle/fluid/operators/softmax_op.cc index e2c0f915d96b7746191572fa27b725d90cb6e2e5..6bdefc0f23910c90f3878d8f2634ca6e03c6f736 100644 --- a/paddle/fluid/operators/softmax_op.cc +++ b/paddle/fluid/operators/softmax_op.cc @@ -13,6 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/softmax_op.h" + +#include <string> + #ifdef PADDLE_WITH_CUDA #include "paddle/fluid/platform/cudnn_helper.h" #endif @@ -20,6 +23,7 @@ limitations under the License. */ #ifdef PADDLE_WITH_MKLDNN #include "paddle/fluid/platform/mkldnn_helper.h" #endif + namespace paddle { namespace operators { @@ -60,8 +64,8 @@ class SoftmaxOp : public framework::OperatorWithKernel { auto input_data_type = framework::ToDataType(ctx.Input<Tensor>("X")->type()); if (input_data_type == framework::proto::VarType::FP16) { - PADDLE_ENFORCE_EQ(library_, framework::LibraryType::kCUDNN, - "float16 can only be used when CUDNN is used"); + PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), + "float16 can only be used on GPU place"); } std::string data_format = ctx.Attr<std::string>("data_format"); @@ -70,6 +74,7 @@ class SoftmaxOp : public framework::OperatorWithKernel { library_); } }; + class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { public: SoftmaxOpMaker(OpProto* proto, OpAttrChecker* op_checker) diff --git a/paddle/fluid/operators/softmax_op.cu.cc b/paddle/fluid/operators/softmax_op.cu.cc index dbd13fd38a33d4068a5b5d47cd92a81293f6e748..0c1f7cef7ab7b66358d80f6f0670e0d07536128c 100644 --- a/paddle/fluid/operators/softmax_op.cu.cc +++ b/paddle/fluid/operators/softmax_op.cu.cc @@ -13,11 +13,12 @@ See the License for the specific language governing permissions and limitations under the License.
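The SelectedRows gradient branch of the SGD kernel above subtracts `lr * grad_row` from only the parameter rows named in `grad.rows()`. A dependency-free sketch of that sparse update for a dense parameter (plain vectors stand in for tensors; the SelectedRows-parameter branch additionally maps each row id through `param.index()` first):

```cpp
// A SelectedRows-style gradient stores only the touched rows plus their
// indices, so the update walks grad_rows instead of the whole parameter.
#include <cassert>
#include <cstdio>
#include <vector>

void SparseSgdUpdate(std::vector<float>* param, int row_width, float lr,
                     const std::vector<int>& grad_rows,
                     const std::vector<float>& grad_values) {
  assert(grad_values.size() == grad_rows.size() * static_cast<size_t>(row_width));
  for (size_t i = 0; i < grad_rows.size(); ++i) {
    for (int j = 0; j < row_width; ++j) {
      (*param)[grad_rows[i] * row_width + j] -= lr * grad_values[i * row_width + j];
    }
  }
}

int main() {
  std::vector<float> param(4 * 2, 1.0f);           // 4 rows, width 2
  std::vector<int> rows = {0, 3};                  // only rows 0 and 3 have grads
  std::vector<float> grads = {0.5f, 0.5f, 1.0f, 1.0f};
  SparseSgdUpdate(&param, 2, 0.1f, rows, grads);
  std::printf("param[0]=%.2f param[6]=%.2f\n", param[0], param[6]);  // 0.95 0.90
}
```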
*/ #include "paddle/fluid/operators/softmax_op.h" +#include "paddle/fluid/platform/float16.h" namespace ops = paddle::operators; - -REGISTER_OP_CUDA_KERNEL( - softmax, ops::SoftmaxKernel); +namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( - softmax_grad, - ops::SoftmaxGradKernel); + softmax, ops::SoftmaxKernel, + ops::SoftmaxKernel); +REGISTER_OP_CUDA_KERNEL(softmax_grad, + ops::SoftmaxGradKernel); diff --git a/paddle/fluid/operators/split_ids_op.h b/paddle/fluid/operators/split_ids_op.h index 3e750ed2d171876ce2d3c232f5d34234217b3c3e..d36ed398ebce661a62ca92696b0089b5289d5b1c 100644 --- a/paddle/fluid/operators/split_ids_op.h +++ b/paddle/fluid/operators/split_ids_op.h @@ -30,19 +30,16 @@ class SplitIdsOpKernel : public framework::OpKernel { PADDLE_THROW("SplitIds do not support GPU kernel"); } - const auto* ids_t = ctx.Input("Ids"); - auto& ids_dims = ids_t->dims(); + auto& ids_dims = ctx.Input("Ids")->dims(); + const T* ids = ctx.Input("Ids")->data(); auto outs = ctx.MultiOutput("Out"); - - const T* ids = ids_t->data(); - const size_t shard_num = outs.size(); std::vector> out_ids; out_ids.resize(outs.size()); // split id by their shard_num. - for (size_t i = 0; i < ids_dims[0]; ++i) { + for (int i = 0; i < ids_dims[0]; ++i) { T id = ids[i]; size_t shard_id = static_cast(id) % shard_num; out_ids[shard_id].push_back(id); diff --git a/paddle/fluid/platform/.clang-format b/paddle/fluid/platform/.clang-format deleted file mode 100644 index 29282dc87e2c499988c17d90d47d44cd5cf7f115..0000000000000000000000000000000000000000 --- a/paddle/fluid/platform/.clang-format +++ /dev/null @@ -1,5 +0,0 @@ ---- -Language: Cpp -BasedOnStyle: Google -Standard: Cpp11 -... diff --git a/paddle/fluid/platform/CMakeLists.txt b/paddle/fluid/platform/CMakeLists.txt index 686c0889140f0050b37192542ca98e2f3e5f23df..6780b8cc6deca64e9eaefa0b40d309449e730c8c 100644 --- a/paddle/fluid/platform/CMakeLists.txt +++ b/paddle/fluid/platform/CMakeLists.txt @@ -6,8 +6,8 @@ add_custom_target(profiler_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch _ add_dependencies(profiler_py_proto profiler_py_proto_init) add_custom_command(TARGET profiler_py_proto POST_BUILD - COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SOURCE_DIR}/python/paddle/fluid/proto/profiler - COMMAND cp *.py ${PADDLE_SOURCE_DIR}/python/paddle/fluid/proto/profiler + COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/fluid/proto/profiler + COMMAND cp *.py ${PADDLE_BINARY_DIR}/python/paddle/fluid/proto/profiler COMMENT "Copy generated python proto into directory paddle/fluid/proto/profiler." WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) diff --git a/paddle/fluid/platform/cpu_info.cc b/paddle/fluid/platform/cpu_info.cc index 8db08edba805e41d33ec6a6a4b338cca0d4906ef..4fc9aae8e36e9b43d65fab0b92ec3a2549057128 100644 --- a/paddle/fluid/platform/cpu_info.cc +++ b/paddle/fluid/platform/cpu_info.cc @@ -27,6 +27,11 @@ DEFINE_double(fraction_of_cpu_memory_to_use, 1, "Default use 100% of CPU memory for PaddlePaddle," "reserve the rest for page tables, etc"); +DEFINE_double( + fraction_of_cuda_pinned_memory_to_use, 0.5, + "Default use 50% of CPU memory as the pinned_memory for PaddlePaddle," + "reserve the rest for page tables, etc"); + namespace paddle { namespace platform { @@ -62,5 +67,22 @@ size_t CpuMaxChunkSize() { return CpuMaxAllocSize() / 32; } +size_t CUDAPinnedMaxAllocSize() { + // For distributed systems, it requires configuring and limiting + // the fraction of memory to use. 
+ return FLAGS_fraction_of_cuda_pinned_memory_to_use * CpuTotalPhysicalMemory(); +} + +size_t CUDAPinnedMinChunkSize() { + // Allow to allocate the minimum chunk size is 64 KB. + return 1 << 16; +} + +size_t CUDAPinnedMaxChunkSize() { + // Allow to allocate the maximum chunk size is roughly 1/256 of CUDA_PINNED + // memory. + return CUDAPinnedMaxAllocSize() / 256; +} + } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/cpu_info.h b/paddle/fluid/platform/cpu_info.h index a930151bd15a33d5b8861c6239e7dd964822f0f6..f06c2b67fe4385f427322e9bb2f3080fdd3acc94 100644 --- a/paddle/fluid/platform/cpu_info.h +++ b/paddle/fluid/platform/cpu_info.h @@ -22,11 +22,20 @@ namespace platform { //! Get the maximum allocation size for a machine. size_t CpuMaxAllocSize(); +//! Get the maximum allocation size for a machine. +size_t CUDAPinnedMaxAllocSize(); + //! Get the minimum chunk size for buddy allocator. size_t CpuMinChunkSize(); //! Get the maximum chunk size for buddy allocator. size_t CpuMaxChunkSize(); +//! Get the minimum chunk size for buddy allocator. +size_t CUDAPinnedMinChunkSize(); + +//! Get the maximum chunk size for buddy allocator. +size_t CUDAPinnedMaxChunkSize(); + } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/cpu_info_test.cc b/paddle/fluid/platform/cpu_info_test.cc index 78332f90cd96d80cca0cf865f4815aaf18463253..aac882e846309f23f49f68aba805da0857c7fb2d 100644 --- a/paddle/fluid/platform/cpu_info_test.cc +++ b/paddle/fluid/platform/cpu_info_test.cc @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/platform/cpu_info.h" -#include "paddle/fluid/string/printf.h" #include #include @@ -20,6 +19,7 @@ #include "gflags/gflags.h" #include "glog/logging.h" #include "gtest/gtest.h" +#include "paddle/fluid/string/printf.h" DECLARE_double(fraction_of_cpu_memory_to_use); diff --git a/paddle/fluid/platform/cudnn_helper.h b/paddle/fluid/platform/cudnn_helper.h index 7c604e14eb245232ed92f53a00b9bde45c2fbaec..c0d399d078f73743836fc2a0c1d4b1e6b31ecd83 100644 --- a/paddle/fluid/platform/cudnn_helper.h +++ b/paddle/fluid/platform/cudnn_helper.h @@ -257,9 +257,11 @@ class ScopedConvolutionDescriptor { } #endif + cudnnDataType_t compute_type = + (type == CUDNN_DATA_DOUBLE) ? 
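The pinned-memory helpers above size a host-pinned pool the same way the CPU buddy allocator is sized: cap it at a gflags-controlled fraction of physical RAM (default 0.5), with chunk sizes between a fixed 64 KB floor and 1/256 of the cap. A sketch of that arithmetic; the 16 GB `CpuTotalPhysicalMemory()` below is a made-up stand-in, and the plain double mirrors the gflag:

```cpp
#include <cstddef>
#include <cstdio>

double fraction_of_cuda_pinned_memory_to_use = 0.5;      // mirrors the gflag

size_t CpuTotalPhysicalMemory() { return 16ull << 30; }  // hypothetical 16 GB host

size_t CUDAPinnedMaxAllocSize() {
  return static_cast<size_t>(fraction_of_cuda_pinned_memory_to_use *
                             CpuTotalPhysicalMemory());
}
size_t CUDAPinnedMinChunkSize() { return 1 << 16; }              // fixed 64 KB
size_t CUDAPinnedMaxChunkSize() { return CUDAPinnedMaxAllocSize() / 256; }

int main() {
  std::printf("max alloc: %zu MB, chunks: %zu KB .. %zu MB\n",
              CUDAPinnedMaxAllocSize() >> 20, CUDAPinnedMinChunkSize() >> 10,
              CUDAPinnedMaxChunkSize() >> 20);  // 8192 MB, 64 KB .. 32 MB
}
```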
CUDNN_DATA_DOUBLE : CUDNN_DATA_FLOAT; PADDLE_ENFORCE(dynload::cudnnSetConvolutionNdDescriptor( desc_, pads.size(), pads.data(), strides.data(), dilations.data(), - CUDNN_CROSS_CORRELATION, type)); + CUDNN_CROSS_CORRELATION, compute_type)); return desc_; } diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc index 59b76a1edb5ec5900520fbccb6a6f8f6e7a70aa4..feb4f367008d76d86a93c561a8eec1f2485c99d6 100644 --- a/paddle/fluid/platform/device_context.cc +++ b/paddle/fluid/platform/device_context.cc @@ -53,6 +53,16 @@ DeviceContextPool::DeviceContextPool( PADDLE_THROW( "'CUDAPlace' is not supported, Please re-compile with WITH_GPU " "option"); +#endif + } else if (platform::is_cuda_pinned_place(p)) { +#ifdef PADDLE_WITH_CUDA + device_contexts_.emplace( + p, + PtrType(new CUDAPinnedDeviceContext(boost::get(p)))); +#else + PADDLE_THROW( + "'CUDAPlace' is not supported, Please re-compile with WITH_GPU " + "option"); #endif } } @@ -186,6 +196,20 @@ cudnnHandle_t CUDADeviceContext::cudnn_handle() const { return cudnn_handle_; } cudaStream_t CUDADeviceContext::stream() const { return stream_; } +CUDAPinnedDeviceContext::CUDAPinnedDeviceContext() { + eigen_device_.reset(new Eigen::DefaultDevice()); +} + +CUDAPinnedDeviceContext::CUDAPinnedDeviceContext(CUDAPinnedPlace place) + : place_(place) { + eigen_device_.reset(new Eigen::DefaultDevice()); +} + +Eigen::DefaultDevice* CUDAPinnedDeviceContext::eigen_device() const { + return eigen_device_.get(); +} + +Place CUDAPinnedDeviceContext::GetPlace() const { return place_; } #endif #ifdef PADDLE_WITH_MKLDNN diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h index 202394c7be7e103a609dd0999fc883c794ef0edd..6b796d92d09cdde2db60c7651c03d3782ff013e6 100644 --- a/paddle/fluid/platform/device_context.h +++ b/paddle/fluid/platform/device_context.h @@ -118,6 +118,25 @@ struct DefaultDeviceContextType { using TYPE = CUDADeviceContext; }; +// Currently, CUDAPinnedDeviceContext is only used to data copying. +class CUDAPinnedDeviceContext : public DeviceContext { + public: + CUDAPinnedDeviceContext(); + explicit CUDAPinnedDeviceContext(CUDAPinnedPlace place); + + Place GetPlace() const override; + + Eigen::DefaultDevice* eigen_device() const; + + private: + CUDAPinnedPlace place_; + std::unique_ptr eigen_device_; +}; + +template <> +struct DefaultDeviceContextType { + using TYPE = CUDAPinnedDeviceContext; +}; #endif #ifdef PADDLE_WITH_MKLDNN diff --git a/paddle/fluid/platform/dynload/cublas.cc b/paddle/fluid/platform/dynload/cublas.cc index e90e3105f0809b3c7507a86fa5a3d61864290fcb..eb541579a136de2a84ecc9773e0c312b405f7e86 100644 --- a/paddle/fluid/platform/dynload/cublas.cc +++ b/paddle/fluid/platform/dynload/cublas.cc @@ -24,6 +24,10 @@ void *cublas_dso_handle = nullptr; CUBLAS_BLAS_ROUTINE_EACH(DEFINE_WRAP); +#ifdef CUBLAS_BLAS_ROUTINE_EACH_R2 +CUBLAS_BLAS_ROUTINE_EACH_R2(DEFINE_WRAP); +#endif + } // namespace dynload } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/dynload/cublas.h b/paddle/fluid/platform/dynload/cublas.h index fa9041134d863ebfd8d1e00379da3b92323ae6e3..a41018d350e89881888d5e31089c2b9ecd76f6c0 100644 --- a/paddle/fluid/platform/dynload/cublas.h +++ b/paddle/fluid/platform/dynload/cublas.h @@ -1,22 +1,23 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #pragma once #include +#include #include -#include +#include // NOLINT #include "paddle/fluid/platform/dynload/dynamic_loader.h" namespace paddle { @@ -34,18 +35,18 @@ extern void *cublas_dso_handle; * note: default dynamic linked libs */ #ifdef PADDLE_USE_DSO -#define DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP(__name) \ - struct DynLoad__##__name { \ - template \ - inline cublasStatus_t operator()(Args... args) { \ - typedef cublasStatus_t (*cublasFunc)(Args...); \ - std::call_once(cublas_dso_flag, \ - paddle::platform::dynload::GetCublasDsoHandle, \ - &cublas_dso_handle); \ - void *p_##__name = dlsym(cublas_dso_handle, #__name); \ - return reinterpret_cast(p_##__name)(args...); \ - } \ - }; \ +#define DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP(__name) \ + struct DynLoad__##__name { \ + template \ + inline cublasStatus_t operator()(Args... args) { \ + typedef cublasStatus_t (*cublasFunc)(Args...); \ + std::call_once(cublas_dso_flag, []() { \ + cublas_dso_handle = paddle::platform::dynload::GetCublasDsoHandle(); \ + }); \ + void *p_##__name = dlsym(cublas_dso_handle, #__name); \ + return reinterpret_cast(p_##__name)(args...); \ + } \ + }; \ extern DynLoad__##__name __name #else #define DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP(__name) \ @@ -70,6 +71,7 @@ extern void *cublas_dso_handle; __macro(cublasDgemm_v2); \ __macro(cublasHgemm); \ __macro(cublasSgemmEx); \ + __macro(cublasGemmEx); \ __macro(cublasSgeam_v2); \ __macro(cublasDgeam_v2); \ __macro(cublasCreate_v2); \ @@ -89,9 +91,15 @@ extern void *cublas_dso_handle; __macro(cublasSgetrfBatched); \ __macro(cublasSgetriBatched); \ __macro(cublasDgetrfBatched); \ - __macro(cublasDgetriBatched) + __macro(cublasDgetriBatched); -CUBLAS_BLAS_ROUTINE_EACH(DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP); +CUBLAS_BLAS_ROUTINE_EACH(DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP) + +// APIs available after CUDA 9.0 +#if CUDA_VERSION >= 9000 +#define CUBLAS_BLAS_ROUTINE_EACH_R2(__macro) __macro(cublasSetMathMode); +CUBLAS_BLAS_ROUTINE_EACH_R2(DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP) +#endif #undef DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP } // namespace dynload diff --git a/paddle/fluid/platform/dynload/cudnn.cc b/paddle/fluid/platform/dynload/cudnn.cc index c65b060ab46cfcd38292be66dd5f2123f88bae63..f3cd3b2bbedef7c9140c2acddea0732972ff7fa0 100644 --- a/paddle/fluid/platform/dynload/cudnn.cc +++ b/paddle/fluid/platform/dynload/cudnn.cc @@ -44,7 +44,8 @@ CUDNN_DNN_ROUTINE_EACH_R7(DEFINE_WRAP); #ifdef PADDLE_USE_DSO bool HasCUDNN() { - std::call_once(cudnn_dso_flag, GetCUDNNDsoHandle, &cudnn_dso_handle); + std::call_once(cudnn_dso_flag, + []() { cudnn_dso_handle = GetCUDNNDsoHandle(); }); return 
cudnn_dso_handle != nullptr; } diff --git a/paddle/fluid/platform/dynload/cudnn.h b/paddle/fluid/platform/dynload/cudnn.h index 81acc445bd3803dede158ff09507a72fb6e293ac..24475b62ca2825c45ff7edb39328dece3b822b25 100644 --- a/paddle/fluid/platform/dynload/cudnn.h +++ b/paddle/fluid/platform/dynload/cudnn.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include -#include +#include // NOLINT #include "paddle/fluid/platform/dynload/dynamic_loader.h" namespace paddle { @@ -30,19 +30,19 @@ extern bool HasCUDNN(); #ifdef PADDLE_USE_DSO extern void EnforceCUDNNLoaded(const char* fn_name); -#define DECLARE_DYNAMIC_LOAD_CUDNN_WRAP(__name) \ - struct DynLoad__##__name { \ - template \ - auto operator()(Args... args) -> decltype(__name(args...)) { \ - using cudnn_func = decltype(__name(args...)) (*)(Args...); \ - std::call_once(cudnn_dso_flag, \ - paddle::platform::dynload::GetCUDNNDsoHandle, \ - &cudnn_dso_handle); \ - EnforceCUDNNLoaded(#__name); \ - void* p_##__name = dlsym(cudnn_dso_handle, #__name); \ - return reinterpret_cast(p_##__name)(args...); \ - } \ - }; \ +#define DECLARE_DYNAMIC_LOAD_CUDNN_WRAP(__name) \ + struct DynLoad__##__name { \ + template \ + auto operator()(Args... args) -> decltype(__name(args...)) { \ + using cudnn_func = decltype(__name(args...)) (*)(Args...); \ + std::call_once(cudnn_dso_flag, []() { \ + cudnn_dso_handle = paddle::platform::dynload::GetCUDNNDsoHandle(); \ + }); \ + EnforceCUDNNLoaded(#__name); \ + void* p_##__name = dlsym(cudnn_dso_handle, #__name); \ + return reinterpret_cast(p_##__name)(args...); \ + } \ + }; \ extern struct DynLoad__##__name __name #else @@ -140,7 +140,8 @@ CUDNN_DNN_ROUTINE_EACH_R5(DECLARE_DYNAMIC_LOAD_CUDNN_WRAP) #if CUDNN_VERSION >= 7001 #define CUDNN_DNN_ROUTINE_EACH_R7(__macro) \ - __macro(cudnnSetConvolutionGroupCount); + __macro(cudnnSetConvolutionGroupCount); \ + __macro(cudnnSetConvolutionMathType); CUDNN_DNN_ROUTINE_EACH_R7(DECLARE_DYNAMIC_LOAD_CUDNN_WRAP) #endif diff --git a/paddle/fluid/platform/dynload/cupti.h b/paddle/fluid/platform/dynload/cupti.h index c1bf88f8cb690861b97686d99d36410143445243..d0d676b9d8ac462900b48246bec43166d04ef97b 100644 --- a/paddle/fluid/platform/dynload/cupti.h +++ b/paddle/fluid/platform/dynload/cupti.h @@ -11,14 +11,15 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once #ifdef PADDLE_WITH_CUPTI + #include #include #include -#include +#include // NOLINT + #include "paddle/fluid/platform/dynload/dynamic_loader.h" namespace paddle { @@ -36,18 +37,18 @@ extern void *cupti_dso_handle; * note: default dynamic linked libs */ #ifdef PADDLE_USE_DSO -#define DECLARE_DYNAMIC_LOAD_CUPTI_WRAP(__name) \ - struct DynLoad__##__name { \ - template \ - inline CUptiResult CUPTIAPI operator()(Args... args) { \ - typedef CUptiResult CUPTIAPI (*cuptiFunc)(Args...); \ - std::call_once(cupti_dso_flag, \ - paddle::platform::dynload::GetCUPTIDsoHandle, \ - &cupti_dso_handle); \ - void *p_##__name = dlsym(cupti_dso_handle, #__name); \ - return reinterpret_cast(p_##__name)(args...); \ - } \ - }; \ +#define DECLARE_DYNAMIC_LOAD_CUPTI_WRAP(__name) \ + struct DynLoad__##__name { \ + template \ + inline CUptiResult CUPTIAPI operator()(Args... 
args) { \ + typedef CUptiResult CUPTIAPI (*cuptiFunc)(Args...); \ + std::call_once(cupti_dso_flag, []() { \ + cupti_dso_handle = paddle::platform::dynload::GetCUPTIDsoHandle(); \ + }); \ + void *p_##__name = dlsym(cupti_dso_handle, #__name); \ + return reinterpret_cast(p_##__name)(args...); \ + } \ + }; \ extern DynLoad__##__name __name #else #define DECLARE_DYNAMIC_LOAD_CUPTI_WRAP(__name) \ diff --git a/paddle/fluid/platform/dynload/curand.h b/paddle/fluid/platform/dynload/curand.h index 1b3ff962d6edceb37deb94cc7daead7346d25352..4697fb6cd96770127206bdabeea77e43eb09d1f5 100644 --- a/paddle/fluid/platform/dynload/curand.h +++ b/paddle/fluid/platform/dynload/curand.h @@ -11,12 +11,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once #include #include -#include + +#include // NOLINT + #include "paddle/fluid/platform/dynload/dynamic_loader.h" namespace paddle { @@ -25,18 +26,18 @@ namespace dynload { extern std::once_flag curand_dso_flag; extern void *curand_dso_handle; #ifdef PADDLE_USE_DSO -#define DECLARE_DYNAMIC_LOAD_CURAND_WRAP(__name) \ - struct DynLoad__##__name { \ - template \ - curandStatus_t operator()(Args... args) { \ - typedef curandStatus_t (*curandFunc)(Args...); \ - std::call_once(curand_dso_flag, \ - paddle::platform::dynload::GetCurandDsoHandle, \ - &curand_dso_handle); \ - void *p_##__name = dlsym(curand_dso_handle, #__name); \ - return reinterpret_cast(p_##__name)(args...); \ - } \ - }; \ +#define DECLARE_DYNAMIC_LOAD_CURAND_WRAP(__name) \ + struct DynLoad__##__name { \ + template \ + curandStatus_t operator()(Args... args) { \ + typedef curandStatus_t (*curandFunc)(Args...); \ + std::call_once(curand_dso_flag, []() { \ + curand_dso_handle = paddle::platform::dynload::GetCurandDsoHandle(); \ + }); \ + void *p_##__name = dlsym(curand_dso_handle, #__name); \ + return reinterpret_cast(p_##__name)(args...); \ + } \ + }; \ extern DynLoad__##__name __name #else #define DECLARE_DYNAMIC_LOAD_CURAND_WRAP(__name) \ diff --git a/paddle/fluid/platform/dynload/dynamic_loader.cc b/paddle/fluid/platform/dynload/dynamic_loader.cc index e590e81bab51fd9fe12309335522614263d8e21d..3c1ccc7445ed27c711ab250aa223c66ae0da45dc 100644 --- a/paddle/fluid/platform/dynload/dynamic_loader.cc +++ b/paddle/fluid/platform/dynload/dynamic_loader.cc @@ -11,12 +11,14 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
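The dynload rewrites above (cublas, cudnn, cupti, curand) all converge on the same shape: `std::call_once` with a lambda that assigns the `dlopen` handle, then `dlsym` per wrapped call. A minimal standalone version of that pattern, using libm's `cosf` as a stand-in since the CUDA libraries cannot be assumed present (Linux-specific soname; link with `-ldl`):

```cpp
#include <dlfcn.h>
#include <cstdio>
#include <mutex>

std::once_flag dso_flag;
void* dso_handle = nullptr;

float call_cosf(float x) {
  // First caller loads the library; everyone else reuses the handle.
  std::call_once(dso_flag, []() {
    dso_handle = dlopen("libm.so.6", RTLD_LAZY | RTLD_LOCAL);
  });
  if (dso_handle == nullptr) return x;  // load failed; real code enforces instead
  using cosf_func = float (*)(float);
  return reinterpret_cast<cosf_func>(dlsym(dso_handle, "cosf"))(x);
}

int main() { std::printf("cosf(0) = %f\n", call_cosf(0.0f)); }
```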
*/ - #include "paddle/fluid/platform/dynload/dynamic_loader.h" + #include + #include -#include +#include // NOLINT #include + #include "gflags/gflags.h" #include "glog/logging.h" #include "paddle/fluid/platform/dynload/cupti_lib_path.h" @@ -65,22 +67,21 @@ static inline std::string join(const std::string& part1, return ret; } -static inline void GetDsoHandleFromDefaultPath(std::string& dso_path, - void** dso_handle, - int dynload_flags) { +static inline void* GetDsoHandleFromDefaultPath(const std::string& dso_path, + int dynload_flags) { VLOG(3) << "Try to find library: " << dso_path << " from default system path."; // default search from LD_LIBRARY_PATH/DYLD_LIBRARY_PATH - *dso_handle = dlopen(dso_path.c_str(), dynload_flags); + void* dso_handle = dlopen(dso_path.c_str(), dynload_flags); // DYLD_LIBRARY_PATH is disabled after Mac OS 10.11 to // bring System Integrity Projection (SIP), if dso_handle // is null, search from default package path in Mac OS. #if defined(__APPLE__) || defined(__OSX__) - if (nullptr == *dso_handle) { - dso_path = join("/usr/local/cuda/lib/", dso_path); - *dso_handle = dlopen(dso_path.c_str(), dynload_flags); - if (nullptr == *dso_handle) { + if (nullptr == dso_handle) { + dso_handle = + dlopen(join("/usr/local/cuda/lib/", dso_path).c_str(), dynload_flags); + if (nullptr == dso_handle) { if (dso_path == "libcudnn.dylib") { LOG(WARNING) << "Note: [Recommend] copy cudnn into /usr/local/cuda/ \n " "For instance, sudo tar -xzf " @@ -91,28 +92,29 @@ static inline void GetDsoHandleFromDefaultPath(std::string& dso_path, } } #endif + + return dso_handle; } -static inline void GetDsoHandleFromSearchPath(const std::string& search_root, - const std::string& dso_name, - void** dso_handle, - bool throw_on_error = true) { +static inline void* GetDsoHandleFromSearchPath(const std::string& search_root, + const std::string& dso_name, + bool throw_on_error = true) { int dynload_flags = RTLD_LAZY | RTLD_LOCAL; - *dso_handle = nullptr; + void* dso_handle = nullptr; std::string dlPath = dso_name; if (search_root.empty()) { - GetDsoHandleFromDefaultPath(dlPath, dso_handle, dynload_flags); + dso_handle = GetDsoHandleFromDefaultPath(dlPath, dynload_flags); } else { // search xxx.so from custom path dlPath = join(search_root, dso_name); - *dso_handle = dlopen(dlPath.c_str(), dynload_flags); + dso_handle = dlopen(dlPath.c_str(), dynload_flags); // if not found, search from default path - if (nullptr == *dso_handle) { + if (nullptr == dso_handle) { LOG(WARNING) << "Failed to find dynamic library: " << dlPath << " (" << dlerror() << ")"; dlPath = dso_name; - GetDsoHandleFromDefaultPath(dlPath, dso_handle, dynload_flags); + dso_handle = GetDsoHandleFromDefaultPath(dlPath, dynload_flags); } } auto error_msg = @@ -124,70 +126,71 @@ static inline void GetDsoHandleFromSearchPath(const std::string& search_root, "using the DYLD_LIBRARY_PATH is impossible unless System " "Integrity Protection (SIP) is disabled."; if (throw_on_error) { - PADDLE_ENFORCE(nullptr != *dso_handle, error_msg, dlPath, dlerror()); - } else if (nullptr == *dso_handle) { + PADDLE_ENFORCE(nullptr != dso_handle, error_msg, dlPath, dlerror()); + } else if (nullptr == dso_handle) { LOG(WARNING) << string::Sprintf(error_msg, dlPath, dlerror()); } + + return dso_handle; } -void GetCublasDsoHandle(void** dso_handle) { +void* GetCublasDsoHandle() { #if defined(__APPLE__) || defined(__OSX__) - GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcublas.dylib", dso_handle); + return GetDsoHandleFromSearchPath(FLAGS_cuda_dir, 
"libcublas.dylib"); #else - GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcublas.so", dso_handle); + return GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcublas.so"); #endif } -void GetCUDNNDsoHandle(void** dso_handle) { +void* GetCUDNNDsoHandle() { #if defined(__APPLE__) || defined(__OSX__) - GetDsoHandleFromSearchPath(FLAGS_cudnn_dir, "libcudnn.dylib", dso_handle, - false); + return GetDsoHandleFromSearchPath(FLAGS_cudnn_dir, "libcudnn.dylib", false); #else - GetDsoHandleFromSearchPath(FLAGS_cudnn_dir, "libcudnn.so", dso_handle, false); + return GetDsoHandleFromSearchPath(FLAGS_cudnn_dir, "libcudnn.so", false); #endif } -void GetCUPTIDsoHandle(void** dso_handle) { +void* GetCUPTIDsoHandle() { std::string cupti_path = cupti_lib_path; if (!FLAGS_cupti_dir.empty()) { cupti_path = FLAGS_cupti_dir; } #if defined(__APPLE__) || defined(__OSX__) - GetDsoHandleFromSearchPath(cupti_path, "libcupti.dylib", dso_handle, false); + return GetDsoHandleFromSearchPath(cupti_path, "libcupti.dylib", false); #else - GetDsoHandleFromSearchPath(cupti_path, "libcupti.so", dso_handle, false); + return GetDsoHandleFromSearchPath(cupti_path, "libcupti.so", false); #endif } -void GetCurandDsoHandle(void** dso_handle) { +void* GetCurandDsoHandle() { #if defined(__APPLE__) || defined(__OSX__) - GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcurand.dylib", dso_handle); + return GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcurand.dylib"); #else - GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcurand.so", dso_handle); + return GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcurand.so"); #endif } -void GetWarpCTCDsoHandle(void** dso_handle) { +void* GetWarpCTCDsoHandle() { #if defined(__APPLE__) || defined(__OSX__) - GetDsoHandleFromSearchPath(FLAGS_warpctc_dir, "libwarpctc.dylib", dso_handle); + return GetDsoHandleFromSearchPath(FLAGS_warpctc_dir, "libwarpctc.dylib"); #else - GetDsoHandleFromSearchPath(FLAGS_warpctc_dir, "libwarpctc.so", dso_handle); + return GetDsoHandleFromSearchPath(FLAGS_warpctc_dir, "libwarpctc.so"); #endif } -void GetLapackDsoHandle(void** dso_handle) { +void* GetLapackDsoHandle() { #if defined(__APPLE__) || defined(__OSX__) - GetDsoHandleFromSearchPath(FLAGS_lapack_dir, "liblapacke.dylib", dso_handle); + return GetDsoHandleFromSearchPath(FLAGS_lapack_dir, "liblapacke.dylib"); #else - GetDsoHandleFromSearchPath(FLAGS_lapack_dir, "liblapacke.so", dso_handle); + return GetDsoHandleFromSearchPath(FLAGS_lapack_dir, "liblapacke.so"); #endif } -void GetNCCLDsoHandle(void** dso_handle) { +void* GetNCCLDsoHandle() { #if defined(__APPLE__) || defined(__OSX__) - GetDsoHandleFromSearchPath(FLAGS_nccl_dir, "libnccl.dylib", dso_handle); + return GetDsoHandleFromSearchPath(FLAGS_nccl_dir, "libnccl.dylib"); #else - GetDsoHandleFromSearchPath(FLAGS_nccl_dir, "libnccl.so", dso_handle); + return GetDsoHandleFromSearchPath(FLAGS_nccl_dir, "libnccl.so"); #endif } diff --git a/paddle/fluid/platform/dynload/dynamic_loader.h b/paddle/fluid/platform/dynload/dynamic_loader.h index b5b9c4af916241c1c7361b506f74563ebcf69b9a..4c85093a43e0e8d75b64c5b29d1ec68db1b44909 100644 --- a/paddle/fluid/platform/dynload/dynamic_loader.h +++ b/paddle/fluid/platform/dynload/dynamic_loader.h @@ -18,55 +18,13 @@ namespace paddle { namespace platform { namespace dynload { -/** - * @brief load the DSO of CUBLAS - * - * @param **dso_handle dso handler - * - */ -void GetCublasDsoHandle(void** dso_handle); - -/** - * @brief load the DSO of CUDNN - * - * @param **dso_handle dso handler - * - */ -void GetCUDNNDsoHandle(void** dso_handle); - 
-void GetCUPTIDsoHandle(void** dso_handle); - -/** - * @brief load the DSO of CURAND - * - * @param **dso_handle dso handler - * - */ -void GetCurandDsoHandle(void** dso_handle); - -/** - * @brief load the DSO of warp-ctc - * - * @param **dso_handle dso handler - * - */ -void GetWarpCTCDsoHandle(void** dso_handle); - -/** - * @brief load the DSO of lapack - * - * @param **dso_handle dso handler - * - */ -void GetLapackDsoHandle(void** dso_handle); - -/** - * @brief load the DSO of NVIDIA nccl - * - * @param **dso_handle dso handler - * - */ -void GetNCCLDsoHandle(void** dso_handle); +void* GetCublasDsoHandle(); +void* GetCUDNNDsoHandle(); +void* GetCUPTIDsoHandle(); +void* GetCurandDsoHandle(); +void* GetWarpCTCDsoHandle(); +void* GetLapackDsoHandle(); +void* GetNCCLDsoHandle(); } // namespace dynload } // namespace platform diff --git a/paddle/fluid/platform/dynload/nccl.cc b/paddle/fluid/platform/dynload/nccl.cc index 3edc70c46d03ddcc751e865676928c47fcb48e69..2c40c48ee08497f9a2a414687b9c51d87ba574aa 100644 --- a/paddle/fluid/platform/dynload/nccl.cc +++ b/paddle/fluid/platform/dynload/nccl.cc @@ -25,11 +25,6 @@ void *nccl_dso_handle; NCCL_RAND_ROUTINE_EACH(DEFINE_WRAP); -void LoadNCCLDSO() { - platform::call_once(nccl_dso_flag, - [] { GetNCCLDsoHandle(&nccl_dso_handle); }); -} - } // namespace dynload } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/dynload/nccl.h b/paddle/fluid/platform/dynload/nccl.h index dc78bcb44d3316a1ecee0c8d70dcb4777a9e2de4..d21e29df3cf9b2d78920d8bac41209d200b5ba3a 100644 --- a/paddle/fluid/platform/dynload/nccl.h +++ b/paddle/fluid/platform/dynload/nccl.h @@ -11,12 +11,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once #include #include -#include + +#include // NOLINT + #include "paddle/fluid/platform/call_once.h" #include "paddle/fluid/platform/dynload/dynamic_loader.h" @@ -28,18 +29,19 @@ extern std::once_flag nccl_dso_flag; extern void* nccl_dso_handle; #ifdef PADDLE_USE_DSO -extern void LoadNCCLDSO(); -#define DECLARE_DYNAMIC_LOAD_NCCL_WRAP(__name) \ - struct DynLoad__##__name { \ - template \ - auto operator()(Args... args) -> decltype(__name(args...)) { \ - using nccl_func = decltype(__name(args...)) (*)(Args...); \ - paddle::platform::dynload::LoadNCCLDSO(); \ - void* p_##__name = dlsym(nccl_dso_handle, #__name); \ - return reinterpret_cast(p_##__name)(args...); \ - } \ - }; \ +#define DECLARE_DYNAMIC_LOAD_NCCL_WRAP(__name) \ + struct DynLoad__##__name { \ + template \ + auto operator()(Args... args) -> decltype(__name(args...)) { \ + using nccl_func = decltype(__name(args...)) (*)(Args...); \ + std::call_once(nccl_dso_flag, []() { \ + nccl_dso_handle = paddle::platform::dynload::GetNCCLDsoHandle(); \ + }); \ + void* p_##__name = dlsym(nccl_dso_handle, #__name); \ + return reinterpret_cast(p_##__name)(args...); \ + } \ + }; \ extern DynLoad__##__name __name #else #define DECLARE_DYNAMIC_LOAD_NCCL_WRAP(__name) \ diff --git a/paddle/fluid/platform/dynload/warpctc.h b/paddle/fluid/platform/dynload/warpctc.h index f5ded0eb6b1107c886641e848f5040a7a2d806a5..7fa468370463a51c486b80317f401612930bc72e 100644 --- a/paddle/fluid/platform/dynload/warpctc.h +++ b/paddle/fluid/platform/dynload/warpctc.h @@ -15,9 +15,10 @@ limitations under the License. 
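The dynamic_loader refactor above turns every `void GetXDsoHandle(void** dso_handle)` out-parameter into a `void* GetXDsoHandle()` return, which is what lets the call sites shrink to the one-line `std::call_once` lambdas shown earlier. A sketch contrasting the two shapes, again with libm as the stand-in library:

```cpp
#include <dlfcn.h>
#include <cstdio>
#include <string>

// Old shape: the helper mutates a handle owned by the caller.
void GetDsoHandleOld(const std::string& dso_path, void** dso_handle) {
  *dso_handle = dlopen(dso_path.c_str(), RTLD_LAZY | RTLD_LOCAL);
}

// New shape: the handle is the return value, so it composes in expressions.
void* GetDsoHandleNew(const std::string& dso_path) {
  return dlopen(dso_path.c_str(), RTLD_LAZY | RTLD_LOCAL);
}

int main() {
  void* h1 = nullptr;
  GetDsoHandleOld("libm.so.6", &h1);
  void* h2 = GetDsoHandleNew("libm.so.6");
  std::printf("old: %p, new: %p\n", h1, h2);
  if (h1) dlclose(h1);
  if (h2) dlclose(h2);
}
```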
*/ #pragma once #include -#include -#include "ctc.h" +#include // NOLINT + #include "paddle/fluid/platform/dynload/dynamic_loader.h" +#include "warpctc/include/ctc.h" namespace paddle { namespace platform { @@ -31,18 +32,18 @@ extern void* warpctc_dso_handle; * (for each function) to dynamic load warpctc routine * via operator overloading. */ -#define DYNAMIC_LOAD_WARPCTC_WRAP(__name) \ - struct DynLoad__##__name { \ - template \ - auto operator()(Args... args) -> decltype(__name(args...)) { \ - using warpctcFunc = decltype(__name(args...)) (*)(Args...); \ - std::call_once(warpctc_dso_flag, \ - paddle::platform::dynload::GetWarpCTCDsoHandle, \ - &warpctc_dso_handle); \ - void* p_##_name = dlsym(warpctc_dso_handle, #__name); \ - return reinterpret_cast(p_##_name)(args...); \ - } \ - }; \ +#define DYNAMIC_LOAD_WARPCTC_WRAP(__name) \ + struct DynLoad__##__name { \ + template \ + auto operator()(Args... args) -> decltype(__name(args...)) { \ + using warpctcFunc = decltype(__name(args...)) (*)(Args...); \ + std::call_once(warpctc_dso_flag, []() { \ + warpctc_dso_handle = paddle::platform::dynload::GetWarpCTCDsoHandle(); \ + }); \ + void* p_##_name = dlsym(warpctc_dso_handle, #__name); \ + return reinterpret_cast(p_##_name)(args...); \ + } \ + }; \ extern DynLoad__##__name __name #define DECLARE_DYNAMIC_LOAD_WARPCTC_WRAP(__name) \ diff --git a/paddle/fluid/platform/enforce.h b/paddle/fluid/platform/enforce.h index d303fd6d63f8424c1c88a31eb3fa6f2136e0e430..7b8c29e1e642ec6bb4023afd8c083311b8b31812 100644 --- a/paddle/fluid/platform/enforce.h +++ b/paddle/fluid/platform/enforce.h @@ -16,35 +16,35 @@ limitations under the License. */ #include // for dladdr #include // for backtrace + +#ifdef __GNUC__ +#include // for __cxa_demangle +#endif // __GNUC__ + +#ifdef PADDLE_WITH_CUDA +#include +#include +#include +#include +#include +#endif // PADDLE_WITH_CUDA + #include #include #include #include #include +#include "glog/logging.h" #include "paddle/fluid/platform/macros.h" #include "paddle/fluid/string/printf.h" #include "paddle/fluid/string/to_string.h" -#ifdef __GNUC__ -#include // for __cxa_demangle -#endif - -#include - #ifdef PADDLE_WITH_CUDA - #include "paddle/fluid/platform/dynload/cublas.h" #include "paddle/fluid/platform/dynload/cudnn.h" #include "paddle/fluid/platform/dynload/curand.h" #include "paddle/fluid/platform/dynload/nccl.h" - -#include -#include -#include -#include -#include - #endif namespace paddle { @@ -185,7 +185,7 @@ inline typename std::enable_if::type throw_on_error( } } -#endif // PADDLE_ONLY_CPU +#endif // PADDLE_WITH_CUDA template inline void throw_on_error(T e) { diff --git a/paddle/fluid/platform/enforce_test.cc b/paddle/fluid/platform/enforce_test.cc index bb9a3543ff267dadf3dfee260a320d292a1ba3cb..57d751cc00b5f11f1ba1a3b0c9a6b7ce9e79f586 100644 --- a/paddle/fluid/platform/enforce_test.cc +++ b/paddle/fluid/platform/enforce_test.cc @@ -96,7 +96,6 @@ TEST(ENFORCE_GT, FAIL) { bool caught_exception = false; try { PADDLE_ENFORCE_GT(1, 2UL); - } catch (paddle::platform::EnforceNotMet error) { caught_exception = true; EXPECT_TRUE( @@ -115,7 +114,6 @@ TEST(ENFORCE_GE, FAIL) { bool caught_exception = false; try { PADDLE_ENFORCE_GE(1, 2UL); - } catch (paddle::platform::EnforceNotMet error) { caught_exception = true; EXPECT_TRUE( @@ -135,7 +133,6 @@ TEST(ENFORCE_LE, FAIL) { bool caught_exception = false; try { PADDLE_ENFORCE_GT(1, 2UL); - } catch (paddle::platform::EnforceNotMet error) { caught_exception = true; EXPECT_TRUE( @@ -171,7 +168,6 @@ TEST(ENFORCE_NOT_NULL, FAIL) { try { 
int* a = nullptr; PADDLE_ENFORCE_NOT_NULL(a); - } catch (paddle::platform::EnforceNotMet error) { caught_exception = true; EXPECT_TRUE(HasPrefix(StringPiece(error.what()), "a should not be null")); diff --git a/paddle/fluid/platform/float16.h b/paddle/fluid/platform/float16.h index 2cf311c7e56a9bbb0bdb0078d5cfefb4bb50018b..e77f768bf9f437a289b16d2ec9597c570b0a9ad2 100644 --- a/paddle/fluid/platform/float16.h +++ b/paddle/fluid/platform/float16.h @@ -15,6 +15,7 @@ limitations under the License. */ #pragma once #include +#include #ifdef PADDLE_WITH_CUDA #include @@ -293,39 +294,39 @@ struct PADDLE_ALIGN(2) float16 { HOSTDEVICE inline explicit operator bool() const { return (x & 0x7fff) != 0; } HOSTDEVICE inline explicit operator int8_t() const { - return static_cast(float(*this)); + return static_cast(static_cast(*this)); } HOSTDEVICE inline explicit operator uint8_t() const { - return static_cast(float(*this)); + return static_cast(static_cast(*this)); } HOSTDEVICE inline explicit operator int16_t() const { - return static_cast(float(*this)); + return static_cast(static_cast(*this)); } HOSTDEVICE inline explicit operator uint16_t() const { - return static_cast(float(*this)); + return static_cast(static_cast(*this)); } HOSTDEVICE inline explicit operator int32_t() const { - return static_cast(float(*this)); + return static_cast(static_cast(*this)); } HOSTDEVICE inline explicit operator uint32_t() const { - return static_cast(float(*this)); + return static_cast(static_cast(*this)); } HOSTDEVICE inline explicit operator int64_t() const { - return static_cast(float(*this)); + return static_cast(static_cast(*this)); } HOSTDEVICE inline explicit operator uint64_t() const { - return static_cast(float(*this)); + return static_cast(static_cast(*this)); } HOSTDEVICE inline explicit operator double() const { - return static_cast(float(*this)); + return static_cast(static_cast(*this)); } private: @@ -370,7 +371,7 @@ DEVICE inline half operator+(const half& a, const half& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hadd(a, b); #else - float res = float(float16(a)) + float(float16(b)); + float res = static_cast(float16(a)) + static_cast(float16(b)); return half(float16(res)); #endif } @@ -379,7 +380,7 @@ DEVICE inline half operator-(const half& a, const half& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hsub(a, b); #else - float res = float(float16(a)) - float(float16(b)); + float res = static_cast(float16(a)) - static_cast(float16(b)); return half(float16(res)); #endif } @@ -388,7 +389,7 @@ DEVICE inline half operator*(const half& a, const half& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hmul(a, b); #else - float res = float(float16(a)) * float(float16(b)); + float res = static_cast(float16(a)) * static_cast(float16(b)); return half(float16(res)); #endif } @@ -399,7 +400,7 @@ DEVICE inline half operator/(const half& a, const half& b) { float denom = __half2float(b); return __float2half(num / denom); #else - float res = float(float16(a)) / float(float16(b)); + float res = static_cast(float16(a)) / static_cast(float16(b)); return half(float16(res)); #endif } @@ -408,27 +409,27 @@ DEVICE inline half operator-(const half& a) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hneg(a); #else - float res = -float(float16(a)); + float res = -static_cast(float16(a)); return half(float16(res)); #endif } -DEVICE inline half& operator+=(half& a, const half& b) { +DEVICE inline half& operator+=(half& a, const half& b) { // NOLINT a = a + b; 
return a; } -DEVICE inline half& operator-=(half& a, const half& b) { +DEVICE inline half& operator-=(half& a, const half& b) { // NOLINT a = a - b; return a; } -DEVICE inline half& operator*=(half& a, const half& b) { +DEVICE inline half& operator*=(half& a, const half& b) { // NOLINT a = a * b; return a; } -DEVICE inline half& operator/=(half& a, const half& b) { +DEVICE inline half& operator/=(half& a, const half& b) { // NOLINT a = a / b; return a; } @@ -437,7 +438,7 @@ DEVICE inline bool operator==(const half& a, const half& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __heq(a, b); #else - return float(float16(a)) == float(float16(b)); + return static_cast(float16(a)) == static_cast(float16(b)); #endif } @@ -445,7 +446,7 @@ DEVICE inline bool operator!=(const half& a, const half& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hne(a, b); #else - return float(float16(a)) != float(float16(b)); + return static_cast(float16(a)) != static_cast(float16(b)); #endif } @@ -453,7 +454,7 @@ DEVICE inline bool operator<(const half& a, const half& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hlt(a, b); #else - return float(float16(a)) < float(float16(b)); + return static_cast(float16(a)) < static_cast(float16(b)); #endif } @@ -461,7 +462,7 @@ DEVICE inline bool operator<=(const half& a, const half& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hle(a, b); #else - return float(float16(a)) <= float(float16(b)); + return static_cast(float16(a)) <= static_cast(float16(b)); #endif } @@ -469,7 +470,7 @@ DEVICE inline bool operator>(const half& a, const half& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hgt(a, b); #else - return float(float16(a)) > float(float16(b)); + return static_cast(float16(a)) > static_cast(float16(b)); #endif } @@ -477,7 +478,7 @@ DEVICE inline bool operator>=(const half& a, const half& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hge(a, b); #else - return float(float16(a)) >= float(float16(b)); + return static_cast(float16(a)) >= static_cast(float16(b)); #endif } @@ -489,7 +490,7 @@ HOSTDEVICE inline float16 operator+(const float16& a, const float16& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return float16(__hadd(half(a), half(b))); #else - return float16(float(a) + float(b)); + return float16(static_cast(a) + static_cast(b)); #endif } @@ -497,7 +498,7 @@ HOSTDEVICE inline float16 operator-(const float16& a, const float16& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return float16(__hsub(half(a), half(b))); #else - return float16(float(a) - float(b)); + return float16(static_cast(a) - static_cast(b)); #endif } @@ -505,7 +506,7 @@ HOSTDEVICE inline float16 operator*(const float16& a, const float16& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return float16(__hmul(half(a), half(b))); #else - return float16(float(a) * float(b)); + return float16(static_cast(a) * static_cast(b)); #endif } @@ -516,7 +517,7 @@ HOSTDEVICE inline float16 operator/(const float16& a, const float16& b) { float denom = __half2float(half(b)); return float16(num / denom); #else - return float16(float(a) / float(b)); + return float16(static_cast(a) / static_cast(b)); #endif } @@ -530,22 +531,22 @@ HOSTDEVICE inline float16 operator-(const float16& a) { #endif } -HOSTDEVICE inline float16& operator+=(float16& a, const float16& b) { +HOSTDEVICE inline float16& operator+=(float16& a, const float16& b) { // NOLINT a = a + b; return a; } -HOSTDEVICE inline float16& 
operator-=(float16& a, const float16& b) { +HOSTDEVICE inline float16& operator-=(float16& a, const float16& b) { // NOLINT a = a - b; return a; } -HOSTDEVICE inline float16& operator*=(float16& a, const float16& b) { +HOSTDEVICE inline float16& operator*=(float16& a, const float16& b) { // NOLINT a = a * b; return a; } -HOSTDEVICE inline float16& operator/=(float16& a, const float16& b) { +HOSTDEVICE inline float16& operator/=(float16& a, const float16& b) { // NOLINT a = a / b; return a; } @@ -554,7 +555,7 @@ HOSTDEVICE inline bool operator==(const float16& a, const float16& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __heq(half(a), half(b)); #else - return float(a) == float(b); + return static_cast(a) == static_cast(b); #endif } @@ -562,7 +563,7 @@ HOSTDEVICE inline bool operator!=(const float16& a, const float16& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hne(half(a), half(b)); #else - return float(a) != float(b); + return static_cast(a) != static_cast(b); #endif } @@ -570,7 +571,7 @@ HOSTDEVICE inline bool operator<(const float16& a, const float16& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hlt(half(a), half(b)); #else - return float(a) < float(b); + return static_cast(a) < static_cast(b); #endif } @@ -578,7 +579,7 @@ HOSTDEVICE inline bool operator<=(const float16& a, const float16& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hle(half(a), half(b)); #else - return float(a) <= float(b); + return static_cast(a) <= static_cast(b); #endif } @@ -586,7 +587,7 @@ HOSTDEVICE inline bool operator>(const float16& a, const float16& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hgt(half(a), half(b)); #else - return float(a) > float(b); + return static_cast(a) > static_cast(b); #endif } @@ -594,7 +595,7 @@ HOSTDEVICE inline bool operator>=(const float16& a, const float16& b) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hge(half(a), half(b)); #else - return float(a) >= float(b); + return static_cast(a) >= static_cast(b); #endif } @@ -679,22 +680,22 @@ inline float16 operator-(const float16& a) { return res; } -inline float16& operator+=(float16& a, const float16& b) { +inline float16& operator+=(float16& a, const float16& b) { // NOLINT a = a + b; return a; } -inline float16& operator-=(float16& a, const float16& b) { +inline float16& operator-=(float16& a, const float16& b) { // NOLINT a = a - b; return a; } -inline float16& operator*=(float16& a, const float16& b) { +inline float16& operator*=(float16& a, const float16& b) { // NOLINT a = a * b; return a; } -inline float16& operator/=(float16& a, const float16& b) { +inline float16& operator/=(float16& a, const float16& b) { // NOLINT a = a / b; return a; } @@ -784,19 +785,19 @@ inline bool operator>=(const float16& a, const float16& b) { // Arithmetic operators for float16, software emulated on other CPU #else inline float16 operator+(const float16& a, const float16& b) { - return float16(float(a) + float(b)); + return float16(static_cast(a) + static_cast(b)); } inline float16 operator-(const float16& a, const float16& b) { - return float16(float(a) - float(b)); + return float16(static_cast(a) - static_cast(b)); } inline float16 operator*(const float16& a, const float16& b) { - return float16(float(a) * float(b)); + return float16(static_cast(a) * static_cast(b)); } inline float16 operator/(const float16& a, const float16& b) { - return float16(float(a) / float(b)); + return float16(static_cast(a) / static_cast(b)); } inline 
float16 operator-(const float16& a) { @@ -805,51 +806,57 @@ inline float16 operator-(const float16& a) { return res; } -inline float16& operator+=(float16& a, const float16& b) { - a = float16(float(a) + float(b)); +inline float16& operator+=(float16& a, const float16& b) { // NOLINT + a = float16(static_cast(a) + static_cast(b)); return a; } -inline float16& operator-=(float16& a, const float16& b) { - a = float16(float(a) - float(b)); +inline float16& operator-=(float16& a, const float16& b) { // NOLINT + a = float16(static_cast(a) - static_cast(b)); return a; } -inline float16& operator*=(float16& a, const float16& b) { - a = float16(float(a) * float(b)); +inline float16& operator*=(float16& a, const float16& b) { // NOLINT + a = float16(static_cast(a) * static_cast(b)); return a; } -inline float16& operator/=(float16& a, const float16& b) { - a = float16(float(a) / float(b)); +inline float16& operator/=(float16& a, const float16& b) { // NOLINT + a = float16(static_cast(a) / static_cast(b)); return a; } inline bool operator==(const float16& a, const float16& b) { - return float(a) == float(b); + return static_cast(a) == static_cast(b); } inline bool operator!=(const float16& a, const float16& b) { - return float(a) != float(b); + return static_cast(a) != static_cast(b); } inline bool operator<(const float16& a, const float16& b) { - return float(a) < float(b); + return static_cast(a) < static_cast(b); } inline bool operator<=(const float16& a, const float16& b) { - return float(a) <= float(b); + return static_cast(a) <= static_cast(b); } inline bool operator>(const float16& a, const float16& b) { - return float(a) > float(b); + return static_cast(a) > static_cast(b); } inline bool operator>=(const float16& a, const float16& b) { - return float(a) >= float(b); + return static_cast(a) >= static_cast(b); } #endif +HOSTDEVICE inline float16 raw_uint16_to_float16(uint16_t a) { + float16 res; + res.x = a; + return res; +} + HOSTDEVICE inline bool(isnan)(const float16& a) { #if defined(PADDLE_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 return __hisnan(half(a)); @@ -886,28 +893,116 @@ struct is_pod { is_standard_layout::value; }; +template <> +struct numeric_limits { + static const bool is_specialized = true; + static const bool is_signed = true; + static const bool is_integer = false; + static const bool is_exact = false; + static const bool has_infinity = true; + static const bool has_quiet_NaN = true; + static const bool has_signaling_NaN = true; + static const float_denorm_style has_denorm = denorm_present; + static const bool has_denorm_loss = false; + static const std::float_round_style round_style = std::round_to_nearest; + static const bool is_iec559 = false; + static const bool is_bounded = false; + static const bool is_modulo = false; + static const int digits = 11; + static const int digits10 = 3; + static const int max_digits10 = 5; + static const int radix = 2; + static const int min_exponent = -13; + static const int min_exponent10 = -4; + static const int max_exponent = 16; + static const int max_exponent10 = 4; + static const bool traps = true; + static const bool tinyness_before = false; + + static paddle::platform::float16(min)() { + return paddle::platform::raw_uint16_to_float16(0x400); + } + static paddle::platform::float16 lowest() { + return paddle::platform::raw_uint16_to_float16(0xfbff); + } + static paddle::platform::float16(max)() { + return paddle::platform::raw_uint16_to_float16(0x7bff); + } + static paddle::platform::float16 epsilon() { + return 
paddle::platform::raw_uint16_to_float16(0x0800); + } + static paddle::platform::float16 round_error() { + return paddle::platform::float16(0.5); + } + static paddle::platform::float16 infinity() { + return paddle::platform::raw_uint16_to_float16(0x7c00); + } + static paddle::platform::float16 quiet_NaN() { + return paddle::platform::raw_uint16_to_float16(0x7e00); + } + static paddle::platform::float16 signaling_NaN() { + return paddle::platform::raw_uint16_to_float16(0x7e00); + } + static paddle::platform::float16 denorm_min() { + return paddle::platform::raw_uint16_to_float16(0x1); + } +}; + } // namespace std namespace Eigen { + +using float16 = paddle::platform::float16; + +template <> +struct NumTraits : GenericNumTraits { + enum { + IsSigned = true, + IsInteger = false, + IsComplex = false, + RequireInitialization = false + }; + + HOSTDEVICE static inline float16 epsilon() { + return paddle::platform::raw_uint16_to_float16(0x0800); + } + HOSTDEVICE static inline float16 dummy_precision() { return float16(1e-2f); } + HOSTDEVICE static inline float16 highest() { + return paddle::platform::raw_uint16_to_float16(0x7bff); + } + HOSTDEVICE static inline float16 lowest() { + return paddle::platform::raw_uint16_to_float16(0xfbff); + } + HOSTDEVICE static inline float16 infinity() { + return paddle::platform::raw_uint16_to_float16(0x7c00); + } + HOSTDEVICE static inline float16 quiet_NaN() { + return paddle::platform::raw_uint16_to_float16(0x7c01); + } +}; + namespace numext { template <> -EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool(isnan)( - const paddle::platform::float16& a) { +HOSTDEVICE inline bool(isnan)(const float16& a) { return (paddle::platform::isnan)(a); } template <> -EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool(isinf)( - const paddle::platform::float16& a) { +HOSTDEVICE inline bool(isinf)(const float16& a) { return (paddle::platform::isinf)(a); } template <> -EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool(isfinite)( - const paddle::platform::float16& a) { +HOSTDEVICE inline bool(isfinite)(const float16& a) { return (paddle::platform::isfinite)(a); } +template <> +HOSTDEVICE inline float16 exp(const float16& a) { + return float16(::expf(static_cast(a))); +} + } // namespace numext + } // namespace Eigen diff --git a/paddle/fluid/platform/gpu_info.cc b/paddle/fluid/platform/gpu_info.cc index dd70ff9ff574b32bc96a9e8255b1bf77a5cc84e4..aaebeb1353a13ab16fcf98f10da59d41fd2f5b48 100644 --- a/paddle/fluid/platform/gpu_info.cc +++ b/paddle/fluid/platform/gpu_info.cc @@ -14,8 +14,9 @@ limitations under the License. */ #include "paddle/fluid/platform/gpu_info.h" -#include "gflags/gflags.h" +#include +#include "gflags/gflags.h" #include "paddle/fluid/platform/enforce.h" DEFINE_double(fraction_of_gpu_memory_to_use, 0.92, @@ -77,8 +78,8 @@ void SetDeviceId(int id) { "cudaSetDevice failed in paddle::platform::SetDeviceId"); } -void GpuMemoryUsage(size_t &available, size_t &total) { - PADDLE_ENFORCE(cudaMemGetInfo(&available, &total), +void GpuMemoryUsage(size_t *available, size_t *total) { + PADDLE_ENFORCE(cudaMemGetInfo(available, total), "cudaMemGetInfo failed in paddle::platform::GetMemoryUsage"); } @@ -86,7 +87,7 @@ size_t GpuMaxAllocSize() { size_t total = 0; size_t available = 0; - GpuMemoryUsage(available, total); + GpuMemoryUsage(&available, &total); // Reserve the rest for page tables, etc. 
return static_cast(total * FLAGS_fraction_of_gpu_memory_to_use); @@ -101,7 +102,7 @@ size_t GpuMaxChunkSize() { size_t total = 0; size_t available = 0; - GpuMemoryUsage(available, total); + GpuMemoryUsage(&available, &total); VLOG(10) << "GPU Usage " << available / 1024 / 1024 << "M/" << total / 1024 / 1024 << "M"; size_t reserving = static_cast(0.05 * total); diff --git a/paddle/fluid/platform/gpu_info.h b/paddle/fluid/platform/gpu_info.h index fa469fa77f5ca780da153cc87da8d04f239711f3..36345e17406e22970806fa274d5a73a703517c43 100644 --- a/paddle/fluid/platform/gpu_info.h +++ b/paddle/fluid/platform/gpu_info.h @@ -23,10 +23,6 @@ limitations under the License. */ namespace paddle { namespace platform { -//! Environment variable: fraction of GPU memory to use on each device. -const std::string kEnvFractionGpuMemoryToUse = - "PADDLE_FRACTION_GPU_MEMORY_TO_USE"; - //! Get the total number of GPU devices in system. int GetCUDADeviceCount(); @@ -46,7 +42,7 @@ int GetCurrentDeviceId(); void SetDeviceId(int device_id); //! Get the memory usage of current GPU device. -void GpuMemoryUsage(size_t &available, size_t &total); +void GpuMemoryUsage(size_t *available, size_t *total); //! Get the maximum allocation size of current GPU device. size_t GpuMaxAllocSize(); diff --git a/paddle/fluid/platform/place.cc b/paddle/fluid/platform/place.cc index de8f958eb012cb1ac563cbbbac8951e439bf8f33..655ce8485d4584aa0955315b045da6bf541f7fe2 100644 --- a/paddle/fluid/platform/place.cc +++ b/paddle/fluid/platform/place.cc @@ -26,6 +26,7 @@ class PlacePrinter : public boost::static_visitor<> { void operator()(const CUDAPlace &p) { os_ << "CUDAPlace(" << p.device << ")"; } + void operator()(const CUDAPinnedPlace &p) { os_ << "CUDAPinnedPlace"; } private: std::ostream &os_; @@ -40,12 +41,19 @@ const Place &get_place() { return the_default_place; } const CUDAPlace default_gpu() { return CUDAPlace(0); } const CPUPlace default_cpu() { return CPUPlace(); } +const CUDAPinnedPlace default_cuda_pinned() { return CUDAPinnedPlace(); } bool is_gpu_place(const Place &p) { return boost::apply_visitor(IsCUDAPlace(), p); } -bool is_cpu_place(const Place &p) { return !is_gpu_place(p); } +bool is_cpu_place(const Place &p) { + return boost::apply_visitor(IsCPUPlace(), p); +} + +bool is_cuda_pinned_place(const Place &p) { + return boost::apply_visitor(IsCUDAPinnedPlace(), p); +} bool places_are_same_class(const Place &p1, const Place &p2) { return p1.which() == p2.which(); @@ -53,7 +61,7 @@ bool places_are_same_class(const Place &p1, const Place &p2) { bool is_same_place(const Place &p1, const Place &p2) { if (places_are_same_class(p1, p2)) { - if (is_cpu_place(p1)) { + if (is_cpu_place(p1) || is_cuda_pinned_place(p1)) { return true; } else { return boost::get(p1) == boost::get(p2); diff --git a/paddle/fluid/platform/place.h b/paddle/fluid/platform/place.h index 4cc8b377b8b671eb5a446ecbae21ba9628fbd2c8..ad54a878996bd36f2d714f6554b44c89dae3fd0c 100644 --- a/paddle/fluid/platform/place.h +++ b/paddle/fluid/platform/place.h @@ -11,10 +11,11 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ - #pragma once #include +#include + #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/variant.h" @@ -45,12 +46,33 @@ struct CUDAPlace { int device; }; +struct CUDAPinnedPlace { + CUDAPinnedPlace() {} + + // needed for variant equality comparison + inline bool operator==(const CUDAPinnedPlace &) const { return true; } + inline bool operator!=(const CUDAPinnedPlace &) const { return false; } +}; + struct IsCUDAPlace : public boost::static_visitor { bool operator()(const CPUPlace &) const { return false; } bool operator()(const CUDAPlace &gpu) const { return true; } + bool operator()(const CUDAPinnedPlace &) const { return false; } }; -typedef boost::variant Place; +struct IsCPUPlace : public boost::static_visitor { + bool operator()(const CPUPlace &cpu) const { return true; } + bool operator()(const CUDAPlace &) const { return false; } + bool operator()(const CUDAPinnedPlace &) const { return false; } +}; + +struct IsCUDAPinnedPlace : public boost::static_visitor { + bool operator()(const CPUPlace &) const { return false; } + bool operator()(const CUDAPlace &) const { return false; } + bool operator()(const CUDAPinnedPlace &cuda_pinned) const { return true; } +}; + +typedef boost::variant Place; using PlaceList = std::vector; @@ -59,9 +81,11 @@ const Place &get_place(); const CUDAPlace default_gpu(); const CPUPlace default_cpu(); +const CUDAPinnedPlace default_cuda_pinned(); bool is_gpu_place(const Place &); bool is_cpu_place(const Place &); +bool is_cuda_pinned_place(const Place &); bool places_are_same_class(const Place &, const Place &); bool is_same_place(const Place &, const Place &); @@ -95,6 +119,16 @@ struct PlaceVisitorWrapper #else PADDLE_THROW("Paddle is not compiled with CUDA. Cannot visit cuda device"); return typename Visitor::result_type(); +#endif + } + + typename Visitor::result_type operator()( + const CUDAPinnedPlace &cuda_pinned) const { +#ifdef PADDLE_WITH_CUDA + return visitor_(cuda_pinned); +#else + PADDLE_THROW("Paddle is not compiled with CUDA. Cannot visit cuda_pinned"); + return typename Visitor::result_type(); #endif } }; diff --git a/paddle/fluid/pybind/.clang-format b/paddle/fluid/pybind/.clang-format deleted file mode 100644 index 29282dc87e2c499988c17d90d47d44cd5cf7f115..0000000000000000000000000000000000000000 --- a/paddle/fluid/pybind/.clang-format +++ /dev/null @@ -1,5 +0,0 @@ ---- -Language: Cpp -BasedOnStyle: Google -Standard: Cpp11 -... diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt index ada69ea4a425f70dc085ad9046bb6b930136803d..787925d9f8800b49de5b8b642304605ef4087d1e 100644 --- a/paddle/fluid/pybind/CMakeLists.txt +++ b/paddle/fluid/pybind/CMakeLists.txt @@ -15,4 +15,6 @@ if(WITH_PYTHON) target_link_libraries(paddle_pybind rt) endif(NOT APPLE AND NOT ANDROID) endif(WITH_AMD_GPU) + + cc_test(tensor_py_test SRCS tensor_py_test.cc DEPS python) endif(WITH_PYTHON) diff --git a/paddle/fluid/pybind/const_value.cc b/paddle/fluid/pybind/const_value.cc index 6657b25ed2443c1ac9cb0a09098968d3181fc6ba..3f28e616494ad1322708ad6403aaf50b22d724e6 100644 --- a/paddle/fluid/pybind/const_value.cc +++ b/paddle/fluid/pybind/const_value.cc @@ -12,17 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "const_value.h" +#include "paddle/fluid/pybind/const_value.h" #include "paddle/fluid/framework/operator.h" namespace paddle { namespace pybind { -void BindConstValue(pybind11::module& m) { - m.def("kEmptyVarName", [] { return framework::kEmptyVarName; }); - m.def("kTempVarName", [] { return framework::kTempVarName; }); - m.def("kGradVarSuffix", [] { return framework::kGradVarSuffix; }); - m.def("kZeroVarSuffix", [] { return framework::kZeroVarSuffix; }); +void BindConstValue(pybind11::module* m) { + m->def("kEmptyVarName", [] { return framework::kEmptyVarName; }); + m->def("kTempVarName", [] { return framework::kTempVarName; }); + m->def("kGradVarSuffix", [] { return framework::kGradVarSuffix; }); + m->def("kZeroVarSuffix", [] { return framework::kZeroVarSuffix; }); } } // namespace pybind diff --git a/paddle/fluid/pybind/const_value.h b/paddle/fluid/pybind/const_value.h index 79e71e039dea6585aaf8193f1417c6ab3fbf6f76..2fab3160d1d95af7f6a49c472c2e211c19e67cac 100644 --- a/paddle/fluid/pybind/const_value.h +++ b/paddle/fluid/pybind/const_value.h @@ -11,16 +11,17 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once + #include + #include "paddle/fluid/platform/enforce.h" #include "pybind11/pybind11.h" -namespace py = pybind11; - namespace paddle { namespace pybind { -extern void BindConstValue(pybind11::module& m); + +void BindConstValue(pybind11::module* m); + } // namespace pybind } // namespace paddle diff --git a/paddle/fluid/pybind/exception.cc b/paddle/fluid/pybind/exception.cc index 4bd3ecf728dedaf74a554f77b114065f2d515786..08a2f185e117718d07ba984f76dfe5bf8229c33c 100644 --- a/paddle/fluid/pybind/exception.cc +++ b/paddle/fluid/pybind/exception.cc @@ -17,8 +17,8 @@ limitations under the License. */ namespace paddle { namespace pybind { -void BindException(pybind11::module& m) { - static pybind11::exception exc(m, "EnforceNotMet"); +void BindException(pybind11::module* m) { + static pybind11::exception exc(*m, "EnforceNotMet"); pybind11::register_exception_translator([](std::exception_ptr p) { try { if (p) std::rethrow_exception(p); @@ -27,7 +27,8 @@ void BindException(pybind11::module& m) { } }); - m.def("__unittest_throw_exception__", [] { PADDLE_THROW("test exception"); }); + m->def("__unittest_throw_exception__", + [] { PADDLE_THROW("test exception"); }); } } // namespace pybind diff --git a/paddle/fluid/pybind/exception.h b/paddle/fluid/pybind/exception.h index bc6b0c067978959d4cdafec51db9574927b34b21..5e054267361f2c62b3ad36581be0ad17ce0718de 100644 --- a/paddle/fluid/pybind/exception.h +++ b/paddle/fluid/pybind/exception.h @@ -11,14 +11,17 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ - #pragma once + #include + #include "paddle/fluid/platform/enforce.h" #include "pybind11/pybind11.h" + namespace paddle { namespace pybind { -extern void BindException(pybind11::module& m); +void BindException(pybind11::module* m); + } // namespace pybind } // namespace paddle diff --git a/paddle/fluid/pybind/protobuf.cc b/paddle/fluid/pybind/protobuf.cc index 45a64f43846e79c27295e52c59dca6bdfaa120a3..2fe829036386086075a7f6ad0b9348a9e8c5e85a 100644 --- a/paddle/fluid/pybind/protobuf.cc +++ b/paddle/fluid/pybind/protobuf.cc @@ -11,10 +11,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #include "paddle/fluid/pybind/protobuf.h" + #include #include +#include +#include + #include "paddle/fluid/framework/backward.h" #include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/framework/op_desc.h" @@ -95,10 +98,11 @@ struct type_caster> namespace paddle { namespace pybind { -using namespace paddle::framework; // NOLINT +namespace pd = paddle::framework; template -static py::bytes SerializeMessage(T &self) { +static pybind11::bytes SerializeMessage( + T &self) { // NOLINT due to pybind11 convention. // Check IsInitialized in Python std::string retv; PADDLE_ENFORCE(self.Proto()->SerializePartialToString(&retv), @@ -107,24 +111,24 @@ static py::bytes SerializeMessage(T &self) { } // Bind Methods -void BindProgramDesc(py::module &m) { - py::class_(m, "ProgramDesc", "") - .def(py::init<>()) +void BindProgramDesc(pybind11::module *m) { + pybind11::class_(*m, "ProgramDesc", "") + .def(pybind11::init<>()) .def("__init__", - [](ProgramDesc &self, const ProgramDesc &other) { - new (&self) ProgramDesc(other); + [](pd::ProgramDesc &self, const pd::ProgramDesc &other) { + new (&self) pd::ProgramDesc(other); }) .def("__init__", - [](ProgramDesc &self, const py::bytes &binary_str) { + [](pd::ProgramDesc &self, const pybind11::bytes &binary_str) { std::string str(binary_str); - new (&self) ProgramDesc(str); + new (&self) pd::ProgramDesc(str); }) - .def("append_block", &ProgramDesc::AppendBlock, - py::return_value_policy::reference) + .def("append_block", &pd::ProgramDesc::AppendBlock, + pybind11::return_value_policy::reference) .def("append_backward", - [](ProgramDesc &program_desc, const VarDesc &target, + [](pd::ProgramDesc &program_desc, const pd::VarDesc &target, const std::unordered_set &no_grad_vars) { - ParamGradInfoMap param_grad_map = + pd::ParamGradInfoMap param_grad_map = AppendBackward(program_desc, target, no_grad_vars); std::unordered_map< std::string, std::tuple) + .def("block", &pd::ProgramDesc::MutableBlock, + pybind11::return_value_policy::reference) + .def("num_blocks", &pd::ProgramDesc::Size) + .def("serialize_to_string", SerializeMessage) .def("parse_from_string", - [](ProgramDesc &program_desc, const std::string &data) { - proto::ProgramDesc *desc = program_desc.Proto(); + [](pd::ProgramDesc &program_desc, const std::string &data) { + pd::proto::ProgramDesc *desc = program_desc.Proto(); PADDLE_ENFORCE(desc->ParseFromString(data), "Fail to parse ProgramDesc from string. 
This could " "be a bug of Paddle."); }); } -void BindBlockDesc(py::module &m) { - py::class_(m, "BlockDesc", "") - .def_property_readonly("id", &BlockDesc::ID) - .def_property_readonly("parent", &BlockDesc::Parent) - .def("get_forward_block_idx", &BlockDesc::ForwardBlockID) - .def("set_forward_block_idx", &BlockDesc::SetForwardBlockID) - .def("append_op", &BlockDesc::AppendOp, - py::return_value_policy::reference) - .def("prepend_op", &BlockDesc::PrependOp, - py::return_value_policy::reference) - .def("insert_op", &BlockDesc::InsertOp, - py::return_value_policy::reference) - .def("remove_op", &BlockDesc::RemoveOp) +void BindBlockDesc(pybind11::module *m) { + pybind11::class_(*m, "BlockDesc", "") + .def_property_readonly("id", &pd::BlockDesc::ID) + .def_property_readonly("parent", &pd::BlockDesc::Parent) + .def("get_forward_block_idx", &pd::BlockDesc::ForwardBlockID) + .def("set_forward_block_idx", &pd::BlockDesc::SetForwardBlockID) + .def("append_op", &pd::BlockDesc::AppendOp, + pybind11::return_value_policy::reference) + .def("prepend_op", &pd::BlockDesc::PrependOp, + pybind11::return_value_policy::reference) + .def("insert_op", &pd::BlockDesc::InsertOp, + pybind11::return_value_policy::reference) + .def("remove_op", &pd::BlockDesc::RemoveOp) .def("var", - [](BlockDesc &self, py::bytes byte_name) { + [](pd::BlockDesc &self, pybind11::bytes byte_name) { std::string name = byte_name; return self.Var(name); }, - py::return_value_policy::reference) + pybind11::return_value_policy::reference) .def("has_var", - [](BlockDesc &self, py::bytes byte_name) { + [](pd::BlockDesc &self, pybind11::bytes byte_name) { std::string name = byte_name; return self.HasVar(name); }, - py::return_value_policy::reference) + pybind11::return_value_policy::reference) .def("rename_var", - [](BlockDesc &self, const py::bytes &byte_name, - const py::bytes &byte_name_new) { + [](pd::BlockDesc &self, const pybind11::bytes &byte_name, + const pybind11::bytes &byte_name_new) { std::string name = byte_name; std::string new_name = byte_name_new; self.RenameVar(name, new_name); }) .def("has_var_recursive", - [](BlockDesc &self, py::bytes byte_name) { + [](pd::BlockDesc &self, pybind11::bytes byte_name) { std::string name = byte_name; return self.HasVarRecursive(name); }) .def("find_var", - [](BlockDesc &self, py::bytes byte_name) { + [](pd::BlockDesc &self, pybind11::bytes byte_name) { std::string name = byte_name; return self.FindVar(name); }, - py::return_value_policy::reference) + pybind11::return_value_policy::reference) .def("find_var_recursive", - [](BlockDesc &self, py::bytes byte_name) { + [](pd::BlockDesc &self, pybind11::bytes byte_name) { std::string name = byte_name; return self.FindVarRecursive(name); }, - py::return_value_policy::reference) - .def("all_vars", &BlockDesc::AllVars, py::return_value_policy::reference) - .def("op_size", &BlockDesc::OpSize) - .def("op", &BlockDesc::Op, py::return_value_policy::reference) - .def("serialize_to_string", SerializeMessage); + pybind11::return_value_policy::reference) + .def("remove_var", + [](pd::BlockDesc &self, pybind11::bytes byte_name) { + std::string name = byte_name; + return self.RemoveVar(name); + }, + pybind11::return_value_policy::reference) + .def("all_vars", &pd::BlockDesc::AllVars, + pybind11::return_value_policy::reference) + .def("op_size", &pd::BlockDesc::OpSize) + .def("op", &pd::BlockDesc::Op, pybind11::return_value_policy::reference) + .def("serialize_to_string", SerializeMessage); } -void BindVarDsec(py::module &m) { - py::class_ var_desc(m, "VarDesc", 
""); +void BindVarDsec(pybind11::module *m) { + pybind11::class_ var_desc(*m, "VarDesc", ""); var_desc .def("name", - [](VarDesc &self) { - py::bytes name = self.Name(); + [](pd::VarDesc &self) { + pybind11::bytes name = self.Name(); return name; }, - py::return_value_policy::reference) - .def("set_name", &VarDesc::SetName) - .def("set_shape", &VarDesc::SetShape) - .def("set_shapes", &VarDesc::SetShapes) - .def("set_dtype", &VarDesc::SetDataType) - .def("set_dtypes", &VarDesc::SetDataTypes) - .def("set_capacity", &VarDesc::SetCapacity) - .def("shape", &VarDesc::GetShape, py::return_value_policy::reference) - .def("shapes", &VarDesc::GetShapes, py::return_value_policy::reference) - .def("dtype", &VarDesc::GetDataType, py::return_value_policy::reference) - .def("dtypes", &VarDesc::GetDataTypes, py::return_value_policy::reference) - .def("lod_level", &VarDesc::GetLoDLevel) - .def("lod_levels", &VarDesc::GetLoDLevels, - py::return_value_policy::reference) - .def("set_lod_level", &VarDesc::SetLoDLevel) - .def("set_lod_levels", &VarDesc::SetLoDLevels) - .def("type", &VarDesc::GetType) - .def("set_type", &VarDesc::SetType) - .def("serialize_to_string", SerializeMessage) - .def("persistable", &VarDesc::Persistable) - .def("set_persistable", &VarDesc::SetPersistable); + pybind11::return_value_policy::reference) + .def("set_name", &pd::VarDesc::SetName) + .def("set_shape", &pd::VarDesc::SetShape) + .def("set_shapes", &pd::VarDesc::SetShapes) + .def("set_dtype", &pd::VarDesc::SetDataType) + .def("set_dtypes", &pd::VarDesc::SetDataTypes) + .def("set_capacity", &pd::VarDesc::SetCapacity) + .def("shape", &pd::VarDesc::GetShape, + pybind11::return_value_policy::reference) + .def("shapes", &pd::VarDesc::GetShapes, + pybind11::return_value_policy::reference) + .def("dtype", &pd::VarDesc::GetDataType, + pybind11::return_value_policy::reference) + .def("dtypes", &pd::VarDesc::GetDataTypes, + pybind11::return_value_policy::reference) + .def("lod_level", &pd::VarDesc::GetLoDLevel) + .def("lod_levels", &pd::VarDesc::GetLoDLevels, + pybind11::return_value_policy::reference) + .def("set_lod_level", &pd::VarDesc::SetLoDLevel) + .def("set_lod_levels", &pd::VarDesc::SetLoDLevels) + .def("type", &pd::VarDesc::GetType) + .def("set_type", &pd::VarDesc::SetType) + .def("serialize_to_string", SerializeMessage) + .def("persistable", &pd::VarDesc::Persistable) + .def("set_persistable", &pd::VarDesc::SetPersistable); - py::enum_(var_desc, "VarType", "") - .value("BOOL", proto::VarType::BOOL) - .value("INT16", proto::VarType::INT16) - .value("INT32", proto::VarType::INT32) - .value("INT64", proto::VarType::INT64) - .value("FP16", proto::VarType::FP16) - .value("FP32", proto::VarType::FP32) - .value("FP64", proto::VarType::FP64) - .value("LOD_TENSOR", proto::VarType::LOD_TENSOR) - .value("SELECTED_ROWS", proto::VarType::SELECTED_ROWS) - .value("FEED_MINIBATCH", proto::VarType::FEED_MINIBATCH) - .value("FETCH_LIST", proto::VarType::FETCH_LIST) - .value("STEP_SCOPES", proto::VarType::STEP_SCOPES) - .value("LOD_RANK_TABLE", proto::VarType::LOD_RANK_TABLE) - .value("LOD_TENSOR_ARRAY", proto::VarType::LOD_TENSOR_ARRAY) - .value("CHANNEL", proto::VarType::CHANNEL) - .value("PLACE_LIST", proto::VarType::PLACE_LIST) - .value("READER", proto::VarType::READER) - .value("RAW", proto::VarType::RAW); + pybind11::enum_(var_desc, "VarType", "") + .value("BOOL", pd::proto::VarType::BOOL) + .value("INT16", pd::proto::VarType::INT16) + .value("INT32", pd::proto::VarType::INT32) + .value("INT64", pd::proto::VarType::INT64) + .value("FP16", 
pd::proto::VarType::FP16) + .value("FP32", pd::proto::VarType::FP32) + .value("FP64", pd::proto::VarType::FP64) + .value("LOD_TENSOR", pd::proto::VarType::LOD_TENSOR) + .value("SELECTED_ROWS", pd::proto::VarType::SELECTED_ROWS) + .value("FEED_MINIBATCH", pd::proto::VarType::FEED_MINIBATCH) + .value("FETCH_LIST", pd::proto::VarType::FETCH_LIST) + .value("STEP_SCOPES", pd::proto::VarType::STEP_SCOPES) + .value("LOD_RANK_TABLE", pd::proto::VarType::LOD_RANK_TABLE) + .value("LOD_TENSOR_ARRAY", pd::proto::VarType::LOD_TENSOR_ARRAY) + .value("CHANNEL", pd::proto::VarType::CHANNEL) + .value("PLACE_LIST", pd::proto::VarType::PLACE_LIST) + .value("READER", pd::proto::VarType::READER) + .value("RAW", pd::proto::VarType::RAW); } -void BindOpDesc(py::module &m) { - py::enum_(m, "AttrType", "") - .value("INT", proto::AttrType::INT) - .value("INTS", proto::AttrType::INTS) - .value("FLOAT", proto::AttrType::FLOAT) - .value("FLOATS", proto::AttrType::FLOATS) - .value("STRING", proto::AttrType::STRING) - .value("STRINGS", proto::AttrType::STRINGS) - .value("BOOL", proto::AttrType::BOOLEAN) - .value("BOOLS", proto::AttrType::BOOLEANS) - .value("BLOCK", proto::AttrType::BLOCK); +void BindOpDesc(pybind11::module *m) { + pybind11::enum_(*m, "AttrType", "") + .value("INT", pd::proto::AttrType::INT) + .value("INTS", pd::proto::AttrType::INTS) + .value("FLOAT", pd::proto::AttrType::FLOAT) + .value("FLOATS", pd::proto::AttrType::FLOATS) + .value("STRING", pd::proto::AttrType::STRING) + .value("STRINGS", pd::proto::AttrType::STRINGS) + .value("BOOL", pd::proto::AttrType::BOOLEAN) + .value("BOOLS", pd::proto::AttrType::BOOLEANS) + .value("BLOCK", pd::proto::AttrType::BLOCK); - py::class_ op_desc(m, "OpDesc", ""); + pybind11::class_ op_desc(*m, "OpDesc", ""); op_desc - .def("__init__", [](OpDesc &self) { new (&self) OpDesc(); }, - py::return_value_policy::reference) - .def("copy_from", &OpDesc::CopyFrom) - .def("type", &OpDesc::Type) - .def("set_type", &OpDesc::SetType) - .def("input", &OpDesc::Input) - .def("input_names", &OpDesc::InputNames) - .def("output", &OpDesc::Output) - .def("output_names", &OpDesc::OutputNames) - .def("set_input", &OpDesc::SetInput) - .def("set_output", &OpDesc::SetOutput) - .def("input_arg_names", &OpDesc::InputArgumentNames) - .def("output_arg_names", &OpDesc::OutputArgumentNames) - .def("rename_input", &OpDesc::RenameInput) - .def("rename_output", &OpDesc::RenameOutput) - .def("has_attr", &OpDesc::HasAttr) - .def("attr_type", &OpDesc::GetAttrType) - .def("attr_names", &OpDesc::AttrNames) - .def("set_attr", &OpDesc::SetAttr) - .def("attr", &OpDesc::GetAttr) - .def("set_block_attr", &OpDesc::SetBlockAttr) + .def("__init__", [](pd::OpDesc &self) { new (&self) pd::OpDesc(); }, + pybind11::return_value_policy::reference) + .def("copy_from", &pd::OpDesc::CopyFrom) + .def("type", &pd::OpDesc::Type) + .def("set_type", &pd::OpDesc::SetType) + .def("input", &pd::OpDesc::Input) + .def("input_names", &pd::OpDesc::InputNames) + .def("output", &pd::OpDesc::Output) + .def("output_names", &pd::OpDesc::OutputNames) + .def("set_input", &pd::OpDesc::SetInput) + .def("set_output", &pd::OpDesc::SetOutput) + .def("input_arg_names", &pd::OpDesc::InputArgumentNames) + .def("output_arg_names", &pd::OpDesc::OutputArgumentNames) + .def("rename_input", &pd::OpDesc::RenameInput) + .def("rename_output", &pd::OpDesc::RenameOutput) + .def("has_attr", &pd::OpDesc::HasAttr) + .def("attr_type", &pd::OpDesc::GetAttrType) + .def("attr_names", &pd::OpDesc::AttrNames) + .def("set_attr", &pd::OpDesc::SetAttr) + .def("attr", 
&pd::OpDesc::GetAttr) + .def("set_block_attr", &pd::OpDesc::SetBlockAttr) .def("set_serialized_attr", - [](OpDesc &self, const std::string &name, - const py::bytes &seriralized) { + [](pd::OpDesc &self, const std::string &name, + const pybind11::bytes &seriralized) { std::string ser(seriralized); self.SetAttr(name, ser); }) - .def("block_attr", &OpDesc::GetBlockAttr) - .def("check_attrs", &OpDesc::CheckAttrs) - .def("infer_shape", &OpDesc::InferShape) - .def("infer_var_type", &OpDesc::InferVarType) - .def("serialize_to_string", SerializeMessage) - .def("block", &OpDesc::Block, py::return_value_policy::reference); + .def("block_attr", &pd::OpDesc::GetBlockAttr) + .def("check_attrs", &pd::OpDesc::CheckAttrs) + .def("infer_shape", &pd::OpDesc::InferShape) + .def("infer_var_type", &pd::OpDesc::InferVarType) + .def("serialize_to_string", SerializeMessage) + .def("block", &pd::OpDesc::Block, + pybind11::return_value_policy::reference); } } // namespace pybind diff --git a/paddle/fluid/pybind/protobuf.h b/paddle/fluid/pybind/protobuf.h index d0dc8936b3df50ca12315f113fbb36b0f98bb53f..e7370672a88fcf9238cc88c6aae65c6ee643746b 100644 --- a/paddle/fluid/pybind/protobuf.h +++ b/paddle/fluid/pybind/protobuf.h @@ -11,25 +11,25 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once #include + #include #include + #include "paddle/fluid/platform/variant.h" #include "pybind11/numpy.h" #include "pybind11/pybind11.h" #include "pybind11/stl.h" -namespace py = pybind11; - namespace paddle { namespace pybind { -void BindProgramDesc(py::module& m); -void BindBlockDesc(py::module& m); -void BindVarDsec(py::module& m); -void BindOpDesc(py::module& m); +void BindProgramDesc(pybind11::module* m); +void BindBlockDesc(pybind11::module* m); +void BindVarDsec(pybind11::module* m); +void BindOpDesc(pybind11::module* m); + } // namespace pybind } // namespace paddle diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index b0a3f06a8871b1dc8c6c9d7231dfe2c9764ade3f..748ad75a99ea4955730327a10ae8468a107fed0a 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -11,11 +11,17 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include +#include +#include +#include // NOLINT // for call_once +#include +#include +#include +#include #include "paddle/fluid/pybind/protobuf.h" -#include // for call_once -#include #include "paddle/fluid/framework/backward.h" #include "paddle/fluid/framework/channel.h" #include "paddle/fluid/framework/executor.h" @@ -32,7 +38,6 @@ limitations under the License. */ #include "paddle/fluid/operators/cond_op.h" #include "paddle/fluid/operators/net_op.h" #include "paddle/fluid/platform/enforce.h" -#include "paddle/fluid/platform/gpu_info.h" #include "paddle/fluid/platform/place.h" #include "paddle/fluid/platform/profiler.h" #include "paddle/fluid/pybind/const_value.h" @@ -69,7 +74,7 @@ PYBIND11_PLUGIN(core) { // not cause namespace pollution. 
using namespace paddle::framework; // NOLINT - BindException(m); + BindException(&m); py::class_(m, "Tensor", py::buffer_protocol()) .def_buffer( @@ -100,6 +105,14 @@ PYBIND11_PLUGIN(core) { [](Tensor &self, paddle::platform::CUDAPlace &place) { self.mutable_data(place); }) + .def("alloc_int", + [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) { + self.mutable_data(place); + }) + .def("alloc_float", + [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) { + self.mutable_data(place); + }) .def("set", PyCPUTensorSetFromArray) .def("set", PyCPUTensorSetFromArray) .def("set", PyCPUTensorSetFromArray) @@ -113,6 +126,12 @@ PYBIND11_PLUGIN(core) { .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) + .def("set", PyCUDAPinnedTensorSetFromArray) + .def("set", PyCUDAPinnedTensorSetFromArray) + .def("set", PyCUDAPinnedTensorSetFromArray) + .def("set", PyCUDAPinnedTensorSetFromArray) + .def("set", PyCUDAPinnedTensorSetFromArray) + .def("set", PyCUDAPinnedTensorSetFromArray) #endif .def("shape", [](Tensor &self) { return vectorize(self.dims()); }) .def("set_float_element", TensorSetElement) @@ -317,7 +336,17 @@ All parameter, weight, gradient are variables in Paddle. #else return new paddle::platform::CUDADeviceContext(place); #endif - }); + }) + .def_static("create", + [](paddle::platform::CUDAPinnedPlace& place) + -> paddle::platform::DeviceContext* { +#ifndef PADDLE_WITH_CUDA + PADDLE_THROW( + "CUDAPinnedPlace is not supported in CPU device."); +#else + return new paddle::platform::CUDAPinnedDeviceContext(place); +#endif + });; // clang-format on #ifdef PADDLE_WITH_CUDA py::class_(m, "Communicator").def(py::init<>()); @@ -330,6 +359,10 @@ All parameter, weight, gradient are variables in Paddle. .def(py::init<>()) .def("__str__", string::to_string); + py::class_(m, "CUDAPinnedPlace") + .def(py::init<>()) + .def("__str__", string::to_string); + py::class_(m, "Place") .def(py::init<>()) .def("set_place", @@ -339,7 +372,11 @@ All parameter, weight, gradient are variables in Paddle. .def("set_place", [](platform::Place &self, const platform::CUDAPlace &gpu_place) { self = gpu_place; - }); + }) + .def("set_place", [](platform::Place &self, + const platform::CUDAPinnedPlace &cuda_pinned_place) { + self = cuda_pinned_place; + }); py::class_(m, "Operator") .def_static("create", @@ -363,6 +400,11 @@ All parameter, weight, gradient are variables in Paddle. .def("run", [](OperatorBase &self, const Scope &scope, const platform::CUDAPlace &place) { self.Run(scope, place); }) + .def("run", + [](OperatorBase &self, const Scope &scope, + const platform::CUDAPinnedPlace &place) { + self.Run(scope, place); + }) .def("type", [](const OperatorBase &op) -> std::string { return op.Type(); }) .def("outputs", @@ -436,11 +478,11 @@ All parameter, weight, gradient are variables in Paddle. m.def("set_feed_variable", framework::SetFeedVariable); m.def("get_fetch_variable", framework::GetFetchVariable); - BindProgramDesc(m); - BindBlockDesc(m); - BindVarDsec(m); - BindOpDesc(m); - BindConstValue(m); + BindProgramDesc(&m); + BindBlockDesc(&m); + BindVarDsec(&m); + BindOpDesc(&m); + BindConstValue(&m); py::class_(m, "LodRankTable") .def("items", [](framework::LoDRankTable &table) { @@ -511,7 +553,7 @@ All parameter, weight, gradient are variables in Paddle. 
}) .def("run", &ParallelExecutor::Run); - BindRecordIOWriter(m); + BindRecordIOWriter(&m); return m.ptr(); } } // namespace pybind diff --git a/paddle/fluid/pybind/recordio.cc b/paddle/fluid/pybind/recordio.cc index 16f8bfb1a2e3a840670594d3cc2970e690dce891..0644d91425af1a1ac9363b1dec9e317689331fcb 100644 --- a/paddle/fluid/pybind/recordio.cc +++ b/paddle/fluid/pybind/recordio.cc @@ -13,13 +13,19 @@ // limitations under the License. #include "paddle/fluid/pybind/recordio.h" + #include +#include +#include + #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/recordio/writer.h" namespace paddle { namespace pybind { +namespace { + class RecordIOWriter { public: RecordIOWriter(const std::string& filename, recordio::Compressor compressor, @@ -49,8 +55,10 @@ class RecordIOWriter { recordio::Writer writer_; }; -void BindRecordIOWriter(py::module& m) { - py::class_ writer(m, "RecordIOWriter", ""); +} // namespace + +void BindRecordIOWriter(py::module* m) { + py::class_ writer(*m, "RecordIOWriter", ""); py::enum_(writer, "Compressor", "") .value("Snappy", recordio::Compressor::kSnappy) .value("NoCompress", recordio::Compressor::kNoCompress); diff --git a/paddle/fluid/pybind/recordio.h b/paddle/fluid/pybind/recordio.h index 60e6a9e8595614b38375fca8c13d520739af9aaf..2555f9b719af8f73fbac10d92b890afd99fac290 100644 --- a/paddle/fluid/pybind/recordio.h +++ b/paddle/fluid/pybind/recordio.h @@ -21,6 +21,7 @@ namespace py = pybind11; namespace paddle { namespace pybind { -extern void BindRecordIOWriter(py::module& m); +void BindRecordIOWriter(py::module* m); + } // namespace pybind } // namespace paddle diff --git a/paddle/fluid/pybind/tensor_py.h b/paddle/fluid/pybind/tensor_py.h index 6f8c597f8e610594851c318c122563523e4e7ea6..fbe953b2d8f12ca529f3daa01cc8e2fe8875a416 100644 --- a/paddle/fluid/pybind/tensor_py.h +++ b/paddle/fluid/pybind/tensor_py.h @@ -14,6 +14,8 @@ limitations under the License. */ #pragma once #include +#include +#include #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/platform/device_context.h" @@ -21,12 +23,8 @@ limitations under the License. 
*/ #include "pybind11/numpy.h" #include "pybind11/pybind11.h" -namespace py = pybind11; - namespace paddle { - namespace pybind { - namespace details { template @@ -34,16 +32,16 @@ struct CastToPyBufferImpl; template struct CastToPyBufferImpl { - py::buffer_info operator()(framework::Tensor &tensor) { + pybind11::buffer_info operator()(const framework::Tensor &tensor) { PADDLE_THROW("This type of tensor cannot be expose to Python"); - return py::buffer_info(); + return pybind11::buffer_info(); } }; template struct CastToPyBufferImpl { using CUR_TYPE = typename std::tuple_element>::type; - py::buffer_info operator()(framework::Tensor &tensor) { + pybind11::buffer_info operator()(const framework::Tensor &tensor) { if (std::type_index(typeid(CUR_TYPE)) == tensor.type()) { auto dim_vec = framework::vectorize(tensor.dims()); std::vector dims_outside; @@ -82,15 +80,15 @@ struct CastToPyBufferImpl { if (std::type_index(typeid(CUR_TYPE)) == std::type_index(typeid(platform::float16))) { - return py::buffer_info(dst_tensor.data(), sizeof(CUR_TYPE), - "e", /* np.dtype('e') == np.float16 */ - (size_t)framework::arity(dst_tensor.dims()), - dims_outside, strides); + return pybind11::buffer_info( + dst_tensor.data(), sizeof(CUR_TYPE), + "e", /* np.dtype('e') == np.float16 */ + (size_t)framework::arity(dst_tensor.dims()), dims_outside, strides); } else { - return py::buffer_info(dst_tensor.data(), sizeof(CUR_TYPE), - py::format_descriptor::format(), - (size_t)framework::arity(dst_tensor.dims()), - dims_outside, strides); + return pybind11::buffer_info( + dst_tensor.data(), sizeof(CUR_TYPE), + pybind11::format_descriptor::format(), + (size_t)framework::arity(dst_tensor.dims()), dims_outside, strides); } } else { constexpr bool less = I + 1 < std::tuple_size>::value; @@ -101,7 +99,7 @@ struct CastToPyBufferImpl { } // namespace details -inline py::buffer_info CastToPyBuffer(framework::Tensor &tensor) { +inline pybind11::buffer_info CastToPyBuffer(const framework::Tensor &tensor) { auto buffer_info = details::CastToPyBufferImpl()(tensor); @@ -109,7 +107,7 @@ inline py::buffer_info CastToPyBuffer(framework::Tensor &tensor) { } template -T TensorGetElement(framework::Tensor &self, size_t offset) { +T TensorGetElement(const framework::Tensor &self, size_t offset) { if (platform::is_cpu_place(self.place())) { return self.data()[offset]; } else { @@ -121,64 +119,70 @@ T TensorGetElement(framework::Tensor &self, size_t offset) { // TODO(dzhwinter) : fix the redundent Tensor allocate and free template -void TensorSetElement(framework::Tensor &self, size_t offset, T elem) { - if (platform::is_gpu_place(self.place())) { +void TensorSetElement(framework::Tensor *self, size_t offset, T elem) { + if (platform::is_gpu_place(self->place())) { std::shared_ptr dst(new framework::Tensor); - framework::TensorCopy(self, platform::CPUPlace(), dst.get()); + framework::TensorCopy(*self, platform::CPUPlace(), dst.get()); dst->data()[offset] = elem; - framework::TensorCopy(*dst.get(), self.place(), &self); + framework::TensorCopy(*dst.get(), self->place(), self); - } else if (platform::is_cpu_place(self.place())) { - self.data()[offset] = elem; + } else if (platform::is_cpu_place(self->place())) { + self->data()[offset] = elem; } } template void PyCPUTensorSetFromArray( - framework::Tensor &self, - py::array_t array, - paddle::platform::CPUPlace &place) { + framework::Tensor *self, + pybind11::array_t + array, + paddle::platform::CPUPlace place) { std::vector dims; dims.reserve(array.ndim()); for (size_t i = 0; i < array.ndim(); 
++i) { - dims.push_back((int)array.shape()[i]); + dims.push_back(static_cast(array.shape()[i])); } - self.Resize(framework::make_ddim(dims)); - auto *dst = self.mutable_data(place); + self->Resize(framework::make_ddim(dims)); + auto *dst = self->mutable_data(place); std::memcpy(dst, array.data(), sizeof(T) * array.size()); } template <> +// This following specialization maps uint16_t in the parameter type to +// platform::float16. void PyCPUTensorSetFromArray( - framework::Tensor &self, - py::array_t array, - paddle::platform::CPUPlace &place) { + framework::Tensor *self, + pybind11::array_t + array, + paddle::platform::CPUPlace place) { std::vector dims; dims.reserve(array.ndim()); for (size_t i = 0; i < array.ndim(); ++i) { - dims.push_back((int)array.shape()[i]); + dims.push_back(static_cast(array.shape()[i])); } - self.Resize(framework::make_ddim(dims)); - auto *dst = self.mutable_data(place); + self->Resize(framework::make_ddim(dims)); + auto *dst = self->mutable_data(place); std::memcpy(dst, array.data(), sizeof(uint16_t) * array.size()); } #ifdef PADDLE_WITH_CUDA template void PyCUDATensorSetFromArray( - framework::Tensor &self, - py::array_t array, - paddle::platform::CUDAPlace &place) { + framework::Tensor *self, + pybind11::array_t + array, + paddle::platform::CUDAPlace place) { std::vector dims; dims.reserve(array.ndim()); for (size_t i = 0; i < array.ndim(); ++i) { - dims.push_back((int)array.shape()[i]); + dims.push_back(static_cast(array.shape()[i])); } - self.Resize(framework::make_ddim(dims)); - auto *dst = self.mutable_data(place); + self->Resize(framework::make_ddim(dims)); + auto *dst = self->mutable_data(place); platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto dev_ctx = @@ -188,18 +192,22 @@ void PyCUDATensorSetFromArray( } template <> +// This following specialization maps uint16_t in the parameter type to +// platform::float16. void PyCUDATensorSetFromArray( - framework::Tensor &self, - py::array_t array, - paddle::platform::CUDAPlace &place) { + framework::Tensor *self, + pybind11::array_t + array, + paddle::platform::CUDAPlace place) { std::vector dims; dims.reserve(array.ndim()); for (size_t i = 0; i < array.ndim(); ++i) { - dims.push_back((int)array.shape()[i]); + dims.push_back(static_cast(array.shape()[i])); } - self.Resize(framework::make_ddim(dims)); - auto *dst = self.mutable_data(place); + self->Resize(framework::make_ddim(dims)); + auto *dst = self->mutable_data(place); platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto dev_ctx = @@ -208,6 +216,43 @@ void PyCUDATensorSetFromArray( sizeof(uint16_t) * array.size(), cudaMemcpyHostToDevice, dev_ctx->stream()); } + +template +void PyCUDAPinnedTensorSetFromArray( + framework::Tensor *self, + pybind11::array_t + array, + const paddle::platform::CUDAPinnedPlace &place) { + std::vector dims; + dims.reserve(array.ndim()); + for (size_t i = 0; i < array.ndim(); ++i) { + dims.push_back(static_cast(array.shape()[i])); + } + + self->Resize(framework::make_ddim(dims)); + auto *dst = self->mutable_data(place); + std::memcpy(dst, array.data(), sizeof(T) * array.size()); +} + +template <> +// This following specialization maps uint16_t in the parameter type to +// platform::float16. 
+void PyCUDAPinnedTensorSetFromArray( + framework::Tensor *self, + pybind11::array_t + array, + const paddle::platform::CUDAPinnedPlace &place) { + std::vector dims; + dims.reserve(array.ndim()); + for (size_t i = 0; i < array.ndim(); ++i) { + dims.push_back(static_cast(array.shape()[i])); + } + + self->Resize(framework::make_ddim(dims)); + auto *dst = self->mutable_data(place); + std::memcpy(dst, array.data(), sizeof(uint16_t) * array.size()); +} #endif } // namespace pybind diff --git a/paddle/fluid/pybind/tensor_py_test.cc b/paddle/fluid/pybind/tensor_py_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..1a0ae1d65833b1097bf69befe05884cab1317a89 --- /dev/null +++ b/paddle/fluid/pybind/tensor_py_test.cc @@ -0,0 +1,44 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/pybind/tensor_py.h" + +#include + +#include "gtest/gtest.h" +#include "paddle/fluid/framework/tensor.h" + +TEST(TensorPy, CastToPyBufferImpl) { + typedef int ElemType; + + paddle::framework::Tensor t; + auto d = paddle::framework::make_ddim({1, 2, 3}); + int* p = t.mutable_data(d, paddle::platform::CPUPlace()); + for (int i = 0; i < paddle::framework::product(d); ++i) { + p[i] = i; + } + + pybind11::buffer_info bi = paddle::pybind::CastToPyBuffer(t); + EXPECT_EQ(bi.itemsize, static_cast(sizeof(ElemType))); + EXPECT_EQ(bi.size, static_cast(paddle::framework::product(d))); + EXPECT_EQ(bi.ndim, static_cast(3)); // 3-dimensional as d. + EXPECT_EQ(bi.shape.size(), 3U); // as Dim d. + EXPECT_EQ(bi.shape[0], static_cast(1)); + EXPECT_EQ(bi.shape[1], static_cast(2)); + EXPECT_EQ(bi.shape[2], static_cast(3)); + EXPECT_EQ(bi.strides.size(), 3U); // 3-dimensional as d. 
+ EXPECT_EQ(bi.strides[2], static_cast(sizeof(ElemType))); + EXPECT_EQ(bi.strides[1], static_cast(sizeof(ElemType) * 3)); + EXPECT_EQ(bi.strides[0], static_cast(sizeof(ElemType) * 2 * 3)); +} diff --git a/paddle/fluid/recordio/chunk.cc b/paddle/fluid/recordio/chunk.cc index 187a6a4ea7bd9d3a8ae48fa262e18f71b0f7d20d..e7ebbba452c5c37113f0962e459da65c66b70873 100644 --- a/paddle/fluid/recordio/chunk.cc +++ b/paddle/fluid/recordio/chunk.cc @@ -14,11 +14,13 @@ #include "paddle/fluid/recordio/chunk.h" +#include #include #include + #include "paddle/fluid/platform/enforce.h" -#include "snappystream.hpp" -#include "zlib.h" +#include "snappy_stream/include/snappystream.hpp" +#include "zlib/include/zlib.h" namespace paddle { namespace recordio { @@ -58,8 +60,8 @@ static void ReadStreamByBuf(std::istream& in, size_t limit, Callback callback) { * Copy stream in to another stream */ static void PipeStream(std::istream& in, std::ostream& os) { - ReadStreamByBuf( - in, 0, [&os](const char* buf, size_t len) { os.write(buf, len); }); + ReadStreamByBuf(in, 0, + [&os](const char* buf, size_t len) { os.write(buf, len); }); } /** @@ -68,8 +70,8 @@ static void PipeStream(std::istream& in, std::ostream& os) { static uint32_t Crc32Stream(std::istream& in, size_t limit = 0) { uint32_t crc = static_cast(crc32(0, nullptr, 0)); ReadStreamByBuf(in, limit, [&crc](const char* buf, size_t len) { - crc = static_cast(crc32( - crc, reinterpret_cast(buf), static_cast(len))); + crc = static_cast(crc32(crc, reinterpret_cast(buf), + static_cast(len))); }); return crc; } diff --git a/paddle/fluid/recordio/chunk.h b/paddle/fluid/recordio/chunk.h index bf20ebd455c26ddeebeeea8db04cf7103b0c085f..71a1556a33bfa5c937d6a799d2818cd5a5ef2094 100644 --- a/paddle/fluid/recordio/chunk.h +++ b/paddle/fluid/recordio/chunk.h @@ -24,7 +24,7 @@ namespace recordio { // A Chunk contains the Header and optionally compressed records. class Chunk { -public: + public: Chunk() : num_bytes_(0) {} void Add(const std::string& buf) { num_bytes_ += buf.size(); @@ -46,7 +46,7 @@ public: bool Empty() const { return records_.empty(); } -private: + private: std::vector records_; // sum of record lengths in bytes. 
size_t num_bytes_; diff --git a/paddle/fluid/recordio/chunk_test.cc b/paddle/fluid/recordio/chunk_test.cc index 1f0e36a14d373ca96167199d4582bc8f17290ae8..98ca99b9a018db2da9aa563741ff3cf30461c4ce 100644 --- a/paddle/fluid/recordio/chunk_test.cc +++ b/paddle/fluid/recordio/chunk_test.cc @@ -18,29 +18,27 @@ #include "gtest/gtest.h" -using namespace paddle::recordio; - TEST(Chunk, SaveLoad) { - Chunk ch; + paddle::recordio::Chunk ch; ch.Add(std::string("12345", 6)); ch.Add(std::string("123", 4)); std::stringstream ss; - ch.Write(ss, Compressor::kNoCompress); + ch.Write(ss, paddle::recordio::Compressor::kNoCompress); ss.seekg(0); ch.Parse(ss); ASSERT_EQ(ch.NumBytes(), 10U); } TEST(Chunk, Compressor) { - Chunk ch; + paddle::recordio::Chunk ch; ch.Add(std::string("12345", 6)); ch.Add(std::string("123", 4)); ch.Add(std::string("123", 4)); ch.Add(std::string("123", 4)); std::stringstream ss; - ch.Write(ss, Compressor::kSnappy); + ch.Write(ss, paddle::recordio::Compressor::kSnappy); std::stringstream ss2; - ch.Write(ss2, Compressor::kNoCompress); + ch.Write(ss2, paddle::recordio::Compressor::kNoCompress); ASSERT_LE(ss.tellp(), ss2.tellp()); // Compress should contain less data; ch.Clear(); diff --git a/paddle/fluid/recordio/header.h b/paddle/fluid/recordio/header.h index 9200ac090de4514bef3704ac502039222eef2284..245425990b93a90d7ac6b233cff54feb48308d48 100644 --- a/paddle/fluid/recordio/header.h +++ b/paddle/fluid/recordio/header.h @@ -37,7 +37,7 @@ enum class Compressor : uint32_t { // Header is the metadata of Chunk class Header { -public: + public: Header(); Header(uint32_t num, uint32_t sum, Compressor ct, uint32_t cs); @@ -51,7 +51,7 @@ public: Compressor CompressType() const { return compressor_; } uint32_t CompressSize() const { return compress_size_; } -private: + private: uint32_t num_records_; uint32_t checksum_; Compressor compressor_; diff --git a/paddle/fluid/recordio/header_test.cc b/paddle/fluid/recordio/header_test.cc index a7d627c3eb4a7af1954795f77e5f24739edadae8..00f1887dc5e1188829ef4cd42754d161f041656d 100644 --- a/paddle/fluid/recordio/header_test.cc +++ b/paddle/fluid/recordio/header_test.cc @@ -18,14 +18,12 @@ #include "gtest/gtest.h" -using namespace paddle::recordio; - TEST(Recordio, ChunkHead) { - Header hdr(0, 1, Compressor::kGzip, 3); + paddle::recordio::Header hdr(0, 1, paddle::recordio::Compressor::kGzip, 3); std::stringstream ss; hdr.Write(ss); ss.seekg(0, std::ios::beg); - Header hdr2; + paddle::recordio::Header hdr2; hdr2.Parse(ss); EXPECT_TRUE(hdr == hdr2); } diff --git a/paddle/fluid/recordio/scanner.cc b/paddle/fluid/recordio/scanner.cc index c22281dc97e05173ad76ce76959833b92f11c4ee..88b4d4001bc1b6dc935a9aabc2db5edfb55a60e4 100644 --- a/paddle/fluid/recordio/scanner.cc +++ b/paddle/fluid/recordio/scanner.cc @@ -13,10 +13,14 @@ // limitations under the License. 
#include "paddle/fluid/recordio/scanner.h" + +#include + #include "paddle/fluid/platform/enforce.h" namespace paddle { namespace recordio { + Scanner::Scanner(std::unique_ptr &&stream) : stream_(std::move(stream)) { Reset(); diff --git a/paddle/fluid/recordio/scanner.h b/paddle/fluid/recordio/scanner.h index f3f17b69f195ddd92f5a39ead9755a7b8e2dd329..34f1b0c78d6b5af6072a993579e1866d38c6d009 100644 --- a/paddle/fluid/recordio/scanner.h +++ b/paddle/fluid/recordio/scanner.h @@ -16,12 +16,15 @@ #include #include +#include + #include "paddle/fluid/recordio/chunk.h" + namespace paddle { namespace recordio { class Scanner { -public: + public: explicit Scanner(std::unique_ptr&& stream); explicit Scanner(const std::string& filename); @@ -32,7 +35,7 @@ public: bool HasNext() const; -private: + private: std::unique_ptr stream_; Chunk cur_chunk_; size_t offset_; diff --git a/paddle/fluid/recordio/writer.cc b/paddle/fluid/recordio/writer.cc index 196d66edff8cc6000afcd74fb945c05dcab7106a..8046f4ff7896c897ebe1de2e2bb231cad5a0e410 100644 --- a/paddle/fluid/recordio/writer.cc +++ b/paddle/fluid/recordio/writer.cc @@ -12,9 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/recordio/writer.h" + +#include + #include "paddle/fluid/platform/enforce.h" + namespace paddle { namespace recordio { + void Writer::Write(const std::string& record) { cur_chunk_.Add(record); if (cur_chunk_.NumRecords() >= max_num_records_in_chunk_) { diff --git a/paddle/fluid/recordio/writer.h b/paddle/fluid/recordio/writer.h index 0c478d507547b10b8ebaaf5e512557a5c8c13e65..ac7e50ee90e6e8671d68e0d8065e0cf06c819ad0 100644 --- a/paddle/fluid/recordio/writer.h +++ b/paddle/fluid/recordio/writer.h @@ -11,16 +11,17 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. - #pragma once + +#include + #include "paddle/fluid/recordio/chunk.h" namespace paddle { namespace recordio { class Writer { -public: - Writer(std::ostream* sout, - Compressor compressor, + public: + Writer(std::ostream* sout, Compressor compressor, size_t max_num_records_in_chunk = 1000) : stream_(*sout), max_num_records_in_chunk_(max_num_records_in_chunk), @@ -32,7 +33,7 @@ public: ~Writer(); -private: + private: std::ostream& stream_; size_t max_num_records_in_chunk_; Chunk cur_chunk_; diff --git a/paddle/fluid/recordio/writer_scanner_test.cc b/paddle/fluid/recordio/writer_scanner_test.cc index 7e764f0d9439709ad101af2b8864dc0158bd359b..6583df21a20e9e034adc14b1d3eeb136899d659e 100644 --- a/paddle/fluid/recordio/writer_scanner_test.cc +++ b/paddle/fluid/recordio/writer_scanner_test.cc @@ -12,9 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "gtest/gtest.h" - #include +#include + +#include "gtest/gtest.h" #include "paddle/fluid/recordio/scanner.h" #include "paddle/fluid/recordio/writer.h" @@ -66,4 +67,4 @@ TEST(WriterScanner, TinyChunk) { ASSERT_EQ(scanner.Next(), "DEFG"); ASSERT_FALSE(scanner.HasNext()); } -} \ No newline at end of file +} diff --git a/paddle/fluid/string/.clang-format b/paddle/fluid/string/.clang-format deleted file mode 120000 index 7d28cb3924707d39dafe20f4664fb17b5538996c..0000000000000000000000000000000000000000 --- a/paddle/fluid/string/.clang-format +++ /dev/null @@ -1 +0,0 @@ -../framework/.clang-format \ No newline at end of file diff --git a/paddle/fluid/string/piece.cc b/paddle/fluid/string/piece.cc index 454f5d8d38c5f02598cddaab555334a1e8a398da..8e8cfb0e91389490895835ed09ef36adf756d3ca 100644 --- a/paddle/fluid/string/piece.cc +++ b/paddle/fluid/string/piece.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "piece.h" +#include "paddle/fluid/string/piece.h" #include diff --git a/paddle/fluid/string/printf.h b/paddle/fluid/string/printf.h index 693cf9d6dfeea0735801e64fe74b9770c258c553..062095a1c3e977c0bcc89346ead765acb023bcf7 100644 --- a/paddle/fluid/string/printf.h +++ b/paddle/fluid/string/printf.h @@ -71,6 +71,8 @@ #include #include +#include + #include "tinyformat/tinyformat.h" // https://github.com/c42f/tinyformat namespace paddle { diff --git a/paddle/fluid/string/printf_test.cc b/paddle/fluid/string/printf_test.cc index b6a60c8d6b7f15f8e5572cf5bb1e7f04ee1c1598..678029f93534ab374bd29083f8991d632ccdd5a1 100644 --- a/paddle/fluid/string/printf_test.cc +++ b/paddle/fluid/string/printf_test.cc @@ -11,7 +11,8 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -#include "printf.h" + +#include "paddle/fluid/string/printf.h" #include @@ -21,7 +22,7 @@ TEST(StringPrintf, StringPrintf) { std::string weekday = "Wednesday"; const char* month = "July"; size_t day = 27; - long hour = 14; + int hour = 14; int min = 44; EXPECT_EQ(std::string("Wednesday, July 27, 14:44"), paddle::string::Sprintf("%s, %s %d, %.2d:%.2d", weekday, month, day, diff --git a/paddle/fluid/string/to_string_test.cc b/paddle/fluid/string/to_string_test.cc index 8fc293af0e473994ac13f6615d3f6195c8c5f04c..1d9c0e5e0c2b6e7f44c1622d2828b21b0a4380ee 100644 --- a/paddle/fluid/string/to_string_test.cc +++ b/paddle/fluid/string/to_string_test.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "to_string.h" +#include "paddle/fluid/string/to_string.h" #include constexpr char kOutputString[] = "User Defined Output"; @@ -26,14 +26,13 @@ std::ostream& operator<<(std::ostream& s, const UserDefinedClass& ins) { } TEST(to_string, normal) { - using namespace paddle::string; + using paddle::string::to_string; ASSERT_EQ("10", to_string(10)); ASSERT_EQ("abc", to_string("abc")); ASSERT_EQ("1.2", to_string(1.2)); } TEST(to_string, user_defined) { - using namespace paddle::string; UserDefinedClass instance; - ASSERT_EQ(kOutputString, to_string(instance)); + ASSERT_EQ(kOutputString, paddle::string::to_string(instance)); } diff --git a/paddle/gserver/layers/UpsampleLayer.cpp b/paddle/gserver/layers/UpsampleLayer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3ff5332e6401acc3a28c9808fddd4812a7323544 --- /dev/null +++ b/paddle/gserver/layers/UpsampleLayer.cpp @@ -0,0 +1,108 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and + limitations under the License. */ + +#include "UpsampleLayer.h" +#include "iostream" + +namespace paddle { + +REGISTER_LAYER(upsample, UpsampleLayer); + +size_t UpsampleLayer::getOutputSize() { + if (upsampleSize_ == 0) { + upsampleSize_ = imgSize_ * scale_ - static_cast(padOutX_); + upsampleSizeY_ = imgSizeY_ * scaleY_ - static_cast(padOutY_); + } + return upsampleSize_ * upsampleSizeY_ * channels_; +} + +bool UpsampleLayer::init(const LayerMap& layerMap, + const ParameterMap& parameterMap) { + Layer::init(layerMap, parameterMap); + + CHECK_EQ(inputLayers_.size(), 2U); + CHECK_EQ(config_.inputs_size(), 2); + const auto& conf = config_.inputs(0).upsample_conf(); + const auto& img_conf = conf.image_conf(); + + imgSizeY_ = + img_conf.has_img_size_y() ? 
img_conf.img_size_y() : img_conf.img_size(); + imgSize_ = img_conf.img_size(); + channels_ = img_conf.channels(); + + CHECK((conf.has_upsample_size()) || (conf.has_scale())) + << "scale or upsample_size is required."; + + if (conf.has_upsample_size()) { + upsampleSize_ = conf.upsample_size(); + upsampleSizeY_ = upsampleSize_; + if (conf.has_upsample_size_y()) { + upsampleSizeY_ = conf.upsample_size_y(); + } + } else { + if (!conf.has_scale_y()) { + scale_ = scaleY_ = conf.scale_y(); + CHECK_GT(static_cast(scale_), 1); + } else { + scale_ = conf.scale(); + scaleY_ = conf.scale_y(); + } + padOutX_ = conf.pad_out_x(); + padOutY_ = conf.pad_out_y(); + CHECK(!padOutX_ || scale_ == 2) + << "Output height padding compensation requires scale_ == 2"; + CHECK(!padOutY_ || scaleY_ == 2) + << "Output width padding compensation requires scaleY_ == 2"; + upsampleSize_ = upsampleSizeY_ = 0; + } + return true; +} + +void UpsampleLayer::forward(PassType passType) { + Layer::forward(passType); + + MatrixPtr input = getInputValue(0); + MatrixPtr mask = inputLayers_[1]->getOutput("mask").value; + + size_t batchSize = input->getHeight(); + size_t outSize = getOutputSize(); + + CHECK_EQ(input->getWidth(), mask->getWidth()); + CHECK_EQ(mask->getHeight(), batchSize); + resetOutput(batchSize, outSize); + + MatrixPtr output = getOutputValue(); + output->upsampleForward(*input, + *mask, + imgSize_, + imgSizeY_, + channels_, + upsampleSize_, + upsampleSizeY_); +} + +void UpsampleLayer::backward(const UpdateCallback& callback) { + MatrixPtr mask = inputLayers_[1]->getOutput("mask").value; + MatrixPtr inputGrad = getInputGrad(0); + MatrixPtr outputGrad = getOutputGrad(); + inputGrad->upsampleBackward(*outputGrad, + *mask, + imgSize_, + imgSizeY_, + channels_, + upsampleSize_, + upsampleSizeY_); +} + +} // namespace paddle diff --git a/paddle/gserver/layers/UpsampleLayer.h b/paddle/gserver/layers/UpsampleLayer.h new file mode 100644 index 0000000000000000000000000000000000000000..25efbac5e9e6e92653f7c2b2f4dca9221737e5d6 --- /dev/null +++ b/paddle/gserver/layers/UpsampleLayer.h @@ -0,0 +1,53 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include "Layer.h" +#include "paddle/math/Matrix.h" +#include "paddle/utils/Logging.h" +#include "paddle/utils/Stat.h" + +namespace paddle { + +/** + * This layer transpose the pooling process. + * It takes two input, the first input is the input data, and + * the second is the mask data from the max-pool-with-mask layer. 
+ * + */ + +class UpsampleLayer : public Layer { +public: + explicit UpsampleLayer(const LayerConfig& config) : Layer(config) {} + ~UpsampleLayer() {} + + bool init(const LayerMap& layerMap, + const ParameterMap& parameterMap) override; + + void forward(PassType passType) override; + void backward(const UpdateCallback& callback) override; + + size_t getOutputSize(); + +protected: + size_t scale_, scaleY_; + size_t upsampleSize_, upsampleSizeY_; + size_t padOutX_, padOutY_; + size_t imgSize_, imgSizeY_; + size_t channels_; +}; + +} // namespace paddle diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/gserver/tests/CMakeLists.txt index b578a906c2027a1169a0098b93f8d0742920f99d..9d7cad7584d1defefe38bdd4d041b98bd9e45bf0 100644 --- a/paddle/gserver/tests/CMakeLists.txt +++ b/paddle/gserver/tests/CMakeLists.txt @@ -14,6 +14,11 @@ function(gserver_test TARGET) COMMAND ${TARGET}) endfunction() +add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/concat_dotmul_a.conf + COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/* ${CMAKE_CURRENT_BINARY_DIR} +) +add_custom_target(copy_gserver_conf ALL DEPENDS concat_dotmul_a.conf) + gserver_test(test_LayerGrad) gserver_test(test_CRFLayerGrad) gserver_test(test_CrossEntropyOverBeamGrad) @@ -27,15 +32,16 @@ gserver_test(test_BatchNorm) gserver_test(test_KmaxSeqScore) gserver_test(test_Expand) gserver_test(test_MaxPoolingWithMaskOutput) +gserver_test(test_Upsample) set(PYTHON_PATH ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d - ${PADDLE_SOURCE_DIR}/python/:${PADDLE_SOURCE_DIR}/paddle/gserver/tests) + ${PADDLE_BINARY_DIR}/python/:${PADDLE_BINARY_DIR}/paddle/gserver/tests) function(gserver_test_with_python TARGET) add_unittest_without_exec(${TARGET} ${TARGET}.cpp) add_test(NAME ${TARGET} COMMAND ${PYTHON_PATH} ${CMAKE_CURRENT_BINARY_DIR}/${TARGET} - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/) + WORKING_DIRECTORY ${PADDLE_BINARY_DIR}/paddle/) endfunction() gserver_test_with_python(test_PyDataProvider2) @@ -56,7 +62,7 @@ if(WITH_MKLDNN) LayerGradUtil.cpp) add_test(NAME test_MKLDNN COMMAND ${PYTHON_PATH} ${CMAKE_CURRENT_BINARY_DIR}/test_MKLDNN - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) + WORKING_DIRECTORY ${PADDLE_BINARY_DIR}/paddle) endif() ############### test_WarpCTCLayer ####################### @@ -65,7 +71,7 @@ if(NOT WITH_DOUBLE AND NOT MOBILE_INFERENCE) test_WarpCTCLayer.cpp) add_test(NAME test_WarpCTCLayer COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_WarpCTCLayer --warpctc_dir=${WARPCTC_LIB_DIR} - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) + WORKING_DIRECTORY ${PADDLE_BINARY_DIR}/paddle) endif() if(NOT MOBILE_INFERENCE) @@ -83,15 +89,15 @@ if(NOT MOBILE_INFERENCE) endif() add_test(NAME test_NetworkCompare COMMAND ${PYTHON_PATH} ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=${use_gpu} - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) + WORKING_DIRECTORY ${PADDLE_BINARY_DIR}/paddle) ############ test_CompareSparse ################ add_unittest_without_exec(test_CompareSparse test_CompareSparse.cpp) if(NOT ON_TRAVIS) add_test(NAME test_CompareSparse - COMMAND ${PYTHON_PATH} ./.set_port.sh -p port -n 6 + COMMAND ${PYTHON_PATH} ${PADDLE_SOURCE_DIR}/paddle/.set_port.sh -p port -n 6 ${CMAKE_CURRENT_BINARY_DIR}/test_CompareSparse - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/) + WORKING_DIRECTORY ${PADDLE_BINARY_DIR}/paddle/) endif() endif() diff --git a/paddle/gserver/tests/test_Upsample.cpp b/paddle/gserver/tests/test_Upsample.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..39b902fcc75e71007f855e4e258e54ed8d40f16b --- /dev/null +++ b/paddle/gserver/tests/test_Upsample.cpp @@ -0,0 +1,153 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include +#include + +#include "LayerGradUtil.h" +#include "paddle/math/MathUtils.h" +#include "paddle/testing/TestUtil.h" + +void setPoolConfig(paddle::TestConfig* config, + paddle::PoolConfig* pool, + const string& poolType) { + (*config).biasSize = 0; + (*config).layerConfig.set_type("pool"); + (*config).layerConfig.set_num_filters(1); + + int kw = 2, kh = 2; + int pw = 0, ph = 0; + int sw = 2, sh = 2; + pool->set_pool_type(poolType); + pool->set_channels(2); + pool->set_size_x(kw); + pool->set_size_y(kh); + pool->set_start(0); + pool->set_padding(pw); + pool->set_padding_y(ph); + pool->set_stride(sw); + pool->set_stride_y(sh); + + int ow = + paddle::outputSize(pool->img_size(), kw, pw, sw, /* caffeMode */ false); + int oh = + paddle::outputSize(pool->img_size_y(), kh, ph, sh, /* caffeMode */ false); + pool->set_output_x(ow); + pool->set_output_y(oh); +} + +paddle::LayerPtr doOneUpsampleTest(const paddle::MatrixPtr& inputMat, + const string& poolType, + bool use_gpu, + real* tempGradData) { + /* prepare maxPoolWithMaskLayer */ + paddle::TestConfig config; + config.inputDefs.push_back({paddle::INPUT_DATA, "layer_0", 128, 0}); + paddle::LayerInputConfig* input = config.layerConfig.add_inputs(); + paddle::PoolConfig* pool = input->mutable_pool_conf(); + + pool->set_img_size(8); + pool->set_img_size_y(8); + setPoolConfig(&config, pool, "max-pool-with-mask"); + config.layerConfig.set_size(pool->output_x() * pool->output_y() * + pool->channels()); + + config.layerConfig.set_name("MaxPoolWithMask"); + + std::vector dataLayers; + paddle::LayerMap layerMap; + vector datas; + + initDataLayer(config, + &dataLayers, + &datas, + &layerMap, + "MaxPoolWithMask", + 1, + false, + use_gpu); + + dataLayers[0]->getOutputValue()->copyFrom(*inputMat); + + FLAGS_use_gpu = use_gpu; + std::vector parameters; + paddle::LayerPtr maxPoolingWithMaskOutputLayer; + initTestLayer(config, &layerMap, ¶meters, &maxPoolingWithMaskOutputLayer); + maxPoolingWithMaskOutputLayer->forward(paddle::PASS_GC); + + /* prepare the upsample layer */ + paddle::LayerConfig upsampleLayerConfig; + upsampleLayerConfig.set_type("upsample"); + paddle::LayerInputConfig* input1 = upsampleLayerConfig.add_inputs(); + upsampleLayerConfig.add_inputs(); + + paddle::UpsampleConfig* upsampleConfig = input1->mutable_upsample_conf(); + upsampleConfig->set_scale(2); + paddle::ImageConfig* imageConfig = upsampleConfig->mutable_image_conf(); + imageConfig->set_channels(2); + imageConfig->set_img_size(4); + imageConfig->set_img_size_y(4); + upsampleLayerConfig.set_size(2 * 8 * 8); + upsampleLayerConfig.set_name("upsample"); + + for (size_t i = 0; i < 2; i++) { + paddle::LayerInputConfig& inputTemp = + *(upsampleLayerConfig.mutable_inputs(i)); + inputTemp.set_input_layer_name("MaxPoolWithMask"); 
+ } + + paddle::LayerPtr upsampleLayer; + paddle::ParameterMap parameterMap; + upsampleLayer = paddle::Layer::create(upsampleLayerConfig); + layerMap[upsampleLayerConfig.name()] = upsampleLayer; + upsampleLayer->init(layerMap, parameterMap); + upsampleLayer->setNeedGradient(true); + upsampleLayer->forward(paddle::PASS_GC); + upsampleLayer->getOutputGrad()->copyFrom(tempGradData, 128); + upsampleLayer->backward(); + + return upsampleLayer; +} + +TEST(Layer, maxPoolingWithMaskOutputLayerFwd) { + bool useGpu = false; + paddle::MatrixPtr inputMat; + paddle::MatrixPtr inputGPUMat; + paddle::MatrixPtr tempGradMat; + + inputMat = paddle::Matrix::create(1, 128, false, useGpu); + inputMat->randomizeUniform(); + + tempGradMat = paddle::Matrix::create(1, 128, false, useGpu); + tempGradMat->randomizeUniform(); + real* tempGradData = tempGradMat->getData(); + + paddle::LayerPtr upsampleLayerCPU = + doOneUpsampleTest(inputMat, "max-pool-with-mask", useGpu, tempGradData); + +#ifdef PADDLE_WITH_CUDA + useGpu = true; + real* data = inputMat->getData(); + inputGPUMat = paddle::Matrix::create(1, 128, false, useGpu); + inputGPUMat->copyFrom(data, 128); + paddle::LayerPtr upsampleLayerGPU = doOneUpsampleTest( + inputGPUMat, "max-pool-with-mask", useGpu, tempGradData); + paddle::checkMatrixEqual(upsampleLayerCPU->getOutput("").value, + upsampleLayerGPU->getOutput("").value); + + paddle::checkMatrixEqual(upsampleLayerCPU->getPrev(0)->getOutputGrad(), + upsampleLayerGPU->getPrev(0)->getOutputGrad()); +#endif +} diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index 35359d4b5a8fb9715317257538a6e2e38fc16b60..0e84cb37392839d112448b0b3c12b042e7df838e 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -1024,6 +1024,66 @@ void GpuMatrix::check(std::ostream& os, Matrix& refMat, bool printDiff) { LOG(INFO) << "the diffCnt is " << diffCnt; } +void GpuMatrix::upsampleForward(Matrix& input, + Matrix& mask, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW) { + CHECK(input.useGpu_ == true) << "Matrix type are not equal"; + CHECK(mask.useGpu_ == true) << "Matrix type are not equal"; + + real* inputData = input.getData(); + real* maskData = mask.getData(); + real* outData = data_; + + size_t batch = input.getHeight(); + + CHECK(imgSizeH * imgSizeW * channels == input.getWidth()); + CHECK(imgSizeH * imgSizeW * channels == mask.getWidth()); + CHECK_EQ(batch, this->getHeight()); + CHECK(width_ == outputH * outputW * channels); + hl_upsample_forward(inputData, + maskData, + batch, + imgSizeH, + imgSizeW, + channels, + outputH, + outputW, + outData); +} + +void GpuMatrix::upsampleBackward(Matrix& outputGrad, + Matrix& mask, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW) { + CHECK(outputGrad.useGpu_ == true) << "Matrix type are not equal"; + CHECK(mask.useGpu_ == true) << "Matrix type are not equal"; + + real* outputGradData = outputGrad.getData(); + real* maskData = mask.getData(); + real* inputGradData = data_; + size_t batch = outputGrad.getHeight(); + + CHECK(imgSizeH * imgSizeW == this->getWidth() / channels); + CHECK_EQ(batch, this->getHeight()); + CHECK_EQ(channels * outputH * outputW, outputGrad.getWidth()); + hl_upsample_backward(outputGradData, + maskData, + batch, + imgSizeH, + imgSizeW, + channels, + outputH, + outputW, + inputGradData); +} + void GpuMatrix::maxPoolForward(Matrix& inputMat, size_t imgSizeH, size_t imgSizeW, @@ -1986,6 +2046,72 @@ void CpuMatrix::inverse(MatrixPtr& matInv, bool memAlloc) { 
CHECK_EQ(info, 0); } +void CpuMatrix::upsampleForward(Matrix& input, + Matrix& mask, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW) { + real* inputData = input.getData(); + real* maskData = mask.getData(); + real* outData = data_; + size_t inLength = imgSizeH * imgSizeW; + size_t outLength = outputH * outputW; + size_t batch = input.getHeight(); + CHECK(inLength == input.getWidth() / channels); + CHECK_EQ(batch, this->getHeight()); + CHECK_EQ(channels * outLength, this->getWidth()); + + for (size_t k = 0; k < batch; k++) { + for (size_t c = 0; c < channels; c++) { + for (size_t i = 0; i < inLength; i++) { + size_t out_index = static_cast(maskData[i]); + if (out_index >= outLength) { + LOG(FATAL) << "upsample index " << out_index << " out of range."; + } + outData[out_index] = inputData[i]; + } + inputData += inLength; + maskData += inLength; + outData += outLength; + } + } +} + +void CpuMatrix::upsampleBackward(Matrix& outputGrad, + Matrix& mask, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW) { + real* outputGradData = outputGrad.getData(); + real* maskData = mask.getData(); + real* inputGradData = data_; + size_t inLength = imgSizeH * imgSizeW; + size_t outLength = outputH * outputW; + size_t batch = outputGrad.getHeight(); + CHECK(inLength == this->getWidth() / channels); + CHECK_EQ(batch, this->getHeight()); + CHECK_EQ(channels * outLength, outputGrad.getWidth()); + + for (size_t k = 0; k < batch; k++) { + for (size_t c = 0; c < channels; c++) { + for (size_t i = 0; i < inLength; i++) { + size_t out_index = static_cast(maskData[i]); + if (out_index >= outLength) { + LOG(FATAL) << "upsample index " << out_index << " out of range."; + } + inputGradData[i] = outputGradData[out_index]; + } + inputGradData += inLength; + maskData += inLength; + outputGradData += outLength; + } + } +} + void CpuMatrix::maxPoolForward(Matrix& inputMat, size_t imgSizeH, size_t imgSizeW, diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h index 631e69edc1b0f5c4ef4a115d4bd5ea29fc418018..04e9614eabc47c4c661ace2106e8ca96f45a1d49 100644 --- a/paddle/math/Matrix.h +++ b/paddle/math/Matrix.h @@ -859,6 +859,26 @@ public: LOG(FATAL) << "Not implemented"; } + virtual void upsampleForward(Matrix& input, + Matrix& mask, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW) { + LOG(FATAL) << "Not implemeted"; + } + + virtual void upsampleBackward(Matrix& outputGrad, + Matrix& mask, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW) { + LOG(FATAL) << "Not implemeted"; + } + /** * Pooling forward operation, pick out the largest element * in the sizeX of value, if the maskMatP is not NULL, it will @@ -1420,6 +1440,22 @@ public: void classificationError(Matrix& output, IVector& label, size_t topkSize = 1); + void upsampleForward(Matrix& input, + Matrix& mask, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW); + + void upsampleBackward(Matrix& outputGrad, + Matrix& mask, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW); + void maxPoolForward(Matrix& inputMat, size_t imgSizeH, size_t imgSizeW, @@ -1694,6 +1730,22 @@ public: MatrixPtr clone(size_t height, size_t width, bool useGpu = false); + void upsampleForward(Matrix& input, + Matrix& mask, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW); + + void upsampleBackward(Matrix& 
outputGrad, + Matrix& mask, + size_t imgSizeH, + size_t imgSizeW, + size_t channels, + size_t outputH, + size_t outputW); + void maxPoolForward(Matrix& inputMat, size_t imgSizeH, size_t imgSizeW, diff --git a/paddle/trainer/tests/CMakeLists.txt b/paddle/trainer/tests/CMakeLists.txt index bd518d8598f5aa7c32298ed2110a96a2743536b3..12c9ea8cef79a6bdbd6e26c35612d0abbe00257b 100644 --- a/paddle/trainer/tests/CMakeLists.txt +++ b/paddle/trainer/tests/CMakeLists.txt @@ -1,11 +1,16 @@ +add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/sample_trainer_config.conf + COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/* ${CMAKE_CURRENT_BINARY_DIR} +) +add_custom_target(copy_trainer_conf ALL DEPENDS sample_trainer_config.conf) + set(PYTHON_PATH ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d - ${PADDLE_SOURCE_DIR}/python/:${PADDLE_SOURCE_DIR}/paddle/trainer/tests) + ${PADDLE_BINARY_DIR}/python/:${PADDLE_BINARY_DIR}/paddle/trainer/tests) function(trainer_test TARGET) add_unittest_without_exec(${TARGET} ${TARGET}.cpp) add_test(NAME ${TARGET} COMMAND ${PYTHON_PATH} ${CMAKE_CURRENT_BINARY_DIR}/${TARGET} - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/) + WORKING_DIRECTORY ${PADDLE_BINARY_DIR}/paddle/) endfunction() trainer_test(test_Compare) @@ -22,11 +27,11 @@ if(WITH_PYTHON) add_test(NAME test_TrainerOnePass COMMAND ${PYTHON_PATH} ${PADDLE_SOURCE_DIR}/paddle/.set_port.sh -p port ${CMAKE_CURRENT_BINARY_DIR}/test_TrainerOnePass - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/) + WORKING_DIRECTORY ${PADDLE_BINARY_DIR}/paddle/) endif() #################### test_config_parser ######################### add_test(NAME test_config_parser COMMAND ${PYTHON_PATH} ${PYTHON_EXECUTABLE} ${PADDLE_SOURCE_DIR}/paddle/trainer/tests/config_parser_test.py - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/) + WORKING_DIRECTORY ${PADDLE_BINARY_DIR}/paddle/) diff --git a/paddle/utils/CMakeLists.txt b/paddle/utils/CMakeLists.txt index 7a4977935ede4878c07f4fb6ba0dd76bf50acd42..6292e7fa52cd86c71724d9fe84ea622e98ff1e08 100644 --- a/paddle/utils/CMakeLists.txt +++ b/paddle/utils/CMakeLists.txt @@ -2,8 +2,8 @@ file(GLOB UTIL_HEADERS . *.h) file(GLOB UTIL_SOURCES . *.cpp) create_resources(${CMAKE_CURRENT_SOURCE_DIR}/enable_virtualenv.py - ${CMAKE_CURRENT_SOURCE_DIR}/enable_virtualenv.c) -set(UTIL_RES ${CMAKE_CURRENT_SOURCE_DIR}/enable_virtualenv.c) + ${CMAKE_CURRENT_BINARY_DIR}/enable_virtualenv.c) +set(UTIL_RES ${CMAKE_CURRENT_BINARY_DIR}/enable_virtualenv.c) if(APPLE) file(GLOB UTIL_ARCH_SOURCES . 
arch/osx/*.cpp) diff --git a/proto/CMakeLists.txt b/proto/CMakeLists.txt index 556bcd1d7e60c27fece43de666e9531ab4203414..a075eeb83bda64133920f9ab0275eb6c0e0fb8c4 100644 --- a/proto/CMakeLists.txt +++ b/proto/CMakeLists.txt @@ -15,13 +15,14 @@ foreach(filename ${proto_filenames}) get_filename_component(ABS_FIL ${filename} ABSOLUTE) get_filename_component(FIL_WE ${filename} NAME_WE) set(CUR_PROTO_GEN_PY - ${PADDLE_SOURCE_DIR}/paddle/python/paddle/proto/${FIL_WE}_pb2.py) + ${PADDLE_BINARY_DIR}/paddle/python/paddle/proto/${FIL_WE}_pb2.py) set(PROTO_GEN_PY ${CUR_PROTO_GEN_PY} ${PROTO_GEN_PY}) add_custom_command(OUTPUT ${CUR_PROTO_GEN_PY} + COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/proto COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} - ARGS "--python_out=${PADDLE_SOURCE_DIR}/python/paddle/proto" + ARGS "--python_out=${PADDLE_BINARY_DIR}/python/paddle/proto" "-I" ${CMAKE_CURRENT_SOURCE_DIR} ${ABS_FIL} DEPENDS ${ABS_FIL} protoc) endforeach() diff --git a/proto/ModelConfig.proto b/proto/ModelConfig.proto index d699984ff2d3345fc91b1c4080d7f16af1366a39..d78ee9c9f39ed09825dffdfa0a442c0ffac5958f 100644 --- a/proto/ModelConfig.proto +++ b/proto/ModelConfig.proto @@ -323,6 +323,16 @@ message ClipConfig { required double max = 2; } +message UpsampleConfig { + required ImageConfig image_conf = 1; + optional uint32 scale = 2 [ default = 2 ]; + optional uint32 scale_y = 3 [ default = 2 ]; + optional bool pad_out_x = 4 [ default = false ]; + optional bool pad_out_y = 5 [ default = false ]; + optional uint32 upsample_size = 6; + optional uint32 upsample_size_y = 7; +} + message ROIPoolConfig { required uint32 pooled_width = 1; required uint32 pooled_height = 2; @@ -359,6 +369,7 @@ message LayerInputConfig { optional ClipConfig clip_conf = 18; optional ScaleSubRegionConfig scale_sub_region_conf = 19; optional ROIPoolConfig roi_pool_conf = 20; + optional UpsampleConfig upsample_conf = 21; } message LayerConfig { diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index d074b0136d77fa5a1ce5c29cd52347d04475b029..7cbd7f22bf2968b29dc0665e893101b892808b5e 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -47,14 +47,16 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in ${CMAKE_CURRENT_BINARY_DIR}/setup.py) -add_custom_command(OUTPUT ${PADDLE_SOURCE_DIR}/python/paddle/fluid/core.so - COMMAND cmake -E copy $ ${PADDLE_SOURCE_DIR}/python/paddle/fluid/core.so +add_custom_command(OUTPUT ${PADDLE_BINARY_DIR}/python/paddle/fluid/core.so + COMMAND cmake -E copy $ ${PADDLE_BINARY_DIR}/python/paddle/fluid/core.so DEPENDS paddle_pybind) -add_custom_target(copy_paddle_pybind ALL DEPENDS ${PADDLE_SOURCE_DIR}/python/paddle/fluid/core.so) +add_custom_target(copy_paddle_pybind ALL DEPENDS ${PADDLE_BINARY_DIR}/python/paddle/fluid/core.so) add_custom_command(OUTPUT ${PADDLE_PYTHON_BUILD_DIR}/.timestamp COMMAND touch stub.cc + COMMAND ${CMAKE_COMMAND} -E copy_directory ${PADDLE_SOURCE_DIR}/python/paddle ${PADDLE_BINARY_DIR}/python/paddle + COMMAND cp -r ${PADDLE_SOURCE_DIR}/paddle/py_paddle ${PADDLE_BINARY_DIR}/python/ COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_PYTHON_BUILD_DIR}/.timestamp COMMAND ${CMAKE_COMMAND} -E remove_directory ${PADDLE_PYTHON_BUILD_DIR}/lib-python diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py index 5ea4d977f4d8d9eb56b1fefa16f429df6e2a15bb..f01d638efddd471d5667fded183b90c2d7d0a856 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/fluid/__init__.py 
@@ -31,7 +31,7 @@ import regularizer import average from param_attr import ParamAttr, WeightNormParamAttr from data_feeder import DataFeeder -from core import LoDTensor, CPUPlace, CUDAPlace +from core import LoDTensor, CPUPlace, CUDAPlace, CUDAPinnedPlace from distribute_transpiler import DistributeTranspiler from distribute_transpiler_simple import SimpleDistributeTranspiler from concurrency import (Go, make_channel, channel_send, channel_recv, @@ -57,6 +57,7 @@ __all__ = framework.__all__ + executor.__all__ + concurrency.__all__ + [ 'LoDTensor', 'CPUPlace', 'CUDAPlace', + 'CUDAPinnedPlace', 'Tensor', 'ParamAttr', 'WeightNormParamAttr', diff --git a/python/paddle/fluid/distribute_transpiler.py b/python/paddle/fluid/distribute_transpiler.py index 9311fc9904eb730aa56e94a4e45a1479a67df641..7a2a81be9f269f262160cd082ec3a1d8e8e46811 100644 --- a/python/paddle/fluid/distribute_transpiler.py +++ b/python/paddle/fluid/distribute_transpiler.py @@ -17,7 +17,7 @@ import framework from framework import Program, default_main_program, default_startup_program, Parameter, Variable import optimizer from layer_helper import LayerHelper -from distributed_spliter import * +import distributed_splitter as splitter import math from . import core import debuger @@ -36,7 +36,7 @@ class VarBlock: class UnionFind(object): """ Union-find data struct. - + Union-find is a data struct that keeps track of a set of elements partitioned into a number of disjoint (non-overlapping) subsets. @@ -138,7 +138,7 @@ class DistributeTranspiler: program=None, pservers="127.0.0.1:6174", trainers=1, - split_method=round_robin): + split_method=splitter.round_robin): """ Transpile the program to distributed data-parallelism programs. The main_program will be transformed to use a remote parameter server @@ -303,7 +303,7 @@ class DistributeTranspiler: # If two ops are connected, we could add these two ops # into one set. ufind = self._create_ufind(self.optimize_ops) - # step 4.2 + # step 4.2 # Iterate through the ops and append optimize op which # located on current pserver opt_op_on_pserver = [] @@ -312,7 +312,7 @@ class DistributeTranspiler: opt_op_on_pserver.append(op) # step 4.3 # Iterate through the ops, and if an op and the optimize ops - # which located on current pserver are in one set, then + # which located on current pserver are in one set, then # append it into the sub program. # We try to put optimization program run parallelly, assume @@ -408,11 +408,7 @@ class DistributeTranspiler: pserver_vars = pserver_program.global_block().vars created_var_map = dict() for _, var in pserver_vars.iteritems(): - tmpvar = s_prog.global_block().create_var( - name=var.name, - persistable=var.persistable, - dtype=var.dtype, - shape=var.shape) + tmpvar = s_prog.global_block().clone_variable(var) created_var_map[var.name] = tmpvar # 2. rename op outputs @@ -708,11 +704,7 @@ class DistributeTranspiler: varlist = [varlist] for var in varlist: - program.global_block().create_var( - name=var.name, - persistable=var.persistable, - dtype=var.dtype, - shape=var.shape) + program.global_block().clone_variable(var) optimize_block.append_op( type=opt_op.type, @@ -760,7 +752,7 @@ class DistributeTranspiler: def _is_opt_op(self, op): # NOTE: It's a HACK implement. - # optimize op: SGDOptimize, MomentumOptimizer, AdamOptimizer and etc... + # optimize op: SGDOptimize, MomentumOptimizer, AdamOptimizer and etc... 
if "Param" in op.input_names and \ "LearningRate" in op.input_names: return True diff --git a/python/paddle/fluid/distributed_spliter.py b/python/paddle/fluid/distributed_splitter.py similarity index 78% rename from python/paddle/fluid/distributed_spliter.py rename to python/paddle/fluid/distributed_splitter.py index d288b27ba00970897d8121b82a9d51d5cf4ece09..060c1df8ad2badc5132f45ff0f44d136d828faa1 100644 --- a/python/paddle/fluid/distributed_spliter.py +++ b/python/paddle/fluid/distributed_splitter.py @@ -17,8 +17,10 @@ def hash_name(varlist, pserver_endpoints): """ hash variable names to several endpoints. - :param varlist: a list of Variables - :return: a map of pserver endpoint -> varname + Args: + varlist(list): a list of Variables + + Returns(dict): a map of pserver endpoint -> varname """ def _hash_block(block_str, total): @@ -34,9 +36,14 @@ def hash_name(varlist, pserver_endpoints): def round_robin(varlist, pserver_endpoints): """ - distribute variables to several endpoints. + Distribute variables to several endpoints. + Args: + varlist(list): a list of variables + pserver_endpoints(list): a list of pserver endpoints + + Returns(list[int]): the endpoint for each variable """ - assert (len(varlist) > len(pserver_endpoints)) + assert (len(varlist) >= len(pserver_endpoints)) eplist = [] pserver_idx = 0 diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index e15456bfc0835066e3c899aea7e2cf642b4797d8..39d4017861f4d2ac2e8e85c3d70440a43e6cdc71 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -946,13 +946,20 @@ class Block(object): The new variable cloned from 'var' in current block. """ assert isinstance(var, Variable) - return self.create_var( - name=var.name, - shape=var.shape, - dtype=var.dtype, - type=var.type, - lod_level=var.lod_level, - persistable=True) + ret_var = None + # make STEP_SCOPES var can be safely cloned. + if var.type == core.VarDesc.VarType.STEP_SCOPES: + ret_var = self.create_var( + name=var.name, persistable=var.persistable, type=var.type) + else: + ret_var = self.create_var( + name=var.name, + shape=var.shape, + dtype=var.dtype, + type=var.type, + lod_level=var.lod_level, + persistable=True) + return ret_var class Program(object): diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 3d13133bf25aa3f538f6f574bd2ae682e1bc7e39..d2e7d58524bfb11627b6acb36ef873c41b348f0f 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -133,6 +133,8 @@ def fc(input, bias_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for the bias of this layer. If it is set to None, no bias will be added to the output units. act (str, default None): Activation to be applied to the output of this layer. + use_mkldnn(bool): Use mkldnn kernel or not, it is valid only when the mkldnn + library is installed. Default: False name (str, default None): The name of this layer. 
Returns: @@ -153,38 +155,64 @@ def fc(input, dtype = helper.input_dtype() mul_results = [] - for input_var, param_attr in helper.iter_inputs_and_params(): - input_shape = input_var.shape + if use_mkldnn: + tmp = helper.create_tmp_variable(dtype) + input_shape = input.shape param_shape = [ reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1) ] + [size] w = helper.create_parameter( - attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False) - tmp = helper.create_tmp_variable(dtype) + attr=helper.param_attr, + shape=param_shape, + dtype=dtype, + is_bias=False) + if bias_attr is None or bias_attr is False: + bias_attr = False + else: + bias_attr = True helper.append_op( - type="mul", - inputs={"X": input_var, - "Y": w}, + type="fc", + inputs={"Input": input, + "W": w}, outputs={"Out": tmp}, - attrs={ - "x_num_col_dims": num_flatten_dims, - "y_num_col_dims": 1, - 'use_mkldnn': use_mkldnn - }) - mul_results.append(tmp) - - # sum - if len(mul_results) == 1: - pre_bias = mul_results[0] + attrs={"use_mkldnn": use_mkldnn, + "bias_attr": bias_attr}) + return helper.append_activation(tmp) else: - pre_bias = helper.create_tmp_variable(dtype) - helper.append_op( - type="sum", inputs={"X": mul_results}, outputs={"Out": pre_bias}) - # add bias - pre_activation = helper.append_bias_op(pre_bias, dim_start=num_flatten_dims) - # add activation - return helper.append_activation(pre_activation) + for input_var, param_attr in helper.iter_inputs_and_params(): + input_shape = input_var.shape + param_shape = [ + reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1) + ] + [size] + + w = helper.create_parameter( + attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False) + tmp = helper.create_tmp_variable(dtype) + helper.append_op( + type="mul", + inputs={"X": input_var, + "Y": w}, + outputs={"Out": tmp}, + attrs={ + "x_num_col_dims": num_flatten_dims, + "y_num_col_dims": 1, + }) + mul_results.append(tmp) + + if len(mul_results) == 1: + pre_bias = mul_results[0] + else: + pre_bias = helper.create_tmp_variable(dtype) + helper.append_op( + type="sum", + inputs={"X": mul_results}, + outputs={"Out": pre_bias}) + # add bias + pre_activation = helper.append_bias_op( + pre_bias, dim_start=num_flatten_dims) + # add activation + return helper.append_activation(pre_activation) def embedding(input, diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py index a2c830b3c943b114f3024f23f73f78bf87e1da34..1b3ba414ecb50cc4d75dcaecd1f31265334c9aec 100644 --- a/python/paddle/fluid/parallel_executor.py +++ b/python/paddle/fluid/parallel_executor.py @@ -26,25 +26,29 @@ class ParallelExecutor(object): use_cuda, num_threads=None, allow_op_delay=False): - places = [] + self._places = [] + self._act_places = [] if use_cuda: for i in xrange(core.get_cuda_device_count()): p = core.Place() - p.set_place(core.CUDAPlace(i)) - places.append(p) + self._act_places.append(core.CUDAPlace(i)) + p.set_place(self._act_places[-1]) + self._places.append(p) else: for i in xrange(multiprocessing.cpu_count()): p = core.Place() - p.set_place(core.CPUPlace()) - places.append(p) + self._act_places.append(core.CPUPlace(i)) + p.set_place(self._act_places[-1]) + self._places.append(p) + assert self._places, "no place for execution" if num_threads is None: if use_cuda: # Experiments on se-resnext shows that too many threads hurt # performance. Worth tunning for other models in the future. 
- num_threads = len(places) + num_threads = len(self._places) else: - min(len(places) * 2, multiprocessing.cpu_count()) + min(len(self._places) * 2, multiprocessing.cpu_count()) startup = framework.default_startup_program() main = framework.default_main_program() @@ -53,7 +57,7 @@ class ParallelExecutor(object): self.executor = core.ParallelExecutor( num_threads, True if use_cuda else False, # use_event - places, + self._places, set([ p.name for p in main.global_block().iter_parameters() if not p.stop_gradient @@ -65,8 +69,25 @@ class ParallelExecutor(object): allow_op_delay) self.scope = scope - def run(self, fetch_list): + def run(self, fetch_list, feed_dict={}): + """ + :param fetch_list: A list of variable names that will be fetched. + :param feed_dict: A dict mapping for feed variable name to LoDTensor + or numpy array. + :return: fetched value list. + """ + if not isinstance(feed_dict, dict): + raise TypeError("feed_dict should be a dict") + + feed_tensor_dict = {} + for i, feed_name in enumerate(feed_dict): + feed_tensor = feed_dict[feed_name] + if not isinstance(feed_tensor, core.LoDTensor): + feed_tensor = core.LoDTensor() + feed_tensor.set(feed_dict[feed_name], self._act_places[0]) + feed_tensor_dict[feed_name] = feed_tensor + fetch_var_name = '@FETCHED_VAR_NAME@' - self.executor.run(fetch_list, fetch_var_name) + self.executor.run(fetch_list, fetch_var_name, feed_tensor_dict) arr = self.scope.find_var(fetch_var_name).get_lod_tensor_array() return [arr[i] for i in range(len(arr))] diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt index 1b2d29a47fd050e40f83443432f8194984c71214..f10ef9b63412ecf74471f4fb94eb91ac72d5f8f9 100644 --- a/python/paddle/fluid/tests/unittests/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt @@ -22,9 +22,9 @@ function(py_test_modules TARGET_NAME) set(multiValueArgs MODULES DEPS ARGS ENVS) cmake_parse_arguments(py_test_modules "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) add_test(NAME ${TARGET_NAME} - COMMAND env PYTHONPATH=${PADDLE_PYTHON_BUILD_DIR}/lib-python ${py_test_modules_ENVS} + COMMAND env PYTHONPATH=${PADDLE_BINARY_DIR}/python ${py_test_modules_ENVS} ${PYTHON_EXECUTABLE} -u -m unittest --verbose ${py_test_modules_MODULES} ${py_test_modules_ARGS} - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) endif() endfunction() diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_op.py index 4b6e3fb69a12095c77f343515fe3b6d1f3fccb14..65606a0b4373b28036096cf046da5143a3b8bcd0 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_op.py @@ -97,8 +97,11 @@ class TestConv2dOp(OpTest): } self.outputs = {'Output': output} + def testcudnn(self): + return core.is_compiled_with_cuda() and self.use_cudnn + def test_check_output(self): - if self.use_cudnn: + if self.testcudnn(): place = core.CUDAPlace(0) self.check_output_with_place(place, atol=1e-5) else: @@ -107,7 +110,7 @@ class TestConv2dOp(OpTest): def test_check_grad(self): if self.dtype == np.float16: return - if self.use_cudnn: + if self.testcudnn(): place = core.CUDAPlace(0) self.check_grad_with_place( place, @@ -121,7 +124,7 @@ class TestConv2dOp(OpTest): def test_check_grad_no_filter(self): if self.dtype == np.float16: return - if self.use_cudnn: + if self.testcudnn(): place = core.CUDAPlace(0) self.check_grad_with_place( place, 
['Input'], @@ -138,7 +141,7 @@ class TestConv2dOp(OpTest): def test_check_grad_no_input(self): if self.dtype == np.float16: return - if self.use_cudnn: + if self.testcudnn(): place = core.CUDAPlace(0) self.check_grad_with_place( place, ['Filter'], diff --git a/python/paddle/fluid/tests/unittests/test_fc_op.py b/python/paddle/fluid/tests/unittests/test_fc_op.py new file mode 100644 index 0000000000000000000000000000000000000000..3f547f3c484bf034a87823a75d946ef130a5cb70 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_fc_op.py @@ -0,0 +1,99 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +from op_test import OpTest + + +def fully_connected_naive(input, weights, bias_data=None): + in_n, in_c, in_h, in_w = input.shape + w_h, w_c = weights.shape + + x_data = np.reshape(input, [in_n, in_c * in_h * in_w]) + w_data = np.transpose(np.reshape(weights, (w_c, in_c * in_h * in_w))) + result = None + + if not bias_data: + result = np.dot(x_data, w_data) + else: + result = np.dot(x_data, w_data) + bias_data + + return result + + +class MatrixGenerate: + def __init__(self, mb, ic, oc, h, w): + self.input = np.random.random((mb, ic, h, w)).astype("float32") + self.weights = np.random.random((ic * h * w, oc)).astype("float32") + + +class TestFCMKLDNNOp(OpTest): + def setUp(self): + self.op_type = "fc" + self.use_mkldnn = True + self.with_bias = True + self.matrix = MatrixGenerate(1, 10, 15, 3, 3) + + self.inputs = {'Input': self.matrix.input, 'W': self.matrix.weights} + + self.attrs = { + 'use_mkldnn': self.use_mkldnn, + 'with_bias': self.with_bias + } + + self.outputs = { + 'Out': fully_connected_naive(self.matrix.input, self.matrix.weights) + } + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(set(['Input', 'W']), 'Out', max_relative_error=0.9) + + def test_check_grad_no_weight(self): + self.check_grad( + ['Input'], 'Out', max_relative_error=0.5, no_grad_set=set('W')) + + +class TestFCMKLDNNOp1(TestFCMKLDNNOp): + def init_op_type(self): + self.matrix = MatrixGenerate(2, 15, 48, 2, 2) + + +class TestFCMKLDNNOp2(TestFCMKLDNNOp): + def init_op_type(self): + self.matrix = MatrixGenerate(2, 32, 40, 1, 1) + + +class TestFCMKLDNNOp3(TestFCMKLDNNOp): + def init_op_type(self): + self.matrix = MatrixGenerate(2, 2, 4, 1, 1) + + +class TestFCMKLDNNOp4(TestFCMKLDNNOp): + def init_op_type(self): + self.with_bias = False + self.matrix = MatrixGenerate(2, 32, 48, 2, 2) + + +class TestFCMKLDNNOp4(TestFCMKLDNNOp): + def init_op_type(self): + self.with_bias = False + self.matrix = MatrixGenerate(2, 32, 1000, 6, 6) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py index 3f739afd2516fdc2bdf3711d4780a1196c6f3f13..f8d5785fbfe64843f4aa3b96b24809df60980c74 100644 --- 
a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py +++ b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py @@ -115,18 +115,18 @@ class TestLookupTableWIsSelectedRows(OpTest): w_array = np.ones((len(rows), row_numel)).astype("float32") for i in range(len(rows)): w_array[i] *= i - ids_tensor = w_selected_rows.get_tensor() - ids_tensor.set(w_array, place) + w_tensor = w_selected_rows.get_tensor() + w_tensor.set(w_array, place) # create Out Variable - Out_tensor = scope.var('Out').get_tensor() + out_tensor = scope.var('Out').get_tensor() # create and run lookup_table operator lookup_table = Operator("lookup_table", W='W', Ids='Ids', Out='Out') lookup_table.run(scope, place) # get result from Out - result_array = np.array(Out_tensor) + result_array = np.array(out_tensor) # all(): return True if all elements of the iterable are true (or if the iterable is empty) for idx, row in enumerate(ids_array): assert (row[0] == result_array[idx]).all() diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor.py b/python/paddle/fluid/tests/unittests/test_parallel_executor.py index a79e4b3e183eaef06be27a724893799923e84ac1..0f90e0e4df5da93f427b892d1be69f14625d2e29 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor.py @@ -21,13 +21,17 @@ import paddle.dataset.mnist as mnist import paddle.dataset.wmt16 as wmt16 -def simple_fc_net(): - reader = fluid.layers.open_recordio_file( - filename='./mnist.recordio', - shapes=[[-1, 784], [-1, 1]], - lod_levels=[0, 0], - dtypes=['float32', 'int64']) - img, label = fluid.layers.read_file(reader) +def simple_fc_net(use_feed): + if use_feed: + img = fluid.layers.data(name='image', shape=[784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + else: + reader = fluid.layers.open_recordio_file( + filename='./mnist.recordio', + shapes=[[-1, 784], [-1, 1]], + lod_levels=[0, 0], + dtypes=['float32', 'int64']) + img, label = fluid.layers.read_file(reader) hidden = img for _ in xrange(4): hidden = fluid.layers.fc( @@ -42,13 +46,18 @@ def simple_fc_net(): return loss -def fc_with_batchnorm(): - reader = fluid.layers.open_recordio_file( - filename='./mnist.recordio', - shapes=[[-1, 784], [-1, 1]], - lod_levels=[0, 0], - dtypes=['float32', 'int64']) - img, label = fluid.layers.read_file(reader) +def fc_with_batchnorm(use_feed): + if use_feed: + img = fluid.layers.data(name='image', shape=[784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + else: + reader = fluid.layers.open_recordio_file( + filename='./mnist.recordio', + shapes=[[-1, 784], [-1, 1]], + lod_levels=[0, 0], + dtypes=['float32', 'int64']) + img, label = fluid.layers.read_file(reader) + hidden = img for _ in xrange(1): hidden = fluid.layers.fc( @@ -135,7 +144,9 @@ def bottleneck_block(input, num_filters, stride, cardinality, reduction_ratio): return fluid.layers.elementwise_add(x=short, y=scale, act='relu') -def SE_ResNeXt152Small(batch_size=2): +def SE_ResNeXt50Small(batch_size=2, use_feed=False): + assert not use_feed, "SE_ResNeXt doesn't support feed yet" + img = fluid.layers.fill_constant( shape=[batch_size, 3, 224, 224], dtype='float32', value=0.0) label = fluid.layers.fill_constant( @@ -150,9 +161,9 @@ def SE_ResNeXt152Small(batch_size=2): conv = fluid.layers.pool2d( input=conv, pool_size=3, pool_stride=2, pool_padding=1, pool_type='max') - cardinality = 64 + cardinality = 32 reduction_ratio = 16 - depth = [3, 8, 36, 3] 
+ depth = [3, 4, 6, 3] num_filters = [128, 256, 512, 1024] for block in range(len(depth)): @@ -185,30 +196,28 @@ class TestParallelExecutorBase(unittest.TestCase): memory_opt=True, iter=10, batch_size=None, - allow_op_delay=False): + allow_op_delay=False, + feed_dict={}): main = fluid.Program() startup = fluid.Program() with fluid.program_guard(main, startup): - loss = method() + loss = method(use_feed=len(feed_dict) > 0) adam = fluid.optimizer.Adam() adam.minimize(loss) if memory_opt: fluid.memory_optimize(main) - exe = fluid.ParallelExecutor( - loss_name=loss.name, - use_cuda=True, - allow_op_delay=allow_op_delay) + exe = fluid.ParallelExecutor(loss_name=loss.name, use_cuda=True) if batch_size is not None: batch_size *= fluid.core.get_cuda_device_count() begin = time.time() - first_loss, = exe.run([loss.name]) + first_loss, = exe.run([loss.name], feed_dict=feed_dict) first_loss = numpy.array(first_loss) for i in xrange(iter): - exe.run([]) + exe.run([], feed_dict=feed_dict) - last_loss, = exe.run([loss.name]) + last_loss, = exe.run([loss.name], feed_dict=feed_dict) end = time.time() if batch_size is not None: @@ -242,9 +251,19 @@ class TestMNIST(TestParallelExecutorBase): self.check_network_convergence(simple_fc_net) self.check_network_convergence(simple_fc_net, allow_op_delay=True) + img = numpy.zeros(shape=[32, 784], dtype='float32') + label = numpy.ones(shape=[32, 1], dtype='int64') + self.check_network_convergence( + simple_fc_net, feed_dict={"image": img, + "label": label}) + def test_batchnorm_fc(self): self.check_network_convergence(fc_with_batchnorm) - self.check_network_convergence(fc_with_batchnorm, allow_op_delay=True) + img = numpy.zeros(shape=[32, 784], dtype='float32') + label = numpy.ones(shape=[32, 1], dtype='int64') + self.check_network_convergence( + fc_with_batchnorm, feed_dict={"image": img, + "label": label}) class TestResnet(TestParallelExecutorBase): @@ -271,7 +290,7 @@ class TestResnet(TestParallelExecutorBase): batch_size = 2 self.check_network_convergence( functools.partial( - SE_ResNeXt152Small, batch_size=batch_size), + SE_ResNeXt50Small, batch_size=batch_size), iter=20, batch_size=batch_size) @@ -400,7 +419,8 @@ def prepare_batch_input(insts, src_pad_idx, trg_pad_idx, n_head): import transformer_model -def transformer(): +def transformer(use_feed): + assert not use_feed, "transfomer doesn't support feed yet" return transformer_model.transformer( ModelHyperParams.src_vocab_size + 1, ModelHyperParams.trg_vocab_size + 1, ModelHyperParams.max_length + 1, diff --git a/python/paddle/fluid/tests/unittests/test_prior_box_op.py b/python/paddle/fluid/tests/unittests/test_prior_box_op.py index c21138c13e6753f9dfcbd7d439269f7cf9a04f23..bcbc02a2baa46b9ab583ecf3006bd3262e6038fd 100644 --- a/python/paddle/fluid/tests/unittests/test_prior_box_op.py +++ b/python/paddle/fluid/tests/unittests/test_prior_box_op.py @@ -28,7 +28,6 @@ class TestPriorBoxOp(OpTest): self.attrs = { 'min_sizes': self.min_sizes, - 'max_sizes': self.max_sizes, 'aspect_ratios': self.aspect_ratios, 'variances': self.variances, 'flip': self.flip, @@ -37,25 +36,28 @@ class TestPriorBoxOp(OpTest): 'step_h': self.step_h, 'offset': self.offset } + if len(self.max_sizes) > 0: + self.attrs['max_sizes'] = self.max_sizes self.outputs = {'Boxes': self.out_boxes, 'Variances': self.out_var} def test_check_output(self): self.check_output() - def test_check_grad(self): - return - def setUp(self): self.op_type = "prior_box" self.set_data() + def set_max_sizes(self): + max_sizes = [5, 10] + self.max_sizes = 
np.array(max_sizes).astype('float32').tolist() + def init_test_params(self): - self.layer_w = 4 - self.layer_h = 4 + self.layer_w = 32 + self.layer_h = 32 - self.image_w = 20 - self.image_h = 20 + self.image_w = 40 + self.image_h = 40 self.step_w = float(self.image_w) / float(self.layer_w) self.step_h = float(self.image_h) / float(self.layer_h) @@ -66,8 +68,7 @@ class TestPriorBoxOp(OpTest): self.min_sizes = [2, 4] self.min_sizes = np.array(self.min_sizes).astype('float32').tolist() - self.max_sizes = [5, 10] - self.max_sizes = np.array(self.max_sizes).astype('float32').tolist() + self.set_max_sizes() self.aspect_ratios = [2.0, 3.0] self.flip = True self.real_aspect_ratios = [1, 2.0, 1.0 / 2.0, 3.0, 1.0 / 3.0] @@ -79,7 +80,7 @@ class TestPriorBoxOp(OpTest): self.clip = True self.num_priors = len(self.real_aspect_ratios) * len(self.min_sizes) - if len(self.max_sizes) > 1: + if len(self.max_sizes) > 0: self.num_priors += len(self.max_sizes) self.offset = 0.5 @@ -105,35 +106,27 @@ class TestPriorBoxOp(OpTest): idx = 0 for s in range(len(self.min_sizes)): min_size = self.min_sizes[s] - c_w = c_h = min_size / 2. - out_boxes[h, w, idx, :] = [ - (c_x - c_w) / self.image_w, (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, (c_y + c_h) / self.image_h - ] - idx += 1 - - if len(self.max_sizes) > 0: - max_size = self.max_sizes[s] - # second prior: aspect_ratio = 1, - c_w = c_h = math.sqrt(min_size * max_size) / 2 + # rest of priors + for r in range(len(self.real_aspect_ratios)): + ar = self.real_aspect_ratios[r] + c_w = min_size * math.sqrt(ar) / 2 + c_h = (min_size / math.sqrt(ar)) / 2 out_boxes[h, w, idx, :] = [(c_x - c_w) / self.image_w, (c_y - c_h) / self.image_h, (c_x + c_w) / self.image_w, (c_y + c_h) / self.image_h] idx += 1 - # rest of priors - for r in range(len(self.real_aspect_ratios)): - ar = self.real_aspect_ratios[r] - if math.fabs(ar - 1.) 
< 1e-6: - continue - c_w = min_size * math.sqrt(ar) / 2 - c_h = (min_size / math.sqrt(ar)) / 2 + if len(self.max_sizes) > 0: + max_size = self.max_sizes[s] + # second prior: aspect_ratio = 1, + c_w = c_h = math.sqrt(min_size * max_size) / 2 out_boxes[h, w, idx, :] = [(c_x - c_w) / self.image_w, (c_y - c_h) / self.image_h, (c_x + c_w) / self.image_w, (c_y + c_h) / self.image_h] idx += 1 + # clip the prior's coordidate such that it is within[0, 1] if self.clip: out_boxes = np.clip(out_boxes, 0.0, 1.0) @@ -144,5 +137,10 @@ class TestPriorBoxOp(OpTest): self.out_var = out_var.astype('float32') +class TestPriorBoxOpWithMaxSize(TestPriorBoxOp): + def set_max_sizes(self): + self.max_sizes = [] + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_protobuf_descs.py b/python/paddle/fluid/tests/unittests/test_protobuf_descs.py index e4cf4a8bce8a53c0348130716dc18c61ac9a5913..f98a8bbc68a4315df3ae761f2e52b8f11cb620c6 100644 --- a/python/paddle/fluid/tests/unittests/test_protobuf_descs.py +++ b/python/paddle/fluid/tests/unittests/test_protobuf_descs.py @@ -19,9 +19,9 @@ from paddle.fluid.framework import Program class TestOpDesc(unittest.TestCase): def test_op_desc(self): - prog = core.ProgramDesc() - self.assertIsNotNone(prog) - block = prog.block(0) + program_desc = core.ProgramDesc() + self.assertIsNotNone(program_desc) + block = program_desc.block(0) self.assertIsNotNone(block) op = block.append_op() self.assertIsNotNone(op) @@ -67,7 +67,7 @@ class TestOpDesc(unittest.TestCase): self.assertEqual(8, len(op.attr_names())) - op.set_block_attr("block_attr", prog.block(0)) + op.set_block_attr("block_attr", program_desc.block(0)) self.assertEqual(0, op.block_attr("block_attr")) mul_op = block.append_op() @@ -88,20 +88,20 @@ class TestProgramDesc(unittest.TestCase): del program_desc def test_append_block(self): - prog_desc = core.ProgramDesc() - self.assertIsNotNone(prog_desc) - block_root = prog_desc.block(0) + program_desc = core.ProgramDesc() + self.assertIsNotNone(program_desc) + block_root = program_desc.block(0) self.assertIsNotNone(block_root) self.assertEqual(block_root.id, 0) - block1 = prog_desc.append_block(block_root) - block2 = prog_desc.append_block(block1) + block1 = program_desc.append_block(block_root) + block2 = program_desc.append_block(block1) self.assertIsNotNone(block1) self.assertEqual(block1.id, block2.parent) self.assertEqual(block_root.id, block1.parent) - block3 = prog_desc.append_block(block_root) + block3 = program_desc.append_block(block_root) self.assertEqual(block3.parent, block_root.id) - self.assertEqual(prog_desc.block(1).id, 1) - self.assertEqual(4, prog_desc.num_blocks()) + self.assertEqual(program_desc.block(1).id, 1) + self.assertEqual(4, program_desc.num_blocks()) class TestVarDesc(unittest.TestCase): @@ -162,9 +162,9 @@ class TestVarDesc(unittest.TestCase): class TestBlockDesc(unittest.TestCase): def test_add_var(self): - prog = core.ProgramDesc() - self.assertIsNotNone(prog) - block = prog.block(0) + program_desc = core.ProgramDesc() + self.assertIsNotNone(program_desc) + block = program_desc.block(0) self.assertIsNotNone(block) var1 = block.var("var1") var2 = block.var("var2") @@ -175,9 +175,9 @@ class TestBlockDesc(unittest.TestCase): self.assertEqual(var2_re, var2) def test_add_op(self): - prog = core.ProgramDesc() - self.assertIsNotNone(prog) - block = prog.block(0) + program_desc = core.ProgramDesc() + self.assertIsNotNone(program_desc) + block = program_desc.block(0) self.assertIsNotNone(block) op1 = 
block.append_op() op2 = block.append_op() @@ -189,9 +189,9 @@ class TestBlockDesc(unittest.TestCase): def test_remove_op(self): program = Program() - prog = program.desc - self.assertIsNotNone(prog) - block = prog.block(0) + program_desc = program.desc + self.assertIsNotNone(program_desc) + block = program_desc.block(0) self.assertIsNotNone(block) op0 = block.append_op() diff --git a/python/paddle/fluid/tests/unittests/test_sgd_op.py b/python/paddle/fluid/tests/unittests/test_sgd_op.py index c498b23db12cd83304f4c3a3d1f15bd68ad4f0b6..3126293f9d8e52daa866be5fc1533648a33f3363 100644 --- a/python/paddle/fluid/tests/unittests/test_sgd_op.py +++ b/python/paddle/fluid/tests/unittests/test_sgd_op.py @@ -97,5 +97,72 @@ class TestSparseSGDOp(unittest.TestCase): self.check_with_place(place) +class TestSGDOpOptimizeSelectedRows(unittest.TestCase): + def check_with_place(self, place): + scope = core.Scope() + + row_width = 12 + # create and initialize Grad Variable + grad_height = 10 + grad_rows = [0, 4, 7] + + grad_selected_rows = scope.var('Grad').get_selected_rows() + grad_selected_rows.set_height(grad_height) + grad_selected_rows.set_rows(grad_rows) + grad_array = np.ones((len(grad_rows), row_width)).astype("float32") + grad_array[0, 0] = 2.0 + grad_array[2, 8] = 4.0 + + grad_tensor = grad_selected_rows.get_tensor() + grad_tensor.set(grad_array, place) + + # create and initialize Param Variable + # create and initialize W Variable + param_rows = [0, 1, 2, 3, 4, 5, 6, 7] + + # init Param + w_selected_rows = scope.var('Param').get_selected_rows() + w_selected_rows.set_height(len(param_rows)) + w_selected_rows.set_rows(param_rows) + w_array = np.ones((len(param_rows), row_width)).astype("float32") + for i in range(len(param_rows)): + w_array[i] *= i + w_tensor = w_selected_rows.get_tensor() + w_tensor.set(w_array, place) + + w_before_optimize = np.array(w_tensor) + + # create and initialize LeraningRate Variable + lr_value = 0.1 + lr = scope.var('LearningRate').get_tensor() + lr_array = np.full((1), lr_value).astype("float32") + lr.set(lr_array, place) + + # optimize with Python + w_after_optimize = np.copy(w_before_optimize) + for index, id in enumerate(grad_rows): + w_after_optimize[id] = w_before_optimize[ + id] - lr_value * grad_array[index] + + # create and run sgd operator + sgd_op = Operator( + "sgd", + Param='Param', + Grad='Grad', + ParamOut='Param', + LearningRate='LearningRate') + sgd_op.run(scope, place) + + # get and compare result + result_array = np.array(w_tensor) + assert (result_array == w_after_optimize).all() + + def test_sparse_parameter_sgd(self): + places = [core.CPUPlace()] + # do not support GPU kernel currently + for place in places: + self.check_with_place(place) + + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py index 33d60c7e31ce0817ad26ea1c1c974339936052d3..279f3073f73d1c36f54bb901d92441a7403ac23f 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py @@ -68,6 +68,17 @@ class TestSoftmaxCUDNNOp(TestSoftmaxOp): self.use_cudnn = True +class TestSoftmaxFP16Op(TestSoftmaxOp): + def init_kernel_type(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + + class TestSoftmaxFP16CUDNNOp(TestSoftmaxOp): def 
init_kernel_type(self): self.use_cudnn = True diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 186b91c226accbe1c2d5465d6244b9438eec9979..460eb3b3491a0626eb6ecbf89132e24177a2adaa 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -471,6 +471,7 @@ class Input(Cfg): maxout=None, spp=None, pad=None, + upsample=None, format=None, nnz=None, is_static=None, @@ -983,6 +984,13 @@ class Pad(Cfg): self.add_keys(locals()) +@config_class +class Upsample(Cfg): + def __init__(self, scale, scale_y, pad_out_x, pad_out_y, upsample_size, + upsample_size_y): + self.add_keys(locals()) + + @config_class class Norm(Cfg): def __init__(self, @@ -2380,6 +2388,46 @@ class SpatialPyramidPoolLayer(LayerBase): self.set_cnn_layer(name, 1, output_x, spp_conf.image_conf.channels) +@config_layer('upsample') +class UpsampleLayer(LayerBase): + def __init__(self, name, inputs, **xargs): + super(UpsampleLayer, self).__init__( + name, 'upsample', 0, inputs=inputs, **xargs) + + input_layer = self.get_input_layer(0) + image_conf = self.config.inputs[0].upsample_conf.image_conf + image_conf.img_size = input_layer.width + image_conf.img_size_y = input_layer.height + image_conf.channels = input_layer.size / (input_layer.width * + input_layer.height) + + upsample = self.inputs[0].upsample + output_x = 0 + output_y = 0 + output_size = 0 + + if upsample.scale: + self.config.inputs[0].upsample_conf.scale = upsample.scale + self.config.inputs[0].upsample_conf.scale_y = upsample.scale_y + output_x = input_layer.width * upsample.scale + output_y = input_layer.height * upsample.scale_y + self.config.inputs[0].upsample_conf.pad_out_x = upsample.pad_out_x + self.config.inputs[0].upsample_conf.pad_out_y = upsample.pad_out_y + if upsample.upsample_size: + self.config.inputs[ + 0].upsample_conf.upsample_size = upsample.upsample_size + self.config.inputs[ + 0].upsample_conf.upsample_size_y = upsample.upsample_size_y + output_x = upsample.upsample_size + output_y = upsample.upsample_size_y + + output_size = image_conf.channels * output_x * output_y + + self.set_layer_height_width(output_y, output_x) + self.set_layer_depth(input_layer.depth) + self.set_layer_size(output_size) + + @config_layer('pad') class PadLayer(LayerBase): def __init__(self, name, inputs, **xargs): diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 3684d1e8f73a21d9c6f2a5985f8b40ed6984057b..ebc31b23e0f5504b4bebccabe996b054c7fbce3b 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -148,6 +148,7 @@ __all__ = [ 'resize_layer', 'sub_seq_layer', 'scale_sub_region_layer', + 'upsample_layer', 'factorization_machine', ] @@ -166,6 +167,7 @@ class LayerType(object): SEQUENCE_RESHAPE = 'seqreshape' POOLING_MAX = 'max' POOLING_AVG = 'average' + UPSAMPLE_LAYER = 'upsample' FC_LAYER = 'fc' COST = 'cost' COSINE_SIM_VEC = 'cos_vm' @@ -3014,6 +3016,83 @@ def img_pool3d_layer(input, size=l.config.size) +@wrap_name_default("upsample") +@layer_support() +def upsample_layer(input, + name=None, + scale=None, + scale_y=None, + upsample_size=None, + upsample_size_y=None, + pad_out_x=False, + pad_out_y=False, + layer_attr=None): + """ + The DePooling process. + Inputs should be a list of length 2. The first input is a layer, + and the second input should be the MaxWithMaskPoolingLayer + + The example usage is: + + .. 
diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py
index 3684d1e8f73a21d9c6f2a5985f8b40ed6984057b..ebc31b23e0f5504b4bebccabe996b054c7fbce3b 100644
--- a/python/paddle/trainer_config_helpers/layers.py
+++ b/python/paddle/trainer_config_helpers/layers.py
@@ -148,6 +148,7 @@ __all__ = [
     'resize_layer',
     'sub_seq_layer',
     'scale_sub_region_layer',
+    'upsample_layer',
     'factorization_machine',
 ]

@@ -166,6 +167,7 @@ class LayerType(object):
     SEQUENCE_RESHAPE = 'seqreshape'
     POOLING_MAX = 'max'
     POOLING_AVG = 'average'
+    UPSAMPLE_LAYER = 'upsample'
     FC_LAYER = 'fc'
     COST = 'cost'
     COSINE_SIM_VEC = 'cos_vm'
@@ -3014,6 +3016,83 @@ def img_pool3d_layer(input,
         size=l.config.size)


+@wrap_name_default("upsample")
+@layer_support()
+def upsample_layer(input,
+                   name=None,
+                   scale=None,
+                   scale_y=None,
+                   upsample_size=None,
+                   upsample_size_y=None,
+                   pad_out_x=False,
+                   pad_out_y=False,
+                   layer_attr=None):
+    """
+    The depooling (unpooling) process.
+    The input should be a list of length 2: the first element is a layer,
+    and the second element should be a MaxWithMask pooling layer.
+
+    The example usage is:
+
+    ..  code-block:: python
+
+        pool1 = paddle.v2.layer.img_pool(input=layer1, pool_size=2, stride=2,
+                                         pool_type=paddle.pooling.MaxWithMask())
+        upsample = paddle.v2.layer.upsample(input=[layer1, pool1])
+
+    :param name: The name of this layer. It is optional.
+    :type name: basestring
+    :param input: contains an input layer and a MaxWithMask pooling layer.
+    :type input: list | tuple | collections.Sequence
+    :param scale: outputSize = scale * inputSize.
+    :type scale: int | None
+    :param scale_y: the upsample scale on the y axis. It is equal to scale
+                    if its value is None.
+    :type scale_y: int | None
+    :param upsample_size: specify the output size in the x dimension.
+    :type upsample_size: int | None
+    :param upsample_size_y: specify the output size in the y dimension.
+    :type upsample_size_y: int | None
+    :param pad_out_x: pad to the exact x dimension size. This parameter only
+                      works when scale is 2.
+    :type pad_out_x: bool
+    :param pad_out_y: pad to the exact y dimension size. This parameter only
+                      works when scale is 2.
+    :type pad_out_y: bool
+    :param layer_attr: The extra layer attribute.
+    :type layer_attr: ExtraLayerAttribute
+    :return: LayerOutput object.
+    :rtype: LayerOutput
+    """
+
+    assert (scale is not None) or (upsample_size is not None), \
+        'Either scale or upsample_size must be specified.'
+
+    assert len(input) == 2, 'The input of upsample_layer must have length 2.'
+
+    assert input[1].layer_type == LayerType.POOL_LAYER, \
+        'The second input should be the MaxPoolWithMask layer.'
+
+    scale_y = scale \
+        if scale_y is None else scale_y
+    upsample_size_y = upsample_size \
+        if upsample_size_y is None else upsample_size_y
+
+    layer_type = LayerType.UPSAMPLE_LAYER
+
+    layer = Layer(
+        name=name,
+        type=layer_type,
+        inputs=[
+            Input(
+                input[0].name,
+                upsample=Upsample(scale, scale_y, pad_out_x, pad_out_y,
+                                  upsample_size, upsample_size_y)),
+            Input(input[1].name)
+        ],
+        **ExtraLayerAttribute.to_kwargs(layer_attr))
+
+    sz = layer.config.size
+
+    return LayerOutput(name, layer_type=layer_type, parents=input, size=sz)
+
+
 @wrap_name_default("spp")
 @layer_support()
 def spp_layer(input,
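Reviewer note: a sketch of how the new layer is meant to be wired in a v2 config, assuming the usual unpooling pattern. The data sizes and the conv2 stage are illustrative only; just the MaxWithMask pooling and the new upsample call come from this patch:

    import paddle.v2 as paddle

    # illustrative 1-channel 8x8 input
    img = paddle.layer.data(
        name='image', type=paddle.data_type.dense_vector(64), height=8, width=8)
    conv1 = paddle.layer.img_conv(
        input=img, filter_size=3, num_channels=1, num_filters=4, padding=1)
    # MaxWithMask pooling records the argmax mask that upsample consumes
    pool1 = paddle.layer.img_pool(
        input=conv1, pool_size=2, stride=2,
        pool_type=paddle.pooling.MaxWithMask())
    conv2 = paddle.layer.img_conv(
        input=pool1, filter_size=3, num_channels=4, num_filters=4, padding=1)
    # scale=2 doubles both sides; scale_y falls back to scale here
    up = paddle.layer.upsample(input=[conv2, pool1], scale=2)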
diff --git a/python/paddle/trainer_config_helpers/tests/CMakeLists.txt b/python/paddle/trainer_config_helpers/tests/CMakeLists.txt
index 580aef935b5cec385a88fb0b4f5b9a5ddeddb40c..30e0b9906c406d846d4b086a1a1c89587394afea 100644
--- a/python/paddle/trainer_config_helpers/tests/CMakeLists.txt
+++ b/python/paddle/trainer_config_helpers/tests/CMakeLists.txt
@@ -1,17 +1,17 @@
 #################### test_config_parser #########################
 add_test(NAME layers_test
-  COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python/
+  COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_BINARY_DIR}/python/
         ${PYTHON_EXECUTABLE} ${PADDLE_SOURCE_DIR}/python/paddle/trainer_config_helpers/tests/layers_test.py
     WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/python/paddle)

 add_test(NAME test_reset_hook
-  COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python/
+  COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_BINARY_DIR}/python/
         ${PYTHON_EXECUTABLE} ${PADDLE_SOURCE_DIR}/python/paddle/trainer_config_helpers/tests/test_reset_hook.py
     WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/python/paddle)

 add_paddle_exe(protobuf_equal ProtobufEqualMain.cpp)
 add_test(NAME test_layerHelpers
-  COMMAND
-    ${PADDLE_SOURCE_DIR}/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh ${PYTHON_EXECUTABLE}
+  COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_BINARY_DIR}/python/
+    ${PADDLE_BINARY_DIR}/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh ${PYTHON_EXECUTABLE}
     ${CMAKE_CURRENT_BINARY_DIR}/protobuf_equal
 )
diff --git a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh
index 8a318879630cd491573afcaf798dda2ca75e335d..44a75a60cc78e85f85d111a911999b7812db0f49 100755
--- a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh
+++ b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh
@@ -2,7 +2,6 @@
 set -e
 cd `dirname $0`

-export PYTHONPATH=$PWD/../../../../
 protostr=$PWD/protostr
 . file_list.sh
diff --git a/python/setup.py.in b/python/setup.py.in
index 08a448934d3248b46618acdef9e1894f94a93893..2707d34a2ab327ab4282aa7473d78a3f5c08e890 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -58,7 +58,7 @@ def mkl():
                 'istaged': ISTAGED,
                 'with_mkl': '@WITH_MKL@'})

-write_version_py(filename='@PADDLE_SOURCE_DIR@/python/paddle/version.py')
+write_version_py(filename='@PADDLE_BINARY_DIR@/python/paddle/version.py')


 packages=['paddle',
@@ -109,7 +109,7 @@ package_dir={
     'paddle.fluid.proto': '${PADDLE_BINARY_DIR}/paddle/fluid/framework',
 }
 if '${WITH_FLUID_ONLY}'== 'OFF':
-    package_dir['py_paddle']='${PADDLE_SOURCE_DIR}/paddle/py_paddle'
+    package_dir['py_paddle']='${PADDLE_BINARY_DIR}/python/py_paddle'

 paddle_rt_lib_dir = 'lib'
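Reviewer note on the setup.py.in change: setuptools' package_dir maps a package name onto the directory its files are read from, so pointing py_paddle at ${PADDLE_BINARY_DIR} makes the build tree, rather than the source tree, authoritative for packaging. A generic sketch of the mechanism, with hypothetical paths:

    from setuptools import setup

    # sources for package "pkg" live under build/python/pkg rather than ./pkg,
    # so package_dir redirects setuptools there
    setup(
        name='example',
        version='0.1',
        packages=['pkg'],
        package_dir={'pkg': 'build/python/pkg'},
    )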