diff --git a/cmake/external/grpc.cmake b/cmake/external/grpc.cmake
index 1330ef82dc547aecb93e8390cf45ae2de1caaa66..219ea1b90881ccdbaf3fd41510fb4f2a8b6ec0f4 100644
--- a/cmake/external/grpc.cmake
+++ b/cmake/external/grpc.cmake
@@ -42,7 +42,7 @@ ExternalProject_Add(
   # Disable -Werror, otherwise the compile will fail in MacOS.
   # It seems that we cannot configure that by make command.
   # Just dry run make command and remove `-Werror`, then use a shell to run make commands
-  BUILD_COMMAND ${BUILD_CMD}
+  BUILD_COMMAND ${BUILD_CMD} HAS_SYSTEM_PROTOBUF=false -s -j8 static grpc_cpp_plugin
   INSTALL_COMMAND make prefix=${GRPC_INSTALL_DIR} install
 )
diff --git a/doc/design/refactor/distributed_architecture.md b/doc/design/refactor/distributed_architecture.md
index ac7e98ccf1aadbb973a4801fde842375cf63448c..2b4f921ae93c3b443ed62a28b1fa9fbda14f73ab 100644
--- a/doc/design/refactor/distributed_architecture.md
+++ b/doc/design/refactor/distributed_architecture.md
@@ -2,106 +2,70 @@

 ## Abstract

-PaddlePaddle v0.10.0 uses the "trainer-parameter server"
-architecture. We run multiple replicated instances of trainers (runs
-the same code written by the user) and parameter servers for
-distributed training. This architecture served us well, but has some
-limitations:
+PaddlePaddle version 0.10.0 uses the "trainer-parameter server" architecture. We run multiple instances of trainers (where each trainer runs the same model) and parameter servers for distributed training. This architecture serves us well, but it has a few limitations:

-1. Need to write special code to handle tasks which should only be run
-   by a single trainer. E.g., initializing model and saving model.
+1. There is a need to write special code to handle tasks which should only be run by a single trainer, e.g., initializing the model and saving the model.

-2. Model parallelism is hard: need to write if-else branches conditioned
-   on the trainer ID to partition model onto each trainer, and manually
-   write the inter-model-shard communication code.
+2. Model parallelism is hard: it would need if-else branches conditioned on the trainer ID to partition the model onto the trainers, and manually written inter-model-shard communication code to communicate between different trainers.

-3. The user can not directly specify the parameter update rule: need
-   to modify the parameter server C++ code and compile a new
-   binary. This adds complication for researchers: A lot of extra
-   effort is required. Besides, the training job submission program
-   may not allow running arbitrary binaries.
+3. The user can not directly specify the parameter update rule: this would require modifying the parameter server code and compiling a new binary. That makes things more complicated for researchers: a lot of extra effort is required to make it work. Besides, the training job submission program may not allow running arbitrary binaries.

-This design doc discusses PaddlePaddle's new distributed training
-architecture that addresses the above limitations.
+This design doc discusses PaddlePaddle's new distributed training architecture that addresses the above-mentioned limitations.

 ## Analysis

-We will assume the user writes the trainer program by Python, the same
-analysis holds if the trainer program is written in C++.
+The assumption is that the user writes the trainer program in Python; the same analysis holds if the trainer program is written in C++.
 ### Limitation 1

-If we look at the Python code that the user writes, there are two
-kinds of functionalities:
+There are two basic functionalities in the trainer program:

-- The training logic such as load / save model and print log.
-- The neural network definition such as the definition of the data
-  layer, the fully connected layer, the cost function and the
+1. The training logic such as loading / saving the model and printing out the logs.
+2. The neural network definition such as the definition of the data layer, the fully connected layer, the cost function and the
   optimizer.

-When we training with PaddlePaddle v0.10.0 distributedly, multiple
-replicated Python instances are running on different nodes: both the
-training logic and the neural network computation is replicated.
+When we train using PaddlePaddle v0.10.0 in a distributed fashion, multiple instances of the same Python code run on different nodes; hence both the training logic and the neural network computation logic are replicated.

-The tasks that should only run once all belong to the training logic,
-if we only replicate the neural network computation, but do **not**
-replicate the training logic, the limitation could be solved.
+The tasks that only need to be run once belong to the training logic. Hence if we only replicate the neural network computation part, and do **not** replicate the training logic, the limitation mentioned above can be avoided.

 ### Limitation 2

-Model parallelism means running a single model on multiple nodes by
-partitioning the model onto different nodes and managing the
-inter-model-shard communications.
+Model parallelism means that a single model is partitioned into different components and each node runs one of the components separately. This comes at the extra cost of managing the inter-model-shard communication between the nodes.

-PaddlePaddle should be able to modify the nerual network computation
-definition to support model parallelism automatically. However, the
-computation is only specified in Python code, and PaddlePaddle can not
-modify Python code.
+PaddlePaddle should ideally be able to modify the neural network computation and figure out the support for model parallelism automatically. However, the computation is only specified in Python code which sits outside of PaddlePaddle, hence PaddlePaddle can not support the feature in this setup.

-Just like compiler uses a intermediate representation (IR) so that
-programmer does not need to manually optimize their code in most of
-the cases - the compiler will optimize the IR:
+Similar to how a compiler uses an intermediate representation (IR) so that the programmer does not need to manually optimize their code for most of the cases, we can have an intermediate representation in PaddlePaddle as well. The compiler optimizes the IR as follows:

-We can have our own IR too: PaddlePaddle can support model parallel by
-converting the IR so the user no longer need to manually do it in
-Python:
+PaddlePaddle can support model parallelism by converting the IR so that the user no longer needs to manually perform the computation and operations in the Python component:

-The IR for PaddlePaddle after refactor is called `Block`, it specifies
-the computation dependency graph and the variables used in the
-computation.
+The IR for PaddlePaddle after refactoring is called a `Block`; it specifies the computation dependency graph and the variables used in the computation, as sketched below.
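+To make this concrete, below is a minimal Python sketch of the kind of information a `Block` carries. The names used here (`Var`, `Op`, `Block.append_op`) are illustrative only, not the actual PaddlePaddle classes:
+
+```Python
+class Var(object):
+    """A named variable in the computation graph."""
+
+    def __init__(self, name, shape):
+        self.name = name
+        self.shape = shape
+
+
+class Op(object):
+    """An operator: consumes input vars, produces output vars."""
+
+    def __init__(self, op_type, inputs, outputs):
+        self.op_type = op_type
+        self.inputs = inputs
+        self.outputs = outputs
+
+
+class Block(object):
+    """The IR: a dependency graph of ops over variables."""
+
+    def __init__(self):
+        self.vars = {}
+        self.ops = []
+
+    def var(self, name, shape):
+        self.vars[name] = Var(name, shape)
+        return self.vars[name]
+
+    def append_op(self, op_type, inputs, outputs):
+        self.ops.append(Op(op_type, inputs, outputs))
+
+
+# A fully connected layer expressed as two ops in a block.
+block = Block()
+x = block.var("x", [1, 784])
+w = block.var("fc.w", [784, 10])
+b = block.var("fc.b", [10])
+out = block.var("fc.out", [1, 10])
+block.append_op("mul", inputs=[x, w], outputs=[out])
+block.append_op("elementwise_add", inputs=[out, b], outputs=[out])
+```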
 ### Limitation 3

-The user can not directly specify the parameter update rule for the
-parameter server because the parameter server does not use the same
-computation definition as the trainer. Instead, the update rule is
-baked in the parameter server. The user can not specify the update
-rule in the same way of specifying the trainer computation.
+The user can not directly specify the parameter update rule for the parameter server in the Python module, since the parameter server does not use the same computation definition as the trainer. Instead, the update rule is baked inside the parameter server. The user can not specify the update rule explicitly.

-This could be fixed by making the parameter server run the same
-computation definition as the trainer. For a detailed explanation,
-please
-see
+This could be fixed by making the parameter server run the same computation definition as the trainer (the user's Python module). For a detailed explanation, refer to this document -
 [Design Doc: Operation Graph Based Parameter Server](./dist_train.md)

 ## Distributed Training Architecture

-The new distributed training architecture can address the above
-limitations. Below is the illustration:
+The revamped distributed training architecture can address the above-discussed limitations. Below is an illustration of how it does so:

-The architecture includes major components: *PaddlePaddle Python*,
-*PaddlePaddle converter* and *PaddlePaddle runtime*:
+The major components in the architecture are: *PaddlePaddle Python*, *PaddlePaddle converter* and *PaddlePaddle runtime*.

 ### PaddlePaddle Python

-PaddlePaddle Python is the Python library that user's Python trainer
-invoke to build the neural network topology, start training, etc.
+PaddlePaddle Python is the Python library that the user's Python code invokes to read the data, build the neural network topology, start training, etc.

 ```Python
 paddle.init()
@@ -117,102 +81,60 @@ for i in range(1000):
 	print cost_val
 ```

-The code above is a typical Python trainer code, the neural network
-topology is built using helper functions such as
-`paddle.layer.fc`. The training is done by calling `session.eval`
-iteratively.
+The above code is a typical Python trainer; the neural network topology is built using helper functions such as `paddle.layer.fc`. Training is done by calling `session.eval` iteratively.

 #### session.eval

-As shown in the graph, `session.eval` sends the IR and the evaluation
-inputs/targets to the PaddlePaddle cluster for evaluation. The
-targets can be any variable in the computation graph. When the target
-is the `optimizer` variable, the neural network will be optimized
-once. When the target is the `cost` variable, `session.eval` returns
-the cost value.
+As shown in the graph, `session.eval` sends the IR and the evaluation inputs/targets to the PaddlePaddle cluster for evaluation. The targets can be any variable in the computation graph. When the target is, say, the `optimizer` variable, the neural network will be optimized once. When the target is the `cost` variable, `session.eval` returns the cost value. Based on what the target is, an appropriate action is taken, as the sketch below shows.

-The Python `session` is a wrapper of the C++ `Session` class. For more
-information about `Session`, please
-see [Design Doc: Session](./session.md).
+The Python `session` is a wrapper of the C++ `Session` class. For more information about `Session`, refer to this document - [Design Doc: Session](./session.md).
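+For instance, with the hypothetical `session` API from the example above, the two behaviors of `session.eval` would look like this (a sketch, not the final API):
+
+```Python
+# Pure evaluation: fetch the cost without updating any parameters.
+cost_val = session.eval(targets=[cost])
+
+# Training: listing the optimizer as a target triggers one update step.
+cost_val, _ = session.eval(targets=[cost, optimizer])
+```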
 ### PaddlePaddle Converter

-PaddlePaddle converter automatically converts the IR in the request
-(IR and evaluation inputs/targets) from PaddlePaddle Python to new
-partitioned IRs and dispatch the new IRs and evaluation inputs/targets
-to different PaddlePaddle runtimes. Below are the steps:
+The PaddlePaddle converter automatically converts the IR in the request (IR and evaluation inputs/targets) from PaddlePaddle Python to partitioned IRs and dispatches the new IRs and evaluation inputs/targets to different PaddlePaddle runtimes. Below are the steps that are followed:

-1. Add `feed` OP that feeds the eval inputs, and `fetch` OP that
-   fetches the eval targets to the IR.
+1. Add a `feed` OP that feeds the eval inputs, and a `fetch` OP that fetches the eval targets to the IR.

-1. Extract a new computation (sub)graph with `feed` and `fetch` OP as
-   the boundary. The runtime does not need to run the OP that is not
-   dependent by the `fetch` OP.
+2. Extract a new computation (sub)graph with the `feed` and `fetch` OPs as the boundary. The runtime does not need to run OPs that the `fetch` OPs do not depend on.

-1. Optimizes the computation graph.
+3. Optimize the computation graph.

-1. Place the OPs in the graph onto different devices on different
-   PaddlePaddle runtime according to a placement algorithm and device
-   constraint specified by the user.
+4. Place the OPs in the graph onto different devices on different PaddlePaddle runtimes according to a placement algorithm and the device constraints specified by the user.

-1. Partition the graph according to runtime boundaries and add `send` /
-   `recv` OP pair on the runtime boundaries.
+5. Partition the graph according to runtime boundaries and add a `send`/`recv` OP pair on each runtime boundary.

-1. Dispatch the partitioned graph to different PaddlePaddle runtimes.
+6. Dispatch the partitioned graph to different PaddlePaddle runtimes.

+7. PaddlePaddle runtimes with the `fetch` OP report the evaluation results back to the converter, and the converter reports them back to PaddlePaddle Python.

-1. PaddlePaddle runtimes with the `fetch` OP reports evaluation
-   results back to the converter, the convert reports the evaluation
-   results back to the PaddlePaddle Python.
-
 The output IRs will be cached to optimize the conversion latency.

 #### Placement Algorithm

-Our first implementation will only support "trainer-parameter server"
-placement: the parameters, initializers, and optimizers are placed on
-the PaddlePaddle runtimes with the parameter server role. And
-everything else will be placed on the PaddlePaddle runtimes with the
-trainer role. This has the same functionality of our
-"trainer-parameter server" architecture of PaddlePaddle v0.10.0, but
-is more general and flexible.
+Our first implementation will only support "trainer-parameter server" placement: the parameters, initializers, and optimizers are all placed on the PaddlePaddle runtimes with the parameter server role. Everything else will be placed on the PaddlePaddle runtimes with the trainer role. This has the same functionality as the "trainer-parameter server" architecture of PaddlePaddle v0.10.0, but is more generic and flexible, as the sketch below shows.

-In the future, we will implement the general placement algorithm,
-which makes placements according to the input IR, and a model of
-device computation time and device communication time. Model
-parallelism requires the general placement algorithm.
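+In pseudo-code, and reusing the illustrative `Op` class from the sketch in the Analysis section, this first-cut placement rule is simply (an illustration, not the converter's actual code):
+
+```Python
+def place_op(op, param_names):
+    """Assign an OP to a runtime role under the first-cut rule.
+
+    param_names: the names of the model parameters, e.g. {"fc.w", "fc.b"}.
+    """
+    # OPs that create or update a parameter (initializers, optimizers)
+    # are placed on the runtimes with the parameter server role.
+    if any(out.name in param_names for out in op.outputs):
+        return "pserver"
+    # Everything else (forward / backward computation) goes to trainers.
+    return "trainer"
+```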
+In the future, a more general placement algorithm should be implemented, which makes placements according to the input IR, and a model of device computation time and device communication time. Model parallelism requires such a generic placement algorithm.

 ### PaddlePaddle Runtime

-The PaddlePaddle runtime owns multiple devices (e.g., CPUs, GPUs) and
-runs the IR. The runtime does not need to do OP placement since it's
-already done by the converter.
+The PaddlePaddle runtime owns multiple devices (e.g., CPUs, GPUs) and runs the IR. The runtime does not need to do OP placement since it is already done by the converter.

 ### Local Training Architecture

-The local training architecture will be the same as the distributed
-training architecture, the differences are everything runs locally,
-and there is just one PaddlePaddle runtime:
+The local training architecture will be the same as the distributed training architecture, the difference is that everything runs locally, and there is just one PaddlePaddle runtime:

 ### Training Data

-In PaddlePaddle v0.10.0, training data is typically read
-with [data reader](../reader/README.md) from Python. This approach is
-no longer efficient when training distributedly since the Python
-process no longer runs on the same node with the trainer processes,
-the Python reader will need to read from the distributed filesystem
-(assuming it has the access) and send to the trainers, doubling the
-network traffic.
-
-When doing distributed training, the user can still use Python data
-reader: the training data are sent with `session.eval`. However should
-be used for debugging purpose only. The users are encouraged to use
-the read data OPs.
+In PaddlePaddle v0.10.0, training data is typically read with a [data reader](../reader/README.md) from Python. This approach is no longer efficient when training in a distributed fashion since the Python process no longer runs on the same node as the trainer processes. The Python reader would need to read from the distributed filesystem (assuming it has the required access) and send the data to the trainers, doubling the network traffic.
+
+When doing distributed training, the user can still use the Python data reader: the training data is sent with `session.eval`. However, this should be used for debugging purposes only. The users are encouraged to use the read data OPs.

 ## References:

diff --git a/doc/getstarted/build_and_install/build_from_source_cn.rst b/doc/getstarted/build_and_install/build_from_source_cn.rst
index 55665ac8edfcf20290936fba4c3e410b33e1f3d4..3c525bdad6f6118dcd560e2cb7bfaf89737c1362 100644
--- a/doc/getstarted/build_and_install/build_from_source_cn.rst
+++ b/doc/getstarted/build_and_install/build_from_source_cn.rst
@@ -1,4 +1,4 @@
-从源码编译PaddlePaddle
+从源码编译
 ======================

 .. _build_step:
@@ -7,8 +7,11 @@
 ----------------

 PaddlePaddle主要使用 `CMake <https://cmake.org>`_ 以及GCC, G++作为编译工具。
-我们推荐您使用PaddlePaddle编译环境镜像完成编译,这样可以免去单独安装编译依赖的步骤,可选的不同编译环境
+我们推荐您使用PaddlePaddle Docker编译环境镜像完成编译,这样可以免去单独安装编译依赖的步骤,可选的不同编译环境Docker镜像
 可以在 `这里 `_ 找到。
+
+如果您选择不使用Docker镜像,则需要在本机安装下面章节列出的 `编译依赖`_ 之后才能开始编译。
+
 编译PaddlePaddle,需要执行:

 .. code-block:: bash
@@ -22,7 +25,6 @@ PaddlePaddle主要使用 `CMake <https://cmake.org>`_ 以及GCC, G++作为编译
    cd build
    cmake -DWITH_GPU=OFF -DWITH_TESTING=OFF ..
    make
-
 编译完成后会在build/python/dist目录下生成输出的whl包,可以选在在当前机器安装也可以拷贝到目标机器安装:

 .. code-block:: bash
@@ -31,7 +33,33 @@ PaddlePaddle主要使用 `CMake <https://cmake.org>`_ 以及GCC, G++作为编译

    pip install python/dist/*.whl

-.. _build_step:
+.. _run_test:
+
+执行单元测试
+----------------
+
+如果您期望在编译完成后立即执行所有的单元测试,可以按照下面的方法:
+
+使用Docker的情况下,设置 :code:`RUN_TEST=ON` 和 :code:`WITH_TESTING=ON` 就会在完成编译之后,立即执行单元测试。
+开启 :code:`WITH_GPU=ON` 可以指定同时执行GPU上的单元测试。
+
+.. code-block:: bash
+
+   docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=ON" -e "RUN_TEST=ON" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 bash -x paddle/scripts/docker/build.sh
+
+如果不使用Docker,执行ctest命令即可:
+
+.. code-block:: bash
+
+   mkdir build
+   cd build
+   cmake -DWITH_GPU=OFF -DWITH_TESTING=ON ..
+   make
+   ctest
+   # 指定执行其中一个单元测试 test_mul_op
+   ctest -R test_mul_op
+
+.. _compile_deps:

 编译依赖
 ----------------
diff --git a/doc/getstarted/build_and_install/build_from_source_en.rst b/doc/getstarted/build_and_install/build_from_source_en.rst
index 9a3ed7dd57137ddf3d6213222c17433822b01dbb..76fbc43de2e83580dd79b874507c103533022436 100644
--- a/doc/getstarted/build_and_install/build_from_source_en.rst
+++ b/doc/getstarted/build_and_install/build_from_source_en.rst
@@ -1,4 +1,4 @@
-Build PaddlePaddle from Sources
+Build from Sources
 ==========================

 .. _build_step:
@@ -9,14 +9,18 @@ How To Build

 PaddlePaddle mainly uses `CMake <https://cmake.org>`_ and GCC, G++ as compile
 tools. We recommend you to use our pre-built Docker image to run the build
 to avoid installing dependencies by yourself. We have several build environment
-Docker images `here `_.
+Docker images `here `_ .
+
+If you choose not to use the Docker image for your build, you need to install the
+below `Compile Dependencies`_ before running the build.
+
 Then run:

 .. code-block:: bash

    git clone https://github.com/PaddlePaddle/Paddle.git
    cd Paddle
-   # run the following command to build CPU-Only binaries if you are using docker
+   # run the following command to build CPU-only binaries if you are using docker
    docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=OFF" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 bash -x paddle/scripts/docker/build.sh
    # else run these commands
    mkdir build
@@ -32,7 +36,35 @@ machine or copy it to the target machine.

    pip install python/dist/*.whl

-.. _build_step:
+
+.. _run_test:
+
+Run Tests
+----------------
+
+If you wish to run the tests, you may follow the steps below:
+
+When using Docker, setting :code:`RUN_TEST=ON` and :code:`WITH_TESTING=ON` will run the tests immediately after the build.
+Setting :code:`WITH_GPU=ON` will also run the tests on GPU.
+
+.. code-block:: bash
+
+   docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=ON" -e "RUN_TEST=ON" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 bash -x paddle/scripts/docker/build.sh
+
+If you don't use Docker, just run :code:`ctest` to start the tests:
+
+.. code-block:: bash
+
+   mkdir build
+   cd build
+   cmake -DWITH_GPU=OFF -DWITH_TESTING=ON ..
+   make
+   ctest
+   # run a single test like test_mul_op
+   ctest -R test_mul_op
+
+
+.. _compile_deps:

 Compile Dependencies
 ----------------
diff --git a/doc/getstarted/build_and_install/docker_install_cn.rst b/doc/getstarted/build_and_install/docker_install_cn.rst
index 07933b2e0bbca809f6c4e90e7ff8f71d1b3304b2..f78b1fb0e11aa028a4b7abb5270740b97f8039e9 100644
--- a/doc/getstarted/build_and_install/docker_install_cn.rst
+++ b/doc/getstarted/build_and_install/docker_install_cn.rst
@@ -1,4 +1,4 @@
-使用Docker安装运行PaddlePaddle
+使用Docker安装运行
 ================================

 使用Docker安装和运行PaddlePaddle可以无需考虑依赖环境即可运行。并且也可以在Windows的docker中运行。
diff --git a/doc/getstarted/build_and_install/docker_install_en.rst b/doc/getstarted/build_and_install/docker_install_en.rst
index 9b977c9c72e36b4b47cbf56ae848ab83d9895783..d7acc7aeb744b19d83acb520d07c8551168dd096 100644
--- a/doc/getstarted/build_and_install/docker_install_en.rst
+++ b/doc/getstarted/build_and_install/docker_install_en.rst
@@ -1,4 +1,4 @@
-PaddlePaddle in Docker Containers
+Run in Docker Containers
 =================================

 Run PaddlePaddle in Docker container so that you don't need to care about
diff --git a/doc/getstarted/build_and_install/pip_install_cn.rst b/doc/getstarted/build_and_install/pip_install_cn.rst
index 41312da48c055826186a560ef9653653e45d1047..b26bf4c95cb18f36408eb75894e8b9b674efc67b 100644
--- a/doc/getstarted/build_and_install/pip_install_cn.rst
+++ b/doc/getstarted/build_and_install/pip_install_cn.rst
@@ -1,4 +1,4 @@
-使用pip安装PaddlePaddle
+使用pip安装
 ================================

 PaddlePaddle可以使用常用的Python包管理工具
diff --git a/doc/getstarted/build_and_install/pip_install_en.rst b/doc/getstarted/build_and_install/pip_install_en.rst
index 4f295e14baa1465a93b8eef1b3f3b6b47eeea905..113790e4e4ca116e91f11f8a233eae874d9d1b7a 100644
--- a/doc/getstarted/build_and_install/pip_install_en.rst
+++ b/doc/getstarted/build_and_install/pip_install_en.rst
@@ -1,4 +1,4 @@
-Install PaddlePaddle Using pip
+Install Using pip
 ================================

 You can use current widely used Python package management
diff --git a/doc/howto/index_cn.rst b/doc/howto/index_cn.rst
index 76d3e0a0092f89005605a23e14e712530112a5ac..eb95356c67c5df22e4f543f958eb31d79f2c6195 100644
--- a/doc/howto/index_cn.rst
+++ b/doc/howto/index_cn.rst
@@ -19,7 +19,6 @@
 .. toctree::
    :maxdepth: 1

-   dev/build_cn.rst
    dev/write_docs_cn.rst

 模型配置
diff --git a/doc/howto/index_en.rst b/doc/howto/index_en.rst
index 1b6034be4edffd2cbc822018b733b9a3836ea84a..1fbfcd260b912078f00ed5b720ed607db725c4e2 100644
--- a/doc/howto/index_en.rst
+++ b/doc/howto/index_en.rst
@@ -18,7 +18,6 @@ Development
 .. toctree::
    :maxdepth: 1

-   dev/build_en.rst
    dev/new_layer_en.rst
    dev/contribute_to_paddle_en.md
diff --git a/paddle/operators/conv_cudnn_op.cu.cc b/paddle/operators/conv_cudnn_op.cu.cc
index a9763d424801cfced5fe4c4718a335a24b81cfdc..3f97dc7ee0a61944a8a57314b5ec7f33df619bf3 100644
--- a/paddle/operators/conv_cudnn_op.cu.cc
+++ b/paddle/operators/conv_cudnn_op.cu.cc
@@ -63,7 +63,7 @@ class CudnnConvOpKernel : public framework::OpKernel<T> {
     cudnnConvolutionDescriptor_t cudnn_conv_desc =
         conv_desc.descriptor<T>(paddings, strides, dilations);

-#if CUDNN_VERSION_MIN(7, 0, 0)
+#if CUDNN_VERSION_MIN(7, 0, 1)
     // cudnn 7 can support groups, no need to do it mannually
     // FIXME(typhoonzero): find a better way to disable groups
     // rather than setting it to 1.
@@ -180,7 +180,7 @@ class CudnnConvGradOpKernel : public framework::OpKernel<T> {
     cudnnConvolutionDescriptor_t cudnn_conv_desc =
         conv_desc.descriptor<T>(paddings, strides, dilations);

-#if CUDNN_VERSION_MIN(7, 0, 0)
+#if CUDNN_VERSION_MIN(7, 0, 1)
     // cudnn 7 can support groups, no need to do it mannually
     // FIXME(typhoonzero): find a better way to disable groups
     // rather than setting it to 1.
diff --git a/paddle/platform/cuda_profiler.h b/paddle/platform/cuda_profiler.h
new file mode 100644
index 0000000000000000000000000000000000000000..b6311cb23d695c3cd851bcca120c24cced7fdd62
--- /dev/null
+++ b/paddle/platform/cuda_profiler.h
@@ -0,0 +1,58 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+#include <cuda_profiler_api.h>
+#include <array>
+#include <cstdlib>
+#include <cstring>
+#include <fstream>
+#include <string>
+#include <vector>
+
+#include "paddle/platform/enforce.h"
+
+namespace paddle {
+namespace platform {
+
+void CudaProfilerInit(std::string output_file, std::string output_mode,
+                      std::vector<std::string> config_flags) {
+  std::array<char, 128> buf;
+  std::string tmpl = "/tmp/cuda_profile_config.XXXXXX";
+  PADDLE_ENFORCE_LT(tmpl.size(), buf.size());
+  memcpy(buf.data(), tmpl.data(), tmpl.size() + 1);  // also copy the '\0'
+  auto result = mktemp(buf.data());
+  PADDLE_ENFORCE(strlen(result) != 0);
+  std::string config_file = result;
+
+  {
+    std::ofstream ofs(config_file, std::ios::out | std::ios::trunc);
+    PADDLE_ENFORCE(ofs.is_open(), "ofstream: ", ofs.rdstate());
+    for (const auto& line : config_flags) {
+      ofs << line << std::endl;
+    }
+  }
+
+  PADDLE_ENFORCE(output_mode == "kvp" || output_mode == "csv");
+  cudaOutputMode_t mode = output_mode == "csv" ? cudaCSV : cudaKeyValuePair;
+  PADDLE_ENFORCE(
+      cudaProfilerInitialize(config_file.c_str(), output_file.c_str(), mode));
+}
+
+void CudaProfilerStart() { PADDLE_ENFORCE(cudaProfilerStart()); }
+
+void CudaProfilerStop() { PADDLE_ENFORCE(cudaProfilerStop()); }
+
+}  // namespace platform
+}  // namespace paddle
diff --git a/paddle/platform/dynload/cudnn.cc b/paddle/platform/dynload/cudnn.cc
index d3e4cb567d71b987724366b6a0896f5df0eb6055..761d9edd87f428ba140d29a566fc3401199bab15 100644
--- a/paddle/platform/dynload/cudnn.cc
+++ b/paddle/platform/dynload/cudnn.cc
@@ -37,6 +37,10 @@ CUDNN_DNN_ROUTINE_EACH_AFTER_R4(DEFINE_WRAP);
 CUDNN_DNN_ROUTINE_EACH_R5(DEFINE_WRAP);
 #endif

+#ifdef CUDNN_DNN_ROUTINE_EACH_R7
+CUDNN_DNN_ROUTINE_EACH_R7(DEFINE_WRAP);
+#endif
+
 }  // namespace dynload
 }  // namespace platform
 }  // namespace paddle
diff --git a/paddle/platform/dynload/cudnn.h b/paddle/platform/dynload/cudnn.h
index b2d69da93bcd4a5c8e694a18ca648ddc4bd947af..61caac545014db2a09e2ada0b508419578c49740 100644
--- a/paddle/platform/dynload/cudnn.h
+++ b/paddle/platform/dynload/cudnn.h
@@ -135,6 +135,12 @@ CUDNN_DNN_ROUTINE_EACH_AFTER_R4(DECLARE_DYNAMIC_LOAD_CUDNN_WRAP)
 CUDNN_DNN_ROUTINE_EACH_R5(DECLARE_DYNAMIC_LOAD_CUDNN_WRAP)
 #endif

+#if CUDNN_VERSION >= 7001
+#define CUDNN_DNN_ROUTINE_EACH_R7(__macro) \
+  __macro(cudnnSetConvolutionGroupCount);
+CUDNN_DNN_ROUTINE_EACH_R7(DECLARE_DYNAMIC_LOAD_CUDNN_WRAP)
+#endif
+
 }  // namespace dynload
 }  // namespace platform
 }  // namespace paddle
diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc
index f55a1edce31ccf2498dcfcf0b30ba1012d7a7d1a..c16d3e0cbe01f90a5aa9a5d7a523cd4e282e4771 100644
--- a/paddle/pybind/pybind.cc
+++ b/paddle/pybind/pybind.cc
@@ -37,6 +37,7 @@ limitations under the License. */

 #ifdef PADDLE_WITH_CUDA
 #include "paddle/operators/nccl/nccl_gpu_common.h"
+#include "paddle/platform/cuda_profiler.h"
 #include "paddle/platform/gpu_info.h"
 #endif
@@ -460,6 +461,10 @@ All parameter, weight, gradient are variables in Paddle.
   m.def("op_support_gpu", OpSupportGPU);
 #ifdef PADDLE_WITH_CUDA
   m.def("get_cuda_device_count", platform::GetCUDADeviceCount);
+
+  m.def("nvprof_init", platform::CudaProfilerInit);
+  m.def("nvprof_start", platform::CudaProfilerStart);
+  m.def("nvprof_stop", platform::CudaProfilerStop);
 #endif

   return m.ptr();
diff --git a/python/paddle/v2/fluid/profiler.py b/python/paddle/v2/fluid/profiler.py
new file mode 100644
index 0000000000000000000000000000000000000000..2dbba72c644d1b9d06e65e19cc7e581b9a668cca
--- /dev/null
+++ b/python/paddle/v2/fluid/profiler.py
@@ -0,0 +1,46 @@
+import paddle.v2.fluid.core as core
+from contextlib import contextmanager
+
+__all__ = ['cuda_profiler']
+
+NVPROF_CONFIG = [
+    "gpustarttimestamp",
+    "gpuendtimestamp",
+    "gridsize3d",
+    "threadblocksize",
+    "streamid",
+    "enableonstart 0",
+    "conckerneltrace",
+]
+
+
+@contextmanager
+def cuda_profiler(output_file, output_mode=None, config=None):
+    """The CUDA profiler.
+    This function is used to profile a CUDA program through the CUDA runtime
+    application programming interface. The profiling result will be written
+    into `output_file` in key-value pair format or comma-separated values
+    format. The user can set the output mode by the `output_mode` argument
+    and set the counters/options for profiling by the `config` argument. The
+    default config contains 'gpustarttimestamp', 'gpuendtimestamp',
+    'gridsize3d', 'threadblocksize', 'streamid', 'enableonstart 0',
+    'conckerneltrace'.
+
+    Args:
+        output_file (string) : The output file name, the result will be
+            written into this file.
+        output_mode (string) : The output mode, which is either key-value
+            pair format ('kvp') or comma-separated values format ('csv').
+        config (list of string) : The profiler options and counters; refer to
+            "Compute Command Line Profiler User Guide" for details.
+    """
+    if output_mode is None:
+        output_mode = 'csv'
+    if output_mode not in ['kvp', 'csv']:
+        raise ValueError("The output mode must be 'kvp' or 'csv'.")
+    config = NVPROF_CONFIG if config is None else config
+    core.nvprof_init(output_file, output_mode, config)
+    # Enables profiler collection by the active CUDA profiling tool.
+    core.nvprof_start()
+    yield
+    # Disables profiler collection.
+    core.nvprof_stop()
diff --git a/python/paddle/v2/fluid/tests/test_profiler.py b/python/paddle/v2/fluid/tests/test_profiler.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8f24251b9daeb60d689835308d0e908aa502d51
--- /dev/null
+++ b/python/paddle/v2/fluid/tests/test_profiler.py
@@ -0,0 +1,28 @@
+import unittest
+import numpy as np
+import paddle.v2.fluid as fluid
+import paddle.v2.fluid.profiler as profiler
+import paddle.v2.fluid.layers as layers
+
+
+class TestProfiler(unittest.TestCase):
+    def test_nvprof(self):
+        if not fluid.core.is_compile_gpu():
+            return
+        epoch = 8
+        dshape = [4, 3, 28, 28]
+        data = layers.data(name='data', shape=[3, 28, 28], dtype='float32')
+        conv = layers.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1])
+
+        place = fluid.GPUPlace(0)
+        exe = fluid.Executor(place)
+        exe.run(fluid.default_startup_program())
+
+        with profiler.cuda_profiler("cuda_profiler.txt", 'csv'):
+            for i in range(epoch):
+                input = np.random.random(dshape).astype("float32")
+                exe.run(fluid.default_main_program(), feed={'data': input})
+
+
+if __name__ == '__main__':
+    unittest.main()