diff --git a/.travis.yml b/.travis.yml
index 8c772030925dcad3909f142b08e4d8057a3f89b7..a406841f6abf01f15826f34fe4c63b4c24486ccd 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -31,7 +31,7 @@ script:
       if [[ "$JOB" != "doc" ]]; then exit 0; fi;
       # For document only
       if [[ "$TRAVIS_PULL_REQUEST" != "false" ]]; then exit 0; fi;
-      if [[ "$TRAVIS_BRANCH" != "develop" && ! "$TRAVIS_BRANCH" =~ ^v[[:digit:]]+\.[[:digit:]]+(\.[[:digit:]]+)?(-\S*)?$ ]]; then exit 0; fi;
+      if [[ "$TRAVIS_BRANCH" != "develop" && ! "$TRAVIS_BRANCH" =~ ^(v|release/)[[:digit:]]+\.[[:digit:]]+(\.[[:digit:]]+)?(-\S*)?$ ]]; then exit 0; fi;
       export DEPLOY_DOCS_SH=https://raw.githubusercontent.com/PaddlePaddle/PaddlePaddle.org/master/scripts/deploy/deploy_docs.sh
       export DOCS_DIR=`pwd`
       cd ..
diff --git a/Dockerfile b/Dockerfile
index fc5069a6c080ed23317695e6822c4c46b5b5c7f9..48c750358cfcb227667c429f19befcaa2f51ebbd 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -23,7 +23,7 @@ ENV HOME /root
 COPY ./paddle/scripts/docker/root/ /root/
 
 RUN apt-get update && \
-    apt-get install -y --allow-downgrades \
+    apt-get install -y --allow-downgrades patchelf \
     git python-pip python-dev python-opencv openssh-server bison \
     libnccl2=2.1.2-1+cuda8.0 libnccl-dev=2.1.2-1+cuda8.0 \
     wget unzip unrar tar xz-utils bzip2 gzip coreutils ntp \
diff --git a/cmake/external/anakin.cmake b/cmake/external/anakin.cmake
index d205e3958234cabfbfeba8c3d725fe618ce48ace..fb3d8ef8d53436f387acc3069a0eb887e6f07c59 100644
--- a/cmake/external/anakin.cmake
+++ b/cmake/external/anakin.cmake
@@ -7,7 +7,17 @@ set(ANAKIN_INSTALL_DIR "${THIRD_PARTY_PATH}/install/anakin" CACHE PATH
 set(ANAKIN_INCLUDE "${ANAKIN_INSTALL_DIR}" CACHE STRING "root of Anakin header files")
 set(ANAKIN_LIBRARY "${ANAKIN_INSTALL_DIR}" CACHE STRING "path of Anakin library")
 
-set(ANAKIN_COMPILE_EXTRA_FLAGS -Wno-error=unused-variable -Wno-error=format-extra-args -Wno-error=comment -Wno-error=format -Wno-error=switch -Wno-error=return-type -Wno-error=non-virtual-dtor -Wno-reorder -Wno-error=cpp)
+set(ANAKIN_COMPILE_EXTRA_FLAGS
+    -Wno-error=unused-variable -Wno-unused-variable
+    -Wno-error=format-extra-args -Wno-format-extra-args
+    -Wno-error=comment -Wno-comment
+    -Wno-error=format -Wno-format
+    -Wno-error=switch -Wno-switch
+    -Wno-error=return-type -Wno-return-type
+    -Wno-error=non-virtual-dtor -Wno-non-virtual-dtor
+    -Wno-sign-compare
+    -Wno-reorder
+    -Wno-error=cpp)
 
 set(ANAKIN_LIBRARY_URL "https://github.com/pangge/Anakin/releases/download/3.0/anakin_release_simple.tar.gz")
diff --git a/cmake/generic.cmake b/cmake/generic.cmake
index fd7fc16bff5651f022b484623243048fbd225b5a..eafb11b6f21e226fc68556a78d675dea94080140 100644
--- a/cmake/generic.cmake
+++ b/cmake/generic.cmake
@@ -257,8 +257,8 @@ function(cc_test TARGET_NAME)
     set(multiValueArgs SRCS DEPS ARGS)
     cmake_parse_arguments(cc_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
     add_executable(${TARGET_NAME} ${cc_test_SRCS})
-    target_link_libraries(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main memory gtest gflags glog)
-    add_dependencies(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main memory gtest gflags glog)
+    target_link_libraries(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog)
+    add_dependencies(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog)
     add_test(NAME ${TARGET_NAME}
              COMMAND ${TARGET_NAME} ${cc_test_ARGS}
              WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
@@ -324,8 +324,8 @@ function(nv_test TARGET_NAME)
     set(multiValueArgs SRCS DEPS)
     cmake_parse_arguments(nv_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
     cuda_add_executable(${TARGET_NAME} ${nv_test_SRCS})
-    target_link_libraries(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main memory gtest gflags glog)
-    add_dependencies(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main memory gtest gflags glog)
+    target_link_libraries(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog)
+    add_dependencies(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog)
     add_test(${TARGET_NAME} ${TARGET_NAME})
     if (nv_test_SERIAL)
         set_property(TEST ${TARGET_NAME} PROPERTY SERIAL 1)
diff --git a/cmake/version.cmake b/cmake/version.cmake
index cde650128a068faf32f4abfff5cdfdeb656d8577..79b8e8ac496250d85427b77fbd6a9924a962a15b 100644
--- a/cmake/version.cmake
+++ b/cmake/version.cmake
@@ -1,16 +1,21 @@
 # Get the latest git tag.
 set(PADDLE_VERSION $ENV{PADDLE_VERSION})
 set(tmp_version "HEAD")
+set(TAG_VERSION_REGEX "[0-9]+\\.[0-9]+\\.[0-9]+(\\.(a|b|rc)\\.[0-9]+)?")
+set(COMMIT_VERSION_REGEX "[0-9a-f]+[0-9a-f]+[0-9a-f]+[0-9a-f]+[0-9a-f]+")
 while ("${PADDLE_VERSION}" STREQUAL "")
   execute_process(
-    COMMAND ${GIT_EXECUTABLE} describe --tags --abbrev=0 ${tmp_version}
+    COMMAND ${GIT_EXECUTABLE} describe --tags --abbrev=0 --always ${tmp_version}
     WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}
     OUTPUT_VARIABLE GIT_TAG_NAME
     RESULT_VARIABLE GIT_RESULT
     ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
   if (NOT ${GIT_RESULT})
     # Check the tag is a correct version
-    if (${GIT_TAG_NAME} MATCHES "v[0-9]+\\.[0-9]+\\.[0-9]+(\\.(a|b|rc)\\.[0-9]+)?")
+    if (${GIT_TAG_NAME} MATCHES "${COMMIT_VERSION_REGEX}")
+      # if no tag was found, set PADDLE_VERSION to "latest"
+      set(PADDLE_VERSION "latest")
+    elseif (${GIT_TAG_NAME} MATCHES "v${TAG_VERSION_REGEX}")
       string(REPLACE "v" "" PADDLE_VERSION ${GIT_TAG_NAME})
     else()  # otherwise, get the previous git tag name.
       set(tmp_version "${GIT_TAG_NAME}~1")
diff --git a/doc/fluid/api/transpiler.rst b/doc/fluid/api/transpiler.rst
index 943d39331d26c05764c90cb24f6774997c976bfe..d2ac04f1449c32cb414cea1b76d7469bbe9ccb85 100644
--- a/doc/fluid/api/transpiler.rst
+++ b/doc/fluid/api/transpiler.rst
@@ -14,6 +14,15 @@ DistributeTranspiler
     :members:
     :noindex:
 
+.. _api_fluid_transpiler_InferenceTranspiler:
+
+InferenceTranspiler
+-------------------
+
+.. autoclass:: paddle.fluid.transpiler.InferenceTranspiler
+    :members:
+    :noindex:
+
 .. _api_fluid_transpiler_memory_optimize:
 
 memory_optimize
diff --git a/doc/fluid/design/dist_train/dist_train_nccl2.md b/doc/fluid/design/dist_train/dist_train_nccl2.md
new file mode 100644
index 0000000000000000000000000000000000000000..aa7455ec5de0d46d7c2b0cef3b7ebf4754af3cb1
--- /dev/null
+++ b/doc/fluid/design/dist_train/dist_train_nccl2.md
@@ -0,0 +1,35 @@
+# Distributed Training with NCCL2
+
+We design a pattern that can enable training with `ParallelExecutor`,
+using [NCCL2](https://developer.nvidia.com/nccl) as its collective
+communication library.
+
+In `ParallelExecutor` we can use `AllReduce` or `Reduce` and `Broadcast`
+to do multi-GPU training. And if we initialize NCCL2 communicators as
+ranks in a distributed environment, we can simply run the `ParallelExecutor`
+as a distributed program! The only thing that may be different from
+the single-node version is that we need to broadcast the NCCL unique ID
+to all the nodes and initialize communicators using that ID, so the
+NCCL2 ranks will know each other.
+
+To achieve this feature, we introduce a new operator: the `gen_nccl_id` op,
+so we are ***not*** "bound to" running NCCL2 with MPI; we can run it on
+whatever platform you like.
+
+It has two running modes:
+
+1. Generate and broadcast mode, which should be used on trainer 0;
+1. Listen and fetch mode, which should be used on trainers other than 0.
+
+In both modes, this op can save the NCCL ID into the current scope as a
+persistable variable. Then we can insert this op at the end of the
+"startup program" of fluid, so that all workers can get the same ID to
+initialize NCCL communicator objects.
+
+<img src="src/ncc2_design.png"/>
+
+The above figure indicates the general process of training with NCCL2 in
+distributed mode. Each trainer has a number of communicators equal to the
+number of GPUs, but the ranks should match the global rank numbers: here
+we have 8 GPUs in total, so `nranks==8`; for each trainer, the ranks should
+be from 0 ~ 3 on trainer 0 and 4 ~ 7 on trainer 1.
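To make the rank setup above concrete, here is a minimal C++ sketch of the initialization the design describes (illustrative only, not part of this patch). It assumes two trainers with 4 GPUs each; `BroadcastUniqueIdToPeers` is a hypothetical stand-in for the network exchange that the `gen_nccl_id` op performs, while the `nccl.h` calls are the real NCCL2 API:

```cpp
// Sketch: one NCCL2 communicator per local GPU, initialized with global ranks.
#include <cuda_runtime.h>
#include <nccl.h>

// Hypothetical helper: trainer 0 sends the ID, other trainers receive it.
void BroadcastUniqueIdToPeers(ncclUniqueId* id, int trainer_id);

void InitNCCL2Ranks(int trainer_id, int num_trainers, int gpus_per_trainer,
                    ncclComm_t* comms /* one per local GPU */) {
  const int nranks = num_trainers * gpus_per_trainer;  // e.g. 2 * 4 == 8
  ncclUniqueId id;
  if (trainer_id == 0) {
    ncclGetUniqueId(&id);                   // "generate and broadcast" mode
  }
  BroadcastUniqueIdToPeers(&id, trainer_id);  // "listen and fetch" elsewhere
  ncclGroupStart();  // needed when initializing several comms from one thread
  for (int dev = 0; dev < gpus_per_trainer; ++dev) {
    cudaSetDevice(dev);
    // Global rank: 0 ~ 3 on trainer 0, 4 ~ 7 on trainer 1.
    const int global_rank = trainer_id * gpus_per_trainer + dev;
    ncclCommInitRank(&comms[dev], nranks, id, global_rank);
  }
  ncclGroupEnd();
}
```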
diff --git a/doc/fluid/design/dist_train/distributed_lookup_table_design.md b/doc/fluid/design/dist_train/distributed_lookup_table_design.md
index 988729138926f035750b59eb245dde82502a3ad2..e284e1ec5cdd18d0049ce3c1a8349bbe1248cb48 100644
--- a/doc/fluid/design/dist_train/distributed_lookup_table_design.md
+++ b/doc/fluid/design/dist_train/distributed_lookup_table_design.md
@@ -1,6 +1,6 @@
 # Design Doc: Distributed Lookup Table Operator
 
-A lookup table operator in PaddlePaddle where the table could be out
+A distributed lookup table operator in PaddlePaddle where the table could be out
 of the memory of a computer.
 
 ## Background
@@ -24,14 +24,14 @@ memory, so we'd need a distributed storage service, which supports the
 lookup of rows.
 
 The following figure illustrates the multiplication of x with two
-non-zero elements, or say, two symbols, and a lookup table W:
+non-zero elements, or say two symbols, and a lookup table W:
 
 ![lookup table](./src/lookup_table.png)
 
 ### The Backward Algorithm
 
 The backward algorithm computes W'(x) using W(x). W'(x) has the same
-scale of size as W(x) and is much smaller than W.
+scale of size as W(x) and is much smaller than W.
 
 To optimize W given W', we can do simple SGD update:
 
@@ -44,85 +44,46 @@ $$W = f(W, W')$$
 The following figure illustrates the backward pass of the lookup
 operator:
 
 ![lookup table training](./src/lookup_table_training.png)
 
-## Distributed Storage Service
-
-The forward algorithm requires a distributed storage service for W.
-The backward algorithm prefers that the storage system can apply the
-optimization algorithm on W. The following two sections describe two
-solutions -- the former doesn't require that the storage service can
-do optimization, the latter does.
-
-### Storage Service Doesn't Optimize
-
-In this design, we use highly-optimized distributed storage, e.g.,
-memcached, as the storage service, and we run the optimization
-algorithm on parameter servers of PaddlePaddle. The following figure
-illustrates the training process.
-
-
-
-
-Each trainer runs the forward and backward passes using their local
-data:
-
-1. In the forward pass, when a trainer runs the forward algorithm of a
-   lookup operator, it retrieves W(x) from the storage service.
-1. The trainer computes W'(x) in the backward pass using W(x).
-
-During the global update process:
-
-1. Each trainer uploads its W'(x) to parameter servers.
-1. The parameter server runs the optimization algorithm, e.g., the
-   Adam optimization algorithm, which requires that
-   1. The parameter server retrieves W(x) from memcached, and
-   1. The parameter server pushes $\Delta W(x)=f(W(x), lambda \sum_j
-      W'(x))$ to memcached, where $f$ denotes the optimization
-      algorithm.
-
-### Storage Service Does Optimize
-
-This design is very similar to the above one, except that the
-optimization algorithm $f$ runs on the storage service.
-
-- Pro: parameter servers do not retrieve W(x) from the storage
-  service, thus saves half network communication.
-- Con: the storage service needs to be able to run the optimization
-  algorithm.
-
-## Conclusion
-
-Let us do the "storage service does not optimize" solution first, as a
-baseline at least, because it is easier to use a well-optimized
-distributed storage service like memcached. We can do the "storage
-service does optimize" solution later or at the same time, which, if
-implemented carefully, should have better performance than the former.
+## Distributed Lookup Table
+### Problem 1: The lookup table may be very large.
+
+In applications like search engines and recommendation systems, the number of feature IDs may be very large, say 100,000,000,000; then for a lookup table of 8-dimensional float values (8 floats of 4 bytes per ID), the total size of the table is:
+
+```
+100,000,000,000 * 8 * 4(Bytes) = 2980.23 GB
+```
+
+### Solution: Distributed storage
+
+1. Paddle uses [SelectedRows](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/selected_rows.md) as the storage format for the lookup table. The lookup table parameter will be split across multiple machines according to the hash of the feature ID, and the input data will also be split and sent to the corresponding machine to prefetch the parameters.
+
+1. For common parameters, the trainer will get the whole parameter for training, but for the big lookup table, the trainer cannot store the whole parameter. Because the input data features are very sparse, each time we only need a few parameters for training, so we use `prefetch_op` to prefetch only the parameters the trainer needs.
+
+### Problem 2: The IDs in the lookup table are not known before training.
+
+The feature ID is calculated by a hash function, and because the feature data source is so large, we cannot get all the IDs before training. So we cannot initialize the table before training.
+
+### Solution: ID auto growth
+
+At the beginning of training, paddle only allocates the memory for the lookup table on the parameter server side; the IDs and their values are not initialized. During training, when a parameter server receives an ID, if it is already in the lookup table, it returns the existing parameter; if the ID does not exist, paddle adds it into the lookup table and initializes the value for it.
+
+### Problem 3: Parameter load and save
+
+For common parameters, paddle uses the trainer to save and load them. But for the distributed lookup table, the trainer cannot do this because of its large size.
+
+### Solution: Parameter server side save and load
+
+Paddle supports parameter server side save and load for the distributed lookup table. Each parameter server machine will only save and load its part of the whole table.
+
+## Architecture
+The whole architecture of the distributed lookup table is as below:
+
+### Training steps:
+1. Read a batch of data; the data consists of feature IDs.
+1. The input IDs will be split by `split_ids_op` with the same hash function as the lookup table.
+1. The `prefetch_op` uses the split result to prefetch parameters back from the lookup table.
+1. Run forward-backward to get the gradients of the lookup table.
+1. `split_ids_op` splits the gradients, which are then sent to the parameter server with `send_op`.
+1. The parameter server updates the table with the received gradients.
+
+![distributed lookup table](./src/distributed_lookup_table.jpeg)
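The ID-splitting step in the training loop above can be sketched in a few lines of C++ (illustrative only; the real `split_ids_op` operates on tensors and uses the table's configured hash function, while this sketch assumes a simple modulo hash over non-negative IDs):

```cpp
// Sketch: group a batch of feature ids by parameter-server shard, so each
// server is asked only for the table rows it owns.
#include <cstdint>
#include <vector>

std::vector<std::vector<int64_t>> SplitIdsByShard(
    const std::vector<int64_t>& ids, size_t shard_num) {
  std::vector<std::vector<int64_t>> sharded(shard_num);
  for (int64_t id : ids) {
    // Modulo stands in for the table's hash function; ids assumed >= 0.
    sharded[static_cast<size_t>(id) % shard_num].push_back(id);
  }
  return sharded;  // sharded[i] is sent to parameter server i for prefetch
}
```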
diff --git a/doc/fluid/design/dist_train/src/distributed_lookup_table.graffle b/doc/fluid/design/dist_train/src/distributed_lookup_table.graffle
new file mode 100644
index 0000000000000000000000000000000000000000..65dfdbbacd219739db6ddfdf243cc16c3c4e8d1e
Binary files /dev/null and b/doc/fluid/design/dist_train/src/distributed_lookup_table.graffle differ
diff --git a/doc/fluid/design/dist_train/src/distributed_lookup_table.jpeg b/doc/fluid/design/dist_train/src/distributed_lookup_table.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..5353a16fd329f62ff893d32706b9c3c0bcc46a07
Binary files /dev/null and b/doc/fluid/design/dist_train/src/distributed_lookup_table.jpeg differ
diff --git a/doc/fluid/design/dist_train/src/fluid_lookup_remote_table.graffle b/doc/fluid/design/dist_train/src/fluid_lookup_remote_table.graffle
new file mode 100644
index 0000000000000000000000000000000000000000..96ca6d48f43bd9f49c6861dab006e2037873db87
Binary files /dev/null and b/doc/fluid/design/dist_train/src/fluid_lookup_remote_table.graffle differ
diff --git a/doc/fluid/design/dist_train/src/fluid_lookup_remote_table.png b/doc/fluid/design/dist_train/src/fluid_lookup_remote_table.png
new file mode 100644
index 0000000000000000000000000000000000000000..afa25ab3b4e427bc595a855b12ab966478e01ed0
Binary files /dev/null and b/doc/fluid/design/dist_train/src/fluid_lookup_remote_table.png differ
diff --git a/doc/fluid/design/dist_train/src/ncc2_design.graffle b/doc/fluid/design/dist_train/src/ncc2_design.graffle
new file mode 100644
index 0000000000000000000000000000000000000000..7d2753bbb03bc28c7a0054bb0aa424deb072ffbf
Binary files /dev/null and b/doc/fluid/design/dist_train/src/ncc2_design.graffle differ
diff --git a/doc/fluid/design/dist_train/src/ncc2_design.png b/doc/fluid/design/dist_train/src/ncc2_design.png
new file mode 100644
index 0000000000000000000000000000000000000000..da0d5ee81f5dfeb4ca1356601b0bb5870456e3d6
Binary files /dev/null and b/doc/fluid/design/dist_train/src/ncc2_design.png differ
diff --git a/doc/v2/howto/capi/workflow_of_capi_cn.md b/doc/v2/howto/capi/workflow_of_capi_cn.md
index 3acdbae28e9b35f8a9104a89c9a5799f8c892334..db1568a2afbea3cca0d4e1fe053ba9536a60ab3d 100644
--- a/doc/v2/howto/capi/workflow_of_capi_cn.md
+++ b/doc/v2/howto/capi/workflow_of_capi_cn.md
@@ -28,9 +28,9 @@
 
 ### 准备预测模型
 
-准备预测模型部分,我们以手写数字识别任务为例进行介绍。手写数字识别任务定义了一个含有[两个隐层的简单全连接网络](https://github.com/PaddlePaddle/book/blob/develop/02.recognize_digits/README.cn.md#softmax回归softmax-regression),网络接受一幅图片作为输入,将图片分类到 0 ~ 9 类别标签之一。完整代码可以查看[此目录](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense) 中的相关脚本。
+准备预测模型部分,我们以手写数字识别任务为例进行介绍。手写数字识别任务定义了一个含有[两个隐层的简单全连接网络](https://github.com/PaddlePaddle/book/blob/develop/02.recognize_digits/README.cn.md#softmax回归softmax-regression),网络接受一幅图片作为输入,将图片分类到 0 ~ 9 类别标签之一。完整代码可以查看[此目录](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/legacy/capi/examples/model_inference/dense) 中的相关脚本。
 
-调用C-API开发预测程序需要一个训练好的模型,运行[MNIST手写数字识别目录](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense)下的[mnist_v2.py](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/examples/model_inference/dense/mnist_v2.py)脚本,在终端执行`python mnist_v2.py`,会使用 PaddlePaddle 内置的 [MNIST 数据集](http://yann.lecun.com/exdb/mnist/)进行训练。训练好的模型默认保存在当前运行目录下的`models`目录中。
+调用C-API开发预测程序需要一个训练好的模型,运行[MNIST手写数字识别目录](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/legacy/capi/examples/model_inference/dense)下的[mnist_v2.py](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/legacy/capi/examples/model_inference/dense/mnist_v2.py)脚本,在终端执行`python mnist_v2.py`,会使用 PaddlePaddle 内置的 [MNIST 数据集](http://yann.lecun.com/exdb/mnist/)进行训练。训练好的模型默认保存在当前运行目录下的`models`目录中。 下面,我们将训练结束后存储下来的模型转换成预测模型。 @@ -48,7 +48,7 @@ dump_v2_config(predict, "trainer_config.bin", True) ``` - 对[手写数字识别](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense)这个示例,[`mnist_v2.py`](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense/mnist_v2.py)脚本集成了序列化神经网络结构的过程,可以直接运行 `python mnist_v2.py --task dump_config` 对神经网络结构进行序列化,结果会写入当前运行目录下的`trainer_config.bin`文件中。 + 对[手写数字识别](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/legacy/capi/examples/model_inference/dense)这个示例,[`mnist_v2.py`](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/legacy/capi/examples/model_inference/dense/mnist_v2.py)脚本集成了序列化神经网络结构的过程,可以直接运行 `python mnist_v2.py --task dump_config` 对神经网络结构进行序列化,结果会写入当前运行目录下的`trainer_config.bin`文件中。 使用这种方式,需要**在运行时将神经网络的多个可学习参数放在同一个目录中**,C-API可以通过分别指定序列化后的网络结构文件和参数目录来加载训练好的模型。 @@ -68,7 +68,7 @@ merge_v2_model(net, param_file, output_file) ``` - 对[手写数字识别](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense)这个示例,可直接运行 `python` [merge_v2_model.py](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense/merge_v2_model.py)。序列化结果会写入当前运行目录下的`output.paddle.model`文件中。使用这种方式,运行时C-API可以通过指定`output.paddle.model`文件的路径来加载预测模型。 + 对[手写数字识别](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/legacy/capi/examples/model_inference/dense)这个示例,可直接运行 `python` [merge_v2_model.py](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/legacy/capi/examples/model_inference/dense/merge_v2_model.py)。序列化结果会写入当前运行目录下的`output.paddle.model`文件中。使用这种方式,运行时C-API可以通过指定`output.paddle.model`文件的路径来加载预测模型。 #### 注意事项 1. 为使用C-API,在调用`dump_v2_config`序列化神经网络结构时,参数`binary`必须指定为`True`。 @@ -77,10 +77,10 @@ ### 编写预测代码 -预测代码更多详细示例代码请参考[C-API使用示例](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference) 目录下的代码示例。这一节对图1中预测代码编写的5个步骤进行介绍和说明。 +预测代码更多详细示例代码请参考[C-API使用示例](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/legacy/capi/examples/model_inference) 目录下的代码示例。这一节对图1中预测代码编写的5个步骤进行介绍和说明。 #### step 1. 初始化PaddlePaddle运行环境 -第一步需调用[`paddle_init`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/main.h#L27) 初始化PaddlePaddle运行环境,该接口接受两个参数:参数的个数和参数列表。 +第一步需调用[`paddle_init`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/legacy/capi/main.h#L27) 初始化PaddlePaddle运行环境,该接口接受两个参数:参数的个数和参数列表。 #### step2. 加载模型 @@ -88,8 +88,8 @@ 概念上,在 PaddlePaddle 内部,一个GradientMachine类的对象管理着一组计算层(PaddlePaddle Layers)来完成前向和反向计算,并处理与之相关的所有细节。在调用C-API预测时,只需进行前向计算而无需调用反向计算。这篇文档之后部分会使用`gradient machine`来特指调用PaddlePaddle C-API创建的GradientMachine类的对象。每一个 `gradient machine` 都会管理维护一份训练好的模型,下面是C-API提供的,两种常用的模型加载方式: -1. 调用[`paddle_gradient_machine_load_parameter_from_disk`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/gradient_machine.h#L61)接口,从磁盘加载预测模型。这时`gradient machine`会独立拥有一份训练好的模型; -1. 
调用[`paddle_gradient_machine_create_shared_param`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/gradient_machine.h#L88)接口,与其它`gradient machine`的共享已经加载的预测模型。这种情况多出现在使用多线程预测时,通过多个线程共享同一个模型来减少内存开销。可参考[此示例](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/examples/model_inference/multi_thread/main.c)。 +1. 调用[`paddle_gradient_machine_load_parameter_from_disk`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/legacy/capi/gradient_machine.h#L61)接口,从磁盘加载预测模型。这时`gradient machine`会独立拥有一份训练好的模型; +1. 调用[`paddle_gradient_machine_create_shared_param`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/legacy/capi/gradient_machine.h#L88)接口,与其它`gradient machine`的共享已经加载的预测模型。这种情况多出现在使用多线程预测时,通过多个线程共享同一个模型来减少内存开销。可参考[此示例](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/legacy/capi/examples/model_inference/multi_thread/main.c)。 - 注意事项 @@ -117,7 +117,7 @@ C-API支持的所有输入数据类型和他们的组织方式,请参考“输 #### step 4. 前向计算 -完成上述准备之后,通过调用 [`paddle_gradient_machine_forward`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/gradient_machine.h#L73) 接口完成神经网络的前向计算。 +完成上述准备之后,通过调用 [`paddle_gradient_machine_forward`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/legacy/capi/gradient_machine.h#L73) 接口完成神经网络的前向计算。 #### step 5. 清理 diff --git a/paddle/contrib/inference/CMakeLists.txt b/paddle/contrib/inference/CMakeLists.txt index a8bbb4eb8081420ae0bbaf761bd27303c0d043cb..c30eff5010748685838feb984c9c817ffcf14c11 100644 --- a/paddle/contrib/inference/CMakeLists.txt +++ b/paddle/contrib/inference/CMakeLists.txt @@ -46,9 +46,14 @@ cc_library(paddle_inference_api SRCS paddle_inference_api.cc paddle_inference_api_impl.cc DEPS ${FLUID_CORE_MODULES} ${GLOB_OP_LIB}) +# Here the shared library doesn't depend on other fluid libraries, or double free will occur. 
cc_library(paddle_inference_api_shared SHARED - SRCS paddle_inference_api.cc paddle_inference_api_impl.cc - DEPS ${FLUID_CORE_MODULES} ${GLOB_OP_LIB}) + SRCS paddle_inference_api.cc paddle_inference_api_impl.cc) +set_target_properties(paddle_inference_api_shared PROPERTIES OUTPUT_NAME paddle_inference_api) +if(NOT APPLE) + set(LINK_FLAGS "-fPIC -fvisibility=hidden") + set_target_properties(paddle_inference_api_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}") +endif() cc_test(test_paddle_inference_api SRCS test_paddle_inference_api.cc diff --git a/paddle/contrib/inference/paddle_inference_api.cc b/paddle/contrib/inference/paddle_inference_api.cc index ea46b3006f8d0964cc8229d3683ee7b602d6ef0d..4fe198ad7d4a752882965e9e7fc460741de53d22 100644 --- a/paddle/contrib/inference/paddle_inference_api.cc +++ b/paddle/contrib/inference/paddle_inference_api.cc @@ -23,7 +23,6 @@ int PaddleDtypeSize(PaddleDType dtype) { case PaddleDType::INT64: return sizeof(int64_t); default: - // assert(false); return -1; } diff --git a/paddle/contrib/inference/paddle_inference_api_impl.h b/paddle/contrib/inference/paddle_inference_api_impl.h index ba266b608da342fb71faf05d02ddf74330e21e98..f9ec6f55449fc46b4a44b9563980cb5f8e80a951 100644 --- a/paddle/contrib/inference/paddle_inference_api_impl.h +++ b/paddle/contrib/inference/paddle_inference_api_impl.h @@ -22,9 +22,9 @@ #include "paddle/contrib/inference/paddle_inference_api.h" #include "paddle/fluid/framework/ddim.h" -#include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/inference/io.h" +#include "paddle/fluid/platform/init.h" #include "paddle/fluid/platform/profiler.h" namespace paddle { diff --git a/paddle/contrib/inference/test_paddle_inference_api_impl.cc b/paddle/contrib/inference/test_paddle_inference_api_impl.cc index 88c4e665a3daed0ed34b23b75d360acbd586401f..c3649dcb96c77f449d876bef34c4aea7afb31daa 100644 --- a/paddle/contrib/inference/test_paddle_inference_api_impl.cc +++ b/paddle/contrib/inference/test_paddle_inference_api_impl.cc @@ -249,7 +249,7 @@ void MainThreadsImageClassification(bool use_gpu) { const size_t len = local_outputs[0].data.length(); float* data = static_cast(local_outputs[0].data.data()); float* ref_data = refs[tid].data(); - EXPECT_EQ(refs[tid].numel(), len / sizeof(float)); + EXPECT_EQ((size_t)refs[tid].numel(), len / sizeof(float)); for (int i = 0; i < refs[tid].numel(); ++i) { EXPECT_NEAR(ref_data[i], data[i], 1e-3); } diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt index 6286dda4a54991b7a1042aed9886fdcb694198ba..ec252929d5584c211cea7fa52004ecdfdf586a85 100644 --- a/paddle/fluid/framework/CMakeLists.txt +++ b/paddle/fluid/framework/CMakeLists.txt @@ -21,12 +21,13 @@ endif() cc_test(eigen_test SRCS eigen_test.cc DEPS tensor) -nv_test(mixed_vector_test SRCS mixed_vector_test.cu DEPS place memory device_context init) +nv_test(mixed_vector_test SRCS mixed_vector_test.cu DEPS place memory device_context tensor) cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor framework_proto recordio) cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor memory) -nv_test(lod_tensor_gpu_test SRCS lod_tensor_test.cu DEPS lod_tensor init) +nv_test(lod_tensor_gpu_test SRCS lod_tensor_test.cu DEPS lod_tensor) cc_library(reader SRCS reader.cc DEPS lod_tensor ddim) +cc_test(reader_test SRCS reader_test.cc DEPS reader) cc_test(variable_test SRCS variable_test.cc) @@ -38,7 +39,7 @@ cc_test(scope_test SRCS scope_test.cc DEPS scope) 
cc_library(data_device_transform SRCS data_device_transform.cc DEPS tensor) nv_test(data_device_transform_test SRCS data_device_transform_test.cu - DEPS operator op_registry init math_function) + DEPS operator op_registry device_context math_function) if(WITH_GPU) nv_library(data_type_transform SRCS data_type_transform.cu DEPS tensor) @@ -63,7 +64,7 @@ cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto) cc_library(shape_inference SRCS shape_inference.cc DEPS ddim attribute device_context) cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope glog shape_inference data_transform lod_tensor profiler) -cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry init) +cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry device_context) cc_library(proto_desc SRCS var_desc.cc op_desc.cc block_desc.cc program_desc.cc DEPS shape_inference op_info operator glog) cc_library(op_registry SRCS op_registry.cc DEPS op_proto_maker op_info operator glog proto_desc) @@ -101,14 +102,14 @@ cc_test(var_type_inference_test SRCS var_type_inference_test.cc DEPS op_registry cc_library(selected_rows SRCS selected_rows.cc DEPS tensor) cc_test(selected_rows_test SRCS selected_rows_test.cc DEPS selected_rows) -cc_library(init SRCS init.cc DEPS gflags device_context place stringpiece operator) -cc_test(init_test SRCS init_test.cc DEPS init) - cc_test(op_kernel_type_test SRCS op_kernel_type_test.cc DEPS place device_context framework_proto) cc_test(cow_ptr_tests SRCS details/cow_ptr_test.cc) # cc_test(channel_test SRCS channel_test.cc) cc_test(tuple_test SRCS tuple_test.cc ) -cc_test(concurrency_test SRCS concurrency_test.cc DEPS go_op channel_close_op channel_create_op - channel_send_op channel_recv_op sum_op select_op elementwise_add_op compare_op - conditional_block_op while_op assign_op print_op executor proto_desc) + +# disable test temporarily. +# TODO https://github.com/PaddlePaddle/Paddle/issues/11971 +# cc_test(concurrency_test SRCS concurrency_test.cc DEPS go_op channel_close_op channel_create_op +# channel_send_op channel_recv_op sum_op select_op elementwise_add_op compare_op +# conditional_block_op while_op assign_op print_op executor proto_desc) diff --git a/paddle/fluid/framework/data_device_transform_test.cu b/paddle/fluid/framework/data_device_transform_test.cu index a91fe5c99d397ef1bf04f6d22e988b6d3f33e500..f2c55e533a2747325b1b16fdada37945a8ed3c42 100644 --- a/paddle/fluid/framework/data_device_transform_test.cu +++ b/paddle/fluid/framework/data_device_transform_test.cu @@ -14,13 +14,13 @@ limitations under the License. 
*/
 
 #include "gtest/gtest.h"
 
-#include "paddle/fluid/framework/init.h"
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/op_info.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/elementwise_op_function.h"
 #include "paddle/fluid/operators/math/math_function.h"
 #include "paddle/fluid/platform/device_context.h"
+#include "paddle/fluid/platform/init.h"
 
 namespace paddle {
 namespace framework {
diff --git a/paddle/fluid/framework/details/build_strategy.h b/paddle/fluid/framework/details/build_strategy.h
index 9c2c845c6efb206fb1ad5150189430b9a6fe9ea3..b2e5399e2376a86c1cd310b29c768832665af87f 100644
--- a/paddle/fluid/framework/details/build_strategy.h
+++ b/paddle/fluid/framework/details/build_strategy.h
@@ -34,7 +34,7 @@ struct BuildStrategy {
 
   std::string debug_graphviz_path_{""};
 
-  bool enable_data_balance_{true};
+  bool enable_data_balance_{false};
 };
 
 }  // namespace details
diff --git a/paddle/fluid/framework/details/data_balance_op_handle.cc b/paddle/fluid/framework/details/data_balance_op_handle.cc
index d07235df5856591f8ad707c86fa5b3b65868c3d1..68896c8ac1bae7d4bfcfa79cc8ec5c26bf2d93ee 100644
--- a/paddle/fluid/framework/details/data_balance_op_handle.cc
+++ b/paddle/fluid/framework/details/data_balance_op_handle.cc
@@ -86,9 +86,9 @@ std::vector> DataBalanceOpHandle::GetBalancePlan(
 }
 
 void DataBalanceOpHandle::RunImpl() {
-  if (places_.size() == 1) {
-    return;
-  }
+  PADDLE_ENFORCE_GT(places_.size(), 1,
+                    "Data balance can only be enabled when the number of "
+                    "places to run is larger than 1.");
   auto in_var_handles = DynamicCast(inputs_);
   auto out_var_handles = DynamicCast(outputs_);
   PADDLE_ENFORCE(in_var_handles.size() % places_.size() == 0);
diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.cc b/paddle/fluid/framework/details/multi_devices_graph_builder.cc
index 46d0c2769cb334f5cb75ae0ef5e48da45448c48f..b82c2ef4082110f1621eb38d50361396511a4825 100644
--- a/paddle/fluid/framework/details/multi_devices_graph_builder.cc
+++ b/paddle/fluid/framework/details/multi_devices_graph_builder.cc
@@ -59,6 +59,11 @@ MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder(
     grad_names_.insert(GradVarName(p));
   }
   balance_vars_.resize(places_.size(), 0);
+  if (strategy_.enable_data_balance_ && places_.size() == 1) {
+    LOG(WARNING) << "There is no need to enable data balance when there is "
+                    "only one place. enable_data_balance is set to False.";
+    strategy_.enable_data_balance_ = false;
+  }
 }
 
 void MultiDevSSAGraphBuilder::CreateOpHandleIOs(SSAGraph *result,
diff --git a/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc
index eb4e7ec52f907f9403e21ec2734d61824f51a58b..e55858803612c595ffda4f4e50a7f8946ba91dff 100644
--- a/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc
+++ b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 #include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h"
+#include
 #include
 #include
 #include "paddle/fluid/framework/executor.h"
@@ -54,7 +55,13 @@ FeedFetchList ScopeBufferedSSAGraphExecutor::Run(
     }
   }
 
-  auto fetch_data = underlying_executor_->Run(fetch_tensors);
+  std::vector<framework::LoDTensor> fetch_data;
+  std::exception_ptr eptr;
+  try {
+    fetch_data = underlying_executor_->Run(fetch_tensors);
+  } catch (...)
{ + eptr = std::current_exception(); + } drop_scope_counter_ += 1; if (!fetch_tensors.empty() || drop_scope_counter_ == strategy_.num_iteration_per_drop_scope_) { @@ -69,7 +76,12 @@ FeedFetchList ScopeBufferedSSAGraphExecutor::Run( scope->DeleteScope(local_scope); } } - return fetch_data; + + if (eptr) { + std::rethrow_exception(eptr); + } else { + return fetch_data; + } } } // namespace details } // namespace framework diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc index 99b10254a7961bf7b27b256acaece573a71c4115..4586d927a06df5e467531ba97bd02291a6ebca5c 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc @@ -78,6 +78,9 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( set.clear(); }; + run_op_futures_.clear(); + exception_.reset(); + // Step 3. Execution while (!pending_vars.empty()) { // 1. Run All Ready ops @@ -98,6 +101,9 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( if (timeout) { std::lock_guard l(exception_mu_); if (exception_) { + for (auto &run_op_future : run_op_futures_) { + run_op_future.wait(); + } std::exception *exp = exception_.get(); if (dynamic_cast(exp)) { auto e = *static_cast(exp); @@ -222,7 +228,7 @@ void ThreadedSSAGraphExecutor::RunOp( } }; if (pool_) { - pool_->enqueue(op_run); + run_op_futures_.emplace_back(pool_->enqueue(op_run)); } else { op_run(); } diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h index c69e0487e2e503a0d445300aa2fd6bb9c30b06c9..168293a3291283a9905c12b9ee22c3136157e9ee 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h @@ -15,6 +15,7 @@ #pragma once #include +#include #include #include #include @@ -77,6 +78,7 @@ class ThreadedSSAGraphExecutor : public SSAGraphExecutor { private: ExecutionStrategy strategy_; + std::list> run_op_futures_; }; } // namespace details diff --git a/paddle/fluid/framework/lod_tensor_test.cu b/paddle/fluid/framework/lod_tensor_test.cu index e3efbe4c464493af87e33510647d8c67d457a76d..b9950627ca378cb9607681799bd7fe5bfce2bf50 100644 --- a/paddle/fluid/framework/lod_tensor_test.cu +++ b/paddle/fluid/framework/lod_tensor_test.cu @@ -17,9 +17,9 @@ #include #include "gtest/gtest.h" -#include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/platform/assert.h" +#include "paddle/fluid/platform/init.h" #include "paddle/fluid/platform/place.h" __global__ void test(size_t* a, int size) { diff --git a/paddle/fluid/framework/op_info.cc b/paddle/fluid/framework/op_info.cc index f1261dee0319440995951d1bee145404186a8ad4..af75baa5c4b98f7d092834c05eb57e9c7e131b29 100644 --- a/paddle/fluid/framework/op_info.cc +++ b/paddle/fluid/framework/op_info.cc @@ -21,8 +21,8 @@ namespace framework { // a static local variable is already being initialized. 
// https://stackoverflow.com/questions/11711920/how-to-implement-multithread-safe-singleton-in-c11-without-using-mutex OpInfoMap& OpInfoMap::Instance() { - static OpInfoMap* g_op_info_map = new OpInfoMap(); - return *g_op_info_map; + static OpInfoMap g_op_info_map; + return g_op_info_map; } } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/op_registry.h b/paddle/fluid/framework/op_registry.h index 3314e41cc51d74f87be0e2cd5eba9bb260c16be7..e7dfa608b48f89a2155e43c7e63e31154675cd38 100644 --- a/paddle/fluid/framework/op_registry.h +++ b/paddle/fluid/framework/op_registry.h @@ -182,21 +182,15 @@ struct OpKernelRegistrarFunctorEx \ - __op_registrar_##op_type##__(#op_type); \ - int TouchOpRegistrar_##op_type() { \ - __op_registrar_##op_type##__.Touch(); \ - return 0; \ +#define REGISTER_OPERATOR(op_type, op_class, ...) \ + STATIC_ASSERT_GLOBAL_NAMESPACE( \ + __reg_op__##op_type, \ + "REGISTER_OPERATOR must be called in global namespace"); \ + static ::paddle::framework::OperatorRegistrar \ + __op_registrar_##op_type##__(#op_type); \ + int TouchOpRegistrar_##op_type() { \ + __op_registrar_##op_type##__.Touch(); \ + return 0; \ } #define REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class) \ diff --git a/paddle/fluid/framework/op_registry_test.cc b/paddle/fluid/framework/op_registry_test.cc index 18b1649cc71d5edd5b07740bbad1fe8f81128898..04996d7b09cecc3c330a47153c9b10310f1792f4 100644 --- a/paddle/fluid/framework/op_registry_test.cc +++ b/paddle/fluid/framework/op_registry_test.cc @@ -193,15 +193,10 @@ TEST(OpRegistry, CustomChecker) { ASSERT_EQ(test_attr, 4); } -class CosineOpComplete : public paddle::framework::CosineOp { - public: - DEFINE_OP_CONSTRUCTOR(CosineOpComplete, paddle::framework::CosineOp); - DEFINE_OP_CLONE_METHOD(CosineOpComplete); -}; - TEST(OperatorRegistrar, Test) { paddle::framework::OperatorRegistrar< - CosineOpComplete, paddle::framework::CosineOpProtoAndCheckerMaker> + paddle::framework::CosineOp, + paddle::framework::CosineOpProtoAndCheckerMaker> reg("cos"); } diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index 3cf8e8696d739e3f2894e490161b9fb5b459bc41..d1dc5fcd97b77fb7707c7d48f6eaeef140d3f306 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -633,6 +633,16 @@ void OperatorWithKernel::RunImpl(const Scope& scope, VLOG(3) << "expected_kernel_key:" << expected_kernel_key; auto kernel_iter = kernels.find(expected_kernel_key); +#ifdef PADDLE_WITH_MKLDNN + // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set + if (kernel_iter == kernels.end() && + expected_kernel_key.library_type_ == LibraryType::kMKLDNN) { + VLOG(3) << "missing MKLDNN kernel: fallbacking to PLAIN one"; + expected_kernel_key.library_type_ = LibraryType::kPlain; + expected_kernel_key.data_layout_ = DataLayout::kAnyLayout; + kernel_iter = kernels.find(expected_kernel_key); + } +#endif if (kernel_iter == kernels.end()) { PADDLE_THROW("op %s does not have kernel for %s", type_, KernelTypeToString(expected_kernel_key)); diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index 01d750efbb8aaa35701f6caa7ec103ec21dd529e..1040eb882baea624e972faf4af3094119df72308 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -121,10 +121,6 @@ class OperatorBase { //! Get all outputs variable names virtual std::vector OutputVars(bool has_intermediate) const; - // Return a new operator instance, which is as same as this. 
- // Use unique_ptr to prevent caller forget to delete this pointer. - virtual std::unique_ptr Clone() const = 0; - protected: std::string type_; // NOTE: in case of OpGrad, inputs_ contains: @@ -145,37 +141,6 @@ class OperatorBase { const platform::Place& place) const = 0; }; -// Macro for define a clone method. -// If you are writing an kernel operator, `Clone` will be defined when you -// register it. i.e. `Clone` method is not needed to define by yourself. -#define DEFINE_OP_CLONE_METHOD(cls) \ - std::unique_ptr<::paddle::framework::OperatorBase> Clone() const final { \ - return std::unique_ptr<::paddle::framework::OperatorBase>(new cls(*this)); \ - } - -// Macro for define a default constructor for Operator. -// You can also use -// using PARENT_CLASS::PARENT_CLASS; -// to use parent's constructor. -#define DEFINE_OP_CONSTRUCTOR(cls, parent_cls) \ - cls(const std::string& type, \ - const ::paddle::framework::VariableNameMap& inputs, \ - const ::paddle::framework::VariableNameMap& outputs, \ - const paddle::framework::AttributeMap& attrs) \ - : parent_cls(type, inputs, outputs, attrs) {} - -class NOP : public OperatorBase { - public: - using OperatorBase::OperatorBase; - std::unique_ptr Clone() const override { - return std::unique_ptr(new NOP(*this)); - } - - private: - void RunImpl(const Scope& scope, - const platform::Place& place) const override {} -}; - class ExecutionContext { public: ExecutionContext(const OperatorBase& op, const Scope& scope, diff --git a/paddle/fluid/framework/operator_test.cc b/paddle/fluid/framework/operator_test.cc index 74043b5d7990178976baf2fad991ae03f9c8dd25..ac9dd8245ad4e0e8842f219b23d3866b03fdaedb 100644 --- a/paddle/fluid/framework/operator_test.cc +++ b/paddle/fluid/framework/operator_test.cc @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "gtest/gtest.h" -#include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/op_info.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/platform/init.h" namespace paddle { namespace framework { @@ -247,26 +247,3 @@ TEST(OpKernel, multi_inputs) { auto op = paddle::framework::OpRegistry::CreateOp(op_desc); op->Run(scope, cpu_place); } - -class OperatorClone : public paddle::framework::OperatorBase { - public: - DEFINE_OP_CLONE_METHOD(OperatorClone); - OperatorClone(const std::string& type, - const paddle::framework::VariableNameMap& inputs, - const paddle::framework::VariableNameMap& outputs, - const paddle::framework::AttributeMap& attrs) - : OperatorBase(type, inputs, outputs, attrs) {} - - private: - void RunImpl(const paddle::framework::Scope& scope, - const paddle::platform::Place& place) const override {} -}; - -TEST(Operator, Clone) { - paddle::framework::InitDevices(true); - OperatorClone a("ABC", paddle::framework::VariableNameMap{}, - paddle::framework::VariableNameMap{}, - paddle::framework::AttributeMap{}); - auto b = a.Clone(); - ASSERT_EQ(a.Type(), b->Type()); -} diff --git a/paddle/fluid/framework/reader.cc b/paddle/fluid/framework/reader.cc index 0b36f1116d15004b355e854e101abb9ad3297836..5897d320a8b7e5af541098cadff8e78f8324949c 100644 --- a/paddle/fluid/framework/reader.cc +++ b/paddle/fluid/framework/reader.cc @@ -13,29 +13,61 @@ // limitations under the License. 
#include "paddle/fluid/framework/reader.h" +#include namespace paddle { namespace framework { -ReaderBase::~ReaderBase() {} -FileReader::FileReader(const std::vector &dims) : dims_(dims) {} - -void FileReader::ReadNext(std::vector *out) { +void ReaderBase::ReadNext(std::vector *out) { + std::lock_guard lock(mu_); + PADDLE_ENFORCE_EQ(status_, ReaderStatus::kRunning); ReadNextImpl(out); - if (out->empty()) { - return; - } +} - PADDLE_ENFORCE_EQ(out->size(), dims_.size()); - for (size_t i = 0; i < dims_.size(); ++i) { - auto &actual = (*out)[i].dims(); - auto &expect = dims_[i]; +void ReaderBase::InsertDecoratedReader( + const std::shared_ptr &decorated_reader) { + std::lock_guard guard(mu_); + decorated_readers_.emplace_back(decorated_reader); +} - PADDLE_ENFORCE_EQ(actual.size(), expect.size()); - for (int j = 0; j < actual.size(); ++j) { - // PADDLE_ENFORCE(actual[i] == expect[i] || expect[i] == -1); +std::unordered_set ReaderBase::GetEndPoints() { + std::unordered_set result; + std::deque queue; + queue.emplace_back(this); + while (!queue.empty()) { // BFS search + auto *front = queue.front(); + queue.pop_front(); + if (front->decorated_readers_.empty()) { + result.emplace(front); + } else { + for (auto &reader : front->decorated_readers_) { + if (auto *reader_ptr = reader.lock().get()) { + queue.emplace_back(reader_ptr); + } + } } } + + return result; } + +void ReaderBase::Shutdown() { + std::lock_guard lock(mu_); + if (status_ != ReaderStatus::kStopped) { + ShutdownImpl(); + status_ = ReaderStatus::kStopped; + } +} + +void ReaderBase::Start() { + std::lock_guard lock(mu_); + if (status_ != ReaderStatus::kRunning) { + StartImpl(); + status_ = ReaderStatus::kRunning; + } +} + +ReaderBase::~ReaderBase() { Shutdown(); } + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/reader.h b/paddle/fluid/framework/reader.h index 64d4ceab624312ed366d7e835072899f1f033a88..a8d04feb42456607159bcbede0574fe90dfe995c 100644 --- a/paddle/fluid/framework/reader.h +++ b/paddle/fluid/framework/reader.h @@ -15,6 +15,7 @@ #pragma once #include +#include #include #include "paddle/fluid/framework/ddim.h" @@ -24,61 +25,116 @@ namespace paddle { namespace framework { +enum ReaderStatus { kRunning, kStopped }; + class ReaderBase { public: - virtual void ReadNext(std::vector* out) = 0; + virtual void ReadNext(std::vector* out); + + virtual void Shutdown(); - virtual void ReInit() = 0; + virtual void Start(); + + // Return the readers which are the end of decorating chain. Basically + // they are readers just before read op. + std::unordered_set GetEndPoints(); virtual ~ReaderBase(); + + protected: + virtual void ReadNextImpl(std::vector* out) {} + + virtual void ShutdownImpl() {} + + virtual void StartImpl() {} + + ReaderStatus status_{kRunning}; + + mutable std::mutex mu_; + + private: + friend class DecoratedReader; + // These methods can be only invoked inside DecoratedReader to record the + // decorating chain. + void InsertDecoratedReader( + const std::shared_ptr& decorated_reader); + // A set of which readers that decorated this reader. 
+ std::vector> decorated_readers_; }; -class DecoratedReader : public ReaderBase { +class DecoratedReader : public ReaderBase, + public std::enable_shared_from_this { public: explicit DecoratedReader(const std::shared_ptr& reader) : ReaderBase(), reader_(reader) { PADDLE_ENFORCE_NOT_NULL(reader_); } - void ReInit() override { reader_->ReInit(); } + void RegisterDecorateChain() { + reader_->InsertDecoratedReader(shared_from_this()); + } protected: - std::shared_ptr reader_; -}; - -class FileReader : public ReaderBase { - public: - explicit FileReader(const std::vector& dims); - - void ReadNext(std::vector* out) override; + void ShutdownImpl() override { reader_->Shutdown(); } - protected: - virtual void ReadNextImpl(std::vector* out) = 0; + void StartImpl() override { reader_->Start(); } - private: - std::vector dims_; + std::shared_ptr reader_; }; +// FileReader is just a conceptual class. +class FileReader : public ReaderBase {}; + // The ReaderHolder is used as reader' unified wrapper, // making it easier to access different type reader in Variables. class ReaderHolder { public: - void Reset(ReaderBase* reader) { reader_.reset(reader); } + template + void Reset(const std::shared_ptr& reader) { + auto reader_base = std::dynamic_pointer_cast(reader); + PADDLE_ENFORCE_NOT_NULL(reader_base); + reader_ = reader_base; + } - std::shared_ptr Get() const { return reader_; } + const std::shared_ptr& Get() const { return reader_; } void ReadNext(std::vector* out) { PADDLE_ENFORCE_NOT_NULL(reader_); reader_->ReadNext(out); } - void ReInit() { + + void ResetAll() { + auto end_readers = reader_->GetEndPoints(); + for (auto* reader : end_readers) { + reader->Shutdown(); + } + for (auto* reader : end_readers) { + reader->Start(); + } + } + + void Shutdown() { PADDLE_ENFORCE_NOT_NULL(reader_); - reader_->ReInit(); + reader_->Shutdown(); } + void Start() { + PADDLE_ENFORCE_NOT_NULL(reader_); + reader_->Start(); + } + + operator const std::shared_ptr&() const { return this->reader_; } + private: std::shared_ptr reader_; }; +template +inline std::shared_ptr MakeDecoratedReader(ARGS&&... args) { + std::shared_ptr reader(new T(std::forward(args)...)); + reader->RegisterDecorateChain(); + return reader; +} + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/reader_test.cc b/paddle/fluid/framework/reader_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..f0d07cb7c1367576084b9494e7758103bb45d1e5 --- /dev/null +++ b/paddle/fluid/framework/reader_test.cc @@ -0,0 +1,52 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+#include "paddle/fluid/framework/reader.h"
+#include <memory>
+#include "gtest/gtest.h"
+
+class StubDecoratedReader : public paddle::framework::DecoratedReader {
+ public:
+  explicit StubDecoratedReader(const std::shared_ptr<ReaderBase> &reader)
+      : DecoratedReader(reader) {}
+
+  void ReadNextImpl(std::vector<paddle::framework::LoDTensor> *out) override {}
+};
+
+class StubRootReader : public paddle::framework::ReaderBase {
+ public:
+  void ReadNextImpl(std::vector<paddle::framework::LoDTensor> *out) override {}
+};
+
+TEST(READER, decorate_chain) {
+  auto root = std::make_shared<StubRootReader>();
+  auto end_point1 =
+      paddle::framework::MakeDecoratedReader<StubDecoratedReader>(root);
+  auto end_point2 =
+      paddle::framework::MakeDecoratedReader<StubDecoratedReader>(root);
+
+  {
+    auto endpoints = root->GetEndPoints();
+    ASSERT_EQ(endpoints.size(), 2U);
+    ASSERT_NE(endpoints.count(end_point1.get()), 0);
+    ASSERT_NE(endpoints.count(end_point2.get()), 0);
+  }
+
+  {
+    auto end_point3 =
+        paddle::framework::MakeDecoratedReader<StubDecoratedReader>(root);
+    ASSERT_EQ(root->GetEndPoints().size(), 3U);
+  }
+  { ASSERT_EQ(root->GetEndPoints().size(), 2U); }
+}
diff --git a/paddle/fluid/framework/var_type_inference_test.cc b/paddle/fluid/framework/var_type_inference_test.cc
index 14b81ddfecb8c996ae8709910c022a074e91eb3c..7842168f603885ce7dc87d2a01dfa4f544389faa 100644
--- a/paddle/fluid/framework/var_type_inference_test.cc
+++ b/paddle/fluid/framework/var_type_inference_test.cc
@@ -22,6 +22,17 @@ limitations under the License. */
 namespace paddle {
 namespace framework {
 
+class NOP : public OperatorBase {
+ public:
+  NOP(const std::string &type, const VariableNameMap &inputs,
+      const VariableNameMap &outputs, const AttributeMap &attrs)
+      : OperatorBase(type, inputs, outputs, attrs) {}
+
+ private:
+  void RunImpl(const Scope &scope,
+               const platform::Place &place) const override {}
+};
+
 class SumOpMaker : public OpProtoAndCheckerMaker {
  public:
   void Make() {
diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt
index 7071eea19c355c04711a11c224985be96c6589f4..1895aea7f98cb1ad12b2ce16545339252349ea37 100644
--- a/paddle/fluid/inference/CMakeLists.txt
+++ b/paddle/fluid/inference/CMakeLists.txt
@@ -1,4 +1,4 @@
-set(FLUID_CORE_MODULES proto_desc memory lod_tensor executor init)
+set(FLUID_CORE_MODULES proto_desc memory lod_tensor executor)
 
 # TODO(panyx0718): Should this be called paddle_fluid_inference_api_internal?
 cc_library(paddle_fluid_api
diff --git a/paddle/fluid/inference/analysis/README.md b/paddle/fluid/inference/analysis/README.md
index 6fd73958bc480fe3983b9622c03ac77fba9ec8a7..70adb4a974cc5f9911cb302840bbef7ec2591505 100644
--- a/paddle/fluid/inference/analysis/README.md
+++ b/paddle/fluid/inference/analysis/README.md
@@ -54,4 +54,5 @@ It can be used as a helper class that draws the modified graph after each pass.
 There is some helper legacy/function/class for analysis.
 
 - [dot.h](./dot.h) give a easy to use interface for generating `DOT` codes,
-- [graph_traits.h](./graph_traits.h) contains the graph traversal algorithms, it uses `iterator` to make the algorithms easy to share across different passes.
+- [graph_traits.h](./graph_traits.h) contains the interfaces of the graph traversal algorithms; it uses `iterator` to make the algorithms easy to share across different passes.
+There are some implementations in [data_flow_graph.cc](./data_flow_graph.cc), such as BFS and DFS.
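As a rough illustration of the shared traversal style that the README entry above describes (the `Node` type and callback API here are invented for the sketch, not the actual analysis classes), a BFS that multiple passes could reuse might look like:

```cpp
// Sketch: breadth-first traversal over a toy adjacency-list graph, factored
// out so that different passes can share it instead of re-implementing it.
#include <deque>
#include <unordered_set>
#include <vector>

struct Node {
  std::vector<Node*> outlinks;  // edges to downstream nodes
};

template <typename Visitor>
void BFS(Node* start, Visitor visit) {
  std::deque<Node*> queue{start};
  std::unordered_set<Node*> visited{start};
  while (!queue.empty()) {
    Node* n = queue.front();
    queue.pop_front();
    visit(n);
    for (Node* out : n->outlinks) {
      // insert().second is true only the first time a node is seen
      if (visited.insert(out).second) queue.push_back(out);
    }
  }
}
```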
diff --git a/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass_tester.cc b/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass_tester.cc index cfbbc284e491bd62a6108d6d14e7896a57d1b63e..cbca5abdd5fff1672ba5d47a8876489c54ad6947 100644 --- a/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass_tester.cc +++ b/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass_tester.cc @@ -27,7 +27,7 @@ TEST_F(DFG_Tester, Init) { DataFlowGraph graph; pass.Run(&graph); // Analysis is sensitive to ProgramDesc, careful to change the original model. - ASSERT_EQ(graph.nodes.size(), 37); + ASSERT_EQ(graph.nodes.size(), 37UL); pass.Finalize(); LOG(INFO) << '\n' << graph.DotString(); } diff --git a/paddle/fluid/inference/analysis/pass.h b/paddle/fluid/inference/analysis/pass.h index 25c566ebfa41abe3a247bc6c6e5583c8620a6abb..6b4dbb3bb5ddd9f15f26758beef1d1b5bbf49142 100644 --- a/paddle/fluid/inference/analysis/pass.h +++ b/paddle/fluid/inference/analysis/pass.h @@ -32,19 +32,6 @@ class Pass { public: Pass() = default; virtual ~Pass() = default; - // Virtual method overridden by subclasses to do only necessary initialization - // before any pass is run. - // virtual bool Initialize() { return false; } - // There is some passes such as FlowToDataFlowGraphPass that needs a - // ProgramDesc. Here use the native ProgramDesc ProtoBuf message, so that it - // only couple with the proto file. - // virtual bool Initialize(const framework::proto::ProgramDesc &desc) { return - // false; } - // There are some Passes such as DataFlowGraphToFluidPass that will output a - // ProgramDesc. - // virtual bool Initialize(framework::proto::ProgramDesc *desc) { return - // false; } - // Mutable Pass. virtual bool Initialize(Argument *argument) { return false; } // Readonly Pass. diff --git a/paddle/fluid/inference/analysis/subgraph_splitter_tester.cc b/paddle/fluid/inference/analysis/subgraph_splitter_tester.cc index 8134494f8bccb132f2ed7d1ba1fb615a298596ed..67dd4da54b95add703428e1fded61065f60353e8 100644 --- a/paddle/fluid/inference/analysis/subgraph_splitter_tester.cc +++ b/paddle/fluid/inference/analysis/subgraph_splitter_tester.cc @@ -82,7 +82,7 @@ TEST_F(DFG_Tester, Fuse) { // At least one nodes should be deleted. ASSERT_EQ(dfg.nodes.size(), count0 + 1); // added a new FunctionBlock - ASSERT_EQ(6UL, count1); + ASSERT_EQ(6, count1); } } // namespace analysis diff --git a/paddle/fluid/inference/io.cc b/paddle/fluid/inference/io.cc index 6b03ac7119b117e442e6af34c719c8a4f736bde9..181868977dd8f2568486ed0c4e1f260a69795896 100644 --- a/paddle/fluid/inference/io.cc +++ b/paddle/fluid/inference/io.cc @@ -20,7 +20,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/framework/feed_fetch_type.h" #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/operators/math/blas.h" +#include "paddle/fluid/platform/cpu_helper.h" #include "paddle/fluid/pybind/pybind.h" DEFINE_string(devices, "", "The devices to be used which is joined by comma."); @@ -33,7 +33,7 @@ namespace inference { void Init(const std::vector argv) { framework::InitGflags(argv); - operators::math::SetNumThreads(FLAGS_math_num_threads); + platform::SetNumThreads(FLAGS_math_num_threads); // init devices std::vector devices; std::string token; diff --git a/paddle/fluid/inference/io.h b/paddle/fluid/inference/io.h index caf599b1a68783f155cd134c2a29e9ffa49a0895..01b50b3670cb9da2e0be232a61ea6129dd83aa20 100644 --- a/paddle/fluid/inference/io.h +++ b/paddle/fluid/inference/io.h @@ -18,9 +18,9 @@ limitations under the License. */ #include #include #include "paddle/fluid/framework/executor.h" -#include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/platform/init.h" namespace paddle { namespace inference { diff --git a/paddle/fluid/inference/tests/book/test_inference_nlp.cc b/paddle/fluid/inference/tests/book/test_inference_nlp.cc index 03b0b6946339772ac535b3471d50fbd74554239d..5cc1db12bb71e428d493e7c6f718b1c6ed431858 100644 --- a/paddle/fluid/inference/tests/book/test_inference_nlp.cc +++ b/paddle/fluid/inference/tests/book/test_inference_nlp.cc @@ -19,7 +19,7 @@ limitations under the License. */ #include "gflags/gflags.h" #include "gtest/gtest.h" #include "paddle/fluid/inference/tests/test_helper.h" -#include "paddle/fluid/operators/math/blas.h" +#include "paddle/fluid/platform/cpu_helper.h" #ifdef PADDLE_WITH_MKLML #include #endif @@ -164,7 +164,7 @@ TEST(inference, nlp) { // only use 1 thread number per std::thread omp_set_dynamic(0); omp_set_num_threads(1); - paddle::operators::math::SetNumThreads(1); + paddle::platform::SetNumThreads(1); #endif double start_ms = 0, stop_ms = 0; diff --git a/paddle/fluid/memory/detail/buddy_allocator.cc b/paddle/fluid/memory/detail/buddy_allocator.cc index 4194ba197948b47003863196efdac1c08a7ae4f6..01a8501dd4abe73cbc71dc4c08734cae66df08ef 100644 --- a/paddle/fluid/memory/detail/buddy_allocator.cc +++ b/paddle/fluid/memory/detail/buddy_allocator.cc @@ -19,8 +19,9 @@ namespace paddle { namespace memory { namespace detail { -BuddyAllocator::BuddyAllocator(SystemAllocator* system_allocator, - size_t min_chunk_size, size_t max_chunk_size) +BuddyAllocator::BuddyAllocator( + std::unique_ptr system_allocator, size_t min_chunk_size, + size_t max_chunk_size) : min_chunk_size_(min_chunk_size), max_chunk_size_(max_chunk_size), cache_(system_allocator->UseGpu()), diff --git a/paddle/fluid/memory/detail/buddy_allocator.h b/paddle/fluid/memory/detail/buddy_allocator.h index 2f39d774d6fb6a2bc37877eb2f8b90bebd3cda28..f0c83efc23ce39c4fc89296d672e1e55751851bf 100644 --- a/paddle/fluid/memory/detail/buddy_allocator.h +++ b/paddle/fluid/memory/detail/buddy_allocator.h @@ -14,6 +14,7 @@ limitations under the License. 
*/ #pragma once +#include #include // NOLINT #include #include @@ -32,8 +33,8 @@ namespace detail { class BuddyAllocator { public: - BuddyAllocator(SystemAllocator* system_allocator, size_t min_chunk_size, - size_t max_chunk_size); + BuddyAllocator(std::unique_ptr system_allocator, + size_t min_chunk_size, size_t max_chunk_size); ~BuddyAllocator(); @@ -103,7 +104,7 @@ class BuddyAllocator { private: /*! Allocate CPU/GPU memory from system */ - SystemAllocator* system_allocator_; + std::unique_ptr system_allocator_; std::mutex mutex_; }; diff --git a/paddle/fluid/memory/malloc.cc b/paddle/fluid/memory/malloc.cc index bd98ed81899440a46415d30b6d74fec2dac4c155..7c800b3c164049244770ceb2070b177d8307e85e 100644 --- a/paddle/fluid/memory/malloc.cc +++ b/paddle/fluid/memory/malloc.cc @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include + #include "paddle/fluid/memory/malloc.h" #include "glog/logging.h" @@ -34,12 +36,15 @@ namespace memory { using BuddyAllocator = detail::BuddyAllocator; BuddyAllocator* GetCPUBuddyAllocator() { + static std::once_flag init_flag; static detail::BuddyAllocator* a = nullptr; - if (a == nullptr) { - a = new detail::BuddyAllocator(new detail::CPUAllocator, - platform::CpuMinChunkSize(), - platform::CpuMaxChunkSize()); - } + + std::call_once(init_flag, []() { + a = new detail::BuddyAllocator( + std::unique_ptr(new detail::CPUAllocator), + platform::CpuMinChunkSize(), platform::CpuMaxChunkSize()); + }); + return a; } @@ -68,27 +73,33 @@ size_t Used(platform::CPUPlace place) { #ifdef PADDLE_WITH_CUDA BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) { - static BuddyAllocator** as = NULL; - if (as == NULL) { + static std::once_flag init_flag; + static detail::BuddyAllocator** a_arr = nullptr; + + std::call_once(init_flag, [gpu_id]() { int gpu_num = platform::GetCUDADeviceCount(); - as = new BuddyAllocator*[gpu_num]; - for (int gpu = 0; gpu < gpu_num; gpu++) { - as[gpu] = nullptr; + PADDLE_ENFORCE(gpu_id < gpu_num, "gpu_id:%d should < gpu_num:%d", gpu_id, + gpu_num); + + a_arr = new BuddyAllocator*[gpu_num]; + for (int i = 0; i < gpu_num; i++) { + a_arr[i] = nullptr; + platform::SetDeviceId(i); + a_arr[i] = new BuddyAllocator( + std::unique_ptr(new detail::GPUAllocator(i)), + platform::GpuMinChunkSize(), platform::GpuMaxChunkSize()); + + VLOG(10) << "\n\nNOTE: each GPU device uses " + << FLAGS_fraction_of_gpu_memory_to_use * 100 + << "% of GPU memory.\n" + << "You can set GFlags environment variable '" + << "FLAGS_fraction_of_gpu_memory_to_use" + << "' to change the fraction of GPU usage.\n\n"; } - } + }); + platform::SetDeviceId(gpu_id); - if (!as[gpu_id]) { - as[gpu_id] = new BuddyAllocator(new detail::GPUAllocator(gpu_id), - platform::GpuMinChunkSize(), - platform::GpuMaxChunkSize()); - VLOG(10) << "\n\nNOTE: each GPU device use " - << FLAGS_fraction_of_gpu_memory_to_use * 100 - << "% of GPU memory.\n" - << "You can set GFlags environment variable '" - << "FLAGS_fraction_of_gpu_memory_to_use" - << "' to change the fraction of GPU usage.\n\n"; - } - return as[gpu_id]; + return a_arr[gpu_id]; } template <> @@ -125,12 +136,16 @@ void Free(platform::CUDAPlace place, void* p) { } BuddyAllocator* GetCUDAPinnedBuddyAllocator() { - static BuddyAllocator* ba = NULL; - if (ba == NULL) { - ba = new BuddyAllocator(new detail::CUDAPinnedAllocator, + static std::once_flag init_flag; + static BuddyAllocator* ba = nullptr; + +
std::call_once(init_flag, []() { + ba = new BuddyAllocator(std::unique_ptr( + new detail::CUDAPinnedAllocator), platform::CUDAPinnedMinChunkSize(), platform::CUDAPinnedMaxChunkSize()); - } + }); + return ba; } diff --git a/paddle/fluid/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc index 693bf973c2b8790d2c50cee9b86b365493e8c754..5912a1a17cbd29c3ebd83f37133c044f0905c8bd 100644 --- a/paddle/fluid/operators/batch_norm_op.cc +++ b/paddle/fluid/operators/batch_norm_op.cc @@ -216,6 +216,18 @@ class BatchNormKernel saved_mean_e.setZero(); saved_variance_e.setZero(); + EigenVectorArrayMap running_mean_arr( + mean_out->mutable_data(ctx.GetPlace()), C); + EigenVectorArrayMap running_var_arr( + variance_out->mutable_data(ctx.GetPlace()), C); + + if ((N * sample_size) == 1) { + LOG(WARNING) << "Only 1 element in normalization dimension, " + << "we skip the batch norm calculation, let y = x."; + framework::TensorCopySync(*x, ctx.GetPlace(), y); + return; + } + switch (data_layout) { case DataLayout::kNCHW: { ConstEigenArrayMap x_arr(x->data(), sample_size, N * C); @@ -247,10 +259,6 @@ class BatchNormKernel PADDLE_THROW("Unknown storage order: %s", data_layout_str); } - EigenVectorArrayMap running_mean_arr( - mean_out->mutable_data(ctx.GetPlace()), C); - EigenVectorArrayMap running_var_arr( - variance_out->mutable_data(ctx.GetPlace()), C); running_mean_arr = running_mean_arr * momentum + saved_mean_e * (1. - momentum); running_var_arr = @@ -427,6 +435,11 @@ class BatchNormGradKernel d_bias_arr.setZero(); d_scale_arr.setZero(); + if ((N * sample_size) == 1) { + framework::TensorCopySync(*d_y, ctx.GetPlace(), d_x); + return; + } + const auto scale_inv_var_nhw = scale_arr * inv_var_arr / (N * sample_size); switch (data_layout) { diff --git a/paddle/fluid/operators/batch_norm_op.cu.cc b/paddle/fluid/operators/batch_norm_op.cu.cc index 550dd32d36767f90e880415bfffaf01aeb623609..ca6cd8669352fd5814f25a04433ca97fe4abe9ff 100644 --- a/paddle/fluid/operators/batch_norm_op.cu.cc +++ b/paddle/fluid/operators/batch_norm_op.cu.cc @@ -72,6 +72,9 @@ class BatchNormKernel int N, C, H, W, D; ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); + auto *y = ctx.Output("Y"); + y->mutable_data(ctx.GetPlace()); + // ------------------- cudnn descriptors --------------------- cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; @@ -93,7 +96,7 @@ class BatchNormKernel mode_ = CUDNN_BATCHNORM_SPATIAL; #endif - VLOG(1) << "Setting descriptors."; + VLOG(3) << "Setting descriptors."; std::vector dims; std::vector strides; if (data_layout == DataLayout::kNCHW) { @@ -113,11 +116,6 @@ class BatchNormKernel const auto *scale = ctx.Input("Scale"); const auto *bias = ctx.Input("Bias"); - auto *y = ctx.Output("Y"); - - // alloc memory - y->mutable_data(ctx.GetPlace()); - auto &dev_ctx = ctx.template device_context(); auto handle = dev_ctx.cudnn_handle(); @@ -162,22 +160,28 @@ class BatchNormKernel functor(dev_ctx, saved_mean, static_cast>(0)); functor(dev_ctx, saved_variance, static_cast>(0)); - double this_factor = 1. 
- momentum; - - CUDNN_ENFORCE(platform::dynload::cudnnBatchNormalizationForwardTraining( - handle, mode_, CudnnDataType::kOne(), CudnnDataType::kZero(), - data_desc_, x->template data(), data_desc_, - y->template mutable_data(ctx.GetPlace()), bn_param_desc_, - scale->template data>(), - bias->template data>(), this_factor, - mean_out->template mutable_data>( - ctx.GetPlace()), - variance_out->template mutable_data>( - ctx.GetPlace()), - epsilon, saved_mean->template mutable_data>( - ctx.GetPlace()), - saved_variance->template mutable_data>( - ctx.GetPlace()))); + if ((N * H * W * D) == 1) { + LOG(WARNING) << "Only 1 element in normalization dimension, " + << "we skip the batch norm calculation, let y = x."; + framework::TensorCopySync(*x, ctx.GetPlace(), y); + } else { + double this_factor = 1. - momentum; + + CUDNN_ENFORCE(platform::dynload::cudnnBatchNormalizationForwardTraining( + handle, mode_, CudnnDataType::kOne(), CudnnDataType::kZero(), + data_desc_, x->template data(), data_desc_, + y->template mutable_data(ctx.GetPlace()), bn_param_desc_, + scale->template data>(), + bias->template data>(), this_factor, + mean_out->template mutable_data>( + ctx.GetPlace()), + variance_out->template mutable_data>( + ctx.GetPlace()), + epsilon, saved_mean->template mutable_data>( + ctx.GetPlace()), + saved_variance->template mutable_data>( + ctx.GetPlace()))); + } } // clean when exit. @@ -209,6 +213,25 @@ class BatchNormGradKernel int N, C, H, W, D; ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); + // init output + auto *d_x = ctx.Output(framework::GradVarName("X")); + auto *d_scale = ctx.Output(framework::GradVarName("Scale")); + auto *d_bias = ctx.Output(framework::GradVarName("Bias")); + + d_x->mutable_data(ctx.GetPlace()); + d_scale->mutable_data(ctx.GetPlace()); + d_bias->mutable_data(ctx.GetPlace()); + + auto &dev_ctx = ctx.template device_context(); + if ((N * H * W * D) == 1) { + framework::TensorCopySync(*d_y, ctx.GetPlace(), d_x); + math::SetConstant> + functor; + functor(dev_ctx, d_scale, static_cast>(0)); + functor(dev_ctx, d_bias, static_cast>(0)); + return; + } + PADDLE_ENFORCE_EQ(scale->dims().size(), 1UL); PADDLE_ENFORCE_EQ(scale->dims()[0], C); @@ -247,21 +270,11 @@ class BatchNormGradKernel CUDNN_ENFORCE(platform::dynload::cudnnDeriveBNTensorDescriptor( bn_param_desc_, data_desc_, mode_)); - // init output - auto *d_x = ctx.Output(framework::GradVarName("X")); - auto *d_scale = ctx.Output(framework::GradVarName("Scale")); - auto *d_bias = ctx.Output(framework::GradVarName("Bias")); - - d_x->mutable_data(ctx.GetPlace()); - d_scale->mutable_data(ctx.GetPlace()); - d_bias->mutable_data(ctx.GetPlace()); - const auto *saved_mean = ctx.Input("SavedMean"); const auto *saved_var = ctx.Input("SavedVariance"); const void *saved_mean_data = saved_mean->template data(); const void *saved_var_data = saved_var->template data(); - auto &dev_ctx = ctx.template device_context(); CUDNN_ENFORCE(platform::dynload::cudnnBatchNormalizationBackward( dev_ctx.cudnn_handle(), mode_, CudnnDataType::kOne(), CudnnDataType::kZero(), CudnnDataType::kOne(), diff --git a/paddle/fluid/operators/conditional_block_op.cc b/paddle/fluid/operators/conditional_block_op.cc index 8cc1d94260baccfe28d213b7e021956819e2e79e..580fde753816c30b188b8a99cc63fcbafde64e25 100644 --- a/paddle/fluid/operators/conditional_block_op.cc +++ b/paddle/fluid/operators/conditional_block_op.cc @@ -205,9 +205,10 @@ class ConditionalBlockGradInferShape : public framework::InferShapeBase { 
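To recap the `batch_norm_op` hunks above: both the CPU kernel (`N * sample_size == 1`) and the cuDNN kernel (`N * H * W * D == 1`) now short-circuit when the normalization dimension contains a single element, forwarding `y = x` in the forward pass and `d_x = d_y` with zeroed scale and bias gradients in the backward pass. The batch variance of one sample is zero, so normalizing would divide by (nearly) zero and wipe out the signal. A small numeric sketch of the failure the guard avoids, using hypothetical helper names:

```cpp
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

// Normalize one channel the way batch norm's forward pass does. With a
// single element, mean == x[0] and variance == 0, so the output collapses
// to 0 regardless of the input value.
std::vector<float> Normalize(const std::vector<float>& x, float epsilon) {
  float mean = 0.f;
  for (float v : x) mean += v;
  mean /= x.size();
  float var = 0.f;
  for (float v : x) var += (v - mean) * (v - mean);
  var /= x.size();
  std::vector<float> y(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    y[i] = (x[i] - mean) / std::sqrt(var + epsilon);
  }
  return y;
}

int main() {
  // Degenerate case the patch guards against: any single value maps to 0.
  std::printf("%f\n", Normalize({42.f}, 1e-5f)[0]);  // 0.000000
  // Regular case: two elements normalize to roughly +1 and -1.
  std::vector<float> y = Normalize({3.f, 1.f}, 1e-5f);
  std::printf("%f %f\n", y[0], y[1]);
  return 0;
}
```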
context->SetOutputsDim(framework::GradVarName("Params"), context->GetInputsDim("Params")); } - PADDLE_ENFORCE(context->HasOutputs(framework::GradVarName("X"))); - context->SetOutputsDim(framework::GradVarName("X"), - context->GetInputsDim("X")); + if (context->HasOutputs(framework::GradVarName("X"))) { + context->SetOutputsDim(framework::GradVarName("X"), + context->GetInputsDim("X")); + } } }; diff --git a/paddle/fluid/operators/cross_entropy_op.cc b/paddle/fluid/operators/cross_entropy_op.cc index d5e095f9cad95b74b8ff79e4a60ccbdf11512a5a..a3bec3da45136bca5cb2763e7ffd6b67703a1813 100644 --- a/paddle/fluid/operators/cross_entropy_op.cc +++ b/paddle/fluid/operators/cross_entropy_op.cc @@ -124,8 +124,7 @@ class CrossEntropyOpMaker : public framework::OpProtoAndCheckerMaker { "Tensor with shape [N x D]."); AddOutput("Y", "(Tensor, default Tensor), a 2-D tensor with shape " - "[N x 1]. The cross entropy loss.") - .Reuse("X"); + "[N x 1]. The cross entropy loss."); AddAttr("soft_label", "(bool, default false), a flag indicating whether to " "interpretate the given labels as soft labels.") diff --git a/paddle/fluid/operators/distributed/CMakeLists.txt b/paddle/fluid/operators/distributed/CMakeLists.txt index 312f80e09077f21a47985c1c936c2ac41c292ead..675ca36774beb72cc1e9b136ad0b18ce061689ac 100644 --- a/paddle/fluid/operators/distributed/CMakeLists.txt +++ b/paddle/fluid/operators/distributed/CMakeLists.txt @@ -5,7 +5,7 @@ if(WITH_GRPC) set(DISTRIBUTE_COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor") set_source_files_properties(grpc_serde_test.cc rpc_server_test.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) cc_test(serde_test SRCS grpc_serde_test.cc variable_response.cc DEPS grpc++_unsecure grpc_unsecure gpr - cares zlib protobuf sendrecvop_grpc SERIAL) + cares zlib protobuf sendrecvop_grpc scope profiler math_function SERIAL) cc_test(grpc_server_test SRCS rpc_server_test.cc DEPS sendrecvop_grpc grpc++_unsecure grpc_unsecure gpr cares zlib protobuf executor proto_desc lookup_table_op SERIAL) diff --git a/paddle/fluid/operators/elementwise_add_mkldnn_op.cc b/paddle/fluid/operators/elementwise_add_mkldnn_op.cc index 3f612256840825a75f49944ab97ff957d572a863..1a5427b39241b666eeaf12b173ea00443bb5f6e4 100644 --- a/paddle/fluid/operators/elementwise_add_mkldnn_op.cc +++ b/paddle/fluid/operators/elementwise_add_mkldnn_op.cc @@ -85,7 +85,7 @@ class EltwiseAddMKLDNNKernel : public framework::OpKernel { "Wrong layout/format set for X tensor"); PADDLE_ENFORCE(y->layout() == DataLayout::kMKLDNN && y->format() != memory::format::format_undef, - "Wrong layout/format set for X tensor"); + "Wrong layout/format set for Y tensor"); std::vector src_x_tz = framework::vectorize2int(x_dims); std::vector src_y_tz = framework::vectorize2int(y_dims); diff --git a/paddle/fluid/operators/math/CMakeLists.txt b/paddle/fluid/operators/math/CMakeLists.txt index 53a478c1ac0bdf8c0a3f3721161779ef10cb14f8..5571ff9a7151c1f971ad1805bf001815a651202b 100644 --- a/paddle/fluid/operators/math/CMakeLists.txt +++ b/paddle/fluid/operators/math/CMakeLists.txt @@ -54,13 +54,13 @@ math_library(softmax DEPS math_function) math_library(unpooling) math_library(vol2col) -cc_test(math_function_test SRCS math_function_test.cc) +cc_test(math_function_test SRCS math_function_test.cc DEPS math_function) cc_test(selected_rows_functor_test SRCS selected_rows_functor_test.cc DEPS selected_rows_functor) cc_test(im2col_test SRCS im2col_test.cc DEPS im2col) cc_test(vol2col_test SRCS 
vol2col_test.cc DEPS vol2col) cc_test(sequence_padding_test SRCS sequence_padding_test.cc DEPS sequence_padding) if(WITH_GPU) - nv_test(math_function_gpu_test SRCS math_function_test.cu) - nv_test(selected_rows_functor_gpu_test SRCS selected_rows_functor_test.cu DEPS selected_rows_functor) + nv_test(math_function_gpu_test SRCS math_function_test.cu DEPS math_function) + nv_test(selected_rows_functor_gpu_test SRCS selected_rows_functor_test.cu DEPS selected_rows_functor math_function) endif() cc_test(concat_test SRCS concat_test.cc DEPS concat) diff --git a/paddle/fluid/operators/math/blas.h b/paddle/fluid/operators/math/blas.h index a907d6a71b7a16983e601073b039b48406853a0b..9f6c1e5c35f02cd4bc729eea78b17fac017aa90e 100644 --- a/paddle/fluid/operators/math/blas.h +++ b/paddle/fluid/operators/math/blas.h @@ -23,41 +23,12 @@ #ifdef PADDLE_USE_OPENBLAS #include -#ifdef LAPACK_FOUND -#include -#endif -#endif - -#ifndef LAPACK_FOUND -extern "C" { -#include // NOLINT -int LAPACKE_sgetrf(int matrix_layout, int m, int n, float* a, int lda, - int* ipiv); -int LAPACKE_dgetrf(int matrix_layout, int m, int n, double* a, int lda, - int* ipiv); -int LAPACKE_sgetri(int matrix_layout, int n, float* a, int lda, - const int* ipiv); -int LAPACKE_dgetri(int matrix_layout, int n, double* a, int lda, - const int* ipiv); -} #endif namespace paddle { namespace operators { namespace math { -static void SetNumThreads(int num_threads) { -#ifdef PADDLE_USE_OPENBLAS - int real_num_threads = num_threads > 1 ? num_threads : 1; - openblas_set_num_threads(real_num_threads); -#elif defined(PADDLE_WITH_MKLML) - int real_num_threads = num_threads > 1 ? num_threads : 1; - platform::dynload::MKL_Set_Num_Threads(real_num_threads); -#else - PADDLE_ENFORCE(false, "To be implemented."); -#endif -} - /** * Matrix Descriptor of a memory buffer. * diff --git a/paddle/fluid/operators/math/math_function.h b/paddle/fluid/operators/math/math_function.h index 56a039d3cec7375517573c9429801945bf99741e..7ec78d9ef8e7ff966674b043c017f2fbedb77bb9 100644 --- a/paddle/fluid/operators/math/math_function.h +++ b/paddle/fluid/operators/math/math_function.h @@ -19,23 +19,6 @@ limitations under the License. 
*/ #ifdef PADDLE_USE_OPENBLAS #include -#ifdef LAPACK_FOUND -#include -#endif -#endif - -#ifndef LAPACK_FOUND -extern "C" { -#include // NOLINT -int LAPACKE_sgetrf(int matrix_layout, int m, int n, float* a, int lda, - int* ipiv); -int LAPACKE_dgetrf(int matrix_layout, int m, int n, double* a, int lda, - int* ipiv); -int LAPACKE_sgetri(int matrix_layout, int n, float* a, int lda, - const int* ipiv); -int LAPACKE_dgetri(int matrix_layout, int n, double* a, int lda, - const int* ipiv); -} #endif #include diff --git a/paddle/fluid/operators/merge_lod_tensor_op.cc b/paddle/fluid/operators/merge_lod_tensor_op.cc index a16861b3b77fc980ab932b9d88859b38ec36108b..2dc1467b0d4816d5cc0535eb62e936cf342a241c 100644 --- a/paddle/fluid/operators/merge_lod_tensor_op.cc +++ b/paddle/fluid/operators/merge_lod_tensor_op.cc @@ -44,8 +44,10 @@ class MergeLoDTensorOp : public framework::OperatorBase { scope.FindVar(Output("Out"))->GetMutable(); auto level = static_cast(Attr("level")); - auto &mask_dim = mask.dims(); + PADDLE_ENFORCE(in_true.numel() || in_false.numel(), + "Input(InTrue) or Input(InFalse) should be initialized."); + auto &mask_dim = mask.dims(); std::unique_ptr cpu_mask{new framework::LoDTensor()}; if (platform::is_cpu_place(mask.place())) { cpu_mask->ShareDataWith(mask); @@ -59,19 +61,27 @@ class MergeLoDTensorOp : public framework::OperatorBase { } auto *mask_data = cpu_mask->data(); - int rank = in_true.dims().size(); - platform::Place place = in_true.place(); - std::type_index data_type = in_true.type(); - framework::DDim in_true_dims = - framework::slice_ddim(in_true.dims(), 1, rank); - + platform::Place place = dev_place; int64_t batch_size = in_true.dims()[0] + in_false.dims()[0]; - auto in_true_dim_vec = framework::vectorize(in_true_dims); - in_true_dim_vec.insert(in_true_dim_vec.begin(), batch_size); + std::type_index data_type = + in_true.IsInitialized() ? in_true.type() : in_false.type(); + int rank; + framework::DDim in_dims; + if (in_true.IsInitialized()) { + rank = in_true.dims().size(); + in_dims = framework::slice_ddim(in_true.dims(), 1, rank); + } else { + rank = in_false.dims().size(); + in_dims = framework::slice_ddim(in_false.dims(), 1, rank); + } + + auto in_dim_vec = framework::vectorize(in_dims); + in_dim_vec.insert(in_dim_vec.begin(), batch_size); - framework::DDim out_dims = framework::make_ddim(in_true_dim_vec); + framework::DDim out_dims = framework::make_ddim(in_dim_vec); out->Resize(out_dims); + out->mutable_data(place, data_type); auto *out_lod = out->mutable_lod(); diff --git a/paddle/fluid/operators/nccl_op_test.cu.cc b/paddle/fluid/operators/nccl_op_test.cu.cc index ef54d79fdf2becde98c68044d14bd4347773b975..d5fb7a12e5d9757f3e639f6de7f0129bd531e2a1 100644 --- a/paddle/fluid/operators/nccl_op_test.cu.cc +++ b/paddle/fluid/operators/nccl_op_test.cu.cc @@ -19,7 +19,6 @@ limitations under the License. */ #include // NOLINT #include -#include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/op_desc.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/program_desc.h" @@ -27,6 +26,7 @@ limitations under the License. 
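The `merge_lod_tensor_op` hunk above stops assuming that `Input(InTrue)` always holds data: when the mask routes every row to one branch, the other branch's tensor stays uninitialized, so the op now enforces that at least one input has elements and takes the element type and per-row shape from whichever one does. A reduced sketch of that selection logic with a simplified stand-in tensor type (the names here are illustrative, not Paddle's):

```cpp
#include <cassert>
#include <cstdio>
#include <vector>

// Simplified stand-in for a LoDTensor: dims[0] is the batch dimension.
struct Tensor {
  std::vector<int> dims;
  bool initialized() const { return !dims.empty(); }
};

// Mirrors the patched logic: derive the output shape from whichever branch
// output actually holds data, then sum the batch dimensions.
std::vector<int> MergedDims(const Tensor& in_true, const Tensor& in_false) {
  // Equivalent of PADDLE_ENFORCE(in_true.numel() || in_false.numel(), ...).
  assert(in_true.initialized() || in_false.initialized());
  const Tensor& src = in_true.initialized() ? in_true : in_false;
  std::vector<int> out = src.dims;  // [batch, d1, d2, ...]
  out[0] = (in_true.initialized() ? in_true.dims[0] : 0) +
           (in_false.initialized() ? in_false.dims[0] : 0);
  return out;
}

int main() {
  Tensor in_true{{3, 4}};  // every row went to the "true" branch
  Tensor in_false{};       // never written, hence uninitialized
  std::vector<int> d = MergedDims(in_true, in_false);
  std::printf("[%d, %d]\n", d[0], d[1]);  // [3, 4]
  return 0;
}
```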
*/ #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/init.h" #include "paddle/fluid/platform/place.h" USE_NO_KERNEL_OP(ncclInit); diff --git a/paddle/fluid/operators/read_op.cc b/paddle/fluid/operators/read_op.cc index 695d7ea83df952d9f2212cc0aaca5c90c7b47ee7..65fcce8bb019965a805ad09d50be0aba64e4f24e 100644 --- a/paddle/fluid/operators/read_op.cc +++ b/paddle/fluid/operators/read_op.cc @@ -92,9 +92,13 @@ class ReadOpMaker : public framework::OpProtoAndCheckerMaker { void Make() override { AddInput("Reader", "(ReaderHolder) The executed reader."); AddOutput("Out", "(LoDTensor) The output data.").AsDuplicable(); - AddAttr("throw_eof_exp", - "If set true, an exception will be thrown when the Reader " - "yields empty (which means there is no next data).") + AddAttr( + "throw_eof_exp", + "If set true, an exception will be thrown when the Reader " + "yields empty (which means there is no next data).\n" + "NOTES: This flag must be true always. It will be set to false" + " only when the data-balance is enabled in ParallelExecutor" + " and it is set by ParallelExecutor instance, not users.") .SetDefault(true); AddComment(R"DOC( Read Operator diff --git a/paddle/fluid/operators/reader/CMakeLists.txt b/paddle/fluid/operators/reader/CMakeLists.txt index a39c8a00538875e4e3284898230a6cb0693b7a12..9dbcc35e6f5bb01c159980a49dd4b4c9d37d2aab 100644 --- a/paddle/fluid/operators/reader/CMakeLists.txt +++ b/paddle/fluid/operators/reader/CMakeLists.txt @@ -22,7 +22,6 @@ reader_library(create_batch_reader_op SRCS create_batch_reader_op.cc) reader_library(create_recordio_file_reader_op SRCS create_recordio_file_reader_op.cc) reader_library(create_double_buffer_reader_op SRCS create_double_buffer_reader_op.cc) reader_library(create_multi_pass_reader_op SRCS create_multi_pass_reader_op.cc) -reader_library(create_threaded_reader_op SRCS create_threaded_reader_op.cc) reader_library(create_custom_reader_op SRCS create_custom_reader_op.cc) reader_library(create_py_reader_op SRCS create_py_reader_op.cc) diff --git a/paddle/fluid/operators/reader/create_batch_reader_op.cc b/paddle/fluid/operators/reader/create_batch_reader_op.cc index ecbae3894d551186f53625a6cc9cfdb36adc8d2d..1dbafd23e92732bdaf0d263a01e267227786d839 100644 --- a/paddle/fluid/operators/reader/create_batch_reader_op.cc +++ b/paddle/fluid/operators/reader/create_batch_reader_op.cc @@ -20,15 +20,19 @@ namespace reader { class BatchReader : public framework::DecoratedReader { public: - BatchReader(const std::shared_ptr& reader, int batch_size) - : DecoratedReader(reader), batch_size_(batch_size) { + BatchReader(const std::shared_ptr& reader, int batch_size, + bool discard_leftover) + : DecoratedReader(reader), + batch_size_(batch_size), + discard_leftover_(discard_leftover) { buffer_.reserve(batch_size_); } - void ReadNext(std::vector* out) override; + void ReadNextImpl(std::vector* out) override; private: int batch_size_; + bool discard_leftover_; std::vector> buffer_; }; @@ -46,8 +50,9 @@ class CreateBatchReaderOp : public framework::OperatorBase { } const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) ->Get(); - out->Reset( - new BatchReader(underlying_reader.Get(), Attr("batch_size"))); + out->Reset(framework::MakeDecoratedReader( + underlying_reader, Attr("batch_size"), + Attr("discard_leftover"))); } }; @@ -57,6 +62,10 @@ class CreateBatchReaderOpMaker : public DecoratedReaderMakerBase { AddAttr("batch_size", "How 
many instances the batch reader yields each time.") .GreaterThan(0); + AddAttr("discard_leftover", + "If true, the leftover instances that are not enough for a " + "new batch will be discarded.") + .SetDefault(true); AddComment(R"DOC( CreateBatchReader Operator @@ -66,7 +75,7 @@ class CreateBatchReaderOpMaker : public DecoratedReaderMakerBase { } }; -void BatchReader::ReadNext(std::vector* out) { +void BatchReader::ReadNextImpl(std::vector* out) { buffer_.clear(); buffer_.reserve(batch_size_); for (int i = 0; i < batch_size_; ++i) { @@ -77,6 +86,9 @@ void BatchReader::ReadNext(std::vector* out) { break; } } + if (discard_leftover_ && buffer_.size() < batch_size_) { + buffer_.clear(); + } // Concat instances out->clear(); if (buffer_.empty()) { diff --git a/paddle/fluid/operators/reader/create_custom_reader_op.cc b/paddle/fluid/operators/reader/create_custom_reader_op.cc index a75c6d4c567ac93f37b38070421133af305f20a3..85394b336fc967fc6973131fbedda4c796825185 100644 --- a/paddle/fluid/operators/reader/create_custom_reader_op.cc +++ b/paddle/fluid/operators/reader/create_custom_reader_op.cc @@ -33,7 +33,7 @@ class CustomReader : public framework::DecoratedReader { source_var_names_(source_var_names), sink_var_names_(sink_var_names) {} - void ReadNext(std::vector* out) override; + void ReadNextImpl(std::vector* out) override; private: const framework::ProgramDesc program_; @@ -60,10 +60,10 @@ class CreateCustomReaderOp : public framework::OperatorBase { } const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) ->Get(); - out->Reset( - new CustomReader(underlying_reader.Get(), *sub_block, - Attr>("source_var_names"), - Attr>("sink_var_names"))); + out->Reset(framework::MakeDecoratedReader( + underlying_reader, *sub_block, + Attr>("source_var_names"), + Attr>("sink_var_names"))); } }; @@ -143,7 +143,7 @@ class CustomReaderInferVarType : public framework::VarTypeInference { } }; -void CustomReader::ReadNext(std::vector* out) { +void CustomReader::ReadNextImpl(std::vector* out) { out->clear(); std::vector underlying_outs; reader_->ReadNext(&underlying_outs); diff --git a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc index 5f734489a81764875988f440696682570ff4d1d7..7b14370f4fd64e8fd5b8d9038006494b88d671dc 100644 --- a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc +++ b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc @@ -23,13 +23,13 @@ namespace reader { // 'Double buffer' means we shall maintain two batches of input data at the same // time. So the kCacheSize should be at least 2. -static constexpr size_t kCacheSize = 5; +static constexpr size_t kCacheSize = 3; // There will be two batches out of the channel during training: // 1. the one waiting to be sent to the channel // 2. the one just received from the channel, which is also being used by // subsequent operators.
// So the channel size should be kCacheSize - 2 -static constexpr size_t kChannelSize = 3; // kCacheSize - 2 +static constexpr size_t kChannelSize = 1; // kCacheSize - 2 class DoubleBufferReader : public framework::DecoratedReader { public: @@ -50,12 +50,21 @@ class DoubleBufferReader : public framework::DecoratedReader { StartPrefetcher(); } - void ReadNext(std::vector* out) override; - void ReInit() override; + void ReadNextImpl(std::vector* out) override; ~DoubleBufferReader() { EndPrefetcher(); } private: + void ShutdownImpl() override { + EndPrefetcher(); + reader_->Shutdown(); + } + + void StartImpl() override { + reader_->Start(); + StartPrefetcher(); + } + void StartPrefetcher() { channel_ = new reader::BlockingQueue(kChannelSize); prefetcher_ = std::thread([this] { PrefetchThreadFunc(); }); @@ -109,7 +118,8 @@ class CreateDoubleBufferReaderOp : public framework::OperatorBase { place = platform::CUDAPlace(static_cast(num)); } - out->Reset(new DoubleBufferReader(underlying_reader.Get(), place)); + out->Reset(framework::MakeDecoratedReader( + underlying_reader, place)); } }; @@ -136,7 +146,7 @@ class CreateDoubleBufferReaderOpMaker : public DecoratedReaderMakerBase { } }; -void DoubleBufferReader::ReadNext(std::vector* out) { +void DoubleBufferReader::ReadNextImpl(std::vector* out) { size_t cached_tensor_id; if (channel_->Receive(&cached_tensor_id)) { if (platform::is_gpu_place(place_)) { @@ -150,12 +160,6 @@ void DoubleBufferReader::ReadNext(std::vector* out) { } } -void DoubleBufferReader::ReInit() { - reader_->ReInit(); - EndPrefetcher(); - StartPrefetcher(); -} - void DoubleBufferReader::PrefetchThreadFunc() { VLOG(5) << "A new prefetch thread starts."; size_t cached_tensor_id = 0; diff --git a/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc b/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc index 19b54110b9aeece33b8d6c73612ae0e12dbfafbd..0a225597d34f43c7fb82aeae2552cdf16c8ba566 100644 --- a/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc +++ b/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc @@ -24,23 +24,22 @@ class MultiPassReader : public framework::DecoratedReader { MultiPassReader(const std::shared_ptr& reader, int pass_num) : DecoratedReader(reader), pass_num_(pass_num), pass_count_(0) {} - void ReadNext(std::vector* out) override { + void ReadNextImpl(std::vector* out) override { reader_->ReadNext(out); - if (out->empty()) { + if (out->empty() && pass_count_ < pass_num_ - 1) { + reader_->Shutdown(); + reader_->Start(); + reader_->ReadNext(out); ++pass_count_; - if (pass_count_ < pass_num_) { - reader_->ReInit(); - reader_->ReadNext(out); - } } } - void ReInit() override { + private: + void StartImpl() override { pass_count_ = 0; - reader_->ReInit(); + reader_->Start(); } - private: int pass_num_; mutable int pass_count_; }; @@ -60,7 +59,8 @@ class CreateMultiPassReaderOp : public framework::OperatorBase { const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) ->Get(); int pass_num = Attr("pass_num"); - out->Reset(new MultiPassReader(underlying_reader.Get(), pass_num)); + out->Reset(framework::MakeDecoratedReader( + underlying_reader, pass_num)); } }; diff --git a/paddle/fluid/operators/reader/create_py_reader_op.cc b/paddle/fluid/operators/reader/create_py_reader_op.cc index af1b2fbe14b0368c2959623aa70c240fc25bdef3..833776f56eef0ffb2ae5e963919f0482bcd511b8 100644 --- a/paddle/fluid/operators/reader/create_py_reader_op.cc +++ b/paddle/fluid/operators/reader/create_py_reader_op.cc @@ -19,9 +19,10 @@
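The double-buffer comment block above carries the sizing argument: out of `kCacheSize` cached batches, one is always being refilled by the prefetch thread and one is held by downstream operators, so at most `kCacheSize - 2` may sit in the channel; shrinking the cache from 5 to 3 therefore shrinks the channel from 3 to 1. A toy bounded queue demonstrating that invariant (a generic stand-in, not Paddle's `reader::BlockingQueue`):

```cpp
#include <condition_variable>
#include <cstddef>
#include <cstdio>
#include <mutex>
#include <queue>
#include <thread>

// Capacity-bounded queue standing in for reader::BlockingQueue.
template <typename T>
class BlockingQueue {
 public:
  explicit BlockingQueue(std::size_t capacity) : capacity_(capacity) {}

  void Send(T v) {
    std::unique_lock<std::mutex> lock(mu_);
    not_full_.wait(lock, [&] { return queue_.size() < capacity_; });
    queue_.push(std::move(v));
    not_empty_.notify_one();
  }

  T Receive() {
    std::unique_lock<std::mutex> lock(mu_);
    not_empty_.wait(lock, [&] { return !queue_.empty(); });
    T v = std::move(queue_.front());
    queue_.pop();
    not_full_.notify_one();
    return v;
  }

 private:
  const std::size_t capacity_;
  std::mutex mu_;
  std::condition_variable not_full_, not_empty_;
  std::queue<T> queue_;
};

int main() {
  const std::size_t kCacheSize = 3;
  // One slot is being refilled and one is in use downstream, so only
  // kCacheSize - 2 batches may wait in the channel at any moment.
  BlockingQueue<int> channel(kCacheSize - 2);
  std::thread prefetcher([&] {
    for (int batch = 0; batch < 5; ++batch) channel.Send(batch);
  });
  for (int i = 0; i < 5; ++i) std::printf("batch %d\n", channel.Receive());
  prefetcher.join();
  return 0;
}
```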
namespace paddle { namespace operators { namespace reader { -class PyReader : public framework::ReaderBase { +class PyReader : public framework::FileReader { public: - explicit PyReader(const std::shared_ptr& queue) { + explicit PyReader(const std::shared_ptr& queue) + : framework::FileReader() { PADDLE_ENFORCE(queue != nullptr, "LoDTensorBlockingQueue must not be null"); queue_ = queue; } @@ -32,7 +33,9 @@ class PyReader : public framework::ReaderBase { if (!success) out->clear(); } - void ReInit() override { queue_->ReOpen(); } + void Shutdown() override { queue_->Close(); } + + void Start() override { queue_->ReOpen(); } private: std::shared_ptr queue_; @@ -51,14 +54,14 @@ class CreatePyReaderOp : public framework::OperatorBase { const std::string& queue_name = Input("blocking_queue"); auto* queue_holder_var = scope.FindVar(queue_name); - PADDLE_ENFORCE( - queue_holder_var != nullptr, + PADDLE_ENFORCE_NOT_NULL( + queue_holder_var, "No LoDTensorBlockingQueueHolder variable with name %s found", queue_name); auto* queue_holder = queue_holder_var->template GetMutable(); - out->Reset(new PyReader(queue_holder->GetQueue())); + out->Reset(std::make_shared(queue_holder->GetQueue())); } }; diff --git a/paddle/fluid/operators/reader/create_random_data_generator_op.cc b/paddle/fluid/operators/reader/create_random_data_generator_op.cc index 5b7e8a063a034f0be056065826fca0fe807bc9a7..e5c116dfcd71ef40597ca19d1da0b51038baaad1 100644 --- a/paddle/fluid/operators/reader/create_random_data_generator_op.cc +++ b/paddle/fluid/operators/reader/create_random_data_generator_op.cc @@ -19,11 +19,11 @@ namespace operators { namespace reader { template -class RandomDataGenerator : public framework::ReaderBase { +class RandomDataGenerator : public framework::FileReader { public: RandomDataGenerator(const std::vector& shapes, float low, float high) - : framework::ReaderBase(), low_(low), high_(high), shapes_(shapes) { + : framework::FileReader(), low_(low), high_(high), shapes_(shapes) { PADDLE_ENFORCE_LE(low, high, "'low' shouldn't be greater than 'high'.(%f vs %f)", low, high); @@ -32,7 +32,7 @@ class RandomDataGenerator : public framework::ReaderBase { dist_ = std::uniform_real_distribution(low_, high_); } - void ReadNext(std::vector* out) override { + void ReadNextImpl(std::vector* out) override { out->clear(); out->reserve(shapes_.size()); for (const framework::DDim& shape : shapes_) { @@ -51,8 +51,6 @@ class RandomDataGenerator : public framework::ReaderBase { } } - void ReInit() override { return; } - private: float low_; float high_; @@ -79,8 +77,8 @@ class CreateRandomDataGeneratorOp : public framework::OperatorBase { std::vector shapes = RestoreShapes(shape_concat, ranks); auto* out = scope.FindVar(Output("Out")) ->template GetMutable(); - out->Reset(new RandomDataGenerator(shapes, Attr("low"), - Attr("high"))); + out->Reset(std::make_shared>( + shapes, Attr("low"), Attr("high"))); } }; diff --git a/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc b/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc index 559827f08494af6730aafa1e67c46a47c21dedf6..b32f09b22524c8b67ce57cc6022ef46efc2e828d 100644 --- a/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc +++ b/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc @@ -21,10 +21,8 @@ namespace reader { template class RecordIOFileReader : public framework::FileReader { public: - explicit RecordIOFileReader(const std::string& filename, - const std::vector& dims) - : FileReader(dims), - scanner_(filename), + 
explicit RecordIOFileReader(const std::string& filename) + : scanner_(filename), dev_ctx_(*platform::DeviceContextPool::Instance().Get( platform::CPUPlace())) { if (ThreadSafe) { @@ -33,8 +31,6 @@ class RecordIOFileReader : public framework::FileReader { LOG(INFO) << "Creating file reader" << filename; } - void ReInit() override { scanner_.Reset(); } - protected: void ReadNextImpl(std::vector* out) override { if (ThreadSafe) { @@ -45,6 +41,8 @@ class RecordIOFileReader : public framework::FileReader { } } + void StartImpl() override { scanner_.Reset(); } + private: std::unique_ptr mutex_; recordio::Scanner scanner_; @@ -58,20 +56,11 @@ class CreateRecordIOReaderOp : public framework::OperatorBase { private: void RunImpl(const framework::Scope& scope, const platform::Place& dev_place) const override { - const auto& shape_concat = Attr>("shape_concat"); - const auto& ranks = Attr>("ranks"); - PADDLE_ENFORCE(!shape_concat.empty() && !ranks.empty()); - PADDLE_ENFORCE_EQ(std::accumulate(ranks.begin(), ranks.end(), 0), - static_cast(shape_concat.size()), - "The accumulate of all ranks should be equal to the " - "shape concat's length."); std::string filename = Attr("filename"); - auto* out = scope.FindVar(Output("Out")) ->template GetMutable(); - out->Reset(new RecordIOFileReader( - filename, RestoreShapes(shape_concat, ranks))); + out->Reset(std::make_shared>(filename)); } }; diff --git a/paddle/fluid/operators/reader/create_shuffle_reader_op.cc b/paddle/fluid/operators/reader/create_shuffle_reader_op.cc index 57e8e21214b7c99e52550fe51a67c9b5201cb46f..4b308abc290c10a8a5846672e719b503dfc79b21 100644 --- a/paddle/fluid/operators/reader/create_shuffle_reader_op.cc +++ b/paddle/fluid/operators/reader/create_shuffle_reader_op.cc @@ -34,7 +34,7 @@ class ShuffleReader : public framework::DecoratedReader { ReloadBuffer(); } - void ReadNext(std::vector* out) override { + void ReadNextImpl(std::vector* out) override { out->clear(); if (iteration_pos_ >= buffer_.size()) { VLOG(10) << "Resetting shuffle buffer"; @@ -47,6 +47,17 @@ class ShuffleReader : public framework::DecoratedReader { } private: + void ShutdownImpl() override { + buffer_.clear(); + iteration_pos_ = 0; + reader_->Shutdown(); + } + + void StartImpl() override { + reader_->Start(); + ReloadBuffer(); + } + void ReloadBuffer() { buffer_.clear(); buffer_.reserve(buffer_size_); @@ -86,9 +97,8 @@ class CreateShuffleReaderOp : public framework::OperatorBase { } const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) ->Get(); - out->Reset( - new ShuffleReader(underlying_reader.Get(), - static_cast(Attr("buffer_size")))); + out->Reset(framework::MakeDecoratedReader( + underlying_reader, static_cast(Attr("buffer_size")))); } }; diff --git a/paddle/fluid/operators/reader/create_threaded_reader_op.cc b/paddle/fluid/operators/reader/create_threaded_reader_op.cc deleted file mode 100644 index 3798015146f4ffb085aa82e23ca3f1fb3c5cf5a4..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/reader/create_threaded_reader_op.cc +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/fluid/operators/detail/safe_ref.h" -#include "paddle/fluid/operators/reader/reader_op_registry.h" - -namespace paddle { -namespace operators { -namespace reader { - -class ThreadedReader : public framework::DecoratedReader { - public: - explicit ThreadedReader(const std::shared_ptr& reader) - : DecoratedReader(reader) {} - - void ReadNext(std::vector* out) override { - std::lock_guard lock(mutex_); - reader_->ReadNext(out); - } - - void ReInit() override { reader_->ReInit(); } - - private: - std::mutex mutex_; -}; - -class CreateThreadedReaderOp : public framework::OperatorBase { - public: - using framework::OperatorBase::OperatorBase; - - private: - void RunImpl(const framework::Scope& scope, - const platform::Place& dev_place) const override { - auto* out = detail::Ref(scope.FindVar(Output("Out"))) - .GetMutable(); - if (out->Get() != nullptr) { - return; - } - const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) - ->Get(); - out->Reset(new ThreadedReader(underlying_reader.Get())); - } -}; - -class CreateThreadedReaderOpMaker : public DecoratedReaderMakerBase { - protected: - void Apply() override { - AddComment(R"DOC( - CreateThreadedReader Operator - - This operator creates a threaded reader. A threaded reader's - 'ReadNext()' can be invoked by several threads at the same - time. - When the attribute 'safe_mode' is true, the threaded reader's - 'ReInit()' is disabled to avoid unexpected bugs in multi-thread - environment. 
- )DOC"); - } -}; - -} // namespace reader -} // namespace operators -} // namespace paddle - -namespace reader = paddle::operators::reader; -REGISTER_DECORATED_READER_OPERATOR(create_threaded_reader, - reader::CreateThreadedReaderOp, - reader::CreateThreadedReaderOpMaker); diff --git a/paddle/fluid/operators/reader/open_files_op.cc b/paddle/fluid/operators/reader/open_files_op.cc index 31e5d81e55ed9703eb3a9ef2595fa2a280f1a734..9a8d203672fa2d560440d063d93fa5f8523690ef 100644 --- a/paddle/fluid/operators/reader/open_files_op.cc +++ b/paddle/fluid/operators/reader/open_files_op.cc @@ -23,24 +23,26 @@ namespace reader { class MultiFileReader : public framework::ReaderBase { public: - MultiFileReader(const std::vector& file_names, - const std::vector& dims, size_t thread_num, + MultiFileReader(const std::vector& file_names, size_t thread_num, size_t buffer_size) : buffer_size_(buffer_size) { readers_.reserve(file_names.size()); for (const std::string& f_name : file_names) { - readers_.emplace_back(CreateReaderByFileName(f_name, dims)); + readers_.emplace_back(CreateReaderByFileName(f_name)); } prefetchers_.resize(thread_num); StartNewScheduler(); } - void ReadNext(std::vector* out) override; - void ReInit() override; + void ReadNextImpl(std::vector* out) override; ~MultiFileReader() { EndScheduler(); } private: + void ShutdownImpl() override { EndScheduler(); } + + void StartImpl() override { StartNewScheduler(); } + void StartNewScheduler(); void EndScheduler(); void ScheduleThreadFunc(); @@ -55,17 +57,12 @@ class MultiFileReader : public framework::ReaderBase { reader::BlockingQueue>* buffer_; }; -void MultiFileReader::ReadNext(std::vector* out) { +void MultiFileReader::ReadNextImpl(std::vector* out) { if (!buffer_->Receive(out)) { out->clear(); } } -void MultiFileReader::ReInit() { - EndScheduler(); - StartNewScheduler(); -} - void MultiFileReader::StartNewScheduler() { size_t thread_num = prefetchers_.size(); waiting_reader_idx_ = new reader::BlockingQueue(readers_.size()); @@ -120,7 +117,7 @@ void MultiFileReader::ScheduleThreadFunc() { } } } - // If users invoke ReInit() when scheduler is running, it will close the + // If users invoke Shutdown() when scheduler is running, it will close the // 'avaiable_thread_idx_' and prefecther threads have no way to tell scheduler // to release their resource. So a check is needed before scheduler ends. 
for (auto& p : prefetchers_) { @@ -138,7 +135,8 @@ void MultiFileReader::PrefetchThreadFunc(size_t reader_idx, size_t thread_idx) { std::vector ins; reader->ReadNext(&ins); if (ins.empty()) { - reader->ReInit(); + reader->Shutdown(); + reader->Start(); break; } try { @@ -180,9 +178,8 @@ class OpenFilesOp : public framework::OperatorBase { auto* out = scope.FindVar(Output("Out")) ->template GetMutable(); - out->Reset(new MultiFileReader(file_names, - RestoreShapes(shape_concat, ranks), - thread_num, buffer_size)); + out->Reset( + std::make_shared(file_names, thread_num, buffer_size)); } }; diff --git a/paddle/fluid/operators/reader/reader_op_registry.cc b/paddle/fluid/operators/reader/reader_op_registry.cc index e11256a49ffa6adc9410376cc8a71fa017df7e9c..b82aab1214992be73d876a42424234e3cea46455 100644 --- a/paddle/fluid/operators/reader/reader_op_registry.cc +++ b/paddle/fluid/operators/reader/reader_op_registry.cc @@ -39,7 +39,7 @@ std::unordered_map& FileReaderRegistry() { } std::unique_ptr CreateReaderByFileName( - const std::string& file_name, const std::vector& dims) { + const std::string& file_name) { size_t separator_pos = file_name.find_last_of(kFileFormatSeparator); PADDLE_ENFORCE_NE(separator_pos, std::string::npos, "File name illegal! A legal file name should be like: " @@ -49,7 +49,7 @@ std::unique_ptr CreateReaderByFileName( auto itor = FileReaderRegistry().find(filetype); PADDLE_ENFORCE(itor != FileReaderRegistry().end(), "No file reader registered for '%s' format.", filetype); - framework::ReaderBase* reader = (itor->second)(file_name, dims); + framework::ReaderBase* reader = (itor->second)(file_name); return std::unique_ptr(reader); } diff --git a/paddle/fluid/operators/reader/reader_op_registry.h b/paddle/fluid/operators/reader/reader_op_registry.h index 244bf15f068a47efc29ee54492cdbdeb10025020..25c3e7d77b788d38daf6dee1fc79e5c1c97e8842 100644 --- a/paddle/fluid/operators/reader/reader_op_registry.h +++ b/paddle/fluid/operators/reader/reader_op_registry.h @@ -25,22 +25,21 @@ namespace reader { static constexpr char kFileFormatSeparator[] = "."; -using FileReaderCreator = std::function&)>; +using FileReaderCreator = + std::function; std::unordered_map& FileReaderRegistry(); template int RegisterFileReader(const std::string& filetype) { - FileReaderRegistry()[filetype] = []( - const std::string& fn, const std::vector& dims) { - return new Reader(fn, dims); + FileReaderRegistry()[filetype] = [](const std::string& fn) { + return new Reader(fn); }; return 0; } std::unique_ptr CreateReaderByFileName( - const std::string& file_name, const std::vector& dims); + const std::string& file_name); extern std::vector RestoreShapes( const std::vector& shape_concat, const std::vector& ranks); diff --git a/paddle/fluid/platform/CMakeLists.txt b/paddle/fluid/platform/CMakeLists.txt index b29035bafd34fa81dc6b59691142fe74439202b8..20037d0764056c2a093af801c9cc1eb788dd46d6 100644 --- a/paddle/fluid/platform/CMakeLists.txt +++ b/paddle/fluid/platform/CMakeLists.txt @@ -28,6 +28,9 @@ cc_test(place_test SRCS place_test.cc DEPS place glog gflags) add_subdirectory(dynload) +cc_library(cpu_helper SRCS cpu_helper.cc DEPS cblas enforce) +cc_test(cpu_helper_test SRCS cpu_helper_test.cc DEPS cpu_helper) + IF(WITH_GPU) set(GPU_CTX_DEPS dynload_cuda dynamic_loader) ELSE() @@ -42,10 +45,12 @@ ENDIF() # memcpy depends on device_context, here add deps individually for # avoiding cycle dependencies -cc_library(device_context SRCS device_context.cc DEPS malloc - place eigen3 ${GPU_CTX_DEPS} ${MKLDNN_CTX_DEPS}) 
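The `cpu_helper` library registered in the hunk above hosts `SetNumThreads`, which previously lived in `operators/math/blas.h`; its full implementation appears in the new-file hunk below. Caller-side usage, mirroring the updated call sites in `io.cc` and `test_inference_nlp.cc` (this compiles only inside the Paddle tree, since it includes the real header):

```cpp
#include "paddle/fluid/platform/cpu_helper.h"

int main() {
  // Pin the underlying BLAS (OpenBLAS or MKLML) to one thread, e.g. when
  // every std::thread runs its own inference executor and BLAS-level
  // threading would otherwise oversubscribe the cores.
  paddle::platform::SetNumThreads(1);
  return 0;
}
```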
+cc_library(device_context SRCS device_context.cc init.cc DEPS malloc + place eigen3 stringpiece cpu_helper ${GPU_CTX_DEPS} ${MKLDNN_CTX_DEPS}) nv_test(device_context_test SRCS device_context_test.cu DEPS device_context gpu_info) +cc_test(init_test SRCS init_test.cc DEPS device_context) + nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda) nv_test(transform_test SRCS transform_test.cu DEPS memory place device_context) @@ -53,5 +58,5 @@ cc_library(device_tracer SRCS device_tracer.cc DEPS boost profiler_proto framewo cc_library(profiler SRCS profiler.cc DEPS device_context device_tracer) cc_test(profiler_test SRCS profiler_test.cc DEPS profiler) -nv_test(float16_gpu_test SRCS float16_test.cu) -cc_test(float16_test SRCS float16_test.cc) +nv_test(float16_gpu_test SRCS float16_test.cu DEPS lod_tensor) +cc_test(float16_test SRCS float16_test.cc DEPS lod_tensor) diff --git a/paddle/fluid/platform/cpu_helper.cc b/paddle/fluid/platform/cpu_helper.cc new file mode 100644 index 0000000000000000000000000000000000000000..77ecb170111d63f23312d06fa8a8172bc45f2a4e --- /dev/null +++ b/paddle/fluid/platform/cpu_helper.cc @@ -0,0 +1,42 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/platform/cpu_helper.h" +#include "paddle/fluid/platform/enforce.h" + +#ifdef PADDLE_WITH_MKLML +#include "paddle/fluid/platform/dynload/mklml.h" +#endif + +#ifdef PADDLE_USE_OPENBLAS +#include +#endif + +namespace paddle { +namespace platform { + +void SetNumThreads(int num_threads) { +#ifdef PADDLE_USE_OPENBLAS + int real_num_threads = num_threads > 1 ? num_threads : 1; + openblas_set_num_threads(real_num_threads); +#elif defined(PADDLE_WITH_MKLML) + int real_num_threads = num_threads > 1 ? num_threads : 1; + platform::dynload::MKL_Set_Num_Threads(real_num_threads); +#else + PADDLE_ENFORCE(false, "To be implemented."); +#endif +} + +} // namespace platform +} // namespace paddle diff --git a/paddle/fluid/platform/cpu_helper.h b/paddle/fluid/platform/cpu_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..78fc392b632ef92d4ae08de2051041fc0bf6778b --- /dev/null +++ b/paddle/fluid/platform/cpu_helper.h @@ -0,0 +1,26 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include + +namespace paddle { +namespace platform { + +//! Set the number of threads in use. 
+void SetNumThreads(int num_threads); + +} // namespace platform +} // namespace paddle diff --git a/paddle/fluid/platform/cpu_helper_test.cc b/paddle/fluid/platform/cpu_helper_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..dc1b2b56cd98ca6259c46a76231dbc99482970c1 --- /dev/null +++ b/paddle/fluid/platform/cpu_helper_test.cc @@ -0,0 +1,22 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/platform/cpu_helper.h" + +#include "gtest/gtest.h" + +TEST(CpuHelper, SetNumThread) { + paddle::platform::SetNumThreads(1); + paddle::platform::SetNumThreads(4); +} diff --git a/paddle/fluid/platform/device_context_test.cu b/paddle/fluid/platform/device_context_test.cu index fa806aba6d8747beebc3eed2c661b326dd62fd76..171d2979a0218ad5e22112190a59866b3e0b617f 100644 --- a/paddle/fluid/platform/device_context_test.cu +++ b/paddle/fluid/platform/device_context_test.cu @@ -69,19 +69,3 @@ TEST(Device, DeviceContextPool) { ASSERT_NE(dev_ctx, nullptr); } } - -int main(int argc, char** argv) { - std::vector places; - - places.emplace_back(paddle::platform::CPUPlace()); - int count = paddle::platform::GetCUDADeviceCount(); - for (int i = 0; i < count; ++i) { - places.emplace_back(paddle::platform::CUDAPlace(i)); - } - - VLOG(0) << " DeviceCount " << count; - paddle::platform::DeviceContextPool::Init(places); - - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/paddle/fluid/platform/dynload/dynamic_loader.cc b/paddle/fluid/platform/dynload/dynamic_loader.cc index 198d8566b1bd726c5b33d8af22a19cb30a280fa2..93bf7c13516ffa4baca6a30f1daf946939726d85 100644 --- a/paddle/fluid/platform/dynload/dynamic_loader.cc +++ b/paddle/fluid/platform/dynload/dynamic_loader.cc @@ -36,8 +36,6 @@ DEFINE_string(cuda_dir, "", DEFINE_string(warpctc_dir, "", "Specify path for loading libwarpctc.so."); -DEFINE_string(lapack_dir, "", "Specify path for loading liblapack.so."); - DEFINE_string(nccl_dir, "", "Specify path for loading nccl library, such as libcublas, " "libcurand. For instance, /usr/local/cuda/lib64. 
If default, " @@ -189,14 +187,6 @@ void* GetWarpCTCDsoHandle() { #endif } -void* GetLapackDsoHandle() { -#if defined(__APPLE__) || defined(__OSX__) - return GetDsoHandleFromSearchPath(FLAGS_lapack_dir, "liblapacke.dylib"); -#else - return GetDsoHandleFromSearchPath(FLAGS_lapack_dir, "liblapacke.so"); -#endif -} - void* GetNCCLDsoHandle() { #if defined(__APPLE__) || defined(__OSX__) return GetDsoHandleFromSearchPath(FLAGS_nccl_dir, "libnccl.dylib"); diff --git a/paddle/fluid/platform/dynload/dynamic_loader.h b/paddle/fluid/platform/dynload/dynamic_loader.h index ca87dc47f355a8a4fc840262044413414edf00a0..84fd2ce9987628a5ed29e4125a03dedb96e416c1 100644 --- a/paddle/fluid/platform/dynload/dynamic_loader.h +++ b/paddle/fluid/platform/dynload/dynamic_loader.h @@ -23,7 +23,6 @@ void* GetCUDNNDsoHandle(); void* GetCUPTIDsoHandle(); void* GetCurandDsoHandle(); void* GetWarpCTCDsoHandle(); -void* GetLapackDsoHandle(); void* GetNCCLDsoHandle(); void* GetTensorRtDsoHandle(); void* GetMKLMLDsoHandle(); diff --git a/paddle/fluid/platform/float16_test.cc b/paddle/fluid/platform/float16_test.cc index a589e32b61a9b6a44bdc4529eee715d987d6922c..ede294be1e2e26693bd3ead2ccd5e6a6c8a075bc 100644 --- a/paddle/fluid/platform/float16_test.cc +++ b/paddle/fluid/platform/float16_test.cc @@ -13,8 +13,8 @@ limitations under the License. */ #include #include "gtest/gtest.h" -#include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/platform/init.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/framework/init.cc b/paddle/fluid/platform/init.cc similarity index 96% rename from paddle/fluid/framework/init.cc rename to paddle/fluid/platform/init.cc index a1094976f6c0965ac0a601d7e37575969146fdab..0b776528414735e8a7c1e3763e7ccb662bb9f285 100644 --- a/paddle/fluid/framework/init.cc +++ b/paddle/fluid/platform/init.cc @@ -16,10 +16,10 @@ limitations under the License. */ #include #include -#include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/operator.h" -#include "paddle/fluid/operators/math/blas.h" +#include "paddle/fluid/platform/cpu_helper.h" #include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/init.h" #include "paddle/fluid/platform/place.h" #include "paddle/fluid/string/piece.h" @@ -115,7 +115,7 @@ void InitDevices(bool init_p2p, const std::vector devices) { places.emplace_back(platform::CPUPlace()); platform::DeviceContextPool::Init(places); #ifndef PADDLE_WITH_MKLDNN - operators::math::SetNumThreads(1); + platform::SetNumThreads(1); #endif } diff --git a/paddle/fluid/framework/init.h b/paddle/fluid/platform/init.h similarity index 100% rename from paddle/fluid/framework/init.h rename to paddle/fluid/platform/init.h diff --git a/paddle/fluid/framework/init_test.cc b/paddle/fluid/platform/init_test.cc similarity index 96% rename from paddle/fluid/framework/init_test.cc rename to paddle/fluid/platform/init_test.cc index 928e2d14abea604cf483f4bc1e1c58fbae04dd21..eef1470a90c7da15efff965fc8f66dfa616ba25f 100644 --- a/paddle/fluid/framework/init_test.cc +++ b/paddle/fluid/platform/init_test.cc @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "gtest/gtest.h" -#include "paddle/fluid/framework/init.h" #include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/init.h" TEST(InitDevices, CPU) { using paddle::framework::InitDevices; diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt index 4fef351c2118e43697606c90a616cd870e78cd77..89ca4f781273e99bbb83216c238dfc5c88c0a22b 100644 --- a/paddle/fluid/pybind/CMakeLists.txt +++ b/paddle/fluid/pybind/CMakeLists.txt @@ -2,13 +2,13 @@ if(WITH_PYTHON) if(WITH_AMD_GPU) hip_library(paddle_pybind SHARED SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc - DEPS pybind python proto_desc memory executor prune init profiler feed_fetch_method + DEPS pybind python proto_desc memory executor prune profiler feed_fetch_method parallel_executor ${GLOB_OP_LIB}) else() cc_library(paddle_pybind SHARED SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc - DEPS pybind python proto_desc memory executor prune init profiler feed_fetch_method + DEPS pybind python proto_desc memory executor prune profiler feed_fetch_method parallel_executor ${GLOB_OP_LIB}) if(NOT APPLE AND NOT ANDROID) diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index aea08172426a4cc169e90ca519cd23aa4eb5dd1d..56bc0f7d7a7b29acbca98c620f2f9886e81a2ee8 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -25,7 +25,6 @@ limitations under the License. */ #include "paddle/fluid/framework/executor.h" #include "paddle/fluid/framework/feed_fetch_method.h" #include "paddle/fluid/framework/framework.pb.h" -#include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/lod_rank_table.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/lod_tensor_array.h" @@ -37,6 +36,7 @@ limitations under the License. */ #include "paddle/fluid/operators/activation_op.h" #include "paddle/fluid/operators/reader/lod_tensor_blocking_queue.h" #include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/init.h" #include "paddle/fluid/platform/place.h" #include "paddle/fluid/platform/profiler.h" #include "paddle/fluid/pybind/const_value.h" @@ -297,7 +297,7 @@ All parameter, weight, gradient are variables in Paddle. py::return_value_policy::reference); py::class_(m, "Reader", "") - .def("reset", &framework::ReaderHolder::ReInit); + .def("reset", &framework::ReaderHolder::ResetAll); using LoDTensorBlockingQueue = ::paddle::operators::reader::LoDTensorBlockingQueue; diff --git a/paddle/fluid/string/printf.h b/paddle/fluid/string/printf.h index 062095a1c3e977c0bcc89346ead765acb023bcf7..47de23377398423dabf3b0ed5b670e564f57cdfb 100644 --- a/paddle/fluid/string/printf.h +++ b/paddle/fluid/string/printf.h @@ -83,6 +83,13 @@ void Fprintf(std::ostream& out, const char* fmt, const Args&... args) { tinyformat::vformat(out, fmt, tinyformat::makeFormatList(args...)); } +template +std::string Sprintf(const Args&... args) { + std::ostringstream oss; + Fprintf(oss, ""); + return oss.str(); +} + template std::string Sprintf(const char* fmt, const Args&... 
args) { std::ostringstream oss; diff --git a/paddle/fluid/string/printf_test.cc b/paddle/fluid/string/printf_test.cc index 678029f93534ab374bd29083f8991d632ccdd5a1..544b12ef3a877a6e84c136433799301edaa4abdf 100644 --- a/paddle/fluid/string/printf_test.cc +++ b/paddle/fluid/string/printf_test.cc @@ -27,4 +27,5 @@ TEST(StringPrintf, StringPrintf) { EXPECT_EQ(std::string("Wednesday, July 27, 14:44"), paddle::string::Sprintf("%s, %s %d, %.2d:%.2d", weekday, month, day, hour, min)); + EXPECT_EQ(std::string(""), paddle::string::Sprintf()); } diff --git a/paddle/fluid/train/demo/demo_trainer.cc b/paddle/fluid/train/demo/demo_trainer.cc index 813d8386868558bd62a9d5670d540ddeddb2b77d..4425f062efa6eab552caee1a429746528cd66926 100644 --- a/paddle/fluid/train/demo/demo_trainer.cc +++ b/paddle/fluid/train/demo/demo_trainer.cc @@ -15,11 +15,11 @@ #include #include "paddle/fluid/framework/executor.h" -#include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/init.h" #include "paddle/fluid/platform/place.h" namespace paddle { diff --git a/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.cpp b/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.cpp index 637686e443ceb740d52d42524246ae48a85d52f0..3ef0dfbfe2e5842918500a3b0706c1a55024ce46 100644 --- a/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.cpp +++ b/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.cpp @@ -532,6 +532,7 @@ void TrainerThread::computeThread() { break; } } + hl_fini(); } void TrainerThread::prefetch() { @@ -651,6 +652,7 @@ void TrainerThread::copyGradToBufferThread() { } partnerThread->notifyGradientCollect(pid); } + hl_fini(); } void TrainerThread::gradCollectThread() { @@ -693,6 +695,7 @@ void TrainerThread::gradCollectThread() { notifyCopyGradToBuffer(pid); } } + hl_fini(); } void TrainerThread::doCallback(int pid) { @@ -741,6 +744,7 @@ void TrainerThread::valueDispatchThread() { thread->notifyValueReady(pid); } + hl_fini(); } void TrainerThread::notifyValueReady(int paramId) { diff --git a/paddle/legacy/gserver/gradientmachines/ParallelNeuralNetwork.cpp b/paddle/legacy/gserver/gradientmachines/ParallelNeuralNetwork.cpp index 450514ca88a319b30ca3ebae669c78502087540a..33d24b5b832fe9011591606860e0f50361367790 100644 --- a/paddle/legacy/gserver/gradientmachines/ParallelNeuralNetwork.cpp +++ b/paddle/legacy/gserver/gradientmachines/ParallelNeuralNetwork.cpp @@ -197,6 +197,7 @@ void ParallelThread::computeThread() { job_work.layer_->markAllInputGrad(); } } + hl_fini(); } void ParallelThread::start() { diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index d8f0b76b7ba0fedfe411aa86f6f8a0c77a02beca..d173b41e86f61954954b6a5ea9957d2e172deca0 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -312,6 +312,20 @@ EOF fi } +function assert_api_not_changed() { + mkdir -p ${PADDLE_ROOT}/build/.check_api_workspace + cd ${PADDLE_ROOT}/build/.check_api_workspace + virtualenv .env + source .env/bin/activate + pip install ${PADDLE_ROOT}/build/python/dist/*whl + curl ${PADDLE_API_SPEC_URL:-https://raw.githubusercontent.com/PaddlePaddle/FluidAPISpec/master/API.spec} \ + > origin.spec + python ${PADDLE_ROOT}/tools/print_signatures.py paddle.fluid > new.spec + python ${PADDLE_ROOT}/tools/diff_api.py origin.spec new.spec + deactivate +} + + function 
 function single_test() {
     TEST_NAME=$1
     if [ -z "${TEST_NAME}" ]; then
@@ -550,6 +564,7 @@ function main() {
       cicheck)
         cmake_gen ${PYTHON_ABI:-""}
         build
+        assert_api_not_changed
        run_test
        gen_capi_package
        gen_fluid_inference_lib
diff --git a/paddle/testing/CMakeLists.txt b/paddle/testing/CMakeLists.txt
index a1f446817e0cbc1b4391398a82b0846d01bbec2c..22644818994134d4797edfae8d156a005c103d52 100644
--- a/paddle/testing/CMakeLists.txt
+++ b/paddle/testing/CMakeLists.txt
@@ -6,6 +6,6 @@ if(WITH_TESTING)
   add_library(paddle_test_util STATIC TestUtil.cpp)
   add_dependencies(paddle_test_util paddle_proto ${external_project_dependencies})
   if(NOT MOBILE_INFERENCE)
-    cc_library(paddle_gtest_main SRCS paddle_gtest_main.cc DEPS init memory gtest gflags)
+    cc_library(paddle_gtest_main SRCS paddle_gtest_main.cc DEPS device_context memory gtest gflags)
   endif()
 endif()
diff --git a/paddle/testing/paddle_gtest_main.cc b/paddle/testing/paddle_gtest_main.cc
index 555be3d00e2dc467eec45210cc997779827ed69f..cfea2059c3ce20fb44732d990e9708ad6f8d81a1 100644
--- a/paddle/testing/paddle_gtest_main.cc
+++ b/paddle/testing/paddle_gtest_main.cc
@@ -16,8 +16,8 @@ limitations under the License. */
 
 #include "gflags/gflags.h"
 #include "gtest/gtest.h"
-#include "paddle/fluid/framework/init.h"
 #include "paddle/fluid/memory/memory.h"
+#include "paddle/fluid/platform/init.h"
 
 int main(int argc, char** argv) {
   testing::InitGoogleTest(&argc, argv);
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 93cd6b621ac860bad37ad5ccfb733c53d07f8d2b..ea3117e02bd993b06de39725b2c3296031065e3c 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -26,7 +26,7 @@ except ImportError, e:
         """NOTE: You may need to run \"export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH\"
     if you encounters \"libmkldnn.so not found\" errors. If you have python
     installed in other directory, replace \"/usr/local/lib\" with your own
-    directory. The original error is: """ % str(e))
+    directory. The original error is: \n""" + e.message)
 except Exception, e:
     raise e
 import unique_name
diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py
index c66289eaecc99d6efa31a5b4c552b3f733ad1c65..99e23f8a7f8d2aefd925baaa4a440c797c0ef886 100644
--- a/python/paddle/fluid/layers/io.py
+++ b/python/paddle/fluid/layers/io.py
@@ -376,9 +376,6 @@ def open_recordio_file(filename,
     if pass_num > 1:
         main_prog_var = multi_pass(reader=main_prog_var, pass_num=pass_num)
 
-    if for_parallel:
-        main_prog_var = parallel(reader=main_prog_var)
-
     return monkey_patch_reader_methods(main_prog_var)
 
 
@@ -610,9 +607,6 @@ def open_files(filenames,
         main_prog_reader = multi_pass(
             reader=main_prog_reader, pass_num=pass_num)
 
-    if for_parallel:
-        main_prog_reader = parallel(reader=main_prog_reader)
-
     return monkey_patch_reader_methods(main_prog_reader)
 
 
@@ -728,11 +722,6 @@ def multi_pass(reader, pass_num):
         'create_multi_pass_reader', reader, {'pass_num': int(pass_num)})
 
 
-def parallel(reader):
-    return __create_shared_decorated_reader__('create_threaded_reader', reader,
-                                              {})
-
-
 def read_file(reader):
     """
     Execute the given reader and get data via it.
diff --git a/python/paddle/fluid/tests/test_mnist_if_else_op.py b/python/paddle/fluid/tests/test_if_else_op.py
similarity index 66%
rename from python/paddle/fluid/tests/test_mnist_if_else_op.py
rename to python/paddle/fluid/tests/test_if_else_op.py
index d34f52db5ffc889f17513d034ad2c99f696b0cdf..1b58925599de62510ea9048f5210bb0b7e49f933 100644
--- a/python/paddle/fluid/tests/test_mnist_if_else_op.py
+++ b/python/paddle/fluid/tests/test_if_else_op.py
@@ -14,10 +14,11 @@
 
 import paddle
 import paddle.fluid.layers as layers
-from paddle.fluid.framework import Program, program_guard, default_main_program, default_startup_program
+from paddle.fluid.framework import Program, program_guard
 from paddle.fluid.executor import Executor
 from paddle.fluid.optimizer import MomentumOptimizer
 import paddle.fluid.core as core
+import paddle.fluid as fluid
 import unittest
 import numpy as np
 
@@ -31,14 +32,13 @@ class TestMNISTIfElseOp(unittest.TestCase):
 
         label = layers.data(name='y', shape=[1], dtype='int64')
 
-        limit = layers.fill_constant_batch_size_like(
-            input=label, dtype='int64', shape=[1], value=5.0)
+        limit = layers.fill_constant(shape=[1], dtype='int64', value=5)
         cond = layers.less_than(x=label, y=limit)
         true_image, false_image = layers.split_lod_tensor(
             input=image, mask=cond)
 
         true_out = layers.create_tensor(dtype='float32')
-        true_cond = layers.ConditionalBlock([true_image])
+        true_cond = layers.ConditionalBlock([cond])
 
         with true_cond.block():
             hidden = layers.fc(input=true_image, size=100, act='tanh')
@@ -46,7 +46,7 @@ class TestMNISTIfElseOp(unittest.TestCase):
             layers.assign(input=prob, output=true_out)
 
         false_out = layers.create_tensor(dtype='float32')
-        false_cond = layers.ConditionalBlock([false_image])
+        false_cond = layers.ConditionalBlock([cond])
 
         with false_cond.block():
             hidden = layers.fc(input=false_image, size=200, act='tanh')
@@ -64,7 +64,7 @@ class TestMNISTIfElseOp(unittest.TestCase):
         train_reader = paddle.batch(
             paddle.reader.shuffle(
                 paddle.dataset.mnist.train(), buf_size=8192),
-            batch_size=200)
+            batch_size=10)
 
         place = core.CPUPlace()
         exe = Executor(place)
@@ -94,8 +94,7 @@ class TestMNISTIfElseOp(unittest.TestCase):
 
         label = layers.data(name='y', shape=[1], dtype='int64')
 
-        limit = layers.fill_constant_batch_size_like(
-            input=label, dtype='int64', shape=[1], value=5.0)
+        limit = layers.fill_constant(shape=[1], dtype='int64', value=5)
         cond = layers.less_than(x=label, y=limit)
         ie = layers.IfElse(cond)
 
@@ -125,7 +124,7 @@ class TestMNISTIfElseOp(unittest.TestCase):
 
         place = core.CPUPlace()
         exe = Executor(place)
-        exe.run(kwargs['startup_program'])
+        exe.run(startup_prog)
         PASS_NUM = 100
         for pass_id in range(PASS_NUM):
             for data in train_reader():
@@ -133,7 +132,7 @@ class TestMNISTIfElseOp(unittest.TestCase):
                 y_data = np.array(map(lambda x: x[1], data)).astype("int64")
                 y_data = y_data.reshape((y_data.shape[0], 1))
 
-                outs = exe.run(kwargs['main_program'],
+                outs = exe.run(prog,
                                feed={'x': x_data,
                                      'y': y_data},
                                fetch_list=[avg_loss])
@@ -143,6 +142,67 @@ class TestMNISTIfElseOp(unittest.TestCase):
         self.assertFalse(True)
 
 
+class TestIfElse(unittest.TestCase):
+    def set_test_case(self):
+        # condition is: self.data < self.cond_value
+        self.cond_value = 0.5
+        self.data = np.random.rand(25, 1).astype(np.float32)
+
+    def compare_ifelse_op_and_numpy(self, place):
+        self.set_test_case()
+
+        prog = Program()
+        startup_prog = Program()
+        with program_guard(prog, startup_prog):
+            src = layers.data(name='data', shape=[1], dtype='float32')
+            cond = layers.fill_constant(
+                [1], dtype='float32', value=self.cond_value)
+            ifcond = layers.less_than(x=src, y=cond)
+            ie = layers.IfElse(ifcond)
+            with ie.true_block():
+                true_target = ie.input(src)
+                ie.output(true_target)
+
+            with ie.false_block():
+                false_target = ie.input(src)
+                ie.output(false_target)
+            if_out = ie()
+            out = layers.reduce_sum(if_out)
+
+        exe = fluid.Executor(place)
+        exe.run(fluid.default_startup_program())
+        fetch_list = [out]
+        o1, = exe.run(fluid.default_main_program(),
+                      feed={'data': self.data},
+                      fetch_list=[out])
+        o2 = np.sum(self.data)
+        self.assertTrue(
+            np.allclose(
+                o1, o2, atol=1e-8),
+            "IfElse result : " + str(o1) + "\n Numpy result :" + str(o2))
+
+    def test_cpu(self):
+        self.compare_ifelse_op_and_numpy(fluid.CPUPlace())
+
+    def test_cuda(self):
+        if not core.is_compiled_with_cuda():
+            return
+        self.compare_ifelse_op_and_numpy(fluid.CUDAPlace(0))
+
+
+class TestIfElseTrueBranch(TestIfElse):
+    def set_test_case(self):
+        # condition is: self.data < self.cond_value
+        self.cond_value = 10.
+        self.data = np.random.rand(25, 1).astype(np.float32)
+
+
+class TestIfElseFalseBranch(TestIfElse):
+    def set_test_case(self):
+        # condition is: self.data < self.cond_value
+        self.cond_value = -10.
+        self.data = np.random.rand(25, 1).astype(np.float32)
+
+
 if __name__ == '__main__':
-    # temp disable if else unittest since it could be buggy.
-    exit(0)
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_data_balance.py b/python/paddle/fluid/tests/unittests/test_data_balance.py
index cffa3329ac556dc77f3cb508b807cbd49bb974f7..6d810920d55ccf069ff408c553069e8f5e590271 100644
--- a/python/paddle/fluid/tests/unittests/test_data_balance.py
+++ b/python/paddle/fluid/tests/unittests/test_data_balance.py
@@ -103,8 +103,12 @@ class TestDataBalance(unittest.TestCase):
 
         exe = fluid.Executor(place)
         exe.run(startup_prog)
+        build_strategy = fluid.BuildStrategy()
+        build_strategy.enable_data_balance = True
         parallel_exe = fluid.ParallelExecutor(
-            use_cuda=self.use_cuda, main_program=main_prog)
+            use_cuda=self.use_cuda,
+            main_program=main_prog,
+            build_strategy=build_strategy)
 
         if (parallel_exe.device_count > self.batch_size):
             print("WARNING: Unittest TestDataBalance skipped. \
@@ -145,9 +149,12 @@
         place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace()
         exe = fluid.Executor(place)
         exe.run(startup_prog)
-
+        build_strategy = fluid.BuildStrategy()
+        build_strategy.enable_data_balance = True
         parallel_exe = fluid.ParallelExecutor(
-            use_cuda=self.use_cuda, main_program=main_prog)
+            use_cuda=self.use_cuda,
+            main_program=main_prog,
+            build_strategy=build_strategy)
 
         if (parallel_exe.device_count > self.batch_size):
             print("WARNING: Unittest TestDataBalance skipped. \
diff --git a/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py b/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py
index 281068e945e76a42635868d19573498f79fde1f3..026ac2112b2d78644b3315b9cab8019ca27e9714 100644
--- a/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py
@@ -40,7 +40,6 @@ class TestFakeDequantizeMaxAbsOp(OpTest):
         self.op_type = "fake_dequantize_max_abs"
         x = np.random.randn(31, 65).astype("float32")
         yq, scale = quantize_max_abs(x, self.num_bits)
-        print 'scale ', scale
         ydq = dequantize_max_abs(yq, self.num_bits, scale)
 
         self.inputs = {'X': yq}
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_op.py b/python/paddle/fluid/tests/unittests/test_parallel_op.py
index 79bea148f9398152a02d70946cdc5fff1f47ba6b..9ba5f988f317a515b77c0b428da236626419a2c3 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_op.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_op.py
@@ -113,7 +113,9 @@ class BaseParallelForTest(unittest.TestCase):
             generator = callback()
             # Automatically insert parallel do if use_parallel = True
             if use_parallel:
-                places = fluid.layers.get_places()
+                thread_num = fluid.core.get_cuda_device_count(
+                ) if use_gpu else 8
+                places = fluid.layers.get_places(thread_num)
                 pd = fluid.layers.ParallelDo(places, use_nccl=use_nccl)
                 data = next(generator)
 
diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py
index 05fed72ee6471ba42007b5a9f09f89148ac27a30..53d6ca86a008f798af2854a154cce8b7242d2f35 100644
--- a/python/paddle/fluid/transpiler/distribute_transpiler.py
+++ b/python/paddle/fluid/transpiler/distribute_transpiler.py
@@ -309,10 +309,10 @@ class DistributeTranspiler(object):
     def get_pserver_program(self, endpoint):
         """
         Get parameter server side program.
-        
+
         Args:
             endpoint (str): current parameter server endpoint.
-        
+
         Returns:
             Program: the program for current parameter server to run.
         """
@@ -516,7 +516,7 @@ class DistributeTranspiler(object):
             endpoint (str): current pserver endpoint.
             pserver_program (Program): call get_pserver_program first and
                 pass the result here.
-        
+
         Returns:
             Program: parameter server side startup program.
         """
@@ -552,10 +552,10 @@ class DistributeTranspiler(object):
                     op_on_pserver = True
                     new_outputs[key] = pserver_vars[op.output(key)[0]]
 
-            # most startup program ops have no inputs
-            new_inputs = self._get_input_map_from_op(pserver_vars, op)
-
             if op_on_pserver:
+                # most startup program ops have no inputs
+                new_inputs = self._get_input_map_from_op(pserver_vars, op)
+
                 if op.type in [
                         "gaussian_random", "fill_constant", "uniform_random"
                 ]:
diff --git a/python/paddle/fluid/transpiler/inference_transpiler.py b/python/paddle/fluid/transpiler/inference_transpiler.py
index d32c69d148dfa1633ce344611ca3fe7879a234e9..b8afeae5ebd6ef7948a7c0c2775f419af461da04 100644
--- a/python/paddle/fluid/transpiler/inference_transpiler.py
+++ b/python/paddle/fluid/transpiler/inference_transpiler.py
@@ -19,7 +19,7 @@ from ..framework import Program
 from ..executor import global_scope
 
 
-class InferenceTranspiler:
+class InferenceTranspiler(object):
     '''
     Convert the fluid program to optimized inference program.
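
A note on the `InferenceTranspiler(object)` hunk just above: under Python 2 a class that does not inherit from `object` is an "old-style" class, so `super()`, descriptors, and a proper `__mro__` are unavailable; adding `(object)` opts into new-style semantics. A minimal standalone Python 2 sketch of the difference (illustration only, not Paddle code):

# Python 2 sketch: old-style vs. new-style classes.
class OldStyle:             # old-style: no implicit object base
    pass

class NewStyle(object):     # new-style: super(), properties, __mro__ all work
    pass

print(type(OldStyle()))     # <type 'instance'> for every old-style instance
print(type(NewStyle()))     # <class '__main__.NewStyle'>
print(NewStyle.__mro__)     # (<class '__main__.NewStyle'>, <type 'object'>)
# OldStyle.__mro__ would raise AttributeError: old-style classes have no MRO.
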
diff --git a/python/paddle/libs/__init__.py b/python/paddle/libs/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..34d4f4d07ed0d452c1965c5f1f198230571931aa
--- /dev/null
+++ b/python/paddle/libs/__init__.py
@@ -0,0 +1,15 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# used by setup.py.in to store the third-party shared libraries
diff --git a/python/setup.py.in b/python/setup.py.in
index 51380149d0b09224c02050902897f23f53600de2..a0cb39070bf7a89e3ea4cb1d31f54f919d6ff74e 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -1,14 +1,13 @@
 from setuptools import setup, Distribution, Extension
 import subprocess
+import os
+import re
+import shutil
 
 class BinaryDistribution(Distribution):
     def has_ext_modules(foo):
         return True
 
-MAJOR = 0
-MINOR = 14
-PATCH = 0
 RC = 0
-ISTAGED = False
@@ -20,14 +19,47 @@ def git_commit():
         git_commit = 'Unknown'
     return git_commit
 
+def _get_version_detail(idx):
+    assert idx < 3, "version info consists of %(major)d.%(minor)d.%(patch)d, \
+        so the detail index must be less than 3"
+
+    if re.match('@TAG_VERSION_REGEX@', '@PADDLE_VERSION@'):
+        version_details = '@PADDLE_VERSION@'.split('.')
+
+        if len(version_details) == 3:
+            return version_details[idx]
+
+    return 0
+
+def get_major():
+    return int(_get_version_detail(0))
+
+def get_minor():
+    return int(_get_version_detail(1))
+
+def get_patch():
+    return str(_get_version_detail(2))
+
+def is_tagged():
+    try:
+        cmd = ['git', 'describe', '--exact-match', '--tags']
+        git_tag = subprocess.Popen(cmd, stdout = subprocess.PIPE).communicate()[0].strip()
+    except:
+        return False
+
+    if git_tag.replace('v', '') == '@PADDLE_VERSION@':
+        return True
+    else:
+        return False
+
 def write_version_py(filename='paddle/version.py'):
     cnt = '''
 # THIS FILE IS GENERATED FROM PADDLEPADDLE SETUP.PY
 #
-full_version    = '%(major)d.%(minor)d.%(patch)d'
+full_version    = '%(major)d.%(minor)d.%(patch)s'
 major           = '%(major)d'
 minor           = '%(minor)d'
-patch           = '%(patch)d'
+patch           = '%(patch)s'
 rc              = '%(rc)d'
 istaged         = %(istaged)s
 commit          = '%(commit)s'
@@ -49,19 +81,20 @@ def mkl():
     commit = git_commit()
     with open(filename, 'w') as f:
         f.write(cnt % {
-            'major': MAJOR,
-            'minor': MINOR,
-            'patch': PATCH,
+            'major': get_major(),
+            'minor': get_minor(),
+            'patch': get_patch(),
             'rc': RC,
             'version': '${PADDLE_VERSION}',
             'commit': commit,
-            'istaged': ISTAGED,
+            'istaged': is_tagged(),
             'with_mkl': '@WITH_MKL@'})
 
 write_version_py(filename='@PADDLE_BINARY_DIR@/python/paddle/version.py')
 
 
 packages=['paddle',
+          'paddle.libs',
           'paddle.utils',
           'paddle.dataset',
           'paddle.reader',
@@ -113,12 +146,35 @@ package_dir={
 }
 if '${WITH_FLUID_ONLY}'== 'OFF':
     package_dir['py_paddle']='${PADDLE_BINARY_DIR}/python/py_paddle'
-
-paddle_rt_lib_dir = 'lib'
-paddle_rt_libs = ['${WARPCTC_LIBRARIES}']
-if '${MKL_SHARED_LIBS}'!= '':
-    paddle_rt_libs += '${MKL_SHARED_LIBS}'.split(';')
 
+# put all third-party shared libraries in paddle.libs
+package_data['paddle.libs']=['libwarpctc.so']
+libs_path='${PADDLE_BINARY_DIR}/python/paddle/libs'
+shutil.copy('${WARPCTC_LIBRARIES}', libs_path)
+if '${WITH_MKL}' == 'ON':
+    shutil.copy('${MKLML_LIB}', libs_path)
+    shutil.copy('${MKLML_IOMP_LIB}', libs_path)
+    package_data['paddle.libs']+=['libmklml_intel.so','libiomp5.so']
+if '${WITH_MKLDNN}' == 'ON':
+    # change rpath of libmkldnn.so.0, add $ORIGIN/ to it.
+    # The reason is that all third-party libraries are in the same directory,
+    # so libmkldnn.so.0 will find libmklml_intel.so and libiomp5.so.
+    command = "patchelf --set-rpath '$ORIGIN/' ${MKLDNN_SHARED_LIB}"
+    if os.system(command) != 0:
+        raise Exception("patchelf --set-rpath for libmkldnn.so.0 failed")
+    package_data['paddle.libs']+=['libmkldnn.so.0']
+    shutil.copy('${MKLDNN_SHARED_LIB}', libs_path)
+# remove the unused paddle/libs/__init__.py
+os.remove(libs_path+'/__init__.py')
+package_dir['paddle.libs']=libs_path
+
+# change rpath of core.so, add $ORIGIN/../libs/ to it.
+# The reason is that libwarpctc.so, libiomp5.so etc. are in paddle.libs, and
+# core.so is in paddle.fluid, so paddle/fluid/../libs will point to the above libraries.
+# This operation will fix https://github.com/PaddlePaddle/Paddle/issues/3213
+command = "patchelf --set-rpath '$ORIGIN/../libs/' ${PADDLE_BINARY_DIR}/python/paddle/fluid/core.so"
+if os.system(command) != 0:
+    raise Exception("patchelf --set-rpath for core.so failed")
 
 setup(name='${PACKAGE_NAME}',
       version='${PADDLE_VERSION}',
@@ -128,6 +184,5 @@ setup(name='${PACKAGE_NAME}',
       ext_modules=[Extension('_foo', ['stub.cc'])],
       package_data=package_data,
       package_dir=package_dir,
-      scripts=paddle_bins,
-      data_files=[(paddle_rt_lib_dir, paddle_rt_libs)]
+      scripts=paddle_bins
 )
diff --git a/tools/diff_api.py b/tools/diff_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf9f2c72cb78ddf88ff2a7bb1c0ee4b00ec0ec96
--- /dev/null
+++ b/tools/diff_api.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+from __future__ import print_function
+import difflib
+import sys
+
+with open(sys.argv[1], 'r') as f:
+    origin = f.read()
+    origin = origin.splitlines()
+
+with open(sys.argv[2], 'r') as f:
+    new = f.read()
+    new = new.splitlines()
+
+differ = difflib.Differ()
+result = differ.compare(origin, new)
+
+error = False
+print('API Difference is: ')
+for each_diff in result:
+    if each_diff[0] in ['-', '?']:  # deleting or changing an existing API is not allowed
+        error = True
+    elif each_diff[0] == '+':
+        # only adding new layers is allowed
+        if not each_diff.startswith('+ paddle.fluid.layers.'):
+            error = True
+
+    if each_diff[0] != ' ':
+        print(each_diff)
+
+if error:
+    sys.exit(1)
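
For context on the new CI gate (`assert_api_not_changed` in paddle_build.sh plus `tools/diff_api.py`): the script diffs the published `API.spec` against freshly printed signatures and fails on any removed or modified line, accepting only additions under `paddle.fluid.layers`. Below is a hedged, self-contained Python sketch of that rule; the two spec snippets are made-up examples, not real Paddle signatures:

# Minimal sketch of the diff_api.py gating rule (example spec lines only).
import difflib

origin = [
    "paddle.fluid.layers.fc ArgSpec(args=['input', 'size'])",
    "paddle.fluid.layers.relu ArgSpec(args=['x'])",
]
new = [
    "paddle.fluid.layers.fc ArgSpec(args=['input', 'size'])",
    "paddle.fluid.layers.relu ArgSpec(args=['x'])",
    "paddle.fluid.layers.swish ArgSpec(args=['x'])",  # pure addition: allowed
]

error = False
for line in difflib.Differ().compare(origin, new):
    if line[0] in ['-', '?']:            # removal or modification: rejected
        error = True
    elif line[0] == '+' and not line.startswith('+ paddle.fluid.layers.'):
        error = True                     # additions outside layers: rejected

print('API compatible' if not error else 'API changed')  # -> API compatible
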