diff --git a/CMakeLists.txt b/CMakeLists.txt
index c649aafeddaf9f28c213d086236c3779d3137d92..de47086dbd6a440cd413c7843c83b1c69d9841b2 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -39,6 +39,7 @@ option(WITH_GPU "Compile PaddlePaddle with NVIDIA GPU" ${CUDA_F
 option(WITH_AMD_GPU "Compile PaddlePaddle with AMD GPU" OFF)
 option(WITH_AVX "Compile PaddlePaddle with AVX intrinsics" ${AVX_FOUND})
 option(WITH_MKL "Compile PaddlePaddle with MKL support." ${AVX_FOUND})
+option(WITH_TENSORRT "Compile PaddlePaddle with TensorRT support." OFF)
 option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON)
 option(WITH_TESTING "Compile PaddlePaddle with unit testing" OFF)
 option(WITH_SWIG_PY "Compile PaddlePaddle with inference api" ON)
@@ -181,6 +182,11 @@ if(WITH_GPU)
   include(cuda)
 endif(WITH_GPU)
 
+# TensorRT depends on GPU.
+if (NOT WITH_GPU)
+  set(WITH_TENSORRT OFF)
+endif()
+
 if(WITH_AMD_GPU)
   find_package(HIP)
   include(hip)
diff --git a/Dockerfile b/Dockerfile
index fbec88c7966d6ea93495519843d6cda63f622661..9097bb657d2366997112ec7662762a93358aa647 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,6 +1,6 @@
 # An image for building paddle binaries
 # Use cuda devel base image for both cpu and gpu environment
-FROM nvidia/cuda:8.0-cudnn5-devel-ubuntu16.04
+FROM nvidia/cuda:8.0-cudnn7-devel-ubuntu16.04
 MAINTAINER PaddlePaddle Authors
 
 ARG UBUNTU_MIRROR
@@ -45,6 +45,13 @@ ENV PATH=${PATH}:${GOROOT}/bin:${GOPATH}/bin
 # install glide
 RUN curl -s -q https://glide.sh/get | sh
 
+# Install TensorRT
+# Unnecessary files have been removed to keep the library small.
+RUN wget -qO- http://paddlepaddledeps.bj.bcebos.com/TensorRT-4.0.0.3.Ubuntu-16.04.4.x86_64-gnu.cuda-8.0.cudnn7.0.tar.gz | \
+    tar -xz -C /usr/local && \
+    cp -rf /usr/local/TensorRT/include /usr && \
+    cp -rf /usr/local/TensorRT/lib /usr
+
 # git credential to skip password typing
 RUN git config --global credential.helper store
 
@@ -57,7 +64,7 @@ RUN localedef -i en_US -f UTF-8 en_US.UTF-8
 # specify sphinx version as 1.5.6 and remove -U option for [pip install -U
 # sphinx-rtd-theme] since -U option will cause sphinx being updated to the newest
 # version (1.7.1 for now), which causes documentation builds to fail.
-RUN pip install --upgrade pip && \
+RUN pip install --upgrade pip==9.0.3 && \
     pip install -U wheel && \
     pip install -U docopt PyYAML sphinx==1.5.6 && \
     pip install sphinx-rtd-theme==0.1.9 recommonmark
diff --git a/benchmark/fluid/mnist.py b/benchmark/fluid/mnist.py
index 43866da9cb113e9d49fc1c51f67da94cbc6bfd8e..dc10ac2ec195acc9a5693718141ddb32417dfb71 100644
--- a/benchmark/fluid/mnist.py
+++ b/benchmark/fluid/mnist.py
@@ -139,9 +139,6 @@ def run_benchmark(model, args):
 
     # inference program
     inference_program = fluid.default_main_program().clone()
-    with fluid.program_guard(inference_program):
-        inference_program = fluid.io.get_inference_program(
-            target_vars=[batch_acc, batch_size_tensor])
 
     # Optimization
     opt = fluid.optimizer.AdamOptimizer(
@@ -161,7 +158,7 @@
     train_reader = paddle.batch(
         paddle.dataset.mnist.train(), batch_size=args.batch_size)
 
-    accuracy = fluid.average.WeightedAverage()
+    accuracy = fluid.metrics.Accuracy()
     iters, num_samples, start_time = 0, 0, time.time()
     for pass_id in range(args.pass_num):
         accuracy.reset()
@@ -184,7 +181,7 @@
                                 "label": y_data},
                           fetch_list=[avg_cost, batch_acc, batch_size_tensor]
             )  # The accuracy is accumulated over batches, not just the current batch.
- accuracy.add(value=outs[1], weight=outs[2]) + accuracy.update(value=outs[1], weight=outs[2]) iters += 1 num_samples += len(y_data) loss = np.array(outs[0]) diff --git a/cmake/cblas.cmake b/cmake/cblas.cmake index 6320b17520a687f88993b6f464d9115838b0f96b..e3b9d94215a858c5c9a34e1b7e97540f1876801d 100644 --- a/cmake/cblas.cmake +++ b/cmake/cblas.cmake @@ -62,29 +62,33 @@ endif() ## Then find the reference-cblas. www.netlib.org/blas/ - - set(REFERENCE_CBLAS_ROOT $ENV{REFERENCE_CBLAS_ROOT} CACHE PATH "Folder contains reference-cblas") -set(REFERENCE_CBLAS_INCLUDE_SEARCH_PATHS - ${REFERENCE_CBLAS_ROOT}/include - /usr/include - /usr/include/cblas -) - -set(REFERENCE_CBLAS_LIB_SEARCH_PATHS - ${REFERENCE_CBLAS_ROOT}/lib - /usr/lib - /usr/lib/blas/reference/ - /usr/lib/reference/ -) +if(NOT CMAKE_CROSSCOMPILING) + set(REFERENCE_CBLAS_INCLUDE_SEARCH_PATHS + ${REFERENCE_CBLAS_ROOT}/include + /usr/include + /usr/include/cblas + ) + + set(REFERENCE_CBLAS_LIB_SEARCH_PATHS + ${REFERENCE_CBLAS_ROOT}/lib + /usr/lib + /usr/lib/blas/reference/ + /usr/lib/reference/ + ) +else() + # Disable the finding of reference cblas under host's system path + set(REFERENCE_CBLAS_INCLUDE_SEARCH_PATHS ${REFERENCE_CBLAS_ROOT}/include) + set(REFERENCE_CBLAS_LIB_SEARCH_PATHS ${REFERENCE_CBLAS_ROOT}/lib) +endif() find_path(REFERENCE_CBLAS_INCLUDE_DIR NAMES cblas.h PATHS ${REFERENCE_CBLAS_INCLUDE_SEARCH_PATHS}) find_library(REFERENCE_CBLAS_LIBRARY NAMES cblas PATHS ${REFERENCE_CBLAS_LIB_SEARCH_PATHS}) -if (REFERENCE_CBLAS_INCLUDE_DIR AND REFERENCE_CBLAS_LIBRARY) +if(REFERENCE_CBLAS_INCLUDE_DIR AND REFERENCE_CBLAS_LIBRARY) set(CBLAS_FOUND ON) set(CBLAS_PROVIDER REFERENCE) set(CBLAS_INC_DIR ${REFERENCE_CBLAS_INCLUDE_DIR}) diff --git a/cmake/external/grpc.cmake b/cmake/external/grpc.cmake index 0853b981813c5d60a12603471df7e0b216b0822f..e90948782bb5e333bbdb47ef9d61c1e37e3cf9e4 100644 --- a/cmake/external/grpc.cmake +++ b/cmake/external/grpc.cmake @@ -24,16 +24,16 @@ SET(GRPC_INSTALL_DIR ${THIRD_PARTY_PATH}/install/grpc) SET(GRPC_INCLUDE_DIR "${GRPC_INSTALL_DIR}/include/" CACHE PATH "grpc include directory." FORCE) SET(GRPC_CPP_PLUGIN "${GRPC_INSTALL_DIR}/bin/grpc_cpp_plugin" CACHE FILEPATH "GRPC_CPP_PLUGIN" FORCE) IF(APPLE) - SET(BUILD_CMD make -n HAS_SYSTEM_PROTOBUF=false -s -j8 static grpc_cpp_plugin | sed "s/-Werror//g" | sh) + SET(BUILD_CMD make -n HAS_SYSTEM_PROTOBUF=false -s -j static grpc_cpp_plugin | sed "s/-Werror//g" | sh) ELSE() - SET(BUILD_CMD make HAS_SYSTEM_PROTOBUF=false -s -j8 static grpc_cpp_plugin) + SET(BUILD_CMD make HAS_SYSTEM_PROTOBUF=false -s -j static grpc_cpp_plugin) ENDIF() ExternalProject_Add( extern_grpc DEPENDS protobuf zlib GIT_REPOSITORY "https://github.com/grpc/grpc.git" - GIT_TAG "v1.8.x" + GIT_TAG "v1.10.x" PREFIX ${GRPC_SOURCES_DIR} UPDATE_COMMAND "" CONFIGURE_COMMAND "" diff --git a/cmake/external/nccl.cmake b/cmake/external/nccl.cmake deleted file mode 100644 index af5c689c3524741a88518eeb3f85996872257677..0000000000000000000000000000000000000000 --- a/cmake/external/nccl.cmake +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -if(NOT WITH_GPU) - return() -endif() - -include(ExternalProject) - -set(NCCL_SOURCE_DIR ${THIRD_PARTY_PATH}/nccl) - -include_directories(${NCCL_SOURCE_DIR}/src/extern_nccl/src) - -if(WITH_DSO) - # If we use DSO, we do not build nccl, just download the dependencies - set(NCCL_BUILD_COMMAND "") - set(NCCL_INSTALL_COMMAND "") - set(NCCL_INSTALL_DIR "") -else() - # otherwise, we build nccl and link it. - set(NCCL_INSTALL_DIR ${THIRD_PARTY_PATH}/install/nccl) - # Note: cuda 8.0 is needed to make nccl - # When cuda is not installed on the system directory, need to set CUDA_HOME to your cuda root - set(NCCL_BUILD_COMMAND "make -j 8") - set(NCCL_INSTALL_COMMAND "make install PREFIX=${NCCL_INSTALL_DIR}") -endif() - -ExternalProject_Add( - extern_nccl - ${EXTERNAL_PROJECT_LOG_ARGS} - GIT_REPOSITORY "https://github.com/NVIDIA/nccl.git" - GIT_TAG "v1.3.4-1" - PREFIX "${NCCL_SOURCE_DIR}" - UPDATE_COMMAND "" - CONFIGURE_COMMAND "" - BUILD_COMMAND "${NCCL_BUILD_COMMAND}" - INSTALL_COMMAND "${NCCL_INSTALL_COMMAND}" - INSTALL_DIR "${NCCL_INSTALL_DIR}" - TEST_COMMAND "" -) - -if(WITH_DSO) - if(${CMAKE_VERSION} VERSION_LESS "3.3.0") - set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/lib_nccl_dummy.c) - file(WRITE ${dummyfile} "const char * dummy_nccl = \"${dummyfile}\";") - add_library(nccl STATIC ${dummyfile}) - else() - add_library(nccl INTERFACE) - endif() -else() - add_library(nccl STATIC IMPORTED GLOBAL) - set_property(TARGET nccl PROPERTY IMPORTED_LOCATION - ${NCCL_INSTALL_DIR}/lib/libnccl_static.a) -endif() - -add_dependencies(nccl extern_nccl) diff --git a/cmake/external/snappy.cmake b/cmake/external/snappy.cmake index 71f54c425d4c38e271a8f1b78887d95a27252443..80282329c6ac65fbd1493a6838efca4bd9cadaad 100644 --- a/cmake/external/snappy.cmake +++ b/cmake/external/snappy.cmake @@ -11,19 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -IF(MOBILE_INFERENCE) +if(MOBILE_INFERENCE OR RPI) return() -ENDIF() +endif() include (ExternalProject) # NOTE: snappy is needed when linking with recordio -SET(SNAPPY_SOURCES_DIR ${THIRD_PARTY_PATH}/snappy) -SET(SNAPPY_INSTALL_DIR ${THIRD_PARTY_PATH}/install/snappy) -SET(SNAPPY_INCLUDE_DIR "${SNAPPY_INSTALL_DIR}/include/" CACHE PATH "snappy include directory." FORCE) +set(SNAPPY_SOURCES_DIR ${THIRD_PARTY_PATH}/snappy) +set(SNAPPY_INSTALL_DIR ${THIRD_PARTY_PATH}/install/snappy) +set(SNAPPY_INCLUDE_DIR "${SNAPPY_INSTALL_DIR}/include" CACHE PATH "snappy include directory." 
FORCE) + +set(SNAPPY_LIBRARIES "${SNAPPY_INSTALL_DIR}/lib/libsnappy.a") ExternalProject_Add( extern_snappy @@ -51,8 +52,7 @@ ExternalProject_Add( ) add_library(snappy STATIC IMPORTED GLOBAL) -set_property(TARGET snappy PROPERTY IMPORTED_LOCATION - "${SNAPPY_INSTALL_DIR}/lib/libsnappy.a") +set_property(TARGET snappy PROPERTY IMPORTED_LOCATION ${SNAPPY_LIBRARIES}) include_directories(${SNAPPY_INCLUDE_DIR}) add_dependencies(snappy extern_snappy) diff --git a/cmake/external/snappystream.cmake b/cmake/external/snappystream.cmake index 8f7a3bf8eeaef75c8840f4ea318b484d33249bb7..20a96430823d07a07d4bb4602e7fc0cfe55c3bf2 100644 --- a/cmake/external/snappystream.cmake +++ b/cmake/external/snappystream.cmake @@ -11,9 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -IF(MOBILE_INFERENCE) +IF(MOBILE_INFERENCE OR RPI) return() ENDIF() @@ -21,9 +20,11 @@ include (ExternalProject) # NOTE: snappy is needed when linking with recordio -SET(SNAPPYSTREAM_SOURCES_DIR ${THIRD_PARTY_PATH}/snappy_stream) -SET(SNAPPYSTREAM_INSTALL_DIR ${THIRD_PARTY_PATH}/install/snappy_stream) -SET(SNAPPYSTREAM_INCLUDE_DIR "${SNAPPYSTREAM_INSTALL_DIR}/include/" CACHE PATH "snappy stream include directory." FORCE) +set(SNAPPYSTREAM_SOURCES_DIR ${THIRD_PARTY_PATH}/snappy_stream) +set(SNAPPYSTREAM_INSTALL_DIR ${THIRD_PARTY_PATH}/install/snappy_stream) +set(SNAPPYSTREAM_INCLUDE_DIR "${SNAPPYSTREAM_INSTALL_DIR}/include" CACHE PATH "snappy stream include directory." FORCE) + +set(SNAPPYSTREAM_LIBRARIES "${SNAPPYSTREAM_INSTALL_DIR}/lib/libsnappystream.a") ExternalProject_Add( extern_snappystream @@ -51,8 +52,7 @@ ExternalProject_Add( ) add_library(snappystream STATIC IMPORTED GLOBAL) -set_property(TARGET snappystream PROPERTY IMPORTED_LOCATION - "${SNAPPYSTREAM_INSTALL_DIR}/lib/libsnappystream.a") +set_property(TARGET snappystream PROPERTY IMPORTED_LOCATION ${SNAPPYSTREAM_LIBRARIES}) include_directories(${SNAPPYSTREAM_INCLUDE_DIR}) # For snappysteam to include its own headers. include_directories(${THIRD_PARTY_PATH}/install) # For Paddle to include snappy stream headers. diff --git a/cmake/generic.cmake b/cmake/generic.cmake index c4c9f77df8d57fe162616d2250bd4dfe5b7754e7..1d3e2ade6d393c6e4c37eea0dc1064cdb18808a5 100644 --- a/cmake/generic.cmake +++ b/cmake/generic.cmake @@ -195,14 +195,7 @@ function(cc_library TARGET_NAME) list(REMOVE_ITEM cc_library_DEPS warpctc) add_dependencies(${TARGET_NAME} warpctc) endif() - if("${cc_library_DEPS}" MATCHES "ARCHIVE_START") - # Support linking flags: --whole-archive (Linux) / -force_load (MacOS). - # WARNING: Please don't use ARCHIVE_START&ARCHIVE_END if TARGET_NAME will be linked by other libraries. 
- target_circle_link_libraries(${TARGET_NAME} ${cc_library_DEPS}) - list(REMOVE_ITEM cc_library_DEPS ARCHIVE_START ARCHIVE_END) - else() - target_link_libraries(${TARGET_NAME} ${cc_library_DEPS}) - endif() + target_link_libraries(${TARGET_NAME} ${cc_library_DEPS}) add_dependencies(${TARGET_NAME} ${cc_library_DEPS}) endif() @@ -243,11 +236,7 @@ function(cc_test TARGET_NAME) set(multiValueArgs SRCS DEPS ARGS) cmake_parse_arguments(cc_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) add_executable(${TARGET_NAME} ${cc_test_SRCS}) - # Support linking flags: --whole-archive (Linux) / -force_load (MacOS) - target_circle_link_libraries(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main memory gtest gflags glog) - if("${cc_test_DEPS}" MATCHES "ARCHIVE_START") - list(REMOVE_ITEM cc_test_DEPS ARCHIVE_START ARCHIVE_END) - endif() + target_link_libraries(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main memory gtest gflags glog) add_dependencies(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main memory gtest gflags glog) add_test(NAME ${TARGET_NAME} COMMAND ${TARGET_NAME} ${cc_test_ARGS} diff --git a/cmake/inference_lib.cmake b/cmake/inference_lib.cmake index 0323cd9698cba916d2aa04403be97c0a6a463830..cc758019827b9a5416a801e4da43d754d4492a73 100644 --- a/cmake/inference_lib.cmake +++ b/cmake/inference_lib.cmake @@ -1,7 +1,22 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
 set_property(GLOBAL PROPERTY FLUID_MODULES "")
 # find all fluid modules is used for paddle fluid static library
 function(find_fluid_modules TARGET_NAME)
   get_filename_component(__target_path ${TARGET_NAME} ABSOLUTE)
+  string(REGEX REPLACE "^${PADDLE_SOURCE_DIR}/" "" __target_path ${__target_path})
   string(FIND "${__target_path}" "fluid" pos)
   if(pos GREATER 1)
     get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES)
@@ -77,6 +92,23 @@ elseif (WITH_MKLML)
   )
 endif()
 
+if(NOT MOBILE_INFERENCE AND NOT RPI)
+  set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/snappy")
+  copy(snappy_lib
+    SRCS ${SNAPPY_INCLUDE_DIR} ${SNAPPY_LIBRARIES}
+    DSTS ${dst_dir} ${dst_dir}/lib)
+
+  set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/snappystream")
+  copy(snappystream_lib
+    SRCS ${SNAPPYSTREAM_INCLUDE_DIR} ${SNAPPYSTREAM_LIBRARIES}
+    DSTS ${dst_dir} ${dst_dir}/lib)
+
+  set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/zlib")
+  copy(zlib_lib
+    SRCS ${ZLIB_INCLUDE_DIR} ${ZLIB_LIBRARIES}
+    DSTS ${dst_dir} ${dst_dir}/lib)
+endif()
+
 # paddle fluid module
 set(src_dir "${PADDLE_SOURCE_DIR}/paddle/fluid")
 set(dst_dir "${CMAKE_INSTALL_PREFIX}/paddle/fluid")
diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt
index 7066637a7cb27b83724cb4030c29a1019981f52b..0f9521616952a2857222feab8c38fb480761ee2d 100644
--- a/doc/CMakeLists.txt
+++ b/doc/CMakeLists.txt
@@ -3,7 +3,9 @@ add_custom_target(paddle_apis ALL
 
 add_custom_target(paddle_docs ALL
   DEPENDS paddle_v2_docs paddle_v2_docs_cn
-  paddle_fluid_docs paddle_fluid_docs_cn)
+  paddle_fluid_docs paddle_fluid_docs_cn
+  paddle_mobile_docs paddle_mobile_docs_cn)
 
 add_subdirectory(v2)
 add_subdirectory(fluid)
+add_subdirectory(mobile)
diff --git a/doc/fluid/api/layers.rst b/doc/fluid/api/layers.rst
index 22e6fb13d7320986a60bc1ef5530187e0970c767..5c02886efd7d11e9520910526fb90ec01e123bae 100644
--- a/doc/fluid/api/layers.rst
+++ b/doc/fluid/api/layers.rst
@@ -473,6 +473,12 @@ multiplex
 .. autofunction:: paddle.fluid.layers.multiplex
     :noindex:
 
+label_smooth
+------------
+
+.. autofunction:: paddle.fluid.layers.label_smooth
+    :noindex:
+
 ops
 ===
 
diff --git a/doc/fluid/design/concepts/parallel_executor.md b/doc/fluid/design/concepts/parallel_executor.md
index 9aed3b059a1595ba3971d7d5acfc0d16a731584b..4f88e27bed722e9f2f535e368926fe49b4e72e56 100644
--- a/doc/fluid/design/concepts/parallel_executor.md
+++ b/doc/fluid/design/concepts/parallel_executor.md
@@ -84,7 +84,7 @@ Running an operator can be asynchronized. There is a thread pool to execute an `
 
 ## Synchronize GPU Kernels
 
-The GPU is a non-blocking device. The different streams need be synchronized when switing streams. In current implementation, the synchronization based on the following algorithm:
+The GPU is a non-blocking device. Different streams need to be synchronized when switching streams. In the current implementation, the synchronization is based on the following algorithm:
 
 1. `OpHandle` will record `DeviceContext` that it is used.
 2. In `OpHandle::Run`, if the `DeviceContext` of current operator is different from `DeviceContext` of any input variable, just wait the generate operator of this input variable.
diff --git a/doc/fluid/design/dist_train/README.md b/doc/fluid/design/dist_train/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..2dd652d8bdcb8f3b6e759347bd55b217be909386
--- /dev/null
+++ b/doc/fluid/design/dist_train/README.md
@@ -0,0 +1,57 @@
+## Distributed training overview doc
+
+Currently Paddle Fluid uses a parameter server architecture to support distributed training.
+
+For synchronous and asynchronous training, the differences are mostly in the logic of the parameter server. We already support synchronous training.
+
+### Synchronous training
+
+The training process of synchronous training is:
+
+![synchronous distributed training](./src/sync_distributed_training.png)
+
+1. Pserver
+    1. Sets `barrier_condition_` to 0 and waits for trainers to send gradients.
+1. Trainer
+    1. The trainer reads a minibatch of data, runs forward-backward with its local parameter copy and gets the gradients for the parameters.
+    1. The trainer uses the split op to split all the gradients into blocks. The split method is determined at compile time.
+    1. The trainer uses send_op to send all the split gradients to the corresponding parameter servers.
+    1. After the trainer has sent all the gradients, it sends a `BATCH_BARRIER_MESSAGE` to all pservers.
+    1. The trainer calls GetVariable on the pserver and waits for `barrier_condition_` on the pserver to become 1.
+1. Pserver
+    1. The pserver counts the number of `BATCH_BARRIER_MESSAGE`s.
+    1. When the count of `BATCH_BARRIER_MESSAGE`s equals the number of trainers, the pserver considers that it has received all gradients from all trainers.
+    1. The pserver runs the optimization block to optimize the parameters.
+    1. After optimization, the pserver sets `barrier_condition_` to 1.
+    1. The pserver waits for `FETCH_BARRIER_MESSAGE`s.
+1. Trainer
+    1. The trainer uses GetVariable to get all the parameters from the pserver.
+    1. The trainer sends a `FETCH_BARRIER_MESSAGE` to each pserver.
+1. Pserver
+    1. When the number of `FETCH_BARRIER_MESSAGE`s reaches the number of trainers, the pserver considers that all parameters have been fetched, and it goes back to step 1 to set `barrier_condition_` to 0.
+
+### Asynchronous training
+In the above process, there are two barriers for all trainers to synchronize with each other. In asynchronous training, these two barriers are not needed; a trainer can just send gradients to the pserver and then get the parameters back.
+
+The training process of asynchronous training can be:
+
+![asynchronous distributed training](./src/async_distributed_training.png)
+
+1. Pserver:
+    1. Each parameter has a queue to receive its gradients from trainers.
+    1. Each parameter has a thread that reads data from the queue and runs the optimize block, using the gradient to optimize the parameter.
+    1. An independent thread handles the RPC call `GetVariable` for trainers to get parameters back. (Maybe we should use a thread pool here to speed up fetching the parameters.)
+
+1. Trainer:
+    1. The trainer reads a batch of data, runs forward and backward with its local parameter copy and gets the gradients for the parameters.
+    1. The trainer splits all gradients into blocks and then sends these gradient blocks to the pservers (the pserver will put them into the queue).
+    2. The trainer gets all parameters back from the pserver.
+
+### Note:
+There are also some conditions that need to be considered. For example:
+
+1. Whether a trainer needs to wait for the pserver to apply its gradients before getting the parameters back.
+1. Whether we need a lock between the parameter update and the parameter fetch.
+1. Whether one parameter must live on one server, or whether it can be split and sent to multiple parameter servers.
+
+The above architecture for asynchronous training can support different modes; we can test these questions in detail in the future.
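+
+To make the per-parameter queue flow above concrete, here is a minimal Python sketch. It is not part of Fluid; names such as `param_queues`, `optimize_loop` and the plain SGD update are illustrative assumptions only:
+
+```python
+import queue
+import threading
+
+import numpy as np
+
+# One blocking queue per parameter; trainers enqueue gradient blocks.
+param_queues = {"w1": queue.Queue()}
+params = {"w1": np.zeros(4, dtype=np.float32)}
+LR = 0.01
+
+def optimize_loop(name):
+    # Stand-in for the per-parameter optimize block: pop one gradient
+    # at a time and apply a plain SGD update to the parameter.
+    while True:
+        grad = param_queues[name].get()
+        if grad is None:  # shutdown signal
+            return
+        params[name] -= LR * grad
+
+worker = threading.Thread(target=optimize_loop, args=("w1",))
+worker.start()
+
+# A trainer sends a gradient and can immediately read the parameter back;
+# there is no batch barrier or fetch barrier in the asynchronous mode.
+param_queues["w1"].put(np.ones(4, dtype=np.float32))
+param_queues["w1"].put(None)
+worker.join()
+print(params["w1"])  # [-0.01 -0.01 -0.01 -0.01]
+```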
diff --git a/doc/fluid/design/dist_train/async_update.md b/doc/fluid/design/dist_train/async_update.md
new file mode 100644
index 0000000000000000000000000000000000000000..6a0835b761b69030ba30697e6e8863928efbf57f
--- /dev/null
+++ b/doc/fluid/design/dist_train/async_update.md
@@ -0,0 +1,58 @@
+# Design Doc: Asynchronous Update With Distributed Training
+
+## Background
+
+For typical synchronous distributed training, the significant steps are as follows:
+
+1. A trainer computes the gradients and SENDs them to the parameter server (PServer) nodes.
+1. After a PServer node has received the gradients from all trainers, it aggregates the
+gradient variables for the same parameter into one gradient variable, applies the aggregated
+gradient to the respective parameter, and finally uses an optimization algorithm (SGD, Momentum...)
+to update the parameters.
+1. The trainers wait for the PServers to finish the optimization stage and GET the parameters from the PServers,
+so all trainers end up with the same parameters.
+
+In synchronous distributed training, there has to be a `Barrier` to synchronize the
+parameters after the optimization stage. The performance of a distributed training job
+depends on its slowest node; if there are hundreds or thousands of training nodes in a
+job, the performance of synchronous distributed training can be very poor because of
+slow nodes. So this design doc introduces an approach to implement
+*asynchronous* distributed training in PaddlePaddle Fluid.
+
+## Design
+
+![asynchronous update](./src/async_update.png)
+
+The figure above gives a global view of the asynchronous update process, using
+the parameter `w1` as an example to introduce the steps:
+1. Each gradient variable may be placed on a different GPU card; aggregate
+them once they have all been computed.
+1. Split the gradient variable into multiple blocks according to the number of PServer
+instances and then send them.
+1. The PServer runs an `Optimize Block`, using a specified optimization algorithm, to update
+the specified parameter.
+1. The trainer fetches the latest parameter from the PServer before running a forward op that depends
+on the specified parameter.
+1. Broadcast the received variable to the multiple GPU cards and continue with the next
+mini-batch.
+
+### Trainer
+
+- For multi-device distributed training, we need to aggregate the gradient
+variables placed on different devices first, and then schedule a `SendVars` operator to
+send the gradient variables to the multiple PServer instances.
+- Schedule a `FetchVars` operator to fetch the latest parameters from the PServers before running
+the forward ops.
+- There could be a large number of gradient variables to send, so we need another
+thread pool (IO threadpool), whose number of schedulable threads is larger than that of the
+computing thread pool, to avoid competing with computation for thread resources.
+
+### Parameter Server
+
+![asynchronous pserver](./src/async_pserver.png)
+
+- Multiple trainer instances may want to optimize the same parameter at
+the same time; to avoid races, we need one `BlockingQueue` for each gradient
+variable, so the gradients are processed one by one.
+- We need a `Map` structure that maps a gradient variable name to the `OptimizeBlock` which
+can optimize the respective parameter.
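+
+As a minimal illustration of the splitting step above (a sketch only: `split_gradient` is a hypothetical helper, while in Fluid the splitting is done by the split op inserted at compile time), a gradient can be cut into one block per PServer instance like this:
+
+```python
+import numpy as np
+
+def split_gradient(grad, num_pservers):
+    # Cut a flattened gradient tensor into roughly equal blocks,
+    # one block per parameter server instance.
+    return np.array_split(grad.ravel(), num_pservers)
+
+grad_w1 = np.arange(10, dtype=np.float32)
+for i, block in enumerate(split_gradient(grad_w1, 3)):
+    # Stand-in for sending each block to its parameter server.
+    print("send block %d -> %s" % (i, block))
+```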
diff --git a/doc/fluid/design/dist_train/src/async_distributed_training.png b/doc/fluid/design/dist_train/src/async_distributed_training.png new file mode 100644 index 0000000000000000000000000000000000000000..3b53ab59c0cd7b44b2956f16f1adc47fe85909d3 Binary files /dev/null and b/doc/fluid/design/dist_train/src/async_distributed_training.png differ diff --git a/doc/fluid/design/dist_train/src/async_pserver.graffle b/doc/fluid/design/dist_train/src/async_pserver.graffle new file mode 100644 index 0000000000000000000000000000000000000000..d2301611774fcb3866473e3e6470568d1e1312cf Binary files /dev/null and b/doc/fluid/design/dist_train/src/async_pserver.graffle differ diff --git a/doc/fluid/design/dist_train/src/async_pserver.png b/doc/fluid/design/dist_train/src/async_pserver.png new file mode 100644 index 0000000000000000000000000000000000000000..7d900b0c0eb291c67537b9cf93227c671bafdc73 Binary files /dev/null and b/doc/fluid/design/dist_train/src/async_pserver.png differ diff --git a/doc/fluid/design/dist_train/src/async_update.graffle b/doc/fluid/design/dist_train/src/async_update.graffle new file mode 100644 index 0000000000000000000000000000000000000000..3a631888688a0d564a873fcb16d943958c91223e Binary files /dev/null and b/doc/fluid/design/dist_train/src/async_update.graffle differ diff --git a/doc/fluid/design/dist_train/src/async_update.png b/doc/fluid/design/dist_train/src/async_update.png new file mode 100644 index 0000000000000000000000000000000000000000..3e8db973f45d6d9ac8dcce1dc7878067e79e6dcc Binary files /dev/null and b/doc/fluid/design/dist_train/src/async_update.png differ diff --git a/doc/fluid/design/dist_train/src/distributed_training.graffle b/doc/fluid/design/dist_train/src/distributed_training.graffle new file mode 100644 index 0000000000000000000000000000000000000000..1168801bc1fadfce310a74cb3110695bd1629f6b Binary files /dev/null and b/doc/fluid/design/dist_train/src/distributed_training.graffle differ diff --git a/doc/fluid/design/dist_train/src/sync_distributed_training.png b/doc/fluid/design/dist_train/src/sync_distributed_training.png new file mode 100644 index 0000000000000000000000000000000000000000..e4f9a221fea4b7238e8a1d84e609c0371f6ef7a2 Binary files /dev/null and b/doc/fluid/design/dist_train/src/sync_distributed_training.png differ diff --git a/doc/fluid/design/motivation/fluid.md b/doc/fluid/design/motivation/fluid.md index 5e147f8263e685a4665b5793f7127178cbc3cfdd..4b7696cc1bbf57ace72c4d31ffc2bfe6c1071939 100644 --- a/doc/fluid/design/motivation/fluid.md +++ b/doc/fluid/design/motivation/fluid.md @@ -119,7 +119,7 @@ An actual Fluid example is described [here](https://github.com/PaddlePaddle/Pad From the example, the Fluid programs look very similar to their PyTorch equivalent programs, except that Fluid's loop structure, wrapped with Python's `with` statement, could run much faster than just a Python loop. -We have more examples of the [`if-then-else`](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/if_else_op.md) structure of Fluid. +We have more examples of the [`if-then-else`](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/execution/if_else_op.md) structure of Fluid. ## Turing Completeness diff --git a/doc/fluid/dev/index_cn.rst b/doc/fluid/dev/index_cn.rst index b123b756e2251c38f319e1aefa2cb04fd7a36b03..ad798003f560e7fb0e6db6083fdd152fd3417584 100644 --- a/doc/fluid/dev/index_cn.rst +++ b/doc/fluid/dev/index_cn.rst @@ -4,6 +4,7 @@ .. 
toctree::
    :maxdepth: 1
 
+   api_doc_std_cn.md
    new_op_cn.md
    new_op_kernel.md
    use_eigen_cn.md
diff --git a/doc/fluid/dev/index_en.rst b/doc/fluid/dev/index_en.rst
index 98988fc22dcedecdbcd67fb3bf761377bf046337..80c899a82fa452c5cd8f38dad89c15d3041b09e3 100644
--- a/doc/fluid/dev/index_en.rst
+++ b/doc/fluid/dev/index_en.rst
@@ -4,6 +4,7 @@ Development
 .. toctree::
    :maxdepth: 1
 
+   api_doc_std_en.md
    new_op_en.md
    new_op_kernel.md
    use_eigen_en.md
diff --git a/doc/mobile/CMakeLists.txt b/doc/mobile/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b104a6318d474d6531670b8ac3569448774850c7
--- /dev/null
+++ b/doc/mobile/CMakeLists.txt
@@ -0,0 +1,53 @@
+if(NOT DEFINED SPHINX_THEME)
+  set(SPHINX_THEME default)
+endif()
+
+if(NOT DEFINED SPHINX_THEME_DIR)
+  set(SPHINX_THEME_DIR)
+endif()
+
+# configured documentation tools and intermediate build results
+set(BINARY_BUILD_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_build")
+
+# Sphinx cache with pickled ReST documents
+set(SPHINX_CACHE_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_doctrees")
+
+# HTML output directory
+set(SPHINX_HTML_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/html")
+
+configure_file(
+  "${CMAKE_CURRENT_SOURCE_DIR}/../templates/conf.py.en.in"
+  "${BINARY_BUILD_DIR_EN}/conf.py"
+  @ONLY)
+
+sphinx_add_target(paddle_mobile_docs
+  html
+  ${BINARY_BUILD_DIR_EN}
+  ${SPHINX_CACHE_DIR_EN}
+  ${CMAKE_CURRENT_SOURCE_DIR}
+  ${SPHINX_HTML_DIR_EN})
+
+add_dependencies(paddle_mobile_docs gen_proto_py paddle_python)
+
+# configured documentation tools and intermediate build results
+set(BINARY_BUILD_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/_build")
+
+# Sphinx cache with pickled ReST documents
+set(SPHINX_CACHE_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/_doctrees")
+
+# HTML output directory
+set(SPHINX_HTML_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/html")
+
+configure_file(
+  "${CMAKE_CURRENT_SOURCE_DIR}/../templates/conf.py.cn.in"
+  "${BINARY_BUILD_DIR_CN}/conf.py"
+  @ONLY)
+
+sphinx_add_target(paddle_mobile_docs_cn
+  html
+  ${BINARY_BUILD_DIR_CN}
+  ${SPHINX_CACHE_DIR_CN}
+  ${CMAKE_CURRENT_SOURCE_DIR}
+  ${SPHINX_HTML_DIR_CN})
+
+add_dependencies(paddle_mobile_docs_cn gen_proto_py paddle_python)
diff --git a/doc/mobile/index_cn.rst b/doc/mobile/index_cn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8297316e8fbb2b8f41954030293feadbcd81295e
--- /dev/null
+++ b/doc/mobile/index_cn.rst
@@ -0,0 +1,9 @@
+移动端
+=====
+
+.. toctree::
+  :maxdepth: 1
+
+  cross_compiling_for_android_cn.md
+  cross_compiling_for_ios_cn.md
+  cross_compiling_for_raspberry_cn.md
\ No newline at end of file
diff --git a/doc/mobile/index_en.rst b/doc/mobile/index_en.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e0acdff0284e3bc84b2cc4a34a142ee01754f940
--- /dev/null
+++ b/doc/mobile/index_en.rst
@@ -0,0 +1,9 @@
+Mobile
+======
+
+.. toctree::
+  :maxdepth: 1
+
+  cross_compiling_for_android_en.md
+  cross_compiling_for_ios_en.md
+  cross_compiling_for_raspberry_en.md
diff --git a/doc/v2/dev/write_docs_cn.rst b/doc/v2/dev/write_docs_cn.rst
index 23615f8830e99633676c83ec5d28139a732c623c..4231f2bb5cd800c0cd86835b5d07e491fcde4989 100644
--- a/doc/v2/dev/write_docs_cn.rst
+++ b/doc/v2/dev/write_docs_cn.rst
@@ -65,39 +65,55 @@ PaddlePaddle.org工具可以配合Docker使用,需要在系统里先安装好D
 不使用PaddlePaddle.org工具
 --------------------------
 
-使用Docker构建PaddlePaddle的文档,需要在系统里先安装好Docker工具包。Docker安装请参考 `Docker的官网 `_ 。安装好Docker之后可以使用源码目录下的脚本构建文档,即
+使用Docker构建PaddlePaddle的文档,需要在系统里先安装好Docker工具包。Docker安装请参考 `Docker的官网 `_ 。该方法与 `从源码编译PaddlePaddle `_ 相似,通过从源码中构建可用于编译PaddlePaddle文档的Docker镜像并运行,在进入Docker容器后使用源码中的脚本构建PaddlePaddle文档,具体步骤如下:
 
-[TBD]
+.. code-block:: bash
+
+   git clone https://github.com/PaddlePaddle/Paddle.git
+   cd Paddle
+
+   # 从源码中构建可用于编译PaddlePaddle文档的Docker镜像
+   docker build -t paddle:dev .
+   docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=OFF" -e "WITH_DOC=ON" paddle:dev /bin/bash
+
+   # 进入Docker容器后使用build.sh脚本构建PaddlePaddle文档
+   bash -x /paddle/paddle/scripts/docker/build.sh
+
+注:上述命令把当前目录(源码根目录)映射为 container 里的 :code:`/paddle` 目录。
+
+编译完成后,会产生 ``doc/v2`` 和 ``doc/fluid`` 两个目录,在这两个目录下分别都生成 ``cn/html/`` 、 ``en/html`` 、 ``api/en/html`` 共三个子目录,分别进入这些目录下,执行以下命令:
+
+.. code-block:: bash
+
+   python -m SimpleHTTPServer 8088
+
+在浏览器中输入 http://localhost:8088 就可以看到编译生成的 ``v2`` 和 ``fluid`` 两种版本的中/英文的文档页面和英文的API页面。
 
 如果不想使用Docker,也可以使用以下命令直接构建PaddlePaddle文档,即
 
 .. code-block:: bash
 
-    mkdir paddle
-    cd paddle
     git clone https://github.com/PaddlePaddle/Paddle.git
+    cd Paddle
     mkdir -p build
    cd build
     cmake .. -DCMAKE_BUILD_TYPE=Release -DWITH_GPU=OFF -DWITH_MKL=OFF -DWITH_DOC=ON
 
     # 如果只需要构建使用文档,则执行以下命令
-    make -j $processors gen_proto_py
-    make -j $processors paddle_docs paddle_docs_cn
+    make -j $processors paddle_docs
 
     # 如果只需要构建API,则执行以下命令
-    make -j $processors gen_proto_py framework_py_proto
-    make -j $processors copy_paddle_pybind
-    make -j $processors paddle_api_docs
+    make -j $processors paddle_apis
 
 其中$processors代表启动和CPU核一样多的进程来并行编译,可以根据本机的CPU核数设置相应的值。
 
-编译完成后,进入 ``doc/v2`` 目录,如果选择构建文档则会在该目录下生成 ``cn/html/`` 、 ``en/html`` 两个子目录,选择构建API则会生成 ``api/en/html`` 目录,分别进入这些目录下,执行以下命令:
+编译完成后,同样会产生 ``doc/v2`` 和 ``doc/fluid`` 两个目录,如果选择构建文档则会在这两个目录下分别都生成 ``cn/html/`` 、 ``en/html`` 两个子目录,选择构建API则会在这两个目录下分别生成 ``api/en/html`` 目录,分别进入这些子目录下,执行以下命令:
 
 .. code-block:: bash
 
     python -m SimpleHTTPServer 8088
 
-在浏览器中输入 http://localhost:8088 就可以看到编译生成的中/英文的文档页面和英文的API页面,下图为生成的英文文档首页示例。注意,示例中由于使用了sphinx的原始主题,所以页面的风格与官网并不一致,但这并不影响开发者进行调试。
+在浏览器中输入 http://localhost:8088 就可以看到编译生成的 ``v2`` 和 ``fluid`` 两种版本的中/英文的文档页面和英文的API页面。下图为生成的 ``v2`` 英文文档首页示例。注意,示例中由于使用了sphinx的原始主题,所以页面的风格与官网并不一致,但这并不影响开发者进行调试。
 
 .. image:: src/doc_en.png
    :align: center
diff --git a/doc/v2/dev/write_docs_en.rst b/doc/v2/dev/write_docs_en.rst
index 15ff0d34ad622f100fe98d8738b830e47c35b41b..6105455e202e4704aa25f0fd9916b9b61a569702 100644
--- a/doc/v2/dev/write_docs_en.rst
+++ b/doc/v2/dev/write_docs_en.rst
@@ -68,39 +68,56 @@ Please `click here `_ on how to install Docker. After Docker is installed, you could use the scripts in the source directory to build the documentation.
+To build PaddlePaddle's documentation with Docker, you need to install Docker first. Please refer to `Docker's official website `_ on how to install Docker. This method is quite similar to ` Build From Sources `_ , by constructing, from source code, a docker image that can be used to build the PaddlePaddle documentation.
+Enter the Docker container and use the script ``build.sh`` in the source directory to build the PaddlePaddle documentation. The specific steps are as follows:
 
-[TBD]
+.. code-block:: bash
+
+   git clone https://github.com/PaddlePaddle/Paddle.git
+   cd Paddle
+
+   # Construct a docker image from source code
+   docker build -t paddle:dev .
+   docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=OFF" -e "WITH_DOC=ON" paddle:dev /bin/bash
+
+   # Use build.sh to build PaddlePaddle documentation
+   bash -x /paddle/paddle/scripts/docker/build.sh
+
+Note: The above commands map the current directory (the source root directory) to the :code:`/paddle` directory in the container.
+
+After compiling, there should be two generated directories: ``doc/v2`` and ``doc/fluid``, in each of which three subdirectories ``cn/html/``, ``en/html`` and ``api/en/html`` are generated. Please enter these directories respectively and execute the following commands:
+
+.. code-block:: bash
+
+   python -m SimpleHTTPServer 8088
+
+Use a web browser and navigate to http://localhost:8088; you can see the compiled ``v2`` and ``fluid`` Chinese/English documentation pages and the English API pages.
 
 If you do not wish to use Docker, you can also use the following commands to directly build the PaddlePaddle documentation.
 
 .. code-block:: bash
 
-    mkdir paddle
-    cd paddle
+    git clone https://github.com/PaddlePaddle/Paddle.git
+    cd Paddle
     mkdir -p build
     cd build
     cmake .. -DCMAKE_BUILD_TYPE=Release -DWITH_GPU=OFF -DWITH_MKL=OFF -DWITH_DOC=ON
 
     # If you only need to build documents, use the following commands
-    make -j $processors gen_proto_py
-    make -j $processors paddle_docs paddle_docs_cn
+    make -j $processors paddle_docs
 
     # If you only need to build APIs, use the following commands
-    make -j $processors gen_proto_py framework_py_proto
-    make -j $processors copy_paddle_pybind
-    make -j $processors paddle_api_docs
+    make -j $processors paddle_apis
 
 $processors indicates that as many processes as there are CPU cores are started to compile in parallel. It should be set according to the number of CPU cores of your machine.
 
-After the compilation is complete, enter the ``doc/v2`` directory. If you chose to build documents, it will generate ``cn/html/`` and ``en/html`` subdirectories under this directory. If you chose to build APIs,it will generate``api/en/html`` subdirectory. Please enter these directories respectively and execute the following commands:
+After compiling, there should also be two generated directories: ``doc/v2`` and ``doc/fluid``. If you chose to build documents, two subdirectories ``cn/html/`` and ``en/html`` will be generated in both directories. If you chose to build APIs, a subdirectory ``api/en/html`` will be generated. Please enter these directories respectively and execute the following commands:
 
 .. code-block:: bash
 
     python -m SimpleHTTPServer 8088
 
-Use a web browser and navigate to http://localhost:8000, you could see the compiled Chinese/English documents page and the English APIs page. The following figure is an example of the built English documents home page. Note that due to the sphinx's original theme used in the example, the style of the page is not consistent with the official website, but this does not affect the developer's debugging.
+Use a web browser and navigate to http://localhost:8088; you can see the compiled ``v2`` and ``fluid`` Chinese/English documentation pages and the English API pages. The following figure is an example of the built ``v2`` English documentation home page.
+Note that due to sphinx's original theme used in the example, the style of the page is not consistent with the official website, but this does not affect the developer's debugging.
 
 .. image:: src/doc_en.png
    :align: center
diff --git a/doc/v2/howto/capi/compile_paddle_lib_en.md b/doc/v2/howto/capi/compile_paddle_lib_en.md
index 11d69b9b79c1a41898d3060d3fe25a31330334a3..6212a3081116d988630706e83d2349dd200b73ab 100644
--- a/doc/v2/howto/capi/compile_paddle_lib_en.md
+++ b/doc/v2/howto/capi/compile_paddle_lib_en.md
@@ -1,3 +1,175 @@
 ## Install and Build
-TBD
+### Download & Install
+
+Download the latest C-API development package from the CI system and install it. You can find the required version in the table below:
+
+| Version Tips           | C-API      |
+|------------------------|------------|
+| cpu_avx_mkl            | paddle.tgz |
+| cpu_avx_openblas       | -          |
+| cpu_noavx_openblas     | paddle.tgz |
+| cuda7.5_cudnn5_avx_mkl | paddle.tgz |
+| cuda8.0_cudnn5_avx_mkl | paddle.tgz |
+| cuda8.0_cudnn7_avx_mkl | paddle.tgz |
+
+### From source
+
+Users can also compile the C-API library from the PaddlePaddle source code, with the following compilation options:
+
+| Options      | Value            |
+|--------------|------------------|
+| WITH_C_API   | ON               |
+| WITH_PYTHON  | OFF(recommended) |
+| WITH_SWIG_PY | OFF(recommended) |
+| WITH_GOLANG  | OFF(recommended) |
+| WITH_GPU     | ON/OFF           |
+| WITH_MKL     | ON/OFF           |
+
+It is best to use the recommended values, to avoid linking against unnecessary libraries. Set the other compilation options as you need.
+
+Pull the latest code from GitHub and configure the compilation options as in the following snippet (replace PADDLE_ROOT with the installation path of the PaddlePaddle C-API inference library):
+
+```shell
+PADDLE_ROOT=/path/of/capi
+git clone https://github.com/PaddlePaddle/Paddle.git
+cd Paddle
+mkdir build
+cd build
+cmake -DCMAKE_INSTALL_PREFIX=$PADDLE_ROOT \
+      -DCMAKE_BUILD_TYPE=Release \
+      -DWITH_C_API=ON \
+      -DWITH_SWIG_PY=OFF \
+      -DWITH_GOLANG=OFF \
+      -DWITH_PYTHON=OFF \
+      -DWITH_MKL=OFF \
+      -DWITH_GPU=OFF \
+      ..
+```
+
+After running the above commands to generate the Makefile, run `make && make install`. After a successful compilation, the dependencies required by the C-API ((1) the PaddlePaddle inference library and header files; (2) third-party libraries and header files) will be stored in the `PADDLE_ROOT` directory.
+
+If the compilation is successful, you will see the following directory structure under `PADDLE_ROOT` (the PaddlePaddle header files and libraries, plus the third-party libraries and header files needed by the chosen linking method):
+
+```text
+├── include
+│   └── paddle
+│       ├── arguments.h
+│       ├── capi.h
+│       ├── capi_private.h
+│       ├── config.h
+│       ├── error.h
+│       ├── gradient_machine.h
+│       ├── main.h
+│       ├── matrix.h
+│       ├── paddle_capi.map
+│       └── vector.h
+├── lib
+│   ├── libpaddle_capi_engine.a
+│   ├── libpaddle_capi_layers.a
+│   ├── libpaddle_capi_shared.so
+│   └── libpaddle_capi_whole.a
+└── third_party
+    ├── gflags
+    │   ├── include
+    │   │   └── gflags
+    │   │       ├── gflags_completions.h
+    │   │       ├── gflags_declare.h
+    │   │       ...
+    │   └── lib
+    │       └── libgflags.a
+    ├── glog
+    │   ├── include
+    │   │   └── glog
+    │   │       ├── config.h
+    │   │       ...
+    │   └── lib
+    │       └── libglog.a
+    ├── openblas
+    │   ├── include
+    │   │   ├── cblas.h
+    │   │   ...
+    │   └── lib
+    │       ...
+    ├── protobuf
+    │   ├── include
+    │   │   └── google
+    │   │       └── protobuf
+    │   │           ...
+    │   └── lib
+    │       └── libprotobuf-lite.a
+    └── zlib
+        ├── include
+        │   ...
+        └── lib
+            ...
+
+```
+
+### Linking Description:
+
+There are three kinds of linking methods:
+
+1. Linking with the dynamic library `libpaddle_capi_shared.so` (this is the most convenient method and, **unless you have special requirements, it is recommended**); refer to the following:
+    1. When compiling the CPU version with `OpenBLAS`, you only need to link the single library `libpaddle_capi_shared.so` to develop a prediction program through the C-API.
+    1. When compiling the CPU version with `MKL`, you also need to link the MKL libraries directly, because `MKL` ships its own dynamic libraries.
+    1. When compiling the GPU version, the CUDA library is loaded dynamically when the prediction program runs; also add the CUDA library path to the `LD_LIBRARY_PATH` environment variable.
+
+2. Linking with the static library `libpaddle_capi_whole.a`; refer to the following:
+    1. Specify the `-Wl,--whole-archive` linking option.
+    1. Explicitly link third-party libraries such as `gflags`, `glog`, `libz`, `protobuf`, etc.; you can find them under the `PADDLE_ROOT/third_party` directory.
+    1. If the C-API was compiled with OpenBLAS, you must explicitly link `libopenblas.a`.
+    1. If the C-API was compiled with MKL, you must explicitly link the MKL dynamic libraries.
+
+3. Linking with the static libraries `libpaddle_capi_layers.a` and `libpaddle_capi_engine.a`; refer to the following:
+    1. This linking method is mainly used for mobile inference.
+    1. `libpaddle_capi_whole.a` is split into at least two static libraries to reduce the size of the linked libraries.
+    1. Specify `-Wl,--whole-archive -lpaddle_capi_layers` and `-Wl,--no-whole-archive -lpaddle_capi_engine` for linking.
+    1. The third-party dependencies must be linked explicitly, the same as in method 2 above.
diff --git a/doc/v2/howto/cluster/multi_cluster/k8s_distributed_en.md b/doc/v2/howto/cluster/multi_cluster/k8s_distributed_en.md
index bc3d50b3ffd3b703a3a656caa1f96bdcf683f68b..dee1b7554f97af17989c3f7739d8feea3b6b8e3f 100644
--- a/doc/v2/howto/cluster/multi_cluster/k8s_distributed_en.md
+++ b/doc/v2/howto/cluster/multi_cluster/k8s_distributed_en.md
@@ -1,3 +1,372 @@
-# Kubernetes Distributed
+# Distributed Training on Kubernetes
 
-TBD
+We introduced how to create a PaddlePaddle Job with a single node on Kubernetes in the
+previous document.
+In this article, we will introduce how to create a PaddlePaddle job with multiple nodes
+on a Kubernetes cluster.
+
+## Overall Architecture
+
+Before creating a training job, the user needs to slice the training data and deploy
+the Python scripts along with it into the distributed file system
+(we can use different types of Kubernetes Volumes to mount different distributed
+file systems). Before training starts, the program copies the training data into the
+container, and it also saves the models at the same path during training. The global architecture
+is as follows:
+
+![PaddlePaddle on Kubernetes Architecture](src/k8s-paddle-arch.png)
+
+The above figure describes a distributed training architecture which contains 3 nodes; each
+Pod mounts a folder of the distributed file system to save training data and models
+via a Kubernetes Volume. Kubernetes creates 3 Pods for this training phase and schedules them on
+3 nodes; each Pod has a PaddlePaddle container. After the containers are created,
+PaddlePaddle starts up the communication between PServer and Trainer and reads the training
+data for this training job.
+
+As described above, we can start up a PaddlePaddle distributed training job on a
+Kubernetes-ready cluster with the following steps:
+
+1. [Build PaddlePaddle Docker Image](#Build a Docker Image)
+1. [Split training data and upload to the distributed file system](#Upload Training Data)
+1. [Edit a YAML file and create a Kubernetes Job](#Create a Job)
+1. [Check the output](#Check The Output)
+
+We will introduce these steps as follows:
+
+### Build a Docker Image
+
+The training Docker image needs to package the paddle pserver and paddle trainer runtimes, as well as two more processes before we can kick off the training:
+
+- Copying the training data into the container.
+- Generating the initialization arguments for the `Paddle PServer` and `Paddle Trainer` processes.
+
+Since the paddlepaddle official Docker image already has the runtimes we need, we'll take it as the base image and pack some additional scripts for the processes mentioned above to build our training image. For more detail, please refer to the following link:
+- https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/usage/cluster/src/k8s_train/Dockerfile
+
+
+```bash
+$ cd doc/howto/usage/k8s/src/k8s_train
+$ docker build -t [YOUR_REPO]/paddle:mypaddle .
+```
+
+Then upload the new Docker image to a Docker hub:
+
+```bash
+docker push [YOUR_REPO]/paddle:mypaddle
+```
+
+**[NOTE]**: in the above command arguments, `[YOUR_REPO]` represents your Docker repository; you need to substitute your own repository name for it. Below, `[YOUR_REPO]/paddle:mypaddle` refers to the Docker image built in this step.
+
+### Prepare Training Data
+
+We can download and split the training data by creating a Kubernetes Job, or customize the image
+by editing [k8s_train](./src/k8s_train/).
+
+Before creating a Job, we need to bind a [persistentVolumeClaim](https://kubernetes.io/docs/user-guide/persistent-volumes) backed by the appropriate type of
+file system; the generated dataset will be saved on this volume.
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: paddle-data
+spec:
+  template:
+    metadata:
+      name: pi
+    spec:
+      hostNetwork: true
+      containers:
+      - name: paddle-data
+        image: paddlepaddle/paddle-tutorial:k8s_data
+        imagePullPolicy: Always
+        volumeMounts:
+        - mountPath: "/mnt"
+          name: nfs
+        env:
+        - name: OUT_DIR
+          value: /home/work/mfs/paddle-cluster-job
+        - name: SPLIT_COUNT
+          value: "3"
+      volumes:
+        - name: nfs
+          persistentVolumeClaim:
+            claimName: mfs
+      restartPolicy: Never
+```
+
+Create the Job with the following command:
+
+```bash
+> kubectl create -f xxx.yaml
+```
+
+If created successfully, you can see some information like this:
+
+```bash
+[root@paddle-kubernetes-node0 nfsdir]$ tree -d
+.
+`-- paddle-cluster-job
+    |-- 0
+    |   `-- data
+    |-- 1
+    |   `-- data
+    |-- 2
+    |   `-- data
+    |-- output
+    |-- quick_start
+```
+
+The `paddle-cluster-job` above is the job name for this training job; we need 3
+PaddlePaddle training nodes and save the split training data under the `paddle-cluster-job` path.
+The folders `0`, `1` and `2` represent the `training_id` of each node, the `quick_start` folder is used to store training data, and the `output` folder is used to store the models and logs.
+
+
+### Create a Job
+
+Kubernetes allows users to create objects with YAML files, and we can use a command-line tool
+to create them.
+
+The Job YAML file describes which Docker image is used in this training job, how many nodes are created, the startup arguments of the `Paddle PServer/Trainer` processes, and the type of Volumes. You can find the details of the YAML fields in
+[Kubernetes Job API](http://kubernetes.io/docs/api-reference/batch/v1/definitions/#_v1_job).
+The following is an example for this training job:
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: paddle-cluster-job
+spec:
+  parallelism: 3
+  completions: 3
+  template:
+    metadata:
+      name: paddle-cluster-job
+    spec:
+      volumes:
+      - name: jobpath
+        hostPath:
+          path: /home/work/mfs
+      containers:
+      - name: trainer
+        image: [YOUR_REPO]/paddle:mypaddle
+        command: ["bin/bash", "-c", "/root/start.sh"]
+        env:
+        - name: JOB_NAME
+          value: paddle-cluster-job
+        - name: JOB_PATH
+          value: /home/jobpath
+        - name: JOB_NAMESPACE
+          value: default
+        - name: TRAIN_CONFIG_DIR
+          value: recommendation
+        - name: CONF_PADDLE_NIC
+          value: eth0
+        - name: CONF_PADDLE_PORT
+          value: "7164"
+        - name: CONF_PADDLE_PORTS_NUM
+          value: "2"
+        - name: CONF_PADDLE_PORTS_NUM_SPARSE
+          value: "2"
+        - name: CONF_PADDLE_GRADIENT_NUM
+          value: "3"
+        volumeMounts:
+        - name: jobpath
+          mountPath: /home/jobpath
+      restartPolicy: Never
+```
+
+In the above YAML file:
+- `metadata.name`, the job name.
+- `parallelism`, the Kubernetes Job will create `parallelism` Pods at the same time.
+- `completions`, the Job becomes successful only when the number of successful Pods (exit code 0)
+  is equal to `completions`.
+- `volumeMounts`, the name field `jobpath` is a key; the `mountPath` field represents
+  the path in the container, and we can define `jobpath` in the `volumes` field, using `hostPath`
+  to configure the host path we want to mount.
+- `env`, the environment variables in the container; we pass some startup arguments by
+  this approach, with some details as follows:
+  - JOB_PATH: the mount path in the container.
+  - JOB_NAME: the job name.
+  - TRAIN_CONFIG_DIR: the job path in the container; we can find the training data path by
+    combining it with JOB_NAME.
+  - CONF_PADDLE_NIC: the argument `--nics` of the `Paddle PServer` process, the network
+    device name.
+  - CONF_PADDLE_PORT: the argument `--port` of the `Paddle PServer` process.
+  - CONF_PADDLE_PORTS_NUM: the argument `--ports_num` of `Paddle PServer`, the port number
+    for dense parameter updates.
+  - CONF_PADDLE_PORTS_NUM_SPARSE: the argument `--ports_num_for_sparse` of `Paddle PServer`,
+    the port number for sparse parameter updates.
+  - CONF_PADDLE_GRADIENT_NUM: the number of training nodes, the argument
+    `--num_gradient_servers` of `Paddle PServer` and `Paddle Trainer`.
+
+You can find more detailed information [here](http://www.paddlepaddle.org/docs/develop/documentation/zh/howto/usage/cmd_parameter/detail_introduction_cn.html).
+
+We can use the command-line tool of Kubernetes to create a Job once we have finished the YAML file:
+
+```bash
+kubectl create -f job.yaml
+```
+
+Upon successful creation, Kubernetes creates 3 Pods as PaddlePaddle training nodes,
+pulls the Docker image and begins to train.
+
+
+### Check the Output
+
+During training, we can check the logs and the output models, which are stored in
+the `output` folder.
+
+**NOTE**: `node_0`, `node_1` and `node_2` represent the
+`trainer_id` of the PaddlePaddle training job rather than the node id of Kubernetes.
+
+```bash
+[root@paddle-kubernetes-node0 output]# tree -d
+.
+├── node_0
+│   ├── server.log
+│   └── train.log
+├── node_1
+│   ├── server.log
+│   └── train.log
+├── node_2
+......
+├── pass-00002
+│   ├── done
+│   ├── ___embedding_0__.w0
+│   ├── ___embedding_1__.w0
+......
+```
+
+We can check the status of each training Pod by viewing the logs:
+
+```bash
+[root@paddle-kubernetes-node0 node_0]# cat train.log
+I1116 09:10:17.123121    50 Util.cpp:155] commandline:
+ /usr/local/bin/../opt/paddle/bin/paddle_trainer
+    --nics=eth0 --port=7164
+    --ports_num=2 --comment=paddle_process_by_paddle
+    --pservers=192.168.129.66,192.168.223.143,192.168.129.71
+    --ports_num_for_sparse=2 --config=./trainer_config.py
+    --trainer_count=4 --num_passes=10 --use_gpu=0
+    --log_period=50 --dot_period=10 --saving_period=1
+    --local=0 --trainer_id=0
+    --save_dir=/home/jobpath/paddle-cluster-job/output
+I1116 09:10:17.123440    50 Util.cpp:130] Calling runInitFunctions
+I1116 09:10:17.123764    50 Util.cpp:143] Call runInitFunctions done.
+[WARNING 2016-11-16 09:10:17,227 default_decorators.py:40] please use keyword arguments in paddle config.
+[INFO 2016-11-16 09:10:17,239 networks.py:1282] The input order is [movie_id, title, genres, user_id, gender, age, occupation, rating]
+[INFO 2016-11-16 09:10:17,239 networks.py:1289] The output order is [__square_error_cost_0__]
+I1116 09:10:17.392917    50 Trainer.cpp:170] trainer mode: Normal
+I1116 09:10:17.613910    50 PyDataProvider2.cpp:257] loading dataprovider dataprovider::process
+I1116 09:10:17.680917    50 PyDataProvider2.cpp:257] loading dataprovider dataprovider::process
+I1116 09:10:17.681543    50 GradientMachine.cpp:134] Initing parameters..
+I1116 09:10:18.012390    50 GradientMachine.cpp:141] Init parameters done.
+I1116 09:10:18.018641    50 ParameterClient2.cpp:122] pserver 0 192.168.129.66:7164
+I1116 09:10:18.018950    50 ParameterClient2.cpp:122] pserver 1 192.168.129.66:7165
+I1116 09:10:18.019069    50 ParameterClient2.cpp:122] pserver 2 192.168.223.143:7164
+I1116 09:10:18.019492    50 ParameterClient2.cpp:122] pserver 3 192.168.223.143:7165
+I1116 09:10:18.019716    50 ParameterClient2.cpp:122] pserver 4 192.168.129.71:7164
+I1116 09:10:18.019836    50 ParameterClient2.cpp:122] pserver 5 192.168.129.71:7165
+```
+
+## Some Additional Details
+
+### Using Environment Variables
+
+Usually we use environment variables to configure the PaddlePaddle Job which runs in
+Kubernetes. `start_paddle.py` provides a startup script to convert the environment variables
+into the startup arguments of the PaddlePaddle processes:
+
+```python
+API = "/api/v1/namespaces/"
+JOBSELECTOR = "labelSelector=job-name="
+JOB_PATH = os.getenv("JOB_PATH") + "/" + os.getenv("JOB_NAME")
+JOB_PATH_OUTPUT = JOB_PATH + "/output"
+JOBNAME = os.getenv("JOB_NAME")
+NAMESPACE = os.getenv("JOB_NAMESPACE")
+PADDLE_NIC = os.getenv("CONF_PADDLE_NIC")
+PADDLE_PORT = os.getenv("CONF_PADDLE_PORT")
+PADDLE_PORTS_NUM = os.getenv("CONF_PADDLE_PORTS_NUM")
+PADDLE_PORTS_NUM_SPARSE = os.getenv("CONF_PADDLE_PORTS_NUM_SPARSE")
+PADDLE_SERVER_NUM = os.getenv("CONF_PADDLE_GRADIENT_NUM")
+```
+
+### Communication between Pods
+
+At the beginning of `start_paddle.py`, it initializes and parses the arguments.
+
+```python
+parser = argparse.ArgumentParser(prog="start_paddle.py",
+                                 description='simple tool for k8s')
+args, train_args_list = parser.parse_known_args()
+train_args = refine_unknown_args(train_args_list)
+train_args_dict = dict(zip(train_args[:-1:2], train_args[1::2]))
+podlist = getPodList()
+```
+
+It then queries the status of all the other Pods of this Job with the function `getPodList()`, and fetches the `trainer_id` with the function `getIdMap(podlist)` once all Pods are in `RUNNING` status.
+
+```python
+podlist = getPodList()
+# need to wait until all pods are running
+while not isPodAllRunning(podlist):
+    time.sleep(10)
+    podlist = getPodList()
+idMap = getIdMap(podlist)
+```
+
+**NOTE**: `getPodList()` fetches all the Pods in the current namespace; if some unrelated
+Pods are already running, it may cause an error. We will use [StatefulSets](https://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets) instead of
+Kubernetes Pods or ReplicaSets in the future.
+
+The function `getIdMap(podlist)` fetches the IP addresses of the Pods in `podlist` and then sorts them
+to generate a `trainer_id` for each trainer.
+
+```python
+def getIdMap(podlist):
+    '''
+    generate trainer_id by ip
+    '''
+    ips = []
+    for pod in podlist["items"]:
+        ips.append(pod["status"]["podIP"])
+    ips.sort()
+    idMap = {}
+    for i in range(len(ips)):
+        idMap[ips[i]] = i
+    return idMap
+```
+
+After getting the `idMap`, we can generate the arguments of `Paddle PServer` and `Paddle Trainer`,
+so that we can start them up with `startPaddle(idMap, train_args_dict)`.
+
+### Create Job
+
+The main goal of `startPaddle` is generating the arguments of the `Paddle PServer` and
+`Paddle Trainer` processes. Taking `Paddle Trainer` as an example, we parse the
+environment variables to get `PADDLE_NIC`, `PADDLE_PORT`, `PADDLE_PORTS_NUM`, etc.,
+and finally find `trainerId` in `idMap` according to the local IP address.
+
+```python
+program = 'paddle train'
+args = " --nics=" + PADDLE_NIC
+args += " --port=" + str(PADDLE_PORT)
+args += " --ports_num=" + str(PADDLE_PORTS_NUM)
+args += " --comment=" + "paddle_process_by_paddle"
+ip_string = ""
+for ip in idMap.keys():
+    ip_string += (ip + ",")
+ip_string = ip_string.rstrip(",")
+args += " --pservers=" + ip_string
+args_ext = ""
+for key, value in train_args_dict.items():
+    args_ext += (' --' + key + '=' + value)
+localIP = socket.gethostbyname(socket.gethostname())
+trainerId = idMap[localIP]
+args += " " + args_ext + " --trainer_id=" + \
+    str(trainerId) + " --save_dir=" + JOB_PATH_OUTPUT
+```
diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt
index c44f8a8a8ecc1ba1f886fc41aec863b4ca3458a6..8b1ca5e16548334ed0c9a6d31b88e0805304579e 100644
--- a/paddle/CMakeLists.txt
+++ b/paddle/CMakeLists.txt
@@ -24,6 +24,6 @@ if(NOT WITH_FLUID_ONLY)
 endif()
 
 add_subdirectory(testing)
-if(NOT MOBILE_INFERENCE AND NOT ANDROID AND NOT IOS)
+if(NOT MOBILE_INFERENCE AND NOT RPI)
   add_subdirectory(fluid)
 endif()
diff --git a/paddle/fluid/CMakeLists.txt b/paddle/fluid/CMakeLists.txt
index d725763b01d5953985f8e090605f68a8419b5498..d274d96c29bdbf5973d568d783369c3975bdc436 100644
--- a/paddle/fluid/CMakeLists.txt
+++ b/paddle/fluid/CMakeLists.txt
@@ -3,6 +3,7 @@ add_subdirectory(platform)
 add_subdirectory(framework)
 add_subdirectory(operators)
 add_subdirectory(pybind)
-add_subdirectory(inference)
 add_subdirectory(string)
 add_subdirectory(recordio)
+# NOTE: please add the inference subdirectory last.
+add_subdirectory(inference)
diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt
index 3840bbe83b68dc2a49aa73feb57a80e9992cad5f..1f3ca24df16cf080d325fbdc0d613a828e384b2a 100644
--- a/paddle/fluid/framework/CMakeLists.txt
+++ b/paddle/fluid/framework/CMakeLists.txt
@@ -79,14 +79,12 @@ add_custom_command(TARGET framework_py_proto POST_BUILD
   COMMENT "Copy generated python proto into directory paddle/fluid/proto."
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) -cc_library(backward SRCS backward.cc DEPS net_op) -cc_test(backward_test SRCS backward_test.cc DEPS backward recurrent_op device_context fill_constant_op) cc_library(lod_rank_table SRCS lod_rank_table.cc DEPS lod_tensor) cc_library(feed_fetch_method SRCS feed_fetch_method.cc DEPS lod_tensor scope glog) cc_library(executor SRCS executor.cc DEPS op_registry device_context scope -framework_proto backward glog lod_rank_table feed_fetch_method) +framework_proto glog lod_rank_table feed_fetch_method) cc_library(parallel_executor SRCS parallel_executor.cc DEPS multi_devices_graph_builder threaded_ssa_graph_executor) diff --git a/paddle/fluid/framework/backward.cc b/paddle/fluid/framework/backward.cc deleted file mode 100644 index 1314af2b3dab281bd201e6a77bfbe87e0bd58ffb..0000000000000000000000000000000000000000 --- a/paddle/fluid/framework/backward.cc +++ /dev/null @@ -1,585 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/fluid/framework/backward.h" -#include "paddle/fluid/operators/net_op.h" - -#include -#include -#include -#include - -#include "paddle/fluid/framework/block_desc.h" -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/operators/net_op.h" - -namespace paddle { -namespace framework { - -static std::unordered_set* g_ctrl_flow_ops_ = nullptr; -// Control Flow operators's backward is significantly different from -// computational operators. Hack Code here. -// We should design a better way to backward CtrlFlowOps. 
-static std::unordered_set& CtrlFlowOps() { - if (g_ctrl_flow_ops_ == nullptr) { - g_ctrl_flow_ops_ = new std::unordered_set{ - "increment", "lod_rank_table", "less_than"}; - } - return *g_ctrl_flow_ops_; -} - -static inline std::unique_ptr CreateGradOp( - const OperatorBase& op, const std::unordered_set& no_grad_set, - std::unordered_map* grad_to_var) { - OpDesc op_desc; - op_desc.SetInputMap(op.Inputs()); - op_desc.SetOutputMap(op.Outputs()); - op_desc.SetType(op.Type()); - op_desc.SetAttrMap(op.Attrs()); - auto& info = OpInfoMap::Instance().Get(op.Type()); - auto grad_descs = info.GradOpMaker()(op_desc, no_grad_set, grad_to_var, {}); - std::vector> grad_ops; - grad_ops.reserve(grad_descs.size()); - std::transform(grad_descs.begin(), grad_descs.end(), - std::back_inserter(grad_ops), - [](const std::unique_ptr& grad_desc) { - return OpRegistry::CreateOp(*grad_desc); - }); - PADDLE_ENFORCE(!grad_ops.empty()); - if (grad_ops.size() == 1) { - return std::move(grad_ops[0]); - } else { - auto net_op = new operators::NetOp(); - for (auto& grad_op : grad_ops) { - net_op->AppendOp(std::move(grad_op)); - } - net_op->CompleteAddOp(); - return std::unique_ptr(net_op); - } -} - -template -static void ForEachVarName(const Map& names, T callback) { - for (auto& name : names) { - for (auto& n : name.second) { - if (callback(n)) return; - } - } -} - -// return whether all the names + suffixes in the set -static bool AllInSet( - const std::map>& names, - const std::string& suffix, const std::unordered_set& set) { - bool all_in_set = true; - ForEachVarName(names, [&all_in_set, &set, &suffix](const std::string& n) { - all_in_set = set.find(n + suffix) != set.end(); - return !all_in_set; - }); - return all_in_set; -} - -static std::unique_ptr NOP() { - auto net_op = new operators::NetOp(); - net_op->SetType("@NOP@"); - net_op->CompleteAddOp(); - return std::unique_ptr(net_op); -} - -// Get backward operator from a forward operator, a recursive implementation. -// -// no_grad_names the gradient variable names without gradient calculating. -// -// uniq_id is a unique index used inside recursively calling -// BackwardRecursive. use `uid = uniq_id++;` to get the unique index, and -// pass `uniq_id` through recursive calling. -// -// returns The backward operator. In a simple situation, it may be a simple -// operator, in a complex situation, it maybe a NetOp. -// -// See Backward.h for details -static std::unique_ptr BackwardRecursive( - const OperatorBase& forwardOp, - std::unordered_set& no_grad_names, - std::unordered_map* grad_to_var, - size_t& uniq_id) { - // If all input gradients of forwarding operator do not need to calculate, - // just return an NOP. Not return null ptr because NOP does not take - // too much time for calculation, but it is useful for simplifying logic. - if (AllInSet(forwardOp.Inputs() /*names*/, kGradVarSuffix /*suffix*/, - no_grad_names /*set*/)) { - return NOP(); - } - - // All output gradients of forwarding operator do not need to calculate. - // Then all input gradients cannot be computed at all, and we put them into - // `no_grad_names` set. Return an NOP. 
- if (AllInSet(forwardOp.Outputs() /*names*/, kGradVarSuffix /*suffix*/, - no_grad_names /*set*/)) { - ForEachVarName(forwardOp.Inputs(), - [&no_grad_names](const std::string& name) -> bool { - no_grad_names.insert(GradVarName(name)); - return false; - }); - return NOP(); - } - - // Returned gradient network - auto net = std::unique_ptr(new operators::NetOp()); - - if (forwardOp.IsNetOp()) { - // Because forwardOp is a net op, it can static_cast. - auto& forwardNet = static_cast(forwardOp); - - // Map from output gradient variable name to operator's indices in - // backward net's ops_. That operator generates that variable. - std::unordered_map> dup_output_ops; - - size_t local_op_id = 0; - // reversely travel forwardNet and collect all duplicate outputs. - for (auto it = forwardNet.ops_.rbegin(); it != forwardNet.ops_.rend(); - ++it, ++local_op_id) { - auto& fwd = *it; - auto bwd = BackwardRecursive(*fwd, no_grad_names, grad_to_var, uniq_id); - ForEachVarName(bwd->Outputs(), - [&dup_output_ops, local_op_id](const std::string& out) { - dup_output_ops[out].emplace_back(local_op_id); - return false; - }); - net->AppendOp(std::move(bwd)); - } - // Get unique ID for this method. - auto uid = uniq_id++; - // TODO(dzh): more comment - // multiple operators which have the same output (y for example) may - // overwrite the same y variable when backward, special operations are token - // to handle this case. For each duplicate output, rename it to an alias - // (original name with a offset), append an `add` op for its operator, - // and finally sum all the alias variable to the final output variable y. - using Pos = std::pair>; - std::list insert_position; - for (auto& dup_output_op : dup_output_ops) { - const std::string& name = dup_output_op.first; - // duplicate @Empty@ don't need to be added - if (name == kEmptyVarName) continue; - - auto& dup_op = dup_output_op.second; - // no duplicate output - if (dup_op.size() == 1) continue; - - // process the duplicate outputs - std::vector dup_outputs; - for (size_t i = 0; i < dup_op.size(); ++i) { - // rename each duplicate output to an alias - auto op_offset = dup_op[i]; - dup_outputs.push_back(name + "@RENAME@" + std::to_string(uid) + "@" + - std::to_string(i)); - net->ops_[op_offset]->Rename(name, dup_outputs.back()); - } - // collect all the offset for each alias, - // insert a sum operator to add all aliases to output - insert_position.push_back( - {dup_op.back(), - OpRegistry::CreateOp("sum", {{"X", dup_outputs}}, {{"Out", {name}}}, - AttributeMap{})}); - } - - // make sure the inserted `sum` ops follow the BFS order. - insert_position.sort( - [](const Pos& l, const Pos& r) { return l.first > r.first; }); - - for (auto& pos : insert_position) { - net->InsertOp(pos.first + 1, std::move(pos.second)); - } - } else { - std::unique_ptr grad_op( - CreateGradOp(forwardOp, no_grad_names, grad_to_var)); - - ForEachVarName(grad_op->Inputs(), [&no_grad_names, &net, &grad_op]( - const std::string& grad_input) { - if (no_grad_names.count(grad_input)) { - // +1 for \0 - std::string prefix = grad_input.substr( - 0, grad_input.size() - sizeof(kGradVarSuffix) / sizeof(char) + 1); - grad_op->Rename(grad_input, prefix + kZeroVarSuffix); - - // If part of input gradient of that operator is not calculated, fill - // zero variables to that input gradient. 
- net->AppendOp(OpRegistry::CreateOp("fill_zeros_like", {{"X", {prefix}}}, - {{"Out", {grad_input}}}, - AttributeMap{})); - } - return false; - }); - - ForEachVarName(grad_op->Outputs(), - [&no_grad_names, &grad_op](const std::string& grad_output) { - if (no_grad_names.count(grad_output)) { - grad_op->Rename(grad_output, kEmptyVarName); - } - return false; - }); - - if (net->ops_.empty()) { // Current no aux op is added to network - return grad_op; - } - net->AppendOp(std::move(grad_op)); - } - net->SetType("@GENERATED_BACKWARD@"); - net->CompleteAddOp(); - return std::unique_ptr( - static_cast(net.release())); -} - -// See header for comments -std::unique_ptr Backward( - const OperatorBase& forwardOp, - const std::unordered_set& no_grad_vars) { - std::unordered_set no_grad_names; - no_grad_names.reserve(no_grad_vars.size() + 1); - - no_grad_names.insert(std::string(kEmptyVarName) + kGradVarSuffix); - - for (auto& name : no_grad_vars) { - no_grad_names.insert(name + kGradVarSuffix); - } - size_t uid = 0; - std::unordered_map grad_to_var; - return BackwardRecursive(forwardOp, no_grad_names, &grad_to_var, uid); -} - -// ==================================== // - -static bool AllGradInSet(const std::vector& names, - const std::unordered_set& set) { - for (const std::string& name : names) { - if (!set.count(GradVarName(name))) { - return false; - } - } - if (VLOG_IS_ON(10)) { - std::ostringstream sout; - sout << "All input {"; - for (auto& name : names) { - sout << name << ","; - } - sout << "} is in {"; - for (auto& name : set) { - sout << name << ","; - } - sout << "}"; - VLOG(10) << sout.str(); - } - return true; -} - -static std::string FwdName(const std::string& grad_name) { - auto pos = grad_name.find("@GRAD"); - if (pos == std::string::npos) { - return ""; - } else { - return grad_name.substr(0, pos); - } -} - -static void CreateGradVarInBlock( - size_t grad_op_start_index, - const std::unordered_map& param_name_map, - BlockDesc* block_desc, - std::unordered_map* grad_var_record) { - auto ops = block_desc->AllOps(); - for (size_t op_index = grad_op_start_index; op_index < ops.size(); - ++op_index) { - std::unordered_set new_vars; - auto& ctrl_flow_ops = CtrlFlowOps(); - ForEachVarName(ops[op_index]->Outputs(), - [&](const std::string& grad_var_name) { - if (ctrl_flow_ops.find(ops[op_index]->Type()) != - ctrl_flow_ops.end()) { - if (block_desc->HasVarRecursive(grad_var_name)) { - return false; - } - } else { - if (block_desc->HasVar(grad_var_name)) { - return false; - } - } - if (grad_var_name == framework::kEmptyVarName) { - return false; - } - auto var = block_desc->Var(grad_var_name); - VLOG(10) << "Creating Variable " << grad_var_name; - new_vars.insert(var->Name()); - auto it = param_name_map.find(grad_var_name); - if (it == param_name_map.end()) { - return false; - } - auto param_var_name = it->second; - auto& grad_record = (*grad_var_record)[param_var_name]; - grad_record.name_ = grad_var_name; - grad_record.block_idx_ = block_desc->ID(); - grad_record.op_idx_ = static_cast(op_index); - return false; /* not break */ - }); - ops[op_index]->InferVarType(block_desc); - for (auto& arg : ops[op_index]->OutputArgumentNames()) { - if (new_vars.find(arg) == new_vars.end()) { - continue; - } - auto pname = FwdName(arg); - auto* param = block_desc->FindVarRecursive(pname); - auto* grad = block_desc->FindVar(arg); - if (param == nullptr) { - grad->SetDataType(proto::VarType::FP32); - } else { - grad->SetDataType(param->GetDataType()); - } - } - ops[op_index]->InferShape(*block_desc); - } -} - 
-std::vector> MakeOpGrad( - const OpDesc* op_desc, std::unordered_set* no_grad_vars, - std::unordered_map* grad_to_var, - const std::vector& grad_block = std::vector()) { - std::vector> grad_op_descs; - // All input gradients of forwarding operator do not need to calculate. - const std::vector& inputs = op_desc->InputArgumentNames(); - if (AllGradInSet(inputs, *no_grad_vars)) { - VLOG(10) << "Drop operator " << op_desc->Type(); - return grad_op_descs; // empty vector - } - - // All output gradients of forwarding operator do not need to calculate. - const std::vector& outputs = op_desc->OutputArgumentNames(); - - if (AllGradInSet(outputs, *no_grad_vars)) { - VLOG(10) << "Drop operator " << op_desc->Type(); - // FIXME: Hack code here - auto& ctrl_flow_ops = CtrlFlowOps(); - if (ctrl_flow_ops.find(op_desc->Type()) == ctrl_flow_ops.end()) { - // Only computational op need drop input's gradient. - for (const std::string& name : inputs) { - no_grad_vars->insert(GradVarName(name)); - VLOG(10) << " Also drop " << GradVarName(name); - } - } - - return grad_op_descs; // empty vector - } - - grad_op_descs = - OpInfoMap::Instance() - .Get(op_desc->Type()) - .GradOpMaker()(*op_desc, *no_grad_vars, grad_to_var, grad_block); - - std::list> pending_fill_zeros_ops; - for (auto& desc : grad_op_descs) { - for (const std::string& in_name : desc->InputArgumentNames()) { - if (no_grad_vars->count(in_name)) { - std::string prefix = in_name.substr( - 0, in_name.size() - sizeof(kGradVarSuffix) / sizeof(char) + 1); - std::string new_name = prefix + kZeroVarSuffix; - desc->Rename(in_name, new_name); - std::unique_ptr fill_zeros_op( - new OpDesc("fill_zeros_like", {{"X", {prefix}}}, - {{"Out", {new_name}}}, AttributeMap{})); - pending_fill_zeros_ops.push_back(std::move(fill_zeros_op)); - } - } - } - - for (auto& p : pending_fill_zeros_ops) { - grad_op_descs.insert(grad_op_descs.begin(), std::move(p)); - } - return grad_op_descs; -} - -static BlockDesc* CreateStepBlock( - ProgramDesc& program_desc, std::unordered_set* no_grad_vars, - std::unordered_map* grad_to_var, - int step_block_idx); - -std::vector> MakeBlockBackward( - ProgramDesc& program_desc, int block_idx, - std::unordered_set* no_grad_vars, - std::unordered_map* grad_to_var) { - VLOG(5) << "MakeBlockBackward"; - BlockDesc* cur_block = program_desc.MutableBlock(block_idx); - std::vector op_descs = cur_block->AllOps(); - std::unordered_map> dup_out_ops; - size_t grad_desc_idx = 0; - std::vector> backward_descs; - - for (auto it = op_descs.rbegin(); it != op_descs.rend(); ++it) { - VLOG(5) << "Making backward " << (*it)->Type() << " op"; - std::vector> op_grads; - - if ((*it)->Type() == "recurrent" || (*it)->Type() == "while" || - (*it)->Type() == "parallel_do") { - int step_block_idx = (*it)->GetBlockAttr("sub_block"); - BlockDesc* backward_block = CreateStepBlock(program_desc, no_grad_vars, - grad_to_var, step_block_idx); - op_grads = MakeOpGrad(*it, no_grad_vars, grad_to_var, {backward_block}); - } else if ((*it)->Type() == "conditional_block") { - BlockDesc* backward_block = - CreateStepBlock(program_desc, no_grad_vars, grad_to_var, - (*it)->GetBlockAttr("sub_block")); - op_grads = MakeOpGrad(*it, no_grad_vars, grad_to_var, {backward_block}); - } else { - op_grads = MakeOpGrad(*it, no_grad_vars, grad_to_var); - } - - if (VLOG_IS_ON(10)) { - std::ostringstream sout; - sout << "Made "; - for (auto& op_grad : op_grads) { - sout << op_grad->Type() << " "; - } - VLOG(10) << sout.str(); - } - - for (const auto& desc : op_grads) { - for (const std::string& 
out_name : desc->OutputArgumentNames()) { - if (out_name.find("@GRAD") == std::string::npos) { - // Not all outputs of a backward operator is a gradient. Only gradient - // need to be sum. Skip variables are not gradient. - continue; - } - dup_out_ops[out_name].emplace_back(grad_desc_idx); - } - ++grad_desc_idx; - } - std::transform(op_grads.begin(), op_grads.end(), - std::back_inserter(backward_descs), - [](std::unique_ptr& ptr) { return std::move(ptr); }); - } - - VLOG(5) << "Appending Sums"; - // Check whether some variables are written more than once - std::list>> pending_sum_ops; - for (const auto& dup : dup_out_ops) { - const std::string& out_name = dup.first; - const std::vector dup_op = dup.second; - if (out_name != kEmptyVarName && dup_op.size() > 1) { - std::vector sum_op_inputs; - std::string next_g_name = out_name; - for (size_t i = 0; i < dup_op.size(); ++i) { - VLOG(10) << backward_descs[dup_op[i]]->Type() << " has " << out_name - << " duplicated"; - std::string new_name = out_name + "@RENAME@" + std::to_string(i); - backward_descs[dup_op[i]]->RenameOutput(out_name, new_name); - backward_descs[dup_op[i]]->RenameInput(out_name, next_g_name); - sum_op_inputs.emplace_back(new_name); - next_g_name = sum_op_inputs.back(); - } - std::unique_ptr sum_op(new OpDesc("sum", {{"X", sum_op_inputs}}, - {{"Out", {out_name}}}, - AttributeMap{})); - pending_sum_ops.push_back({dup_op.back(), std::move(sum_op)}); - } - } - - pending_sum_ops.sort([](const std::pair>& a, - const std::pair>& b) { - return a.first > b.first; - }); - for (auto& p : pending_sum_ops) { - backward_descs.insert(backward_descs.begin() + p.first + 1, - std::move(p.second)); - } - - VLOG(5) << "MakeBlockBackward Finished"; - - return backward_descs; -} - -static BlockDesc* CreateStepBlock( - ProgramDesc& program_desc, std::unordered_set* no_grad_vars, - std::unordered_map* grad_to_var, - int step_block_idx) { - auto backward_block_op_descs = MakeBlockBackward(program_desc, step_block_idx, - no_grad_vars, grad_to_var); - BlockDesc* backward_block = - program_desc.AppendBlock(*program_desc.MutableBlock(step_block_idx)); - for (auto& ptr : backward_block_op_descs) { - backward_block->AppendAllocatedOp(move(ptr)); - } - return backward_block; -} - -ParamGradInfoMap AppendBackward( - ProgramDesc& program_desc, const VarDesc& target, - const std::unordered_set& no_grad_vars) { - std::unordered_set no_grad_var_names; - no_grad_var_names.reserve(no_grad_vars.size() + 1); - no_grad_var_names.insert(std::string(kEmptyVarName) + kGradVarSuffix); - for (auto& name : no_grad_vars) { - no_grad_var_names.insert(GradVarName(name)); - } - - const int root_block_idx = 0; - auto root_block = program_desc.MutableBlock(root_block_idx); - - std::string fill_one_op_out = GradVarName(target.Name()); - bool is_scalar = target.GetShape() == std::vector{1}; - PADDLE_ENFORCE(is_scalar, "target should be scalar"); - VLOG(3) << "backward from loss=" << target.Name() - << " data_type=" << target.GetDataType(); - std::unique_ptr fill_one_op( - new OpDesc("fill_constant", {}, {{"Out", {fill_one_op_out}}}, - {{"shape", std::vector{1}}, - {"value", static_cast(1.0)}, - {"dtype", target.GetDataType()}})); - // infer var type of fill_one_op - fill_one_op->InferVarType(root_block); - - root_block->AppendAllocatedOp(std::move(fill_one_op)); - size_t forward_op_num = root_block->OpSize(); - size_t forward_block_num = program_desc.Size(); - - // Insert backward operators - std::unordered_map grad_to_var; - auto backward_op_descs = MakeBlockBackward(program_desc, 
root_block_idx, - &no_grad_var_names, &grad_to_var); - - for (auto& ptr : backward_op_descs) { - root_block->AppendAllocatedOp(std::move(ptr)); - } - // Create Variable - - // Create target gradient variable - std::unordered_map retv; - - auto var = root_block->Var(fill_one_op_out); - var->SetDataType(target.GetDataType()); - var->SetShape(target.GetShape()); - auto& target_grad = retv[target.Name()]; - target_grad.name_ = fill_one_op_out; - target_grad.block_idx_ = root_block_idx; - target_grad.op_idx_ = static_cast(forward_op_num); - - // create grad_var for all blocks in this program - CreateGradVarInBlock(forward_op_num, grad_to_var, root_block, &retv); - for (size_t block_index = forward_block_num; - block_index < program_desc.Size(); ++block_index) { - CreateGradVarInBlock(0, grad_to_var, program_desc.MutableBlock(block_index), - &retv); - } - return retv; -} - -} // namespace framework -} // namespace paddle diff --git a/paddle/fluid/framework/backward.h b/paddle/fluid/framework/backward.h deleted file mode 100644 index 3a971090c25c85efbf976532c364371baba9a870..0000000000000000000000000000000000000000 --- a/paddle/fluid/framework/backward.h +++ /dev/null @@ -1,56 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include -#include - -#include "paddle/fluid/framework/operator.h" -#include "paddle/fluid/framework/program_desc.h" - -namespace paddle { -namespace framework { - -// Create the backward operator from a forward operator. -// TODO(yuyang18): Add more API reference comment. -extern std::unique_ptr Backward( - const OperatorBase& forwardOp, - const std::unordered_set& no_grad_vars); - -struct GradVarInfo { - GradVarInfo() {} - GradVarInfo(const std::string& name, int block_idx, int op_idx) - : name_(name), block_idx_(block_idx), op_idx_(op_idx) {} - - bool operator==(const GradVarInfo& b) const { - return name_ == b.name_ && block_idx_ == b.block_idx_ && - op_idx_ == b.op_idx_; - } - - std::string name_; - int block_idx_; - int op_idx_; -}; - -using ParamGradInfoMap = std::unordered_map; - -ParamGradInfoMap AppendBackward( - ProgramDesc& program_desc, const VarDesc& target, - const std::unordered_set& no_grad_vars); - -} // namespace framework -} // namespace paddle diff --git a/paddle/fluid/framework/backward_test.cc b/paddle/fluid/framework/backward_test.cc deleted file mode 100644 index cc1f871360ed3f7071364dbb0f932bfd997cadb0..0000000000000000000000000000000000000000 --- a/paddle/fluid/framework/backward_test.cc +++ /dev/null @@ -1,918 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/fluid/framework/backward.h" - -#include -#include "paddle/fluid/framework/block_desc.h" -#include "paddle/fluid/framework/op_desc.h" -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/framework/var_desc.h" -#include "paddle/fluid/operators/net_op.h" - -USE_NO_KERNEL_OP(fill_constant); - -namespace paddle { -namespace framework { - -using DeviceContext = platform::DeviceContext; - -class NoneOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - protected: - void InferShape(framework::InferShapeContext *ctx) const override {} -}; - -template -class NoneKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext &context) const override {} -}; - -class RowWiseAddOpMaker : public OpProtoAndCheckerMaker { - public: - RowWiseAddOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "Input X of Add"); - AddInput("b", "Bias of Add"); - AddOutput("Out", "Out of Add"); - AddComment("Add Op"); - } -}; - -class RowWiseAddGradMaker : public SingleGradOpDescMaker { - public: - using SingleGradOpDescMaker::SingleGradOpDescMaker; - - protected: - std::unique_ptr Apply() const override { - auto grad_op = new OpDesc(); - grad_op->SetInput(GradVarName("Out"), OutputGrad("Out")); - grad_op->SetOutput(GradVarName("X"), InputGrad("X")); - grad_op->SetOutput(GradVarName("b"), InputGrad("b")); - grad_op->SetType("rowwise_add_grad"); - return std::unique_ptr(grad_op); - } -}; - -class MulOpMaker : public OpProtoAndCheckerMaker { - public: - MulOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "A"); - AddInput("Y", "B"); - AddOutput("Out", "Out"); - AddAttr("x_num_col_dims", "").SetDefault(1).EqualGreaterThan(1); - AddAttr("y_num_col_dims", "").SetDefault(1).EqualGreaterThan(1); - AddComment("Mul"); - } -}; - -class SigmoidOpMaker : public OpProtoAndCheckerMaker { - public: - SigmoidOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "X"); - AddOutput("Out", "Y"); - AddComment("Sigmoid"); - } -}; - -class NoGradOpMaker : public OpProtoAndCheckerMaker { - public: - NoGradOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "X input"); - AddOutput("Out", "Y output"); - AddComment("NoGradOp, same input output. 
no Grad"); - } -}; - -class FcOp : public operators::NetOp { - public: - FcOp(const std::string &type, const VariableNameMap &inputs, - const VariableNameMap &outputs, const AttributeMap &attrs) - : NetOp(type, inputs, outputs, attrs) { - AppendOp(OpRegistry::CreateOp( - "mul", {{"X", {Input("X")}}, {"Y", {Input("W")}}}, - {{"Out", {Output("mul_result")}}}, AttributeMap{})); - auto input_b = Inputs("b"); - std::string before_act = "mul_result"; - if (input_b.size() != 0) { - AppendOp(OpRegistry::CreateOp( - "rowwise_add", {{"X", {Output("mul_result")}}, {"b", {input_b[0]}}}, - {{"Out", {Output("add_result")}}}, AttributeMap{})); - before_act = "add_result"; - } else { - auto out_varname = Output("add_result"); - if (out_varname != kEmptyVarName) { - this->Rename(out_varname, kEmptyVarName); - } - } - - AppendOp(OpRegistry::CreateOp("sigmoid", {{"X", {Output(before_act)}}}, - {{"Out", {Output("Out")}}}, AttributeMap{})); - CompleteAddOp(false); - } -}; - -class FcOpMaker : public OpProtoAndCheckerMaker { - public: - FcOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "x"); - AddInput("W", "w"); - AddInput("b", "b"); - AddOutput("mul_result", "").AsIntermediate(); - AddOutput("add_result", "").AsIntermediate(); - AddOutput("Out", ""); - AddComment(""); - } -}; - -class ManyOutputOpMaker : public OpProtoAndCheckerMaker { - public: - ManyOutputOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("x", "x"); - AddOutput("y", "y"); - AddOutput("z", "z"); - AddComment(""); - } -}; - -class FillZeroOpMaker : public OpProtoAndCheckerMaker { - public: - FillZeroOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "x"); - AddOutput("Out", "out"); - AddComment(""); - } -}; - -class SumOpMaker : public framework::OpProtoAndCheckerMaker { - public: - SumOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "the input tensors of sum operator.").AsDuplicable(); - AddOutput("Out", "the output tensor of sum operator."); - AddComment(""); - } -}; - -class MultInOutOpMaker : public OpProtoAndCheckerMaker { - public: - MultInOutOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "x"); - AddInput("H", "h"); - AddOutput("Y", "y"); - AddOutput("Z", "z"); - AddComment(""); - } -}; - -class MinusGradOpDescMaker : public GradOpDescMakerBase { - public: - using GradOpDescMakerBase::GradOpDescMakerBase; - - std::vector> operator()() const override { - std::vector> retv; - auto x_g = InputGrad("X"); - if (!x_g.empty()) { - auto *op_desc = new OpDesc(); - op_desc->SetType("scale"); - op_desc->SetInput("X", OutputGrad("Out")); - op_desc->SetOutput("Out", x_g); - op_desc->SetAttr("scale", 1.0f); - retv.emplace_back(op_desc); - } - - auto y_g = InputGrad("Y"); - if (!y_g.empty()) { - auto *op_desc = new OpDesc(); - op_desc->SetType("scale"); - op_desc->SetInput("X", OutputGrad("Out")); - op_desc->SetOutput("Out", y_g); - op_desc->SetAttr("scale", -1.0f); - retv.emplace_back(op_desc); - } - return retv; - } -}; - -class MinusOpMaker : public OpProtoAndCheckerMaker { - public: - MinusOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", ""); - AddInput("Y", ""); - AddOutput("Out", ""); - AddComment("minus for unittest"); - } -}; -} // namespace framework -} // 
namespace paddle - -namespace f = paddle::framework; -namespace ops = paddle::operators; -using EnforceNotMet = paddle::platform::EnforceNotMet; -// rowwise_add -REGISTER_OPERATOR(rowwise_add, f::NoneOp, f::RowWiseAddOpMaker, - f::RowWiseAddGradMaker); -REGISTER_OP_CPU_KERNEL(rowwise_add, - f::NoneKernel); -REGISTER_OPERATOR(rowwise_add_grad, f::NoneOp); -REGISTER_OP_CPU_KERNEL(rowwise_add_grad, - f::NoneKernel); -// mul -REGISTER_OP(mul, f::NoneOp, f::MulOpMaker, mul_grad, f::NoneOp); -REGISTER_OP_CPU_KERNEL(mul, f::NoneKernel); -REGISTER_OP_CPU_KERNEL(mul_grad, - f::NoneKernel); -// sigmoid -REGISTER_OP(sigmoid, f::NoneOp, f::SigmoidOpMaker, sigmoid_grad, f::NoneOp); -REGISTER_OP_CPU_KERNEL(sigmoid, - f::NoneKernel); -REGISTER_OP_WITHOUT_GRADIENT(nograd, f::NoneOp, f::NoGradOpMaker); -// fill_zeros_like -REGISTER_OP_WITHOUT_GRADIENT(fill_zeros_like, f::NoneOp, f::FillZeroOpMaker); -REGISTER_OP_CPU_KERNEL(fill_zeros_like, - f::NoneKernel); -// sum -REGISTER_OP(sum, f::NoneOp, f::SumOpMaker, sum_grad, f::NoneOp); -REGISTER_OP_CPU_KERNEL(sum, f::NoneKernel); -REGISTER_OP_CPU_KERNEL(sum_grad, - f::NoneKernel); -// fc -REGISTER_OP_WITHOUT_GRADIENT(fc, f::FcOp, f::FcOpMaker); -// many_output_op -REGISTER_OP(many_output_op, f::NoneOp, f::ManyOutputOpMaker, - many_output_op_grad, f::NoneOp); -// mult_in_out -REGISTER_OP(mult_in_out, f::NoneOp, f::MultInOutOpMaker, mult_in_out_grad, - f::NoneOp); -REGISTER_OP_CPU_KERNEL(mult_in_out, - f::NoneKernel); -REGISTER_OP_CPU_KERNEL(mult_in_out_grad, - f::NoneKernel); -// minus -REGISTER_OPERATOR(minus, f::NoneOp, f::MinusOpMaker, f::MinusGradOpDescMaker); -REGISTER_OP_CPU_KERNEL(minus, f::NoneKernel); -// scale -REGISTER_OPERATOR(scale, f::NoneOp); -REGISTER_OP_CPU_KERNEL(scale, f::NoneKernel); - -TEST(Backward, simple_op_not_need_grad) { - auto fwd = - f::OpRegistry::CreateOp("rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, - {{"Out", {"out"}}}, f::AttributeMap{}); - ASSERT_NE(fwd, nullptr); - auto gop = f::Backward(*fwd, {"x"}); - ASSERT_EQ(gop->Output(f::GradVarName("X")), f::kEmptyVarName); - - auto no_input_gop = f::Backward(*fwd, {"x", "b"}); - ASSERT_NE(no_input_gop, nullptr); - ASSERT_TRUE(no_input_gop->IsNetOp()); - ASSERT_EQ(0UL, static_cast(no_input_gop.get())->ops_.size()); -} - -TEST(Backward, net_fc_backward_normal) { - std::shared_ptr fwd = - f::OpRegistry::CreateOp("fc", {{"X", {"x"}}, {"W", {"w"}}, {"b", {"b"}}}, - {{"mul_result", {"mul_res"}}, - {"add_result", {"add_re"}}, - {"Out", {"out"}}}, - f::AttributeMap{}); - ASSERT_NE(fwd, nullptr); - std::shared_ptr gop = - f::Backward(*fwd, std::unordered_set{}); - ASSERT_TRUE(gop->IsNetOp()); - auto net = static_cast(gop.get()); - - ASSERT_NO_THROW(net->DebugString()); - - ASSERT_EQ(3UL, net->ops_.size()); - - f::OperatorBase &d_sigmoid = *net->ops_[0]; - ASSERT_EQ("sigmoid_grad", d_sigmoid.Type()); - - f::OperatorBase &d_add = *net->ops_[1]; - ASSERT_EQ("rowwise_add_grad", d_add.Type()); - - f::OperatorBase &d_mul = *net->ops_[2]; - ASSERT_EQ("mul_grad", d_mul.Type()); -} - -TEST(Backward, net_fc_backward_not_have_b) { - std::shared_ptr fwd = - f::OpRegistry::CreateOp("fc", {{"X", {"x"}}, {"W", {"w"}}, {"b", {}}}, - {{"mul_result", {"mul_res"}}, - {"add_result", {"add_res"}}, - {"Out", {"tmp"}}}, - f::AttributeMap{}); - ASSERT_NE(fwd, nullptr); - std::shared_ptr gop = - f::Backward(*fwd, std::unordered_set{}); - ASSERT_TRUE(gop->IsNetOp()); - auto net = static_cast(gop.get()); - - ASSERT_NO_THROW(net->DebugString()); - - ASSERT_EQ(2UL, net->ops_.size()); - - f::OperatorBase &d_sigmoid = 
*net->ops_[0]; - ASSERT_EQ("sigmoid_grad", d_sigmoid.Type()); - - f::OperatorBase &d_mul = *net->ops_[1]; - ASSERT_EQ("mul_grad", d_mul.Type()); -} - -TEST(Backward, net_input_of_network_not_need_grad) { - ops::NetOp net; - net.AppendOp(f::OpRegistry::CreateOp( - "fc", {{"X", {"x"}}, {"W", {"W1"}}, {"b", {"b1"}}}, - {{"mul_result", {"mul_tmp_0"}}, - {"add_result", {"add_tmp_0"}}, - {"Out", {"hidden0"}}}, - f::AttributeMap{})); - net.AppendOp(f::OpRegistry::CreateOp( - "fc", {{"X", {"hidden0"}}, {"W", {"W2"}}, {"b", {"b2"}}}, - {{"mul_result", {"mul_tmp_1"}}, - {"add_result", {"add_tmp_1"}}, - {"Out", {"hidden1"}}}, - f::AttributeMap{})); - net.CompleteAddOp(); - auto bwd = Backward(net, {"x"}); // x@GRAD is not need. - ASSERT_TRUE(bwd->IsNetOp()); - auto bwd_net = static_cast(bwd.get()); - - auto output_vars = bwd_net->OutputVars(true); - std::unordered_set all_outputs = - std::unordered_set(output_vars.begin(), output_vars.end()); - all_outputs.erase(f::kEmptyVarName); - - for (auto &out : {"W1", "b1", "hidden0", "W2", "b2"}) { - ASSERT_NE(all_outputs.find(f::GradVarName(out)), all_outputs.end()); - } - - // Not Generated X - ASSERT_EQ(all_outputs.find(f::GradVarName("X")), all_outputs.end()); - - ASSERT_EQ(2UL, bwd_net->ops_.size()); - ASSERT_TRUE(bwd_net->ops_[1]->IsNetOp()); - auto first_fc_grad = static_cast(bwd_net->ops_[1].get()); - ASSERT_EQ(3UL, first_fc_grad->ops_.size()); - ASSERT_EQ(f::kEmptyVarName, - first_fc_grad->ops_[2]->Output(f::GradVarName("X"))); -} - -TEST(Backward, net_shared_weight) { - ops::NetOp net; - net.AppendOp(f::OpRegistry::CreateOp("mul", {{"X", {"x"}}, {"Y", {"w"}}}, - {{"Out", {"out"}}}, f::AttributeMap{})); - net.AppendOp(f::OpRegistry::CreateOp("mul", {{"X", {"out"}}, {"Y", {"w"}}}, - {{"Out", {"FinalOut"}}}, - f::AttributeMap{})); - net.CompleteAddOp(); - - auto bwd = f::Backward(net, std::unordered_set{}); - ASSERT_TRUE(bwd->IsNetOp()); - auto bwd_net = static_cast(bwd.get()); - ASSERT_EQ(3UL, bwd_net->ops_.size()); - ASSERT_EQ("sum", bwd_net->ops_[2]->Type()); -} - -TEST(Backward, op_all_input_are_not_need) { - auto fwd = - f::OpRegistry::CreateOp("rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, - {{"Out", {"out"}}}, f::AttributeMap{}); - auto backward = f::Backward(*fwd, {"x", "b"}); - ASSERT_TRUE(backward->IsNetOp()); - auto net = static_cast(backward.get()); - ASSERT_TRUE(net->ops_.empty()); -} - -TEST(Backward, op_all_output_are_not_need) { - auto fwd = - f::OpRegistry::CreateOp("rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, - {{"Out", {"out"}}}, f::AttributeMap{}); - auto backward = f::Backward(*fwd, {"out"}); - ASSERT_TRUE(backward->IsNetOp()); - auto net = static_cast(backward.get()); - ASSERT_TRUE(net->ops_.empty()); -} - -TEST(Backward, op_part_of_output_are_not_need) { - auto fwd = - f::OpRegistry::CreateOp("many_output_op", {{"x", {"X"}}}, - {{"y", {"Y"}}, {"z", {"Z"}}}, f::AttributeMap{}); - auto backward = f::Backward(*fwd, {"Z"}); - ASSERT_TRUE(backward->IsNetOp()); - auto net = static_cast(backward.get()); - ASSERT_EQ(net->ops_.size(), 2UL); - - auto &fill_zero = *net->ops_[0]; - ASSERT_EQ("fill_zeros_like", fill_zero.Type()); - ASSERT_EQ(1UL, fill_zero.Inputs("X").size()); - ASSERT_EQ("Z", fill_zero.Input("X")); - ASSERT_EQ(1UL, fill_zero.Outputs("Out").size()); - ASSERT_EQ(std::string("Z") + f::kZeroVarSuffix, fill_zero.Output("Out")); - - auto &d_many_out = *net->ops_[1]; - ASSERT_EQ("many_output_op_grad", d_many_out.Type()); - ASSERT_EQ(1UL + 2UL + 2UL, d_many_out.Inputs().size()); // I/O/OG - ASSERT_EQ(std::string("Z") + 
f::kZeroVarSuffix, - d_many_out.Input(f::GradVarName("z"))); - ASSERT_EQ(f::GradVarName("Y"), d_many_out.Input(f::GradVarName("y"))); - ASSERT_EQ(f::GradVarName("X"), d_many_out.Output(f::GradVarName("x"))); -} - -TEST(Backward, op_part_of_input_are_not_need) { - auto fwd = f::OpRegistry::CreateOp("mul", {{"X", {"a"}}, {"Y", {"b"}}}, - {{"Out", {"out"}}}, f::AttributeMap{}); - auto backward = f::Backward(*fwd, {"a"}); - auto &grad_mul = *backward; - ASSERT_EQ(grad_mul.Type(), "mul_grad"); - ASSERT_EQ(grad_mul.Inputs().size(), 2UL + 1UL + 1UL); - ASSERT_EQ(grad_mul.Outputs().size(), 2UL); - ASSERT_EQ(grad_mul.Output(f::GradVarName("X")), f::kEmptyVarName); - ASSERT_EQ(grad_mul.Output(f::GradVarName("Y")), f::GradVarName("b")); - ASSERT_EQ(grad_mul.Input(f::GradVarName("Out")), f::GradVarName("out")); - ASSERT_EQ(grad_mul.Input("X"), "a"); - ASSERT_EQ(grad_mul.Input("Y"), "b"); - ASSERT_EQ(grad_mul.Input("Out"), "out"); -} - -TEST(Backward, linear_net_intermediate_variable_has_no_grad) { - ops::NetOp net; - net.AppendOp(f::OpRegistry::CreateOp( - "fc", {{"X", {"x1"}}, {"W", {"w1"}}, {"b", {"b1"}}}, - {{"mul_result", {"mul_out1"}}, - {"add_result", {"add_out1"}}, - {"Out", {"out1"}}}, - f::AttributeMap{})); - net.AppendOp(f::OpRegistry::CreateOp( - "fc", {{"X", {"out1"}}, {"W", {"w2"}}, {"b", {"b2"}}}, - {{"mul_result", {"mul_out2"}}, - {"add_result", {"tmp_out2"}}, - {"Out", {"out2"}}}, - f::AttributeMap{})); - net.AppendOp(f::OpRegistry::CreateOp( - "fc", {{"X", {"out2"}}, {"W", {"w3"}}, {"b", {"b3"}}}, - {{"mul_result", {"mul_out3"}}, - {"add_result", {"tmp_out3"}}, - {"Out", {"out3"}}}, - f::AttributeMap{})); - net.CompleteAddOp(); - - auto backward = f::Backward(net, {"mul_out2", "tmp_out2", "out2"}); - ASSERT_TRUE(backward->IsNetOp()); - auto bwd_net = static_cast(backward.get()); - ASSERT_EQ(bwd_net->ops_.size(), 3UL); - auto &grad_fc = *bwd_net->ops_[0]; - - const char *all = paddle::operators::NetOp::kAll; - EXPECT_EQ(grad_fc.Inputs(all).size(), - 2UL /* external input number */ - + 1UL /* external output number*/ - + 1UL /* number of gradient of external output*/ - + 2UL /* internal variable number*/ - ); - EXPECT_EQ(grad_fc.Outputs(all).size(), - 2UL /* input number of mul*/ - + 2UL /* input number of rowwise_add*/ - + 1UL /* input number of sigmod */ - - 1UL /* out2 is not needed*/); - EXPECT_EQ(bwd_net->ops_[1]->Inputs(all).size(), 0UL); - EXPECT_EQ(bwd_net->ops_[1]->Outputs(all).size(), 0UL); - EXPECT_EQ(bwd_net->ops_[2]->Inputs(all).size(), 0UL); - EXPECT_EQ(bwd_net->ops_[2]->Outputs(all).size(), 0UL); -} - -TEST(Backward, simple_single_op) { - f::ProgramDesc program; - f::BlockDesc *block = program.MutableBlock(0); - - f::OpDesc *op = block->AppendOp(); - op->SetType("rowwise_add"); - op->SetInput("X", {"x"}); - op->SetInput("b", {"b"}); - op->SetOutput("Out", {"out"}); - - auto target = f::VarDesc("out"); - target.SetShape({1}); - auto var_to_grad = - AppendBackward(program, target, std::unordered_set{}); - - ASSERT_EQ(block->AllOps().size(), 3UL); - f::OpDesc *fill_op = block->AllOps()[1]; - EXPECT_EQ(fill_op->Type(), "fill_constant"); - - f::OpDesc *grad_op = block->AllOps()[2]; - EXPECT_EQ(grad_op->Type(), "rowwise_add_grad"); - ASSERT_EQ(grad_op->InputNames().size(), 1UL); - ASSERT_EQ(grad_op->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op->Input(f::GradVarName("Out")), - std::vector({f::GradVarName("out")})); - EXPECT_EQ(grad_op->Output(f::GradVarName("X")), - std::vector({f::GradVarName("x")})); - EXPECT_EQ(grad_op->Output(f::GradVarName("b")), - 
std::vector({f::GradVarName("b")})); - - EXPECT_EQ(var_to_grad.size(), 3UL); - EXPECT_EQ(var_to_grad.at("b"), f::GradVarInfo(f::GradVarName("b"), 0, 2)); - EXPECT_EQ(var_to_grad.at("x"), f::GradVarInfo(f::GradVarName("x"), 0, 2)); - - EXPECT_TRUE(block->HasVar(f::GradVarName("b"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("x"))); -} - -TEST(Backward, default_attribute) { - f::ProgramDesc program; - f::BlockDesc *block = program.MutableBlock(0); - f::OpDesc *op = block->AppendOp(); - op->SetType("mul"); - op->SetInput("X", {"x"}); - op->SetInput("Y", {"y"}); - op->SetOutput("Out", {"out"}); - op->CheckAttrs(); - - auto target = f::VarDesc("out"); - target.SetShape({1}); - AppendBackward(program, target, std::unordered_set{}); - - ASSERT_EQ(block->AllOps().size(), 3UL); - EXPECT_EQ(boost::get(op->GetAttr("x_num_col_dims")), 1); - EXPECT_EQ(boost::get(op->GetAttr("y_num_col_dims")), 1); - - f::OpDesc *fill_op = block->AllOps()[1]; - EXPECT_EQ(fill_op->Type(), "fill_constant"); - - f::OpDesc *grad_op = block->AllOps()[2]; - ASSERT_EQ(grad_op->Type(), "mul_grad"); - EXPECT_EQ(boost::get(grad_op->GetAttr("x_num_col_dims")), 1); - EXPECT_EQ(boost::get(grad_op->GetAttr("y_num_col_dims")), 1); -} - -TEST(Backward, simple_mult_op) { - f::ProgramDesc program; - f::BlockDesc *block = program.MutableBlock(0); - f::OpDesc *op1 = block->AppendOp(); - op1->SetType("rowwise_add"); - op1->SetInput("X", {"x1"}); - op1->SetInput("b", {"b1"}); - op1->SetOutput("Out", {"out1"}); - - f::OpDesc *op2 = block->AppendOp(); - op2->SetType("mul"); - op2->SetInput("X", {"out1"}); - op2->SetInput("Y", {"y2"}); - op2->SetOutput("Out", {"out2"}); - - f::OpDesc *op3 = block->AppendOp(); - op3->SetType("rowwise_add"); - op3->SetInput("X", {"out2"}); - op3->SetInput("b", {"b3"}); - op3->SetOutput("Out", {"out3"}); - - auto target = f::VarDesc("out3"); - target.SetShape({1}); - size_t forward_len = block->AllOps().size(); - auto var_to_grad = - AppendBackward(program, target, std::unordered_set{}); - - ASSERT_EQ(block->AllOps().size(), 6UL + 1); - f::OpDesc *fill_op = block->AllOps()[forward_len]; - EXPECT_EQ(fill_op->Type(), "fill_constant"); - - f::OpDesc *grad_op1 = block->AllOps()[6]; - EXPECT_EQ(grad_op1->Type(), "rowwise_add_grad"); - ASSERT_EQ(grad_op1->InputNames().size(), 1UL); - ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op1->Input(f::GradVarName("Out")), - std::vector({f::GradVarName("out1")})); - EXPECT_EQ(grad_op1->Output(f::GradVarName("X")), - std::vector({f::GradVarName("x1")})); - EXPECT_EQ(grad_op1->Output(f::GradVarName("b")), - std::vector({f::GradVarName("b1")})); - - f::OpDesc *grad_op2 = block->AllOps()[5]; - EXPECT_EQ(grad_op2->Type(), "mul_grad"); - ASSERT_EQ(grad_op2->InputNames().size(), 4UL); - ASSERT_EQ(grad_op2->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op2->Input("X"), std::vector({"out1"})); - EXPECT_EQ(grad_op2->Input("Y"), std::vector({"y2"})); - EXPECT_EQ(grad_op2->Input("Out"), std::vector({"out2"})); - EXPECT_EQ(grad_op2->Input(f::GradVarName("Out")), - std::vector({f::GradVarName("out2")})); - EXPECT_EQ(grad_op2->Output(f::GradVarName("X")), - std::vector({f::GradVarName("out1")})); - EXPECT_EQ(grad_op2->Output(f::GradVarName("Y")), - std::vector({f::GradVarName("y2")})); - - f::OpDesc *grad_op3 = block->AllOps()[4]; - EXPECT_EQ(grad_op3->Type(), "rowwise_add_grad"); - ASSERT_EQ(grad_op3->InputNames().size(), 1UL); - ASSERT_EQ(grad_op3->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op3->Input(f::GradVarName("Out")), - std::vector({f::GradVarName("out3")})); - 
EXPECT_EQ(grad_op3->Output(f::GradVarName("X")), - std::vector({f::GradVarName("out2")})); - EXPECT_EQ(grad_op3->Output(f::GradVarName("b")), - std::vector({f::GradVarName("b3")})); - - EXPECT_EQ(var_to_grad.size(), 7UL); - EXPECT_EQ(var_to_grad.at("x1"), f::GradVarInfo(f::GradVarName("x1"), 0, 6)); - EXPECT_EQ(var_to_grad.at("b1"), f::GradVarInfo(f::GradVarName("b1"), 0, 6)); - EXPECT_EQ(var_to_grad.at("out1"), - f::GradVarInfo(f::GradVarName("out1"), 0, 5)); - EXPECT_EQ(var_to_grad.at("y2"), f::GradVarInfo(f::GradVarName("y2"), 0, 5)); - EXPECT_EQ(var_to_grad.at("out2"), - f::GradVarInfo(f::GradVarName("out2"), 0, 4)); - EXPECT_EQ(var_to_grad.at("b3"), f::GradVarInfo(f::GradVarName("b3"), 0, 4)); - - EXPECT_TRUE(block->HasVar(f::GradVarName("x1"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("b1"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("out1"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("y2"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("out2"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("b3"))); -} - -TEST(Backward, intermedia_var_no_grad) { - f::ProgramDesc program; - f::BlockDesc *block = program.MutableBlock(0); - f::OpDesc *op1 = block->AppendOp(); - op1->SetType("rowwise_add"); - op1->SetInput("X", {"x1"}); - op1->SetInput("b", {"b1"}); - op1->SetOutput("Out", {"out1"}); - - f::OpDesc *op2 = block->AppendOp(); - op2->SetType("mul"); - op2->SetInput("X", {"x2"}); - op2->SetInput("Y", {"y2"}); - op2->SetOutput("Out", {"out2"}); - - f::OpDesc *op3 = block->AppendOp(); - op3->SetType("rowwise_add"); - op3->SetInput("X", {"out2"}); - op3->SetInput("b", {"b3"}); - op3->SetOutput("Out", {"out3"}); - - f::OpDesc *op4 = block->AppendOp(); - op4->SetType("mul"); - op4->SetInput("X", {"out1"}); - op4->SetInput("Y", {"out3"}); - op4->SetOutput("Out", {"out4"}); - - auto target = f::VarDesc("out4"); - target.SetShape({1}); - size_t forward_len = block->AllOps().size(); - auto var_to_grad = AppendBackward(program, target, {"out3"}); - - ASSERT_EQ(block->AllOps().size(), 7UL); - f::OpDesc *fill_op = block->AllOps()[forward_len]; - EXPECT_EQ(fill_op->Type(), "fill_constant"); - - f::OpDesc *grad_op1 = block->AllOps()[6]; - EXPECT_EQ(grad_op1->Type(), "rowwise_add_grad"); - ASSERT_EQ(grad_op1->InputNames().size(), 1UL); - ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op1->Input(f::GradVarName("Out")), - std::vector({f::GradVarName("out1")})); - EXPECT_EQ(grad_op1->Output(f::GradVarName("X")), - std::vector({f::GradVarName("x1")})); - EXPECT_EQ(grad_op1->Output(f::GradVarName("b")), - std::vector({f::GradVarName("b1")})); - - f::OpDesc *grad_op4 = block->AllOps()[5]; - EXPECT_EQ(grad_op4->Type(), "mul_grad"); - ASSERT_EQ(grad_op4->InputNames().size(), 4UL); - ASSERT_EQ(grad_op4->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op4->Input("X"), std::vector({"out1"})); - EXPECT_EQ(grad_op4->Input("Y"), std::vector({"out3"})); - EXPECT_EQ(grad_op4->Input("Out"), std::vector({"out4"})); - EXPECT_EQ(grad_op4->Input(f::GradVarName("Out")), - std::vector({f::GradVarName("out4")})); - EXPECT_EQ(grad_op4->Output(f::GradVarName("X")), - std::vector({f::GradVarName("out1")})); - EXPECT_EQ(grad_op4->Output(f::GradVarName("Y")), std::vector()); - - EXPECT_EQ(var_to_grad.size(), 4UL); - EXPECT_EQ(var_to_grad.at("x1"), f::GradVarInfo(f::GradVarName("x1"), 0, 6)); - EXPECT_EQ(var_to_grad.at("b1"), f::GradVarInfo(f::GradVarName("b1"), 0, 6)); - EXPECT_EQ(var_to_grad.at("out1"), - f::GradVarInfo(f::GradVarName("out1"), 0, 5)); - - EXPECT_TRUE(block->HasVar(f::GradVarName("x1"))); - 
EXPECT_TRUE(block->HasVar(f::GradVarName("b1"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("out1"))); -} - -TEST(Backward, var_no_grad) { - f::ProgramDesc program; - f::BlockDesc *block = program.MutableBlock(0); - f::OpDesc *op1 = block->AppendOp(); - op1->SetType("mult_in_out"); - op1->SetInput("X", {"x1"}); - op1->SetInput("H", {"h1"}); - op1->SetOutput("Y", {"y1"}); - op1->SetOutput("Z", {"z1"}); - - f::OpDesc *op2 = block->AppendOp(); - op2->SetType("mult_in_out"); - op2->SetInput("X", {"y1"}); - op2->SetInput("H", {"z1"}); - op2->SetOutput("Y", {"y2"}); - op2->SetOutput("Z", {"z2"}); - - auto target = f::VarDesc("z2"); - target.SetShape({1}); - size_t forward_len = block->AllOps().size(); - auto var_to_grad = AppendBackward(program, target, {"z1"}); - - ASSERT_EQ(block->AllOps().size(), 6UL); - f::OpDesc *fill_op = block->AllOps()[forward_len]; - EXPECT_EQ(fill_op->Type(), "fill_constant"); - - f::OpDesc *grad_op2 = block->AllOps()[3]; - ASSERT_EQ(grad_op2->Type(), "mult_in_out_grad"); - ASSERT_EQ(grad_op2->InputNames().size(), 6UL); - ASSERT_EQ(grad_op2->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op2->Input("X"), std::vector({"y1"})); - EXPECT_EQ(grad_op2->Input("H"), std::vector({"z1"})); - EXPECT_EQ(grad_op2->Input("Y"), std::vector({"y2"})); - EXPECT_EQ(grad_op2->Input("Z"), std::vector({"z2"})); - EXPECT_EQ(grad_op2->Input(f::GradVarName("Y")), - std::vector({f::GradVarName("y2")})); - EXPECT_EQ(grad_op2->Input(f::GradVarName("Z")), - std::vector({f::GradVarName("z2")})); - EXPECT_EQ(grad_op2->Output(f::GradVarName("X")), - std::vector({f::GradVarName("y1")})); - EXPECT_EQ(grad_op2->Output(f::GradVarName("H")), std::vector()); - - f::OpDesc *fill_zero_op = block->AllOps()[4]; - ASSERT_EQ(fill_zero_op->Type(), "fill_zeros_like"); - ASSERT_EQ(fill_zero_op->InputNames().size(), 1UL); - ASSERT_EQ(fill_zero_op->OutputNames().size(), 1UL); - EXPECT_EQ(fill_zero_op->Input("X"), std::vector({"z1"})); - EXPECT_EQ(fill_zero_op->Output("Out"), - std::vector({std::string("z1") + f::kZeroVarSuffix})); - - f::OpDesc *grad_op1 = block->AllOps()[5]; - ASSERT_EQ(grad_op1->Type(), "mult_in_out_grad"); - ASSERT_EQ(grad_op1->InputNames().size(), 6UL); - ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op1->Input("X"), std::vector({"x1"})); - EXPECT_EQ(grad_op1->Input("H"), std::vector({"h1"})); - EXPECT_EQ(grad_op1->Input("Y"), std::vector({"y1"})); - EXPECT_EQ(grad_op1->Input("Z"), std::vector({"z1"})); - EXPECT_EQ(grad_op1->Input(f::GradVarName("Y")), - std::vector({f::GradVarName("y1")})); - EXPECT_EQ(grad_op1->Input(f::GradVarName("Z")), - std::vector({std::string("z1") + f::kZeroVarSuffix})); - EXPECT_EQ(grad_op1->Output(f::GradVarName("X")), - std::vector({f::GradVarName("x1")})); - EXPECT_EQ(grad_op1->Output(f::GradVarName("H")), - std::vector({f::GradVarName("h1")})); - - EXPECT_EQ(var_to_grad.size(), 4UL); - EXPECT_EQ(var_to_grad.at("y1"), f::GradVarInfo(f::GradVarName("y1"), 0, 3)); - EXPECT_EQ(var_to_grad.at("x1"), f::GradVarInfo(f::GradVarName("x1"), 0, 5)); - EXPECT_EQ(var_to_grad.at("h1"), f::GradVarInfo(f::GradVarName("h1"), 0, 5)); - - EXPECT_TRUE(block->HasVar(f::GradVarName("y1"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("x1"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("h1"))); -} - -TEST(Backward, shared_var) { - f::ProgramDesc program; - f::BlockDesc *block = program.MutableBlock(0); - f::OpDesc *op1 = block->AppendOp(); - op1->SetType("rowwise_add"); - op1->SetInput("X", {"x1"}); - op1->SetInput("b", {"b1"}); - op1->SetOutput("Out", {"out1"}); - - 
f::OpDesc *op2 = block->AppendOp(); - op2->SetType("mul"); - op2->SetInput("X", {"out1"}); - op2->SetInput("Y", {"y2"}); - op2->SetOutput("Out", {"out2"}); - - f::OpDesc *op3 = block->AppendOp(); - op3->SetType("rowwise_add"); - op3->SetInput("X", {"out1"}); - op3->SetInput("b", {"b3"}); - op3->SetOutput("Out", {"out3"}); - - auto target = f::VarDesc("out3"); - target.SetShape({1}); - size_t forward_len = block->AllOps().size(); - auto var_to_grad = - AppendBackward(program, target, std::unordered_set{}); - - ASSERT_EQ(block->AllOps().size(), 8UL); - f::OpDesc *fill_op = block->AllOps()[forward_len]; - EXPECT_EQ(fill_op->Type(), "fill_constant"); - - f::OpDesc *grad_op3 = block->AllOps()[4]; - ASSERT_EQ(grad_op3->Type(), "rowwise_add_grad"); - ASSERT_EQ(grad_op3->InputNames().size(), 1UL); - ASSERT_EQ(grad_op3->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op3->Input(f::GradVarName("Out")), - std::vector({f::GradVarName("out3")})); - EXPECT_EQ(grad_op3->Output(f::GradVarName("X")), - std::vector({f::GradVarName("out1") + "@RENAME@0"})); - EXPECT_EQ(grad_op3->Output(f::GradVarName("b")), - std::vector({f::GradVarName("b3")})); - - f::OpDesc *grad_op4 = block->AllOps()[5]; - ASSERT_EQ(grad_op4->Type(), "mul_grad"); - ASSERT_EQ(grad_op4->InputNames().size(), 4UL); - ASSERT_EQ(grad_op4->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op4->Input("X"), std::vector({"out1"})); - EXPECT_EQ(grad_op4->Input("Y"), std::vector({"y2"})); - EXPECT_EQ(grad_op4->Input("Out"), std::vector({"out2"})); - EXPECT_EQ(grad_op4->Input(f::GradVarName("Out")), - std::vector({f::GradVarName("out2")})); - EXPECT_EQ(grad_op4->Output(f::GradVarName("X")), - std::vector({f::GradVarName("out1") + "@RENAME@1"})); - EXPECT_EQ(grad_op4->Output(f::GradVarName("Y")), - std::vector({f::GradVarName("y2")})); - - f::OpDesc *sum_op = block->AllOps()[6]; - ASSERT_EQ(sum_op->Type(), "sum"); - ASSERT_EQ(sum_op->InputNames().size(), 1UL); - ASSERT_EQ(sum_op->OutputNames().size(), 1UL); - EXPECT_EQ(sum_op->Input("X"), - std::vector({f::GradVarName("out1") + "@RENAME@0", - f::GradVarName("out1") + "@RENAME@1"})); - EXPECT_EQ(sum_op->Output("Out"), - std::vector({f::GradVarName("out1")})); - - f::OpDesc *grad_op1 = block->AllOps()[7]; - ASSERT_EQ(grad_op1->Type(), "rowwise_add_grad"); - ASSERT_EQ(grad_op1->InputNames().size(), 1UL); - ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); - EXPECT_EQ(grad_op1->Input(f::GradVarName("Out")), - std::vector({f::GradVarName("out1")})); - EXPECT_EQ(grad_op1->Output(f::GradVarName("X")), - std::vector({f::GradVarName("x1")})); - EXPECT_EQ(grad_op1->Output(f::GradVarName("b")), - std::vector({f::GradVarName("b1")})); - - EXPECT_EQ(var_to_grad.size(), 6UL); - EXPECT_EQ(var_to_grad.at("b3"), f::GradVarInfo(f::GradVarName("b3"), 0, 4)); - EXPECT_EQ(var_to_grad.at("y2"), f::GradVarInfo(f::GradVarName("y2"), 0, 5)); - EXPECT_EQ(var_to_grad.at("out1"), - f::GradVarInfo(f::GradVarName("out1"), 0, 6)); - EXPECT_EQ(var_to_grad.at("x1"), f::GradVarInfo(f::GradVarName("x1"), 0, 7)); - EXPECT_EQ(var_to_grad.at("b1"), f::GradVarInfo(f::GradVarName("b1"), 0, 7)); - - EXPECT_TRUE(block->HasVar(f::GradVarName("b3"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("y2"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("out1"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("x1"))); - EXPECT_TRUE(block->HasVar(f::GradVarName("b1"))); -} - -TEST(Backward, half_backward) { - f::ProgramDesc program; - f::BlockDesc *block = program.MutableBlock(0); - auto *op1 = block->AppendOp(); - op1->SetType("minus"); - op1->SetInput("X", {"a"}); 
- op1->SetInput("Y", {"b"}); - op1->SetOutput("Out", {"out"}); - - auto target = f::VarDesc("out"); - target.SetShape({1}); - size_t forward_len = block->AllOps().size(); - auto var_to_grad = AppendBackward(program, target, {"b"}); - f::OpDesc *fill_op = block->AllOps()[forward_len]; - EXPECT_EQ(fill_op->Type(), "fill_constant"); - auto ops = block->AllOps(); - ASSERT_EQ(3UL, ops.size()); - - EXPECT_EQ(var_to_grad.size(), 2UL); - EXPECT_EQ(var_to_grad.at("a"), - f::GradVarInfo(f::GradVarName("a"), 0, forward_len + 1)); -} diff --git a/paddle/fluid/framework/block_desc.cc b/paddle/fluid/framework/block_desc.cc index fbe08349c37c4fde115ceea954ba2b84880088d7..b8847e4b909cbab67b2ddb6885b45b73d402de19 100644 --- a/paddle/fluid/framework/block_desc.cc +++ b/paddle/fluid/framework/block_desc.cc @@ -13,11 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/block_desc.h" +#include #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/program_desc.h" -#include - namespace paddle { namespace framework { @@ -147,52 +146,7 @@ void BlockDesc::RemoveOp(size_t s, size_t e) { if (ops_.begin() + s == ops_.end() || ops_.begin() + e == ops_.end()) { return; } - auto get_vars = [](std::deque>::iterator &op, - std::vector &v) { - auto in_names = (*op)->InputArgumentNames(); - v.insert(v.end(), in_names.begin(), in_names.end()); - auto out_names = (*op)->OutputArgumentNames(); - v.insert(v.end(), out_names.begin(), out_names.end()); - std::sort(v.begin(), v.end()); - auto last = std::unique(v.begin(), v.end()); - v.erase(last, v.end()); - }; - need_update_ = true; - - for (size_t i = s; i < e; i++) { - // since remove op one by one, every time remove the first op. - auto op = ops_.begin() + s; - - // collect input and output variables from current delete op - std::vector cur_vars; - get_vars(op, cur_vars); - - // remove current op - ops_.erase(ops_.begin() + s); - - // collect input and output variables from other ops - std::vector other_vars; - for (auto it = ops_.begin(); it != ops_.end(); it++) { - get_vars(it, other_vars); - } - - // variables should be deleted - std::vector delete_vars; - // delete_vars = cur_vars - cur_vars ^ other_input_vars - std::set_difference(cur_vars.begin(), cur_vars.end(), other_vars.begin(), - other_vars.end(), - std::inserter(delete_vars, delete_vars.end())); - // remove variables - for (size_t i = 0; i < delete_vars.size(); i++) { - auto name = delete_vars[i]; - auto it = vars_.find(name); - PADDLE_ENFORCE(it != vars_.end(), - "%s is not in variable list, it should not be deleted", - name); - vars_.erase(it); - VLOG(3) << "deleting variable " << name; - } - } + ops_.erase(ops_.begin() + s, ops_.begin() + e); } std::vector BlockDesc::AllOps() const { diff --git a/paddle/fluid/framework/block_desc.h b/paddle/fluid/framework/block_desc.h index 873969b2a884f6d9e133fe87bf72725c36ce8b98..eef19c4f09c60b9df18f154c85c421f5bff9413f 100644 --- a/paddle/fluid/framework/block_desc.h +++ b/paddle/fluid/framework/block_desc.h @@ -92,7 +92,7 @@ class BlockDesc { /* * Remove Op and its input/output variables. - * Note that for either input or ouput variable, if it is also an input or + * Note that for either input or output variable, if it is also an input or * output variable of other ops, we should remain it. 
*/ void RemoveOp(size_t s, size_t e); diff --git a/paddle/fluid/framework/details/CMakeLists.txt b/paddle/fluid/framework/details/CMakeLists.txt index 89b5c6847f15b3f2a270fe1e7db9e590549e8982..897e41f79f4e3bb9cecbe7b42fc6c4fd3401d839 100644 --- a/paddle/fluid/framework/details/CMakeLists.txt +++ b/paddle/fluid/framework/details/CMakeLists.txt @@ -1,10 +1,11 @@ cc_library(var_handle SRCS var_handle.cc DEPS place) -cc_library(op_handle_base SRCS op_handle_base.cc DEPS var_handle device_context) +cc_library(op_handle_base SRCS op_handle_base.cc DEPS var_handle device_context lod_tensor) cc_library(scale_loss_grad_op_handle SRCS scale_loss_grad_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory) cc_library(fetch_op_handle SRCS fetch_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory) nv_library(nccl_all_reduce_op_handle SRCS nccl_all_reduce_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory dynload_cuda) cc_library(computation_op_handle SRCS computation_op_handle.cc DEPS framework_proto scope place operator op_registry) +cc_library(send_op_handle SRCS send_op_handle.cc DEPS framework_proto scope place operator op_registry) cc_library(ssa_graph SRCS ssa_graph.cc DEPS var_handle op_handle_base) cc_library(ssa_graph_builder SRCS ssa_graph_builder.cc DEPS ssa_graph) @@ -15,7 +16,15 @@ else() set(multi_devices_graph_builder_deps) endif() cc_library(multi_devices_graph_builder SRCS multi_devices_graph_builder.cc DEPS ssa_graph_builder computation_op_handle - scale_loss_grad_op_handle ${multi_devices_graph_builder_deps}) + scale_loss_grad_op_handle send_op_handle ${multi_devices_graph_builder_deps}) cc_library(ssa_graph_executor SRCS ssa_graph_executor.cc DEPS ssa_graph framework_proto) cc_library(threaded_ssa_graph_executor SRCS threaded_ssa_graph_executor.cc DEPS fetch_op_handle ssa_graph_executor scope simple_threadpool device_context) + +cc_library(broadcast_op_handle SRCS broadcast_op_handle.cc DEPS op_handle_base scope ddim memory) +cc_library(gather_op_handle SRCS gather_op_handle.cc DEPS op_handle_base scope ddim memory) + +cc_test(broadcast_op_test SRCS broadcast_op_handle_test.cc DEPS var_handle op_handle_base scope ddim memory + device_context broadcast_op_handle) +cc_test(gather_op_test SRCS gather_op_handle_test.cc DEPS var_handle op_handle_base scope ddim memory + device_context gather_op_handle) diff --git a/paddle/fluid/framework/details/broadcast_op_handle.cc b/paddle/fluid/framework/details/broadcast_op_handle.cc new file mode 100644 index 0000000000000000000000000000000000000000..7d29012380e1b1710704d71a28d21dcc3097eb51 --- /dev/null +++ b/paddle/fluid/framework/details/broadcast_op_handle.cc @@ -0,0 +1,111 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
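A note on the two handles introduced below: an op handle's inputs_ and outputs_ mix real VarHandles with DummyVarHandles that exist only to encode ordering edges, so both BroadcastOpHandle and GatherOpHandle begin RunImpl by filtering through dynamic_cast. The following self-contained sketch illustrates that idiom; the stub types are hypothetical stand-ins for the real details:: classes, not part of the patch.

#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-ins for the real details::VarHandleBase hierarchy.
struct VarHandleBase { virtual ~VarHandleBase() = default; };
struct VarHandle : VarHandleBase { std::string name_; };
struct DummyVarHandle : VarHandleBase {};  // ordering edge only, no tensor

int main() {
  VarHandle in, out;
  in.name_ = "input";
  out.name_ = "out";
  DummyVarHandle dep;
  std::vector<VarHandleBase *> inputs{&in, &dep, &out};

  std::vector<VarHandle *> real_vars;
  for (VarHandleBase *v : inputs) {
    // dynamic_cast yields nullptr for DummyVarHandle, so only handles that
    // actually carry a variable survive the filter.
    if (auto *var = dynamic_cast<VarHandle *>(v)) real_vars.push_back(var);
  }
  std::cout << real_vars.size() << " real vars\n";  // prints "2 real vars"
  return 0;
}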
+ +#include "paddle/fluid/framework/details/broadcast_op_handle.h" + +namespace paddle { +namespace framework { +namespace details { + +Tensor *GetTensorFromVar(Variable *in_var) { + if (in_var->IsType()) { + return in_var->GetMutable(); + } else if (in_var->IsType()) { + return in_var->GetMutable()->mutable_value(); + } else { + PADDLE_THROW("Var should be LoDTensor or SelectedRows"); + } + return nullptr; +} + +BroadcastOpHandle::BroadcastOpHandle(const std::vector &local_scopes, + const std::vector &places) + : local_scopes_(local_scopes), places_(places) {} + +void BroadcastOpHandle::RunImpl() { + // the input may have dummy var. + std::vector in_var_handle; + for (auto *in : inputs_) { + auto *out_handle = dynamic_cast(in); + if (out_handle) { + in_var_handle.push_back(out_handle); + } + } + PADDLE_ENFORCE_EQ(in_var_handle.size(), 1, + "The number of input should be one."); + + // the output may have dummy var. + std::vector out_var_handles; + for (auto *out : outputs_) { + auto *out_handle = dynamic_cast(out); + if (out_handle) { + out_var_handles.push_back(out_handle); + } + } + + PADDLE_ENFORCE_EQ( + out_var_handles.size(), places_.size(), + "The number of output should equal to the number of places."); + + // Wait input done, this Wait is asynchronous operation + auto &in_place = in_var_handle[0]->place_; + if (in_var_handle[0]->generated_op_) { + for (auto *out : out_var_handles) { + auto &out_p = out->place_; + in_var_handle[0]->generated_op_->Wait(dev_ctxes_[out_p]); + } + } + + // + auto in_scope_idx = in_var_handle[0]->scope_idx_; + auto in_var = + local_scopes_.at(in_scope_idx)->FindVar(in_var_handle[0]->name_); + Tensor *in_tensor = GetTensorFromVar(in_var); + + for (auto *out : out_var_handles) { + auto &out_p = out->place_; + auto out_var = local_scopes_.at(out->scope_idx_)->FindVar(out->name_); + + PADDLE_ENFORCE_EQ(out_p.which(), in_place.which(), + "Places must be all on CPU or all on CUDA."); + + if (in_var->IsType()) { + auto &in_sr = in_var->Get(); + auto out_sr = out_var->GetMutable(); + if (&in_sr == out_sr) continue; + out_sr->set_height(in_sr.height()); + out_sr->set_rows(in_sr.rows()); + out_sr->mutable_value()->Resize(in_sr.value().dims()); + out_sr->mutable_value()->mutable_data(out_p, in_sr.value().type()); + } else if (in_var->IsType()) { + auto in_lod = in_var->Get(); + auto out_lod = out_var->GetMutable(); + if (&in_lod == out_lod) continue; + out_lod->set_lod(in_lod.lod()); + out_lod->Resize(in_lod.dims()); + out_lod->mutable_data(out_p, in_lod.type()); + } else { + PADDLE_THROW("Var should be LoDTensor or SelectedRows."); + } + + Tensor *out_tensor = GetTensorFromVar(out_var); + paddle::framework::TensorCopy(*in_tensor, out_p, *(dev_ctxes_[in_place]), + out_tensor); + } +} + +std::string BroadcastOpHandle::Name() const { return "broadcast"; } +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/broadcast_op_handle.h b/paddle/fluid/framework/details/broadcast_op_handle.h new file mode 100644 index 0000000000000000000000000000000000000000..b3292422522b64a38a50f39f04e6f0d2e15492dd --- /dev/null +++ b/paddle/fluid/framework/details/broadcast_op_handle.h @@ -0,0 +1,48 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include + +#include "paddle/fluid/framework/details/op_handle_base.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/platform/device_context.h" + +namespace paddle { +namespace framework { +namespace details { + +struct BroadcastOpHandle : public OpHandleBase { + const std::vector &local_scopes_; + const std::vector &places_; + + BroadcastOpHandle(const std::vector &local_scopes, + const std::vector &places); + + std::string Name() const override; + + bool IsMultiDeviceTransfer() override { return false; }; + + protected: + void RunImpl() override; +}; + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/broadcast_op_handle_test.cc b/paddle/fluid/framework/details/broadcast_op_handle_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..dfc52b012f8b6bf5cf1a3feab90dc1ec7842ad6c --- /dev/null +++ b/paddle/fluid/framework/details/broadcast_op_handle_test.cc @@ -0,0 +1,231 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
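The test below exercises the broadcast contract: the variable from a single input scope is replicated to every place, preserving its metadata (LoD, or SelectedRows height and rows). A framework-free sketch of that contract, with plain vectors standing in for tensors and places:

#include <cassert>
#include <vector>

int main() {
  // One source "tensor" in the input scope...
  const std::vector<float> src = {1.f, 2.f, 3.f, 4.f};
  const int num_places = 4;  // stand-in for gpu_list_.size()

  // ...replicated into one destination per place (one TensorCopy each).
  std::vector<std::vector<float>> dsts(num_places);
  for (auto &dst : dsts) dst = src;

  // Every copy must match the source element-wise, as ASSERT_NEAR checks.
  for (const auto &dst : dsts)
    for (size_t i = 0; i < src.size(); ++i) assert(dst[i] == src[i]);
  return 0;
}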
+ +#include "paddle/fluid/framework/details/broadcast_op_handle.h" +#include "gtest/gtest.h" + +#include "paddle/fluid/platform/device_context.h" + +namespace paddle { +namespace framework { +namespace details { + +namespace f = paddle::framework; +namespace p = paddle::platform; + +// test data amount +const f::DDim kDims = {20, 20}; + +struct TestBroadcastOpHandle { + std::vector> ctxs_; + std::vector local_scopes_; + Scope g_scope_; + std::unique_ptr op_handle_; + std::vector> vars_; + std::vector gpu_list_; + + void WaitAll() { + for (size_t j = 0; j < ctxs_.size(); ++j) { + ctxs_[j]->Wait(); + } + } + + void InitCtxOnGpu(bool use_gpu) { + if (use_gpu) { +#ifdef PADDLE_WITH_CUDA + int count = p::GetCUDADeviceCount(); + if (count <= 1) { + LOG(WARNING) << "Cannot test multi-gpu Broadcast, because the CUDA " + "device count is " + << count; + exit(0); + } + for (int i = 0; i < count; ++i) { + auto p = p::CUDAPlace(i); + gpu_list_.push_back(p); + ctxs_.emplace_back(new p::CUDADeviceContext(p)); + } +#else + PADDLE_THROW("CUDA is not support."); +#endif + } else { + int count = 8; + for (int i = 0; i < count; ++i) { + auto p = p::CPUPlace(); + gpu_list_.push_back(p); + ctxs_.emplace_back(new p::CPUDeviceContext(p)); + } + } + } + + void InitBroadcastOp(size_t input_scope_idx) { + for (size_t j = 0; j < gpu_list_.size(); ++j) { + local_scopes_.push_back(&(g_scope_.NewScope())); + local_scopes_[j]->Var("out"); + } + local_scopes_[input_scope_idx]->Var("input"); + + op_handle_.reset(new BroadcastOpHandle(local_scopes_, gpu_list_)); + + vars_.emplace_back(new VarHandle()); + VarHandle* in_var_handle = static_cast(vars_.back().get()); + in_var_handle->place_ = gpu_list_[input_scope_idx]; + in_var_handle->name_ = "input"; + in_var_handle->version_ = 1; + in_var_handle->scope_idx_ = input_scope_idx; + in_var_handle->generated_op_ = nullptr; + op_handle_->AddInput(in_var_handle); + + // add dummy var + vars_.emplace_back(new DummyVarHandle()); + DummyVarHandle* dummy_var_handle = + static_cast(vars_.back().get()); + dummy_var_handle->generated_op_ = nullptr; + op_handle_->AddInput(dummy_var_handle); + + for (size_t j = 0; j < gpu_list_.size(); ++j) { + op_handle_->dev_ctxes_[gpu_list_[j]] = ctxs_[j].get(); + vars_.emplace_back(new VarHandle()); + VarHandle* out_var_handle = static_cast(vars_.back().get()); + out_var_handle->place_ = gpu_list_[j]; + out_var_handle->name_ = "out"; + out_var_handle->version_ = 2; + out_var_handle->scope_idx_ = j; + op_handle_->AddOutput(out_var_handle); + } + + // add dummy var + vars_.emplace_back(new DummyVarHandle()); + DummyVarHandle* out_dummy_var_handle = + static_cast(vars_.back().get()); + out_dummy_var_handle->generated_op_ = nullptr; + op_handle_->AddOutput(out_dummy_var_handle); + } + + void TestBroadcastLodTensor(size_t input_scope_idx) { + auto in_var = local_scopes_[input_scope_idx]->Var("input"); + auto in_lod_tensor = in_var->GetMutable(); + in_lod_tensor->mutable_data(kDims, gpu_list_[input_scope_idx]); + + std::vector send_vector(static_cast(f::product(kDims))); + for (size_t k = 0; k < send_vector.size(); ++k) { + send_vector[k] = k; + } + f::LoD lod{{0, 10, 20}}; + paddle::framework::TensorFromVector( + send_vector, *(ctxs_[input_scope_idx]), in_lod_tensor); + in_lod_tensor->set_lod(lod); + + op_handle_->Run(false); + + WaitAll(); + + p::CPUPlace cpu_place; + for (size_t j = 0; j < gpu_list_.size(); ++j) { + auto out_var = local_scopes_[j]->Var("out"); + auto out_tensor = out_var->Get(); + PADDLE_ENFORCE_EQ(out_tensor.lod(), lod, "lod is not 
equal."); + + f::Tensor result_tensor; + f::TensorCopy(out_tensor, cpu_place, *(ctxs_[j]), &result_tensor); + float* ct = result_tensor.mutable_data(cpu_place); + + for (int64_t i = 0; i < f::product(kDims); ++i) { + ASSERT_NEAR(ct[i], send_vector[i], 1e-5); + } + } + } + + void TestBroadcastSelectedRows(size_t input_scope_idx) { + auto in_var = local_scopes_[input_scope_idx]->Var("input"); + auto in_selected_rows = in_var->GetMutable(); + auto value = in_selected_rows->mutable_value(); + value->mutable_data(kDims, gpu_list_[input_scope_idx]); + int height = static_cast(kDims[0]) * 2; + std::vector rows{0, 1, 2, 3, 3, 0, 14, 7, 3, 1, + 2, 4, 6, 3, 1, 1, 1, 1, 3, 7}; + in_selected_rows->set_height(height); + in_selected_rows->set_rows(rows); + + std::vector send_vector(static_cast(f::product(kDims))); + for (size_t k = 0; k < send_vector.size(); ++k) { + send_vector[k] = k; + } + paddle::framework::TensorFromVector( + send_vector, *(ctxs_[input_scope_idx]), value); + + op_handle_->Run(false); + + WaitAll(); + + p::CPUPlace cpu_place; + for (size_t j = 0; j < gpu_list_.size(); ++j) { + auto out_var = local_scopes_[j]->Var("out"); + auto& out_select_rows = out_var->Get(); + auto rt = out_select_rows.value(); + + PADDLE_ENFORCE_EQ(out_select_rows.height(), height, + "height is not equal."); + for (size_t k = 0; k < out_select_rows.rows().size(); ++k) { + PADDLE_ENFORCE_EQ(out_select_rows.rows()[k], rows[k]); + } + + f::Tensor result_tensor; + f::TensorCopy(rt, cpu_place, *(ctxs_[j]), &result_tensor); + float* ct = result_tensor.data(); + + for (int64_t i = 0; i < f::product(kDims); ++i) { + ASSERT_NEAR(ct[i], send_vector[i], 1e-5); + } + } + } +}; + +TEST(BroadcastTester, TestCPUBroadcastTestLodTensor) { + TestBroadcastOpHandle test_op; + size_t input_scope_idx = 0; + test_op.InitCtxOnGpu(false); + test_op.InitBroadcastOp(input_scope_idx); + test_op.TestBroadcastLodTensor(input_scope_idx); +} + +TEST(BroadcastTester, TestCPUBroadcastTestSelectedRows) { + TestBroadcastOpHandle test_op; + size_t input_scope_idx = 0; + test_op.InitCtxOnGpu(false); + test_op.InitBroadcastOp(input_scope_idx); + test_op.TestBroadcastSelectedRows(input_scope_idx); +} + +#ifdef PADDLE_WITH_CUDA +TEST(BroadcastTester, TestGPUBroadcastTestLodTensor) { + TestBroadcastOpHandle test_op; + size_t input_scope_idx = 0; + test_op.InitCtxOnGpu(true); + test_op.InitBroadcastOp(input_scope_idx); + test_op.TestBroadcastLodTensor(input_scope_idx); +} + +TEST(BroadcastTester, TestGPUBroadcastTestSelectedRows) { + TestBroadcastOpHandle test_op; + size_t input_scope_idx = 0; + test_op.InitCtxOnGpu(true); + test_op.InitBroadcastOp(input_scope_idx); + test_op.TestBroadcastSelectedRows(input_scope_idx); +} +#endif + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/computation_op_handle.cc b/paddle/fluid/framework/details/computation_op_handle.cc index 7a1b40c0b60a788b1f0a70e688f8fcbe427ad076..ff6d91c1dafb0ab4cabb1646cc333e19a89eb812 100644 --- a/paddle/fluid/framework/details/computation_op_handle.cc +++ b/paddle/fluid/framework/details/computation_op_handle.cc @@ -14,6 +14,8 @@ #include "paddle/fluid/framework/details/computation_op_handle.h" +#include + namespace paddle { namespace framework { namespace details { @@ -33,7 +35,9 @@ void ComputationOpHandle::RunImpl() { } } - op_->Run(*scope_->FindVar("@TMP_SCOPE@")->Get(), place_); + this->RunAndRecordEvent([this] { + op_->Run(*scope_->FindVar(kLocalExecScopeName)->Get(), place_); + }); } std::string 
ComputationOpHandle::Name() const { return op_->Type(); } diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc index 9180903b864d03e59f55f41410b2240fa4199496..e3e7c55d153aec8ce9c25c962821b266eaa84fe4 100644 --- a/paddle/fluid/framework/details/fetch_op_handle.cc +++ b/paddle/fluid/framework/details/fetch_op_handle.cc @@ -14,6 +14,9 @@ #include "paddle/fluid/framework/details/fetch_op_handle.h" +#include +#include + namespace paddle { namespace framework { namespace details { @@ -57,7 +60,10 @@ void FetchOpHandle::RunImpl() { for (size_t i = 0; i < scopes.size(); ++i) { auto &scope = scopes[i]; - auto &t = scope->FindVar(var_name)->Get(); + auto &t = scope->FindVar(kLocalExecScopeName) + ->Get() + ->FindVar(var_name) + ->Get(); if (platform::is_gpu_place(var->place_)) { #ifdef PADDLE_WITH_CUDA TensorCopy(t, cpu, *dev_ctxes_[t.place()], &tensors_[i]); diff --git a/paddle/fluid/framework/details/gather_op_handle.cc b/paddle/fluid/framework/details/gather_op_handle.cc new file mode 100644 index 0000000000000000000000000000000000000000..8dd85be567d33991ac003707fec939a61a2d0962 --- /dev/null +++ b/paddle/fluid/framework/details/gather_op_handle.cc @@ -0,0 +1,126 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/details/gather_op_handle.h" + +namespace paddle { +namespace framework { +namespace details { + +GatherOpHandle::GatherOpHandle(const std::vector &local_scopes, + const std::vector &places) + : local_scopes_(local_scopes), places_(places) {} + +void GatherOpHandle::RunImpl() { + // the input may have dummy var. + std::vector in_var_handles; + for (auto *in : inputs_) { + auto *in_handle = dynamic_cast(in); + if (in_handle) { + in_var_handles.push_back(in_handle); + } + } + PADDLE_ENFORCE_EQ( + in_var_handles.size(), places_.size(), + "The number of output should equal to the number of places."); + + // the output may have dummy var. 
+ std::vector<VarHandle *> out_var_handles; + for (auto *out : outputs_) { + auto *out_handle = dynamic_cast<VarHandle *>(out); + if (out_handle) { + out_var_handles.push_back(out_handle); + } + } + PADDLE_ENFORCE_EQ(out_var_handles.size(), 1, + "The number of outputs should be one."); + + auto in_0_handle = static_cast<VarHandle *>(in_var_handles[0]); + auto pre_in_var = + local_scopes_[in_0_handle->scope_idx_]->FindVar(in_0_handle->name_); + auto pre_place = in_0_handle->place_; + + PADDLE_ENFORCE(pre_in_var->IsType<SelectedRows>(), + "Currently, gather_op can only gather SelectedRows."); + + PADDLE_ENFORCE_EQ(out_var_handles[0]->place_.which(), pre_place.which(), + "The place of input and output should be the same."); + + // Wait until inputs are ready; this Wait is an asynchronous operation. + for (auto *in : in_var_handles) { + if (in->generated_op_) { + in->generated_op_->Wait(dev_ctxes_[in->place_]); + } + } + + std::vector<int64_t> out_rows; + std::vector<Tensor> in_tensors; + std::vector<platform::Place> in_places; + + auto &pre_in = pre_in_var->Get<SelectedRows>(); + // Gather the inputs. + for (auto *in : in_var_handles) { + auto in_handle = static_cast<VarHandle *>(in); + auto in_p = in_handle->place_; + in_places.push_back(in_p); + PADDLE_ENFORCE_EQ(in_p.which(), pre_place.which(), + "Places must be all on CPU or all on CUDA."); + auto in_var = + local_scopes_.at(in_handle->scope_idx_)->FindVar(in_handle->name_); + auto &in_sr = in_var->Get<SelectedRows>(); + + PADDLE_ENFORCE_EQ(in_sr.value().type(), pre_in.value().type(), + "The type of the inputs is not consistent."); + PADDLE_ENFORCE_EQ(pre_in.height(), in_sr.height(), + "The height of the inputs is not consistent."); + PADDLE_ENFORCE_EQ(pre_in.GetCompleteDims(), in_sr.GetCompleteDims(), + "The dims of the inputs are not consistent."); + + auto in_sr_rows = in_sr.rows(); + out_rows.insert(out_rows.end(), in_sr_rows.begin(), in_sr_rows.end()); + + in_tensors.emplace_back(in_sr.value()); + } + + // Write the output. + auto &out_place = out_var_handles[0]->place_; + auto out_scope_idx = out_var_handles[0]->scope_idx_; + auto out_var = + local_scopes_[out_scope_idx]->FindVar(out_var_handles[0]->name_); + + auto out = out_var->GetMutable<SelectedRows>(); + out->set_height(pre_in.height()); + out->set_rows(out_rows); + size_t rows = out_rows.size(); + DDim out_dim = pre_in.GetCompleteDims(); + out_dim[0] = static_cast<int64_t>(rows); + out->mutable_value()->Resize(out_dim); + out->mutable_value()->mutable_data(out_place, pre_in.value().type()); + Tensor *out_tensor = out->mutable_value(); + + // Copy each input into its slice of the output value. + int s = 0, e = 0; + for (size_t j = 0; j < in_tensors.size(); ++j) { + e += in_tensors[j].dims()[0]; + auto sub_out = out_tensor->Slice(s, e); + paddle::framework::TensorCopy(in_tensors[j], out_place, + *(dev_ctxes_[in_places[j]]), &sub_out); + s = e; + } +} + +std::string GatherOpHandle::Name() const { return "gather"; } +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/gather_op_handle.h b/paddle/fluid/framework/details/gather_op_handle.h new file mode 100644 index 0000000000000000000000000000000000000000..6c0231f642c05e6b558b7e2518a15e08c816fe4b --- /dev/null +++ b/paddle/fluid/framework/details/gather_op_handle.h @@ -0,0 +1,48 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include + +#include "paddle/fluid/framework/details/op_handle_base.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/platform/device_context.h" + +namespace paddle { +namespace framework { +namespace details { + +struct GatherOpHandle : public OpHandleBase { + const std::vector &local_scopes_; + const std::vector &places_; + + GatherOpHandle(const std::vector &local_scopes, + const std::vector &places); + + std::string Name() const override; + + bool IsMultiDeviceTransfer() override { return false; }; + + protected: + void RunImpl() override; +}; + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/gather_op_handle_test.cc b/paddle/fluid/framework/details/gather_op_handle_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..10839f239d59e97946575297a6d125968a1458f4 --- /dev/null +++ b/paddle/fluid/framework/details/gather_op_handle_test.cc @@ -0,0 +1,192 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
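GatherOpHandle performs the inverse movement, as the test that follows exercises: every place contributes a SelectedRows, the row indices are concatenated in input order, and each value tensor is copied into the matching slice of the output. A framework-free sketch of the concatenation, with plain vectors standing in for the value tensors:

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  // Two inputs, each holding two rows of a height-40 sparse table.
  std::vector<std::vector<std::int64_t>> in_rows = {{0, 7}, {3, 1}};
  std::vector<std::vector<float>> in_vals = {{1.f, 2.f}, {3.f, 4.f}};

  std::vector<std::int64_t> out_rows;
  std::vector<float> out_vals;
  for (size_t i = 0; i < in_rows.size(); ++i) {
    // Rows are appended in input order; values go to the matching slice.
    out_rows.insert(out_rows.end(), in_rows[i].begin(), in_rows[i].end());
    out_vals.insert(out_vals.end(), in_vals[i].begin(), in_vals[i].end());
  }

  assert((out_rows == std::vector<std::int64_t>{0, 7, 3, 1}));
  assert(out_vals.size() == 4);
  return 0;
}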
+ +#include "paddle/fluid/framework/details/gather_op_handle.h" +#include "gtest/gtest.h" + +#include "paddle/fluid/platform/device_context.h" + +namespace paddle { +namespace framework { +namespace details { +namespace f = paddle::framework; +namespace p = paddle::platform; + +// test data amount +const f::DDim kDims = {20, 20}; + +struct TestGatherOpHandle { + std::vector> ctxs_; + std::vector local_scopes_; + Scope g_scope_; + std::unique_ptr op_handle_; + std::vector> vars_; + std::vector gpu_list_; + + void WaitAll() { + for (size_t j = 0; j < ctxs_.size(); ++j) { + ctxs_[j]->Wait(); + } + } + + void InitCtxOnGpu(bool use_gpu) { + if (use_gpu) { +#ifdef PADDLE_WITH_CUDA + int count = p::GetCUDADeviceCount(); + if (count <= 1) { + LOG(WARNING) << "Cannot test multi-gpu Broadcast, because the CUDA " + "device count is " + << count; + exit(0); + } + for (int i = 0; i < count; ++i) { + auto p = p::CUDAPlace(i); + gpu_list_.push_back(p); + ctxs_.emplace_back(new p::CUDADeviceContext(p)); + } +#else + PADDLE_THROW("CUDA is not support."); +#endif + } else { + int count = 8; + for (int i = 0; i < count; ++i) { + auto p = p::CPUPlace(); + gpu_list_.push_back(p); + ctxs_.emplace_back(new p::CPUDeviceContext(p)); + } + } + } + + void InitGatherOp(size_t input_scope_idx) { + for (size_t j = 0; j < gpu_list_.size(); ++j) { + local_scopes_.push_back(&(g_scope_.NewScope())); + local_scopes_[j]->Var("out"); + } + local_scopes_[input_scope_idx]->Var("input"); + + op_handle_.reset(new GatherOpHandle(local_scopes_, gpu_list_)); + // add input + for (size_t j = 0; j < gpu_list_.size(); ++j) { + op_handle_->dev_ctxes_[gpu_list_[j]] = ctxs_[j].get(); + vars_.emplace_back(new VarHandle()); + VarHandle* in_var_handle = static_cast(vars_.back().get()); + in_var_handle->place_ = gpu_list_[j]; + in_var_handle->name_ = "input"; + in_var_handle->version_ = 1; + in_var_handle->scope_idx_ = j; + in_var_handle->generated_op_ = nullptr; + op_handle_->AddInput(in_var_handle); + } + + // add dummy var + vars_.emplace_back(new DummyVarHandle()); + DummyVarHandle* in_dummy_var_handle = + static_cast(vars_.back().get()); + in_dummy_var_handle->generated_op_ = nullptr; + op_handle_->AddInput(in_dummy_var_handle); + + // add output + vars_.emplace_back(new VarHandle()); + VarHandle* out_var_handle = static_cast(vars_.back().get()); + out_var_handle->place_ = gpu_list_[input_scope_idx]; + out_var_handle->name_ = "out"; + out_var_handle->version_ = 2; + out_var_handle->scope_idx_ = input_scope_idx; + op_handle_->AddOutput(out_var_handle); + + // add dummy var + vars_.emplace_back(new DummyVarHandle()); + DummyVarHandle* dummy_var_handle = + static_cast(vars_.back().get()); + op_handle_->AddOutput(dummy_var_handle); + } + + void TestGatherSelectedRows(size_t output_scope_idx) { + int height = kDims[0] * 2; + std::vector rows{0, 1, 2, 3, 3, 0, 14, 7, 3, 1, + 2, 4, 6, 3, 1, 1, 1, 1, 3, 7}; + std::vector send_vector(f::product(kDims)); + for (size_t k = 0; k < send_vector.size(); ++k) { + send_vector[k] = k; + } + + for (size_t input_scope_idx = 0; input_scope_idx < gpu_list_.size(); + ++input_scope_idx) { + auto in_var = local_scopes_[input_scope_idx]->Var("input"); + auto in_selected_rows = in_var->GetMutable(); + auto value = in_selected_rows->mutable_value(); + value->mutable_data(kDims, gpu_list_[input_scope_idx]); + + in_selected_rows->set_height(height); + in_selected_rows->set_rows(rows); + + paddle::framework::TensorFromVector( + send_vector, *(ctxs_[input_scope_idx]), value); + value->Resize(kDims); + } + + auto 
out_var = local_scopes_[output_scope_idx]->Var("out"); + auto out_selected_rows = out_var->GetMutable<f::SelectedRows>(); + + auto in_var = local_scopes_[output_scope_idx]->Var("input"); + auto in_selected_rows = in_var->GetMutable<f::SelectedRows>(); + + out_selected_rows->mutable_value()->ShareDataWith( + in_selected_rows->value()); + + op_handle_->Run(false); + + WaitAll(); + + p::CPUPlace cpu_place; + + auto& out_select_rows = out_var->Get<f::SelectedRows>(); + auto rt = out_select_rows.value(); + + PADDLE_ENFORCE_EQ(out_select_rows.height(), height, "height is not equal."); + for (size_t k = 0; k < out_select_rows.rows().size(); ++k) { + PADDLE_ENFORCE_EQ(out_select_rows.rows()[k], rows[k % rows.size()]); + } + + f::Tensor result_tensor; + f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor); + float* ct = result_tensor.data<float>(); + + for (int64_t j = 0; j < f::product(kDims); ++j) { + ASSERT_NEAR(ct[j], send_vector[j % send_vector.size()], 1e-5); + } + } +}; + +TEST(GatherTester, TestCPUGatherTestSelectedRows) { + TestGatherOpHandle test_op; + size_t input_scope_idx = 0; + test_op.InitCtxOnGpu(false); + test_op.InitGatherOp(input_scope_idx); + test_op.TestGatherSelectedRows(input_scope_idx); +} + +#ifdef PADDLE_WITH_CUDA + +TEST(GatherTester, TestGPUGatherTestSelectedRows) { + TestGatherOpHandle test_op; + size_t input_scope_idx = 0; + test_op.InitCtxOnGpu(true); + test_op.InitGatherOp(input_scope_idx); + test_op.TestGatherSelectedRows(input_scope_idx); +} +#endif +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.cc b/paddle/fluid/framework/details/multi_devices_graph_builder.cc index e7a0cb678ebfd8a3fe5f873e995b63b0857e5ba4..5a95cbc53625888bac539f91af391ff0babec17b 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_builder.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_builder.cc @@ -15,6 +15,7 @@ #include "paddle/fluid/framework/details/multi_devices_graph_builder.h" #include "paddle/fluid/framework/details/computation_op_handle.h" #include "paddle/fluid/framework/details/scale_loss_grad_op_handle.h" +#include "paddle/fluid/framework/details/send_op_handle.h" #include "paddle/fluid/framework/scope.h" #ifdef PADDLE_WITH_CUDA @@ -54,6 +55,27 @@ MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder( } } +void MultiDevSSAGraphBuilder::CreateOpHandleIOs(SSAGraph *result, + const OpDesc &op, + const platform::Place &p, + const size_t &i) const { + auto *op_handle = result->ops_.back().get(); + op_handle->dev_ctxes_[p] = platform::DeviceContextPool::Instance().Get(p); + + auto var_names = op.InputArgumentNames(); + + for (auto &each_var_name : var_names) { + VarHandle *var = CreateOrGetLatestVarHandle(result, each_var_name, p, i); + op_handle->AddInput(var); + } + + var_names = op.OutputArgumentNames(); + + for (auto &each_var_name : var_names) { + CreateOpOutput(result, op_handle, each_var_name, p, i); + } +} + std::unique_ptr<SSAGraph> MultiDevSSAGraphBuilder::Build( const ProgramDesc &program) const { auto graph = new SSAGraph(); @@ -76,27 +98,28 @@ std::unique_ptr<SSAGraph> MultiDevSSAGraphBuilder::Build( } } + // Append a send op if this program is a distributed trainer main program; + // always use the first device. + if (!is_forwarding && op->Type() == "send") { + auto &p = places_[0]; + auto *s = local_scopes_[0]; + // FIXME(wuyi): the send op always copies from GPU 0. + result.ops_.emplace_back(new SendOpHandle(*op, s, p)); + // Create inputs for the output on the original place; no SSA output + // is created for the send op.
+ CreateOpHandleIOs(&result, *op, p, 0); + continue; + } + for (size_t i = 0; i < places_.size(); ++i) { auto &p = places_[i]; auto *s = local_scopes_[i]; result.ops_.emplace_back(new ComputationOpHandle(*op, s, p)); auto *op_handle = result.ops_.back().get(); - op_handle->dev_ctxes_[p] = const_cast( - platform::DeviceContextPool::Instance().Get(p)); + CreateOpHandleIOs(&result, *op, p, i); - auto var_names = op->InputArgumentNames(); - - for (auto &each_var_name : var_names) { - VarHandle *var = - CreateOrGetLatestVarHandle(&result, each_var_name, p, i); - op_handle->AddInput(var); - } - var_names = op->OutputArgumentNames(); - - for (auto &each_var_name : var_names) { - CreateOpOutput(&result, op_handle, each_var_name, p, i); - } + auto var_names = op->OutputArgumentNames(); if (is_forwarding) { if (var_names.size() == 1 && var_names[0] == loss_var_name_) { diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.h b/paddle/fluid/framework/details/multi_devices_graph_builder.h index d3c8e582cf2cdf26198822e4bd2602883622df21..f1518d75b421006db6311c3b0f602e47000ab381 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_builder.h +++ b/paddle/fluid/framework/details/multi_devices_graph_builder.h @@ -14,6 +14,9 @@ #pragma once +#include +#include + #include "paddle/fluid/framework/details/ssa_graph_builder.h" namespace paddle { @@ -41,6 +44,10 @@ class MultiDevSSAGraphBuilder : public SSAGraphBuilder { std::unique_ptr Build(const ProgramDesc &program) const override; + private: + void CreateOpHandleIOs(SSAGraph *result, const OpDesc &op, + const platform::Place &p, const size_t &i) const; + private: std::string loss_var_name_; const std::vector &places_; diff --git a/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc b/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc index 55b5f113589e090386d287e228349f22fb94a7ab..1e48f75958a3ada4d1cd5c8d0f920da4fed2157e 100644 --- a/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc +++ b/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc @@ -14,6 +14,8 @@ #include "paddle/fluid/framework/details/nccl_all_reduce_op_handle.h" +#include + namespace paddle { namespace framework { namespace details { @@ -27,6 +29,32 @@ NCCLAllReduceOpHandle::NCCLAllReduceOpHandle( } } +struct ReduceLoDTensor { + const std::vector &src_tensors_; + LoDTensor &dst_tensor_; + + ReduceLoDTensor(const std::vector &src, LoDTensor *dst) + : src_tensors_(src), dst_tensor_(*dst) {} + + template + void operator()() const { + PADDLE_ENFORCE(!src_tensors_.empty()); + auto &t0 = src_tensors_[0]; + PADDLE_ENFORCE_NE(t0.numel(), 0); + dst_tensor_.Resize(t0.dims()); + T *dst = dst_tensor_.mutable_data(platform::CPUPlace()); + std::copy(t0.data(), t0.data() + t0.numel(), dst); + + for (size_t i = 1; i < src_tensors_.size(); ++i) { + auto &t = src_tensors_[i]; + PADDLE_ENFORCE_EQ(t.dims(), t0.dims()); + PADDLE_ENFORCE_EQ(t.type(), t0.type()); + std::transform(t.data(), t.data() + t.numel(), dst, dst, + [](T a, T b) -> T { return a + b; }); + } + } +}; + void NCCLAllReduceOpHandle::RunImpl() { if (inputs_.size() == 1) { return; // No need to all reduce when GPU count = 1; @@ -41,37 +69,66 @@ void NCCLAllReduceOpHandle::RunImpl() { int dtype = -1; size_t numel = 0; - std::vector> all_reduce_calls; + std::vector lod_tensors; for (size_t i = 0; i < local_scopes_.size(); ++i) { - auto &p = places_[i]; auto *s = local_scopes_[i]; - int dev_id = boost::get(p).device; auto &lod_tensor = s->FindVar(var_name)->Get(); - void *buffer = 
const_cast(lod_tensor.data()); + lod_tensors.emplace_back(lod_tensor); + } - if (dtype == -1) { - dtype = platform::ToNCCLDataType(lod_tensor.type()); - } + if (platform::is_gpu_place(lod_tensors[0].place())) { + std::vector> all_reduce_calls; + for (size_t i = 0; i < local_scopes_.size(); ++i) { + auto &p = places_[i]; + auto &lod_tensor = lod_tensors[i]; + void *buffer = const_cast(lod_tensor.data()); - if (numel == 0) { - numel = static_cast(lod_tensor.numel()); - } + if (dtype == -1) { + dtype = platform::ToNCCLDataType(lod_tensor.type()); + } - auto &nccl_ctx = nccl_ctxs_.at(dev_id); - auto stream = nccl_ctx.stream(); - auto comm = nccl_ctx.comm_; - all_reduce_calls.emplace_back([=] { - PADDLE_ENFORCE(platform::dynload::ncclAllReduce( - buffer, buffer, numel, static_cast(dtype), ncclSum, - comm, stream)); + if (numel == 0) { + numel = static_cast(lod_tensor.numel()); + } + + int dev_id = boost::get(p).device; + auto &nccl_ctx = nccl_ctxs_.at(dev_id); + auto stream = nccl_ctx.stream(); + auto comm = nccl_ctx.comm_; + all_reduce_calls.emplace_back([=] { + PADDLE_ENFORCE(platform::dynload::ncclAllReduce( + buffer, buffer, numel, static_cast(dtype), + ncclSum, comm, stream)); + }); + } + this->RunAndRecordEvent([&] { + platform::NCCLGroupGuard guard; + for (auto &call : all_reduce_calls) { + call(); + } }); - } + } else { // Special handle CPU only Operator's gradient. Like CRF + auto &trg = + *this->local_scopes_[0]->Var()->GetMutable(); + + // Reduce All Tensor to trg in CPU + ReduceLoDTensor func(lod_tensors, &trg); + VisitDataType(ToDataType(lod_tensors[0].type()), func); - platform::NCCLGroupGuard guard; - for (auto &call : all_reduce_calls) { - call(); + for (size_t i = 0; i < local_scopes_.size(); ++i) { + auto &scope = local_scopes_[i]; + auto &p = places_[i]; + auto *var = scope->FindVar(var_name); + auto *dev_ctx = dev_ctxes_[p]; + + RunAndRecordEvent(p, [&trg, var, dev_ctx, p] { + auto &tensor_gpu = *var->GetMutable(); + auto &tensor_cpu = trg; + TensorCopy(tensor_cpu, p, *dev_ctx, &tensor_gpu); + }); + } } } } diff --git a/paddle/fluid/framework/details/op_handle_base.cc b/paddle/fluid/framework/details/op_handle_base.cc index e4194a7442f677ec8970dbc387bb01ebbbf579f1..534d77860f87be08c8834efd373d90eb199ed6a2 100644 --- a/paddle/fluid/framework/details/op_handle_base.cc +++ b/paddle/fluid/framework/details/op_handle_base.cc @@ -54,17 +54,6 @@ void OpHandleBase::Run(bool use_event) { #endif RunImpl(); - -#ifdef PADDLE_WITH_CUDA - if (use_event) { - for (auto &p : dev_ctxes_) { - int dev_id = boost::get(p.first).device; - auto stream = - static_cast(p.second)->stream(); - PADDLE_ENFORCE(cudaEventRecord(events_.at(dev_id), stream)); - } - } -#endif } void OpHandleBase::Wait(platform::DeviceContext *waited_dev) { @@ -97,6 +86,43 @@ void OpHandleBase::AddOutput(VarHandleBase *out) { out->generated_op_ = this; } +void OpHandleBase::RunAndRecordEvent(const std::function &callback) { +#ifdef PADDLE_WITH_CUDA + if (!events_.empty()) { // Use event + std::function method = callback; + + for (auto &p : dev_ctxes_) { + method = [method, p, this]() { + static_cast(p.second)->RecordEvent( + events_.at(boost::get(p.first).device), + method); + }; + } + method(); + } else { +#endif + callback(); +#ifdef PADDLE_WITH_CUDA + } +#endif +} + +void OpHandleBase::RunAndRecordEvent(platform::Place p, + const std::function &callback) { +#ifdef PADDLE_WITH_CUDA + if (platform::is_cpu_place(p) || events_.empty()) { + callback(); + } else { + auto *ctx = dev_ctxes_.at(p); + auto *cuda_ctx = 
static_cast(ctx); + cuda_ctx->RecordEvent(events_.at(boost::get(p).device), + callback); + } +#else + callback(); +#endif +} + } // namespace details } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/op_handle_base.h b/paddle/fluid/framework/details/op_handle_base.h index d7a541ac4bb83625060db337446d03a1afda3ed0..a9a6c8d39cf8741f7d9c91579a650ad742cec381 100644 --- a/paddle/fluid/framework/details/op_handle_base.h +++ b/paddle/fluid/framework/details/op_handle_base.h @@ -24,6 +24,8 @@ namespace paddle { namespace framework { namespace details { +constexpr char kLocalExecScopeName[] = "@LCOAL_SCOPE@"; + class OpHandleBase { private: DISABLE_COPY_AND_ASSIGN(OpHandleBase); @@ -60,6 +62,11 @@ class OpHandleBase { virtual bool IsMultiDeviceTransfer() { return false; } protected: + void RunAndRecordEvent(const std::function &callback); + + void RunAndRecordEvent(platform::Place p, + const std::function &callback); + virtual void RunImpl() = 0; }; diff --git a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc index 0a6f6129b812ca84db7573957b1ee0a32c1ef5c4..7fb9f99a8a1bc044e2f25f373265a5ec9f7d76d5 100644 --- a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc +++ b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc @@ -14,6 +14,8 @@ #include "paddle/fluid/framework/details/scale_loss_grad_op_handle.h" +#include + namespace paddle { namespace framework { namespace details { @@ -37,11 +39,13 @@ void ScaleLossGradOpHandle::RunImpl() { *tmp = coeff_; } else { #ifdef PADDLE_WITH_CUDA - auto stream = - static_cast(this->dev_ctxes_[place_]) - ->stream(); - memory::Copy(boost::get(place_), tmp, - platform::CPUPlace(), &coeff_, sizeof(float), stream); + this->RunAndRecordEvent([&] { + auto stream = + static_cast(this->dev_ctxes_[place_]) + ->stream(); + memory::Copy(boost::get(place_), tmp, + platform::CPUPlace(), &coeff_, sizeof(float), stream); + }); #endif } } diff --git a/paddle/fluid/framework/details/send_op_handle.cc b/paddle/fluid/framework/details/send_op_handle.cc new file mode 100644 index 0000000000000000000000000000000000000000..549b9d9abbe5bfd17df3509e0442bfa19b7ecd61 --- /dev/null +++ b/paddle/fluid/framework/details/send_op_handle.cc @@ -0,0 +1,43 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
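The RunAndRecordEvent overloads above replace the unconditional cudaEventRecord loop that used to sit at the end of OpHandleBase::Run: the callback is wrapped once per device context, so each layer can record its event right after the inner work finishes. A self-contained sketch of that nesting, with std::function layers standing in for CUDADeviceContext::RecordEvent:

#include <functional>
#include <iostream>
#include <vector>

int main() {
  std::vector<int> devices = {0, 1, 2};
  std::function<void()> method = [] { std::cout << "run op\n"; };

  for (int dev : devices) {
    // Each layer mimics RecordEvent(event, inner): run the inner callback,
    // then record this device's event.
    std::function<void()> inner = method;
    method = [inner, dev] {
      inner();
      std::cout << "record event on device " << dev << "\n";
    };
  }
  method();  // prints "run op", then one record line per device
  return 0;
}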
+ +#include "paddle/fluid/framework/details/send_op_handle.h" + +namespace paddle { +namespace framework { +namespace details { + +SendOpHandle::SendOpHandle(const framework::OpDesc &op_desc, + const Scope *local_scope, + const platform::Place &place) + : op_(framework::OpRegistry::CreateOp(op_desc)), + local_scope_(local_scope), + place_(place) {} + +void SendOpHandle::RunImpl() { + // Wait input done + for (auto *in : inputs_) { + auto &p = static_cast(in)->place_; + if (in->DebugString() == "dummy") { // HACK + continue; + } + in->generated_op_->Wait(dev_ctxes_[p]); + } + this->RunAndRecordEvent([&] { op_->Run(*local_scope_, place_); }); +} + +std::string SendOpHandle::Name() const { return "send"; } +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/send_op_handle.h b/paddle/fluid/framework/details/send_op_handle.h new file mode 100644 index 0000000000000000000000000000000000000000..173f9d726145aeb9e85cc0fb9056eb57bf484098 --- /dev/null +++ b/paddle/fluid/framework/details/send_op_handle.h @@ -0,0 +1,50 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include + +#include "paddle/fluid/framework/details/op_handle_base.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/framework/scope.h" + +namespace paddle { +namespace framework { +namespace details { + +struct SendOpHandle : public OpHandleBase { + std::unique_ptr op_; + const Scope* local_scope_; + const platform::Place& place_; + + SendOpHandle(const framework::OpDesc& op_desc, const Scope* local_scope, + const platform::Place& place); + + std::string Name() const override; + + // Delay and buffer nccl_all_reduce together can significantly increase + // performance. Disable this feature by returning false. 
+ bool IsMultiDeviceTransfer() override { return false; }; + + protected: + void RunImpl() override; +}; + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/ssa_graph_executor.h b/paddle/fluid/framework/details/ssa_graph_executor.h index 3b818b1a45b56351e34f9e52ec22b6d02a0c1591..a8833b7388ab907020a260d356f1484ffd227658 100644 --- a/paddle/fluid/framework/details/ssa_graph_executor.h +++ b/paddle/fluid/framework/details/ssa_graph_executor.h @@ -15,13 +15,15 @@ #pragma once #include +#include +#include + #include "paddle/fluid/framework/details/ssa_graph.h" #include "paddle/fluid/framework/feed_fetch_type.h" namespace paddle { namespace framework { namespace details { - class SSAGraphExecutor { DISABLE_COPY_AND_ASSIGN(SSAGraphExecutor); diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc index 62af4c1d79ded5eaa30e4e6d43cc0d7327ae9689..a371ee10fe03cda86c316f3503f9cadb8c716ae5 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc @@ -136,12 +136,6 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( ready_ops.clear(); }; - // Create local scopes. - for (auto &scope : local_scopes_) { - auto &local_scope = scope->NewScope(); - *scope->Var("@TMP_SCOPE@")->GetMutable() = &local_scope; - } - // Step 3. Execution while (!pending_vars.empty() || !ready_ops.empty() || !delayed_ops.empty()) { // 1. Run All Ready ops @@ -189,34 +183,10 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( PADDLE_ENFORCE(ready_ops.empty()); PADDLE_ENFORCE(delayed_ops.empty()); PADDLE_ENFORCE(blocked_by_delayed_ops.empty()); - ++computation_count_; - - auto sync_computation = [&] { - computation_count_ = 0; - // Wait All computational streams - for (auto p : this->places_) { - platform::DeviceContextPool::Instance().Get(p)->Wait(); - } - for (auto &scope : local_scopes_) { - scope->DropKids(); - } - }; // Wait FetchOps. if (!fetch_ops.empty()) { fetch_ops.clear(); - sync_computation(); - } - - if (computation_count_ == max_async_computation) { - sync_computation(); - } - - // NOTE: the temp scope can be dropped lazily if needed. - // Drop tmp scopes; - for (auto &scope : local_scopes_) { - auto &kid = *scope->Var("@TMP_SCOPE@")->GetMutable(); - kid = nullptr; } return fetch_data; @@ -226,10 +196,12 @@ void ThreadedSSAGraphExecutor::RunOp( BlockingQueue *ready_var_q, details::OpHandleBase *op) { auto op_run = [ready_var_q, op, this] { try { - VLOG(10) << op->Name() << " : " << op->DebugString(); + VLOG(10) << op << " " << op->Name() << " : " << op->DebugString(); op->Run(use_event_); + VLOG(10) << op << " " << op->Name() << " Done "; running_ops_--; ready_var_q->Extend(op->outputs_); + VLOG(10) << op << " " << op->Name() << "Signal posted"; } catch (platform::EnforceNotMet ex) { exception_.reset(new platform::EnforceNotMet(ex)); } catch (...) 
{ diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h index 79cfc26b461a39811a9a125e5aeac3492d967386..bb5e837b135c35b5aea403496b45aab1ccc288ff 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h @@ -99,9 +99,6 @@ class ThreadedSSAGraphExecutor : public SSAGraphExecutor { std::unique_ptr exception_; std::atomic running_ops_; bool allow_op_delay_; - - size_t computation_count_{0}; - size_t max_async_computation{100}; }; } // namespace details diff --git a/paddle/fluid/framework/details/var_handle.h b/paddle/fluid/framework/details/var_handle.h index 569dda17c6e91d5658c4f8b9ba0b8c8fbd966832..871e41343f53b801a22d3a450f0906f37fb372d1 100644 --- a/paddle/fluid/framework/details/var_handle.h +++ b/paddle/fluid/framework/details/var_handle.h @@ -50,6 +50,7 @@ struct VarHandle : public VarHandleBase { // version field currently is not used, however, just store the version to // debug easily. size_t version_; + size_t scope_idx_; std::string name_; platform::Place place_; }; diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index c2ca1bbc78f3ebc6066df6b666720af0d1fbbf59..513e720fd099bcd898a6c73afd1a3a16f6f53aab 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -83,8 +83,8 @@ static void CheckTensorNANOrInf(const std::string& name, if (tensor.memory_size() == 0) { return; } - if (tensor.type().hash_code() != typeid(float).hash_code() && - tensor.type().hash_code() != typeid(double).hash_code()) { + if (tensor.type().hash_code() != typeid(float).hash_code() && // NOLINT + tensor.type().hash_code() != typeid(double).hash_code()) { // NOLINT return; } PADDLE_ENFORCE(!framework::TensorContainsInf(tensor), @@ -145,12 +145,13 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id, // Return true if the block has feed operators and holder of matching info. static bool has_feed_operators( const BlockDesc& block, - std::map& feed_targets, + const std::map& feed_targets, const std::string& feed_holder_name) { size_t feed_count = 0; for (auto* op : block.AllOps()) { if (op->Type() == kFeedOpType) { feed_count++; + // The input variable's name of feed_op should be feed_holder_name. PADDLE_ENFORCE_EQ(op->Input("X")[0], feed_holder_name, "Input to feed op should be '%s'", feed_holder_name); std::string feed_target_name = op->Output("Out")[0]; @@ -166,13 +167,15 @@ static bool has_feed_operators( feed_count, feed_targets.size(), "The number of feed operators should match 'feed_targets'"); - // When feed operator are present, so should be feed_holder - auto var = block.FindVar(feed_holder_name); - PADDLE_ENFORCE_NOT_NULL(var, "Block should already have a '%s' variable", - feed_holder_name); - PADDLE_ENFORCE_EQ(var->GetType(), proto::VarType::FEED_MINIBATCH, - "'%s' variable should be 'FEED_MINIBATCH' type", - feed_holder_name); + if (!feed_holder_name.empty()) { + // When feed operator are present, so should be feed_holder. + auto var = block.FindVar(feed_holder_name); + PADDLE_ENFORCE_NOT_NULL(var, "Block should already have a '%s' variable", + feed_holder_name); + PADDLE_ENFORCE_EQ(var->GetType(), proto::VarType::FEED_MINIBATCH, + "'%s' variable should be 'FEED_MINIBATCH' type", + feed_holder_name); + } } return feed_count > 0; @@ -185,12 +188,14 @@ static bool has_feed_operators( // and fetch_holder_name. 
Raise exception when any mismatch is found. // Return true if the block has fetch operators and holder of matching info. static bool has_fetch_operators( - const BlockDesc& block, std::map& fetch_targets, + const BlockDesc& block, + const std::map& fetch_targets, const std::string& fetch_holder_name) { size_t fetch_count = 0; for (auto* op : block.AllOps()) { if (op->Type() == kFetchOpType) { fetch_count++; + // The output variable's name of fetch_op should be fetch_holder_name. PADDLE_ENFORCE_EQ(op->Output("Out")[0], fetch_holder_name, "Output of fetch op should be '%s'", fetch_holder_name); std::string fetch_target_name = op->Input("X")[0]; @@ -206,13 +211,15 @@ static bool has_fetch_operators( fetch_count, fetch_targets.size(), "The number of fetch operators should match 'fetch_targets'"); - // When fetch operator are present, so should be fetch_holder - auto var = block.FindVar(fetch_holder_name); - PADDLE_ENFORCE_NOT_NULL(var, "Block should already have a '%s' variable", - fetch_holder_name); - PADDLE_ENFORCE_EQ(var->GetType(), proto::VarType::FETCH_LIST, - "'%s' variable should be 'FETCH_LIST' type", - fetch_holder_name); + if (!fetch_holder_name.empty()) { + // When fetch operator are present, so should be fetch_holder. + auto var = block.FindVar(fetch_holder_name); + PADDLE_ENFORCE_NOT_NULL(var, "Block should already have a '%s' variable", + fetch_holder_name); + PADDLE_ENFORCE_EQ(var->GetType(), proto::VarType::FETCH_LIST, + "'%s' variable should be 'FETCH_LIST' type", + fetch_holder_name); + } } return fetch_count > 0; @@ -259,16 +266,6 @@ void Executor::Run(const ProgramDesc& program, Scope* scope, } } - // map the data of feed_targets to feed_holder - for (auto* op : global_block->AllOps()) { - if (op->Type() == kFeedOpType) { - std::string feed_target_name = op->Output("Out")[0]; - int idx = boost::get(op->GetAttr("col")); - SetFeedVariable(scope, *feed_targets[feed_target_name], feed_holder_name, - idx); - } - } - if (!has_fetch_ops) { // create fetch_holder variable auto* fetch_holder = global_block->Var(fetch_holder_name); @@ -292,17 +289,9 @@ void Executor::Run(const ProgramDesc& program, Scope* scope, } } - Run(*copy_program, scope, 0, create_vars, create_vars); - - // obtain the data of fetch_targets from fetch_holder - for (auto* op : global_block->AllOps()) { - if (op->Type() == kFetchOpType) { - std::string fetch_target_name = op->Input("X")[0]; - int idx = boost::get(op->GetAttr("col")); - *fetch_targets[fetch_target_name] = - GetFetchVariable(*scope, fetch_holder_name, idx); - } - } + auto ctx = Prepare(*copy_program, 0); + RunPreparedContext(ctx.get(), scope, feed_targets, fetch_targets, create_vars, + feed_holder_name, fetch_holder_name); } std::unique_ptr Executor::Prepare( @@ -370,5 +359,42 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, } } +void Executor::RunPreparedContext( + ExecutorPrepareContext* ctx, Scope* scope, + std::map& feed_targets, + std::map& fetch_targets, bool create_vars, + const std::string& feed_holder_name, const std::string& fetch_holder_name) { + auto& global_block = ctx->prog_.Block(ctx->block_id_); + + PADDLE_ENFORCE( + has_feed_operators(global_block, feed_targets, feed_holder_name), + "Program in ExecutorPrepareContext should has feed_ops."); + PADDLE_ENFORCE( + has_fetch_operators(global_block, fetch_targets, fetch_holder_name), + "Program in the prepared context should has fetch_ops."); + + // map the data of feed_targets to feed_holder + for (auto* op : global_block.AllOps()) { + if (op->Type() == 
kFeedOpType) { + std::string feed_target_name = op->Output("Out")[0]; + int idx = boost::get(op->GetAttr("col")); + SetFeedVariable(scope, *feed_targets[feed_target_name], feed_holder_name, + idx); + } + } + + RunPreparedContext(ctx, scope, create_vars, create_vars); + + // obtain the data of fetch_targets from fetch_holder + for (auto* op : global_block.AllOps()) { + if (op->Type() == kFetchOpType) { + std::string fetch_target_name = op->Input("X")[0]; + int idx = boost::get(op->GetAttr("col")); + *fetch_targets[fetch_target_name] = + GetFetchVariable(*scope, fetch_holder_name, idx); + } + } +} + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/executor.h b/paddle/fluid/framework/executor.h index 75b29b2f4065ad75b62a134b890b8f9f6730fdc7..43defdacf2a1c2f59cf3af2461ae6cfc4c61f5be 100644 --- a/paddle/fluid/framework/executor.h +++ b/paddle/fluid/framework/executor.h @@ -14,6 +14,9 @@ limitations under the License. */ #pragma once +#include +#include +#include #include "paddle/fluid/framework/op_info.h" #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/scope.h" @@ -70,6 +73,13 @@ class Executor { bool create_local_scope = true, bool create_vars = true); + void RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, + std::map& feed_targets, + std::map& fetch_targets, + bool create_vars = true, + const std::string& feed_holder_name = "feed", + const std::string& fetch_holder_name = "fetch"); + private: const platform::Place place_; }; diff --git a/paddle/fluid/framework/lod_tensor.cc b/paddle/fluid/framework/lod_tensor.cc index 8155cb55a468a09320b1196b49fc3e34cea261b1..a56674cbe216e312c4394ef537140122352dc785 100644 --- a/paddle/fluid/framework/lod_tensor.cc +++ b/paddle/fluid/framework/lod_tensor.cc @@ -12,9 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/framework/lod_tensor.h" +#include +#include +#include +#include + #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/framework/framework.pb.h" +#include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/memory/memory.h" @@ -22,11 +27,6 @@ limitations under the License. 
*/ #include "paddle/fluid/recordio/scanner.h" #include "paddle/fluid/recordio/writer.h" -#include -#include -#include -#include - namespace paddle { namespace framework { @@ -294,7 +294,7 @@ void DeserializeFromStream(std::istream &is, LoDTensor *tensor, TensorFromStream(is, static_cast(tensor), dev_ctx); } -void WriteToRecordIO(recordio::Writer &writer, +void WriteToRecordIO(recordio::Writer *writer, const std::vector &tensor, const platform::DeviceContext &dev_ctx) { std::stringstream buffer; @@ -303,18 +303,20 @@ void WriteToRecordIO(recordio::Writer &writer, for (auto &each : tensor) { SerializeToStream(buffer, each, dev_ctx); } - writer.Write(buffer.str()); + writer->Write(buffer.str()); } std::vector ReadFromRecordIO( - recordio::Scanner &scanner, const platform::DeviceContext &dev_ctx) { - std::istringstream sin(scanner.Next()); - uint32_t sz; - sin.read(reinterpret_cast(&sz), sizeof(uint32_t)); + recordio::Scanner *scanner, const platform::DeviceContext &dev_ctx) { std::vector result; - result.resize(sz); - for (uint32_t i = 0; i < sz; ++i) { - DeserializeFromStream(sin, &result[i], dev_ctx); + if (scanner->HasNext()) { + std::istringstream sin(scanner->Next()); + uint32_t sz; + sin.read(reinterpret_cast(&sz), sizeof(uint32_t)); + result.resize(sz); + for (uint32_t i = 0; i < sz; ++i) { + DeserializeFromStream(sin, &result[i], dev_ctx); + } } return result; } diff --git a/paddle/fluid/framework/lod_tensor.h b/paddle/fluid/framework/lod_tensor.h index 4f130d265900483ec7a7c541f2610d17a352913f..1159fee39b0737402c60448dcbe69e7535c9d6e1 100644 --- a/paddle/fluid/framework/lod_tensor.h +++ b/paddle/fluid/framework/lod_tensor.h @@ -15,6 +15,9 @@ limitations under the License. */ #pragma once #include +#include +#include +#include #ifdef PADDLE_WITH_CUDA #include #include @@ -216,12 +219,12 @@ void SerializeToStream(std::ostream& os, const LoDTensor& tensor, void DeserializeFromStream(std::istream& is, LoDTensor* tensor, const platform::DeviceContext& dev_ctx); -extern void WriteToRecordIO(recordio::Writer& writer, +extern void WriteToRecordIO(recordio::Writer* writer, const std::vector& tensor, const platform::DeviceContext& dev_ctx); extern std::vector ReadFromRecordIO( - recordio::Scanner& scanner, const platform::DeviceContext& dev_ctx); + recordio::Scanner* scanner, const platform::DeviceContext& dev_ctx); } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/lod_tensor_test.cc b/paddle/fluid/framework/lod_tensor_test.cc index e691e29383d4842b80769021e0e494967d38e9bb..97ab98f09b1a902a942d9667bc7716a28b98d54c 100644 --- a/paddle/fluid/framework/lod_tensor_test.cc +++ b/paddle/fluid/framework/lod_tensor_test.cc @@ -12,17 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/fluid/framework/lod_tensor.h" - -#include "paddle/fluid/recordio/scanner.h" -#include "paddle/fluid/recordio/writer.h" - #include #include #include #include #include +#include "paddle/fluid/framework/lod_tensor.h" + +#include "paddle/fluid/recordio/scanner.h" +#include "paddle/fluid/recordio/writer.h" + namespace paddle { namespace framework { @@ -240,8 +240,8 @@ TEST(LoDTensor, RecordIO) { *platform::DeviceContextPool::Instance().Get(platform::CPUPlace()); { recordio::Writer writer(stream, recordio::Compressor::kSnappy); - WriteToRecordIO(writer, {tensor, tensor}, ctx); - WriteToRecordIO(writer, {tensor, tensor}, ctx); + WriteToRecordIO(&writer, {tensor, tensor}, ctx); + WriteToRecordIO(&writer, {tensor, tensor}, ctx); writer.Flush(); } @@ -254,11 +254,11 @@ TEST(LoDTensor, RecordIO) { { std::unique_ptr stream_ptr(stream); recordio::Scanner scanner(std::move(stream_ptr)); - auto tensors = ReadFromRecordIO(scanner, ctx); + auto tensors = ReadFromRecordIO(&scanner, ctx); ASSERT_EQ(tensors.size(), 2); assert_tensor_ok(tensors[0]); assert_tensor_ok(tensors[1]); - tensors = ReadFromRecordIO(scanner, ctx); + tensors = ReadFromRecordIO(&scanner, ctx); ASSERT_EQ(tensors.size(), 2); assert_tensor_ok(tensors[0]); assert_tensor_ok(tensors[1]); diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index a3b4a8c0829ae3324e933309b2eaea35fe571997..f97bd0827428feeb590fcad16c48f3461517a646 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -46,7 +46,8 @@ proto::VarType::Type GetDataTypeOfVar(const Variable* var) { } } -static DDim GetDims(const Scope& scope, const std::string& name) { +static DDim GetDims(const Scope& scope, const std::string& name, + bool get_actual_dim = false) { Variable* var = scope.FindVar(name); if (var == nullptr) { return DDim({-1}); @@ -55,7 +56,11 @@ static DDim GetDims(const Scope& scope, const std::string& name) { if (var->IsType()) { return var->Get().dims(); } else if (var->IsType()) { - return var->Get().GetCompleteDims(); + if (get_actual_dim) { + return var->Get().value().dims(); + } else { + return var->Get().GetCompleteDims(); + } } else { return DDim({-1}); } @@ -129,7 +134,7 @@ std::string OperatorBase::DebugStringEx(const Scope* scope) const { for (size_t i = 0; i < input.second.size(); ++i) { ss << input.second[i]; if (scope) { - ss << "[" << GetDims(*scope, input.second[i]) << "]"; + ss << "[" << GetDims(*scope, input.second[i], true) << "]"; ss << "(" << GetLoD(*scope, input.second[i]) << ")"; } if (i != input.second.size() - 1) { @@ -149,7 +154,7 @@ std::string OperatorBase::DebugStringEx(const Scope* scope) const { for (size_t i = 0; i < output.second.size(); ++i) { ss << output.second[i]; if (scope) { - ss << "[" << GetDims(*scope, output.second[i]) << "]"; + ss << "[" << GetDims(*scope, output.second[i], true) << "]"; ss << "(" << GetLoD(*scope, output.second[i]) << ")"; } if (i != output.second.size() - 1) { diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index 99b3065d8df80bd8f482546ef06f6305f5875b9e..c1486b527d2e06d2b3f7e0f89458bf9a22564586 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -15,6 +15,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/parallel_executor.h" #include +#include #include #ifdef PADDLE_WITH_CUDA @@ -41,6 +42,8 @@ class ParallelExecutorPrivate { #ifdef PADDLE_WITH_CUDA std::unique_ptr nccl_ctxs_; #endif + + std::vector> var_types_; }; std::vector &ParallelExecutor::GetLocalScopes() { @@ -97,14 +100,9 @@ ParallelExecutor::ParallelExecutor( allow_op_delay)); // Step 3. Create vars in each scope; - for (auto *scope : member_->local_scopes_) { - for (auto *var : main_program.Block(0).AllVars()) { - if (scope->FindVar(var->Name()) != nullptr) { - continue; - } - - InitializeVariable(scope->Var(var->Name()), var->GetType()); - } + for (auto *var : main_program.Block(0).AllVars()) { + member_->var_types_.emplace_back(var->Name(), var->GetType(), + var->Persistable()); } } @@ -115,14 +113,12 @@ void ParallelExecutor::BCastParamsToGPUs( for (auto &var : vars) { auto *main_var = main_scope->FindVar(var); - if (!main_var->IsType()) { + if (main_var == nullptr || !main_var->IsType()) { continue; } auto &main_tensor = main_var->Get(); - auto &dims = main_tensor.dims(); - if (paddle::platform::is_gpu_place(main_tensor.place())) { size_t numel = main_tensor.numel(); ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type()); @@ -165,9 +161,42 @@ void ParallelExecutor::Run( const std::unordered_map &feed_tensors) { platform::RecordBlock b(0); SplitTensorToPlaces(feed_tensors); + + // Create local scopes. + for (auto &scope : member_->local_scopes_) { + Scope &local_scope = scope->NewScope(); + *scope->Var(details::kLocalExecScopeName)->GetMutable() = + &local_scope; + + for (auto &name_type_pair : member_->var_types_) { + if (scope->FindVar(std::get<0>(name_type_pair)) != nullptr) { + continue; + } + + if (std::get<2>(name_type_pair)) { // Persistable + InitializeVariable(scope->Var(std::get<0>(name_type_pair)), + std::get<1>(name_type_pair)); + } else { + InitializeVariable(scope->Var(std::get<0>(name_type_pair)), + std::get<1>(name_type_pair)); + } + } + } + auto fetch_data = member_->executor_->Run(fetch_tensors); *member_->global_scope_->Var(fetched_var_name)->GetMutable() = fetch_data; + + // Wait All computational streams + for (auto p : member_->places_) { + platform::DeviceContextPool::Instance().Get(p)->Wait(); + } + for (auto &scope : member_->local_scopes_) { + auto &local_scope = + *scope->Var(details::kLocalExecScopeName)->GetMutable(); + scope->DeleteScope(local_scope); + local_scope = nullptr; + } } void ParallelExecutor::SplitTensorToPlaces( @@ -181,10 +210,10 @@ void ParallelExecutor::SplitTensorToPlaces( member_->places_.size(), lod_tensors.size()); for (size_t j = 0; j < member_->places_.size(); ++j) { // TODO(panxy0718): Do I need to delete this var? 
- member_->local_scopes_[j] - ->Var(it.first) - ->GetMutable() - ->ShareDataWith(lod_tensors[j]); + auto t = + member_->local_scopes_[j]->Var(it.first)->GetMutable(); + t->ShareDataWith(lod_tensors[j]); + t->set_lod(lod_tensors[j].lod()); } } } diff --git a/paddle/fluid/framework/parallel_executor.h b/paddle/fluid/framework/parallel_executor.h index c048c3865f14822be4a0015e385ea1b8e05d0ced..b4f16dba858fb279ec23a8a04257dda6651148cc 100644 --- a/paddle/fluid/framework/parallel_executor.h +++ b/paddle/fluid/framework/parallel_executor.h @@ -48,13 +48,13 @@ class ParallelExecutor { const std::string& fetched_var_name, const std::unordered_map& feed_tensors); + void BCastParamsToGPUs(const std::unordered_set& vars) const; + private: void SplitTensorToPlaces( const std::unordered_map& feed_tensors); ParallelExecutorPrivate* member_; - - void BCastParamsToGPUs(const std::unordered_set& vars) const; }; } // namespace framework diff --git a/paddle/fluid/framework/program_desc_test.cc b/paddle/fluid/framework/program_desc_test.cc index 66618a291b59996836e822587af618927a4263c7..6c46e9aad5b7fbf67fdcc07a12e7932ac8b6412b 100644 --- a/paddle/fluid/framework/program_desc_test.cc +++ b/paddle/fluid/framework/program_desc_test.cc @@ -66,7 +66,7 @@ TEST(ProgramDesc, copy_ctor) { for (size_t i = 0; i < global_block->OpSize(); ++i) { auto op_origin = global_block->Op(i); - auto op_copy = global_block->Op(i); + auto op_copy = global_block_copy->Op(i); ASSERT_EQ(op_origin->Type(), op_copy->Type()); ASSERT_EQ(op_origin->Inputs(), op_copy->Inputs()); @@ -131,7 +131,7 @@ TEST(ProgramDescBind, serialize_and_deserialize) { for (size_t i = 0; i < global_block->OpSize(); ++i) { auto op_origin = global_block->Op(i); - auto op_restored = global_block->Op(i); + auto op_restored = global_block_restored->Op(i); ASSERT_EQ(op_origin->Type(), op_restored->Type()); ASSERT_EQ(op_origin->Inputs(), op_restored->Inputs()); diff --git a/paddle/fluid/framework/prune_test.cc b/paddle/fluid/framework/prune_test.cc index 0e44b34383027ef58a033eb082f4bb2118b5d8a3..8af7d2d510d36e4c24ce3ae8dbc13c24ad5d4a0f 100644 --- a/paddle/fluid/framework/prune_test.cc +++ b/paddle/fluid/framework/prune_test.cc @@ -14,18 +14,17 @@ limitations under the License. 
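The net effect of the parallel_executor changes above, seen from a caller: only persistable variables (parameters and the like) survive between Run() calls, since everything else now lives in a per-iteration local scope that is deleted after all device streams are synchronized. A sketch of a training loop under this contract (`pe` and the names are illustrative):

    // Each Run() creates, uses, and destroys the per-device local scopes.
    for (int step = 0; step < num_steps; ++step) {
      pe.Run(fetch_names, fetched_var_name, feed_tensors);
      // Non-persistable intermediates are already gone here; read results
      // from `fetched_var_name` in the global scope instead.
    }
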
*/ #include "paddle/fluid/framework/prune.h" +#include +#include + #include "paddle/fluid/framework/attribute.h" #include "paddle/fluid/framework/operator.h" -#include "paddle/fluid/operators/net_op.h" #include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/framework/op_desc.h" #include "paddle/fluid/framework/program_desc.h" -#include - namespace f = paddle::framework; -namespace ops = paddle::operators; void AddOp(const std::string &type, const f::VariableNameMap &inputs, const f::VariableNameMap &outputs, f::AttributeMap attrs, diff --git a/paddle/fluid/framework/reader.cc b/paddle/fluid/framework/reader.cc index 56bf00e5f91700f0cffa917aad8608caaab0a7fe..76126f3dc64d71770d13f9d66bb30f176c112629 100644 --- a/paddle/fluid/framework/reader.cc +++ b/paddle/fluid/framework/reader.cc @@ -22,7 +22,9 @@ FileReader::FileReader(const std::vector &dims) : dims_(dims) {} void FileReader::ReadNext(std::vector *out) { ReadNextImpl(out); - PADDLE_ENFORCE_EQ(out->size(), dims_.size()); + if (out->empty()) { + return; + } for (size_t i = 0; i < dims_.size(); ++i) { auto &actual = out->at(i).dims(); auto &expect = dims_[i]; diff --git a/paddle/fluid/framework/reader.h b/paddle/fluid/framework/reader.h index 3573b99becf6d657c680c5fec0bda4bdde5dd7a2..3a413941df964c8d9454fafc6030c377c10f9fb1 100644 --- a/paddle/fluid/framework/reader.h +++ b/paddle/fluid/framework/reader.h @@ -14,14 +14,13 @@ #pragma once +#include +#include + #include "paddle/fluid/framework/ddim.h" #include "paddle/fluid/framework/lod_tensor_array.h" #include "paddle/fluid/platform/place.h" -#include -#include -#include - namespace paddle { namespace framework { @@ -31,8 +30,6 @@ class ReaderBase { virtual void ReInit() = 0; - virtual bool HasNext() const = 0; - virtual ~ReaderBase(); }; @@ -44,8 +41,6 @@ class DecoratedReader : public ReaderBase { void ReInit() override { reader_->ReInit(); } - bool HasNext() const override { return reader_->HasNext(); } - protected: ReaderBase* reader_; }; @@ -80,8 +75,6 @@ class ReaderHolder { reader_->ReInit(); } - bool HasNext() const { return reader_->HasNext(); } - private: std::unique_ptr reader_; }; diff --git a/paddle/fluid/framework/tensor_util.cc b/paddle/fluid/framework/tensor_util.cc index 1d864af011bced9df188147ec436b8de12947ba9..d1b01ae05b808b229309e9689165483a11530c84 100644 --- a/paddle/fluid/framework/tensor_util.cc +++ b/paddle/fluid/framework/tensor_util.cc @@ -11,8 +11,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ - #include "paddle/fluid/framework/tensor_util.h" +#include +#include +#include namespace paddle { namespace framework { @@ -65,8 +67,6 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place, auto dst_gpu_place = boost::get(dst_place); auto ctx_place = ctx.GetPlace(); PADDLE_ENFORCE(platform::is_gpu_place(ctx_place)); - auto ctx_gpu_place = boost::get(ctx_place); - PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place); memory::Copy( dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, reinterpret_cast(ctx).stream()); diff --git a/paddle/fluid/framework/threadpool.cc b/paddle/fluid/framework/threadpool.cc index 9854d618d2b29ed123833f55198179638c95d6db..f26f212d4d5793b88fd1e6d782cdf983bf341879 100644 --- a/paddle/fluid/framework/threadpool.cc +++ b/paddle/fluid/framework/threadpool.cc @@ -14,8 +14,12 @@ #include "paddle/fluid/framework/threadpool.h" +#include "gflags/gflags.h" #include "paddle/fluid/platform/enforce.h" +DEFINE_int32(io_threadpool_size, 100, + "number of threads used for doing IO, default 100"); + namespace paddle { namespace framework { @@ -91,5 +95,20 @@ void ThreadPool::TaskLoop() { } } +std::unique_ptr ThreadPoolIO::io_threadpool_(nullptr); +std::once_flag ThreadPoolIO::io_init_flag_; + +ThreadPool* ThreadPoolIO::GetInstanceIO() { + std::call_once(io_init_flag_, &ThreadPoolIO::InitIO); + return io_threadpool_.get(); +} + +void ThreadPoolIO::InitIO() { + if (io_threadpool_.get() == nullptr) { + // TODO(typhoonzero1986): make this configurable + io_threadpool_.reset(new ThreadPool(FLAGS_io_threadpool_size)); + } +} + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/threadpool.h b/paddle/fluid/framework/threadpool.h index f9dce7105e32ff0ba03d03f8faaac3a4ed1a3595..94111ee335b1a5df327b3e46d62069b4735c54f6 100644 --- a/paddle/fluid/framework/threadpool.h +++ b/paddle/fluid/framework/threadpool.h @@ -14,12 +14,12 @@ limitations under the License. */ #pragma once -#include +#include // NOLINT #include -#include -#include +#include // NOLINT +#include // NOLINT #include -#include +#include // NOLINT #include #include "glog/logging.h" #include "paddle/fluid/platform/enforce.h" @@ -28,6 +28,22 @@ limitations under the License. */ namespace paddle { namespace framework { +struct ExceptionHandler { + mutable std::future> future_; + explicit ExceptionHandler( + std::future>&& f) + : future_(std::move(f)) {} + void operator()() const { + auto ex = this->future_.get(); + if (ex != nullptr) { + LOG(FATAL) << "The exception is thrown inside the thread pool. You " + "should use RunAndGetException to handle the exception.\n" + "The default exception handler is LOG(FATAL)." + << ex->what(); + } + } +}; + // ThreadPool maintains a queue of tasks, and runs them using a fixed // number of threads. class ThreadPool { @@ -87,22 +103,6 @@ class ThreadPool { void Wait(); private: - struct ExceptionHandler { - mutable std::future> future_; - explicit ExceptionHandler( - std::future>&& f) - : future_(std::move(f)) {} - void operator()() const { - auto ex = this->future_.get(); - if (ex != nullptr) { - LOG(FATAL) << "The exception is thrown inside the thread pool. You " - "should use RunAndGetException to handle the exception.\n" - "The default exception handler is LOG(FATAL)." 
-                   << ex->what();
-      }
-    }
-  };
-
   DISABLE_COPY_AND_ASSIGN(ThreadPool);

   // If the task queue is empty and available is equal to the number of
@@ -135,6 +135,17 @@ class ThreadPool {
   std::condition_variable completed_;
 };

+class ThreadPoolIO : ThreadPool {
+ public:
+  static ThreadPool* GetInstanceIO();
+  static void InitIO();
+
+ private:
+  // NOTE: threadpool in base will be inherited here.
+  static std::unique_ptr<ThreadPool> io_threadpool_;
+  static std::once_flag io_init_flag_;
+};
+
 // Run a function asynchronously.
 // NOTE: The function must return void. If the function needs to return a value,
 // you can use lambda to capture a value pointer.
@@ -143,5 +154,10 @@ std::future<void> Async(Callback callback) {
   return ThreadPool::GetInstance()->Run(callback);
 }

+template <typename Callback>
+std::future<void> AsyncIO(Callback callback) {
+  return ThreadPoolIO::GetInstanceIO()->Run(callback);
+}
+
 }  // namespace framework
 }  // namespace paddle
diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt
index f417f62f3f75360f4ae1b7795608ae95200cfeb8..8494edee6c2c714c285c45bbb4fe1d8cb1a524aa 100644
--- a/paddle/fluid/inference/CMakeLists.txt
+++ b/paddle/fluid/inference/CMakeLists.txt
@@ -1,4 +1,4 @@
-set(FLUID_CORE_MODULES proto_desc memory lod_tensor executor prune init)
+set(FLUID_CORE_MODULES proto_desc memory lod_tensor executor init)

 cc_library(paddle_fluid_api
     SRCS io.cc
@@ -11,7 +11,7 @@ cc_library(paddle_fluid DEPS ${fluid_modules})

 # Create shared library
 cc_library(paddle_fluid_shared SHARED
     SRCS io.cc
-    DEPS ARCHIVE_START ${GLOB_OP_LIB} ${FLUID_CORE_MODULES} ARCHIVE_END)
+    DEPS ${fluid_modules})

 set_target_properties(paddle_fluid_shared PROPERTIES OUTPUT_NAME paddle_fluid)
 if(NOT APPLE)
   # TODO(liuyiqun): Temporarily disable the link flag because it is not supported on Mac.
@@ -21,4 +21,7 @@ endif()

 if(WITH_TESTING)
   add_subdirectory(tests/book)
+  if (WITH_TENSORRT)
+    add_subdirectory(tensorrt)
+  endif()
 endif()
diff --git a/paddle/fluid/inference/io.cc b/paddle/fluid/inference/io.cc
index a5b62ef322bfad0fc956d7d722797bd5add6aea6..3b58019db6e55fa8198d2f77731095c6cf356266 100644
--- a/paddle/fluid/inference/io.cc
+++ b/paddle/fluid/inference/io.cc
@@ -17,10 +17,16 @@ limitations under the License. */

 #include <fstream>
 #include "paddle/fluid/framework/block_desc.h"
 #include "paddle/fluid/framework/feed_fetch_type.h"
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/pybind/pybind.h"

 namespace paddle {
 namespace inference {

+// Temporarily add this function for exposing framework::InitDevices() when
+// linking the inference shared library.
+void Init(bool init_p2p) { framework::InitDevices(init_p2p); }
+
 void ReadBinaryFile(const std::string& filename, std::string& contents) {
   std::ifstream fin(filename, std::ios::in | std::ios::binary);
   PADDLE_ENFORCE(static_cast<bool>(fin), "Cannot open file %s", filename);
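The new IO pool is a lazily initialized, process-wide singleton sized by the --io_threadpool_size flag (default 100, see threadpool.cc above). A usage sketch (the callback body is illustrative):

    // AsyncIO mirrors Async but schedules onto the dedicated IO pool, so
    // blocking work cannot occupy the compute threads.
    std::future<void> done = paddle::framework::AsyncIO([] {
      // blocking work: disk reads, RPC waits, ...
    });
    done.wait();
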
diff --git a/paddle/fluid/inference/io.h b/paddle/fluid/inference/io.h
index d07d315b93ef10a464080899b1cb9920abe83be3..756c936b33ad55e2994542b171b945e248ba2e21 100644
--- a/paddle/fluid/inference/io.h
+++ b/paddle/fluid/inference/io.h
@@ -18,12 +18,15 @@ limitations under the License. */

 #include <string>
 #include <vector>
 #include "paddle/fluid/framework/executor.h"
+#include "paddle/fluid/framework/init.h"
 #include "paddle/fluid/framework/program_desc.h"
 #include "paddle/fluid/framework/scope.h"

 namespace paddle {
 namespace inference {

+void Init(bool init_p2p);
+
 void LoadPersistables(framework::Executor& executor, framework::Scope& scope,
                       const framework::ProgramDesc& main_program,
                       const std::string& dirname,
diff --git a/paddle/fluid/inference/tensorrt/CMakeLists.txt b/paddle/fluid/inference/tensorrt/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e39c0daac76e0993382868289f66351da3d16f8f
--- /dev/null
+++ b/paddle/fluid/inference/tensorrt/CMakeLists.txt
@@ -0,0 +1 @@
+nv_test(test_tensorrt SRCS test_tensorrt.cc DEPS dynload_cuda device_context dynamic_loader)
diff --git a/paddle/fluid/inference/tensorrt/test_tensorrt.cc b/paddle/fluid/inference/tensorrt/test_tensorrt.cc
new file mode 100644
index 0000000000000000000000000000000000000000..a81a708e7a79225fd52c4b8e081afdcd8fe7e9ad
--- /dev/null
+++ b/paddle/fluid/inference/tensorrt/test_tensorrt.cc
@@ -0,0 +1,155 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#include <glog/logging.h>
+#include <gtest/gtest.h>
+#include "NvInfer.h"
+#include "cuda.h"
+#include "cuda_runtime_api.h"
+#include "paddle/fluid/platform/dynload/tensorrt.h"
+
+namespace dy = paddle::platform::dynload;
+
+class Logger : public nvinfer1::ILogger {
+ public:
+  void log(nvinfer1::ILogger::Severity severity, const char* msg) override {
+    switch (severity) {
+      case Severity::kINFO:
+        LOG(INFO) << msg;
+        break;
+      case Severity::kWARNING:
+        LOG(WARNING) << msg;
+        break;
+      case Severity::kINTERNAL_ERROR:
+      case Severity::kERROR:
+        LOG(ERROR) << msg;
+        break;
+      default:
+        break;
+    }
+  }
+};
+
+class ScopedWeights {
+ public:
+  ScopedWeights(float value) : value_(value) {
+    w.type = nvinfer1::DataType::kFLOAT;
+    w.values = &value_;
+    w.count = 1;
+  }
+  const nvinfer1::Weights& get() { return w; }
+
+ private:
+  float value_;
+  nvinfer1::Weights w;
+};
+
+// The following two APIs are implemented in TensorRT's header file and thus
+// cannot be loaded from the dynamic library. So we reimplement them here and
+// call the corresponding factory symbols from the dynamic library directly.
+nvinfer1::IBuilder* createInferBuilder(nvinfer1::ILogger& logger) {
+  return static_cast<nvinfer1::IBuilder*>(
+      dy::createInferBuilder_INTERNAL(&logger, NV_TENSORRT_VERSION));
+}
+nvinfer1::IRuntime* createInferRuntime(nvinfer1::ILogger& logger) {
+  return static_cast<nvinfer1::IRuntime*>(
+      dy::createInferRuntime_INTERNAL(&logger, NV_TENSORRT_VERSION));
+}
+
+const char* kInputTensor = "input";
+const char* kOutputTensor = "output";
+
+// Creates a network to compute y = 2x + 3
+nvinfer1::IHostMemory* CreateNetwork() {
+  Logger logger;
+  // Create the engine.
+ nvinfer1::IBuilder* builder = createInferBuilder(logger); + ScopedWeights weights(2.); + ScopedWeights bias(3.); + + nvinfer1::INetworkDefinition* network = builder->createNetwork(); + // Add the input + auto input = network->addInput(kInputTensor, nvinfer1::DataType::kFLOAT, + nvinfer1::DimsCHW{1, 1, 1}); + EXPECT_NE(input, nullptr); + // Add the hidden layer. + auto layer = network->addFullyConnected(*input, 1, weights.get(), bias.get()); + EXPECT_NE(layer, nullptr); + // Mark the output. + auto output = layer->getOutput(0); + output->setName(kOutputTensor); + network->markOutput(*output); + // Build the engine. + builder->setMaxBatchSize(1); + builder->setMaxWorkspaceSize(1 << 10); + auto engine = builder->buildCudaEngine(*network); + EXPECT_NE(engine, nullptr); + // Serialize the engine to create a model, then close. + nvinfer1::IHostMemory* model = engine->serialize(); + network->destroy(); + engine->destroy(); + builder->destroy(); + return model; +} + +void Execute(nvinfer1::IExecutionContext& context, const float* input, + float* output) { + const nvinfer1::ICudaEngine& engine = context.getEngine(); + // Two binds, input and output + ASSERT_EQ(engine.getNbBindings(), 2); + const int input_index = engine.getBindingIndex(kInputTensor); + const int output_index = engine.getBindingIndex(kOutputTensor); + // Create GPU buffers and a stream + void* buffers[2]; + ASSERT_EQ(0, cudaMalloc(&buffers[input_index], sizeof(float))); + ASSERT_EQ(0, cudaMalloc(&buffers[output_index], sizeof(float))); + cudaStream_t stream; + ASSERT_EQ(0, cudaStreamCreate(&stream)); + // Copy the input to the GPU, execute the network, and copy the output back. + ASSERT_EQ(0, cudaMemcpyAsync(buffers[input_index], input, sizeof(float), + cudaMemcpyHostToDevice, stream)); + context.enqueue(1, buffers, stream, nullptr); + ASSERT_EQ(0, cudaMemcpyAsync(output, buffers[output_index], sizeof(float), + cudaMemcpyDeviceToHost, stream)); + cudaStreamSynchronize(stream); + + // Release the stream and the buffers + cudaStreamDestroy(stream); + ASSERT_EQ(0, cudaFree(buffers[input_index])); + ASSERT_EQ(0, cudaFree(buffers[output_index])); +} + +TEST(TensorrtTest, BasicFunction) { + // Create the network serialized model. + nvinfer1::IHostMemory* model = CreateNetwork(); + + // Use the model to create an engine and an execution context. + Logger logger; + nvinfer1::IRuntime* runtime = createInferRuntime(logger); + nvinfer1::ICudaEngine* engine = + runtime->deserializeCudaEngine(model->data(), model->size(), nullptr); + model->destroy(); + nvinfer1::IExecutionContext* context = engine->createExecutionContext(); + + // Execute the network. + float input = 1234; + float output; + Execute(*context, &input, &output); + EXPECT_EQ(output, input * 2 + 3); + + // Destroy the engine. 
+ context->destroy(); + engine->destroy(); + runtime->destroy(); +} diff --git a/paddle/fluid/inference/tests/book/CMakeLists.txt b/paddle/fluid/inference/tests/book/CMakeLists.txt index 6ed77adb9d891c75e7de358d0d7a0c06c9af96dd..97d9f03f88ad3e851a2dd4256d34e8ca76fdfb01 100644 --- a/paddle/fluid/inference/tests/book/CMakeLists.txt +++ b/paddle/fluid/inference/tests/book/CMakeLists.txt @@ -17,14 +17,15 @@ function(inference_test TARGET_NAME) string(REGEX REPLACE "^_$" "" arg "${arg}") cc_test(test_inference_${TARGET_NAME}${arg} SRCS test_inference_${TARGET_NAME}.cc - DEPS ARCHIVE_START paddle_fluid ARCHIVE_END + DEPS paddle_fluid ARGS --dirname=${PYTHON_TESTS_DIR}/book/${TARGET_NAME}${arg}.inference.model) set_tests_properties(test_inference_${TARGET_NAME}${arg} PROPERTIES DEPENDS test_${TARGET_NAME}) endforeach() endfunction(inference_test) -inference_test(fit_a_line) +# This unittest is buggy! +#inference_test(fit_a_line) inference_test(image_classification ARGS vgg resnet) inference_test(label_semantic_roles) inference_test(recognize_digits ARGS mlp conv) diff --git a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc index ca2077d07411d2cd6095e0dc2a874af0890145c5..1e6555bb02033a28dedd2a1d1962981dfcc97cc2 100644 --- a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc +++ b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc @@ -46,8 +46,8 @@ TEST(inference, image_classification) { // Run inference on CPU LOG(INFO) << "--- CPU Runs: ---"; - TestInference(dirname, cpu_feeds, - cpu_fetchs1, FLAGS_repeat); + TestInference( + dirname, cpu_feeds, cpu_fetchs1, FLAGS_repeat); LOG(INFO) << output1.dims(); #ifdef PADDLE_WITH_CUDA @@ -57,8 +57,8 @@ TEST(inference, image_classification) { // Run inference on CUDA GPU LOG(INFO) << "--- GPU Runs: ---"; - TestInference(dirname, cpu_feeds, - cpu_fetchs2, FLAGS_repeat); + TestInference( + dirname, cpu_feeds, cpu_fetchs2, FLAGS_repeat); LOG(INFO) << output2.dims(); CheckError(output1, output2); diff --git a/paddle/fluid/inference/tests/test_helper.h b/paddle/fluid/inference/tests/test_helper.h index 064e400f0c750872ab2142c5fc8e28dd3da85b1a..c3a8d0889c6a6dd9591837ccc523da56f8d13661 100644 --- a/paddle/fluid/inference/tests/test_helper.h +++ b/paddle/fluid/inference/tests/test_helper.h @@ -89,7 +89,7 @@ void CheckError(const paddle::framework::LoDTensor& output1, EXPECT_EQ(count, 0U) << "There are " << count << " different elements."; } -template +template void TestInference(const std::string& dirname, const std::vector& cpu_feeds, const std::vector& cpu_fetchs, @@ -175,8 +175,15 @@ void TestInference(const std::string& dirname, } // Ignore the profiling results of the first run - executor.Run(*inference_program, scope, feed_targets, fetch_targets, - CreateVars); + std::unique_ptr ctx; + if (PrepareContext) { + ctx = executor.Prepare(*inference_program, 0); + executor.RunPreparedContext(ctx.get(), scope, feed_targets, fetch_targets, + CreateVars); + } else { + executor.Run(*inference_program, scope, feed_targets, fetch_targets, + CreateVars); + } // Enable the profiler paddle::platform::EnableProfiler(state); @@ -187,8 +194,15 @@ void TestInference(const std::string& dirname, "run_inference", paddle::platform::DeviceContextPool::Instance().Get(place)); - executor.Run(*inference_program, scope, feed_targets, fetch_targets, - CreateVars); + if (PrepareContext) { + // Note: if you change the inference_program, you need to 
call + // executor.Prepare() again to get a new ExecutorPrepareContext. + executor.RunPreparedContext(ctx.get(), scope, feed_targets, + fetch_targets, CreateVars); + } else { + executor.Run(*inference_program, scope, feed_targets, fetch_targets, + CreateVars); + } } // Disable the profiler and print the timing information diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index 5ff987ad8b3ba3c9195e87e6c11e70ac98fa0a11..7d6781c2c38822eaabb64eda9c76ff657bbdeeb8 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -100,7 +100,7 @@ function(op_library TARGET) endif() # Define operators that don't need pybind here. - foreach(manual_pybind_op "net_op" "compare_op" "logical_op" "nccl_op" "tensor_array_read_write_op") + foreach(manual_pybind_op "compare_op" "logical_op" "nccl_op" "tensor_array_read_write_op") if ("${TARGET}" STREQUAL "${manual_pybind_op}") set(pybind_flag 1) endif() @@ -199,7 +199,6 @@ else() set(DEPS_OPS ${DEPS_OPS} send_op prefetch_op recv_op listen_and_serv_op send_vars_op send_barrier_op) endif() -op_library(cond_op DEPS framework_proto tensor net_op) op_library(cross_entropy_op DEPS cross_entropy) op_library(softmax_with_cross_entropy_op DEPS cross_entropy softmax) op_library(softmax_op DEPS softmax) @@ -246,9 +245,17 @@ op_library(channel_send_op DEPS concurrency) op_library(channel_recv_op DEPS concurrency) list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS}) + +# The fully connected layer is deleted when the WITH_MKLDNN flag is OFF +# Because the fully connected layer has only one MKLDNN's operator +if(NOT WITH_MKLDNN) + list(REMOVE_ITEM GENERAL_OPS fc_op) +endif(NOT WITH_MKLDNN) + foreach(src ${GENERAL_OPS}) op_library(${src}) endforeach() + file(APPEND ${pybind_file} "USE_OP(less_than);\nUSE_OP(logical_and);\nUSE_NO_KERNEL_OP(read_from_array);\n") add_subdirectory(reader) @@ -259,7 +266,6 @@ endforeach() set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library") cc_test(gather_test SRCS gather_test.cc DEPS tensor) -cc_test(net_op_test SRCS net_op_test.cc DEPS net_op) cc_test(scatter_test SRCS scatter_test.cc DEPS tensor) cc_test(beam_search_decode_op_test SRCS beam_search_decode_op_test.cc DEPS lod_tensor) cc_test(beam_search_op_test SRCS beam_search_op_test.cc DEPS lod_tensor beam_search_op) diff --git a/paddle/fluid/operators/average_accumulates_op.cu b/paddle/fluid/operators/average_accumulates_op.cu index 046f72b471fa7ffcc82d84262a668c90a7f577a8..104e24f6ee2e2503d98f3a3991a903d8dbc4bdfe 100644 --- a/paddle/fluid/operators/average_accumulates_op.cu +++ b/paddle/fluid/operators/average_accumulates_op.cu @@ -25,12 +25,14 @@ void GetAccumulators( auto* in_num_accumulates = ctx.Input("in_num_accumulates"); auto* in_num_updates = ctx.Input("in_num_updates"); auto stream = ctx.cuda_device_context().stream(); - memory::Copy(platform::CPUPlace(), old_num_accumulates_, - platform::CUDAPlace(), in_old_num_accumulates->data(), - sizeof(int64_t), stream); - memory::Copy(platform::CPUPlace(), num_accumulates_, platform::CUDAPlace(), + auto cuda_place = + boost::get(in_old_num_accumulates->place()); + memory::Copy(platform::CPUPlace(), old_num_accumulates_, cuda_place, + in_old_num_accumulates->data(), sizeof(int64_t), + stream); + memory::Copy(platform::CPUPlace(), num_accumulates_, cuda_place, in_num_accumulates->data(), sizeof(int64_t), stream); - memory::Copy(platform::CPUPlace(), num_updates_, platform::CUDAPlace(), + memory::Copy(platform::CPUPlace(), num_updates_, cuda_place, 
in_num_updates->data(), sizeof(int64_t), stream); } @@ -42,14 +44,16 @@ void SetAccumulators( auto* out_old_num_accumulates = ctx.Output("out_old_num_accumulates"); auto* out_num_accumulates = ctx.Output("out_num_accumulates"); auto* out_num_updates = ctx.Output("out_num_updates"); + auto cuda_place = + boost::get(out_old_num_accumulates->place()); - memory::Copy(platform::CUDAPlace(), out_old_num_accumulates->data(), + memory::Copy(cuda_place, out_old_num_accumulates->data(), platform::CPUPlace(), &old_num_accumulates_, sizeof(int64_t), stream); - memory::Copy(platform::CUDAPlace(), out_num_accumulates->data(), + memory::Copy(cuda_place, out_num_accumulates->data(), platform::CPUPlace(), &num_accumulates_, sizeof(int64_t), stream); - memory::Copy(platform::CUDAPlace(), out_num_updates->data(), + memory::Copy(cuda_place, out_num_updates->data(), platform::CPUPlace(), &num_updates_, sizeof(int64_t), stream); } diff --git a/paddle/fluid/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc index 36049ee6a4a0d2a251b6d10cf1ff05a9d9845089..c9939e8602ed341d37784ca292a55326899e8e65 100644 --- a/paddle/fluid/operators/batch_norm_op.cc +++ b/paddle/fluid/operators/batch_norm_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/batch_norm_op.h" +#include #include "paddle/fluid/framework/data_layout.h" namespace paddle { diff --git a/paddle/fluid/operators/batch_norm_op.cu.cc b/paddle/fluid/operators/batch_norm_op.cu.cc index 6ceacc39924a7558e380aaf563aaf234f1bf30a5..cb1927bc0f2eb735f0a3184df5f0f8fada2f9dca 100644 --- a/paddle/fluid/operators/batch_norm_op.cu.cc +++ b/paddle/fluid/operators/batch_norm_op.cu.cc @@ -13,9 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/batch_norm_op.h" -#include "paddle/fluid/framework/data_layout.h" - #include +#include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/cudnn_helper.h" #include "paddle/fluid/platform/float16.h" @@ -115,23 +114,11 @@ class BatchNormKernel const auto *bias = ctx.Input("Bias"); auto *y = ctx.Output("Y"); - auto *mean_out = ctx.Output("MeanOut"); - auto *variance_out = ctx.Output("VarianceOut"); - auto *saved_mean = ctx.Output("SavedMean"); - auto *saved_variance = ctx.Output("SavedVariance"); // alloc memory y->mutable_data(ctx.GetPlace()); - mean_out->mutable_data>(ctx.GetPlace()); - variance_out->mutable_data>(ctx.GetPlace()); - saved_mean->mutable_data>(ctx.GetPlace()); - saved_variance->mutable_data>(ctx.GetPlace()); auto &dev_ctx = ctx.template device_context(); - math::SetConstant> - functor; - functor(dev_ctx, saved_mean, static_cast>(0)); - functor(dev_ctx, saved_variance, static_cast>(0)); auto handle = dev_ctx.cudnn_handle(); @@ -160,6 +147,21 @@ class BatchNormKernel // Run training mode. // obtain running mean and running inv var, and see if we need to // initialize them. 
+ + auto *mean_out = ctx.Output("MeanOut"); + auto *variance_out = ctx.Output("VarianceOut"); + mean_out->mutable_data>(ctx.GetPlace()); + variance_out->mutable_data>(ctx.GetPlace()); + + auto *saved_mean = ctx.Output("SavedMean"); + auto *saved_variance = ctx.Output("SavedVariance"); + saved_mean->mutable_data>(ctx.GetPlace()); + saved_variance->mutable_data>(ctx.GetPlace()); + math::SetConstant> + functor; + functor(dev_ctx, saved_mean, static_cast>(0)); + functor(dev_ctx, saved_variance, static_cast>(0)); + double this_factor = 1. - momentum; CUDNN_ENFORCE(platform::dynload::cudnnBatchNormalizationForwardTraining( diff --git a/paddle/fluid/operators/batch_size_like.h b/paddle/fluid/operators/batch_size_like.h index 0bdf27e620a3a7c7b62b955f708a5e2aad1a6986..dd51a11fbe6ad5e528197b67536518c4b31fa355 100644 --- a/paddle/fluid/operators/batch_size_like.h +++ b/paddle/fluid/operators/batch_size_like.h @@ -13,7 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once - +#include +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/box_coder_op.h b/paddle/fluid/operators/box_coder_op.h index 3c7cac1cd17042994287effc31a918ebd4353c4c..77fc6c2b62af42e6526b889aeef2d9bab795baec 100644 --- a/paddle/fluid/operators/box_coder_op.h +++ b/paddle/fluid/operators/box_coder_op.h @@ -10,6 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/compare_op.cc b/paddle/fluid/operators/compare_op.cc index 9a139ab27ec53395a8d1ab1347dbce93ea68fd8e..3a6a357e81949014a70e5bae1ee0e1c8b9d0c2ce 100644 --- a/paddle/fluid/operators/compare_op.cc +++ b/paddle/fluid/operators/compare_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/compare_op.h" +#include #include "paddle/fluid/framework/op_registry.h" namespace paddle { diff --git a/paddle/fluid/operators/concat_op.cc b/paddle/fluid/operators/concat_op.cc index 0eedd8ee51ebfff6f553d8e19e97c3a45a95fa6a..4a36b03cb63ac3ea61be1bbc56b8dd0adbe7d334 100644 --- a/paddle/fluid/operators/concat_op.cc +++ b/paddle/fluid/operators/concat_op.cc @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/concat_op.h" + +#include #include namespace paddle { @@ -33,7 +35,10 @@ class ConcatOp : public framework::OperatorWithKernel { size_t axis = static_cast(ctx->Attrs().Get("axis")); const size_t n = ins.size(); - PADDLE_ENFORCE_GT(n, 1, "Input tensors count should > 1."); + PADDLE_ENFORCE_GT(n, 0, "Input tensors count should > 0."); + if (n == 1) { + VLOG(3) << "Warning: concat op have only one input, may waste memory"; + } auto out_dims = ins[0]; size_t in_zero_dims_size = out_dims.size(); diff --git a/paddle/fluid/operators/cond_op.cc b/paddle/fluid/operators/cond_op.cc deleted file mode 100644 index 15dce9e3e28fa0200e332534f42752838da4db92..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/cond_op.cc +++ /dev/null @@ -1,235 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/fluid/operators/cond_op.h" -#include "paddle/fluid/operators/gather.h" -#include "paddle/fluid/operators/scatter.h" -#include "paddle/fluid/platform/device_context.h" - -namespace paddle { -namespace operators { - -using Scope = framework::Scope; -using Variable = framework::Variable; -using Tensor = framework::Tensor; -using LoDTensor = framework::LoDTensor; -using DDim = framework::DDim; - -framework::Scope& CondOp::AddSubScope(const Scope& scope) const { - auto sub_scopes_var = scope.FindVar("SubScopes"); - PADDLE_ENFORCE_NOT_NULL(sub_scopes_var, - "Output(SubScopes) of CondOp should not be null."); - auto sub_scopes = sub_scopes_var->GetMutable>(); - auto& sub_scope = scope.NewScope(); - sub_scopes->push_back(&sub_scope); - return sub_scope; -} - -std::vector& CondOp::GetSubScopes( - const framework::Scope& scope) const { - auto sub_scopes_var = scope.FindVar("SubScopes"); - PADDLE_ENFORCE_NOT_NULL(sub_scopes_var, - "Output(SubScopes) of CondOp should not be null."); - return *sub_scopes_var->GetMutable>(); -} - -LoDTensor& CondOp::AddIndexTensor(const Scope& scope) const { - auto index_tensors_var = scope.FindVar("IndexTensors"); - PADDLE_ENFORCE_NOT_NULL(index_tensors_var, - "Output(IndexTensors) of CondOp should not be null."); - auto& index_tensors = - *index_tensors_var->GetMutable>(); - index_tensors.push_back(LoDTensor()); - return index_tensors.back(); -} - -std::vector& CondOp::GetIndexTensors( - const framework::Scope& scope) const { - auto* index_tensors_var = scope.FindVar("IndexTensors"); - PADDLE_ENFORCE_NOT_NULL(index_tensors_var, - "Output(IndexTensors) of CondOp should not be null."); - return *index_tensors_var->GetMutable>(); -} - -void CondOp::PrepareDataForSubnet( - const framework::Scope& scope, - const platform::DeviceContext& dev_ctx) const { - PADDLE_ENFORCE(!Inputs("Xs").empty(), "Inputs(Xs) of CondOp can't be empty."); - - for (int i = 0; i < BRANCH_NUM; ++i) { - // Create two sub scopes for true and false branches - // sub_scopes[0] for the true branch - // sub_scopes[1] for the false branch - AddSubScope(scope); - // Create two tensors for true and false indices: - // index_tensors[0] for the true branch - // index_tensors[1] for the false branch - AddIndexTensor(scope); - } - - Variable* cond_var = scope.FindVar(Input("Cond")); - PADDLE_ENFORCE_NOT_NULL(cond_var, - "Input(Cond) of CondOp should not be null."); - const LoDTensor* cond = cond_var->GetMutable(); - - // get the true/false index at runtime according to cond tensor - // index_vectors[0]: vector, contains all index for cond[i] == true - // index_vectors[1]: vector, contains all index for cond[i] == false - std::vector> index_vectors; - index_vectors.resize(BRANCH_NUM); - - const int* cond_data = cond->data(); - for (int i = 0; i < cond->dims()[0]; ++i) { - if (cond_data[i]) - index_vectors[TRUE_BRANCH].push_back(i); - else - index_vectors[FALSE_BRANCH].push_back(i); - } - - // put index_vectors[0] and index_vectors[1] into two tensors: - // 
index_tensors[0] and index_tensors[1] - std::vector& index_tensors = GetIndexTensors(scope); - std::vector& sub_scopes = GetSubScopes(scope); - - for (int i = 0; i < BRANCH_NUM; ++i) { - DDim dim = {static_cast(index_vectors[i].size())}; - int* index_tensor_data_ptr = - index_tensors[i].mutable_data(dim, platform::CPUPlace()); - memcpy(index_tensor_data_ptr, index_vectors[i].data(), - dim[0] * sizeof(int)); - } - - // create input in subscopes according to index_vectors - for (auto& input : Inputs("Xs")) { - Variable* var_parent = scope.FindVar(input); - PADDLE_ENFORCE_NOT_NULL(var_parent); - const auto* tensor_parent = &var_parent->Get(); - - for (int i = 0; i < BRANCH_NUM; ++i) { - Variable* var_child = sub_scopes[i]->FindVar(input); - PADDLE_ENFORCE_NOT_NULL(var_child); - auto* tensor_child = var_child->GetMutable(); - - // Resize child - DDim dim = tensor_parent->dims(); - dim[0] = index_tensors[i].dims()[0]; - tensor_child->mutable_data(dim, platform::CPUPlace()); - - CPUGather(dev_ctx, *tensor_parent, index_tensors[i], tensor_child); - } - } - - // create output_tensors in subscope for sub_net - for (int i = 0; i < BRANCH_NUM; ++i) { - for (auto& output : (*sub_net_op_[i]).Outputs()) { - for (auto& var_name : output.second) { - sub_scopes[i]->Var(var_name); - } - } - } -} - -void CondOp::MergeDataFromSubnet(const framework::Scope& scope, - const platform::DeviceContext& dev_ctx) const { - std::vector& sub_scopes = GetSubScopes(scope); - const std::vector& index_tensors = - GetIndexTensors(scope); - - // Infer the output dim, out_dim[0] = true_dim[0] + false_dim[0] - PADDLE_ENFORCE(!Outputs("Outs").empty(), - "Outputs(Outs) of CondOp can't be empty."); - for (auto& output : Outputs("Outs")) { - const LoDTensor* tensor_t_out = - &sub_scopes[TRUE_BRANCH]->FindVar(output)->Get(); - PADDLE_ENFORCE_NOT_NULL(tensor_t_out, "True output should not be NULL"); - const LoDTensor* tensor_f_out = - &sub_scopes[FALSE_BRANCH]->FindVar(output)->Get(); - PADDLE_ENFORCE_NOT_NULL(tensor_f_out, "False output should not be NULL"); - - auto* var_out = scope.FindVar(output); - PADDLE_ENFORCE_NOT_NULL(var_out, "Output not found"); - LoDTensor* tensor_out = var_out->GetMutable(); - PADDLE_ENFORCE_NOT_NULL(tensor_t_out, - "True output tensor should not be NULL"); - - DDim true_dim = tensor_t_out->dims(); - DDim false_dim = tensor_f_out->dims(); - true_dim[0] = 0; - false_dim[0] = 0; - PADDLE_ENFORCE_EQ(true_dim, false_dim, - "Outputs not of the same shape except the first dim"); - - DDim out_dim = tensor_t_out->dims(); - out_dim[0] = tensor_t_out->dims()[0] + tensor_f_out->dims()[0]; - tensor_out->Resize(out_dim); - tensor_out->mutable_data(platform::CPUPlace()); - } - - // merge output results: - // output_tensor = true_output_tensor + false_output_tensor - for (auto& output : Outputs("Outs")) { - Variable* var_parent = scope.FindVar(output); - PADDLE_ENFORCE_NOT_NULL(var_parent); - auto* tensor_parent = var_parent->GetMutable(); - - for (int i = 0; i < BRANCH_NUM; ++i) { - Variable* var_child = sub_scopes[i]->FindVar(output); - PADDLE_ENFORCE_NOT_NULL(var_child); - auto* tensor_child = &var_child->Get(); - ScatterAssign(dev_ctx, *tensor_child, index_tensors[i], - tensor_parent); - } - } -} - -void CondOp::RunImpl(const Scope& scope, const platform::Place& place) const { - // get device context from pool - platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); - auto& dev_ctx = *pool.Get(place); - - PrepareDataForSubnet(scope, dev_ctx); - std::vector& sub_scopes = GetSubScopes(scope); - 
for (int i = 0; i < BRANCH_NUM; ++i) { - sub_net_op_[i]->Run(*sub_scopes[i], place); - } - MergeDataFromSubnet(scope, dev_ctx); -} - -class CondOpProtoAndCheckerMaker : public framework::OpProtoAndCheckerMaker { - public: - CondOpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("Cond", "The condition, which is a bool vector"); - AddInput("Xs", "Inputs of Subnets").AsDuplicable(); - AddOutput("Outs", "Outputs of Cond_Op after merge").AsDuplicable(); - - AddOutput("SubScopes", "sub scopes for true and false branches"); - AddOutput("IndexTensors", "Index Tensors contains indices for true/false"); - - AddComment(R"DOC( -Sample Dependent Conditional Operator. - -Given Cond[i] as a 1/0 vector to indicate true/false: -Out[i] = subnet_true[i], if Cond[i] == true -Out[i] = subnet_false[i], if Cond[i] == false - -)DOC"); - } -}; - -} // namespace operators -} // namespace paddle - -REGISTER_OP_WITHOUT_GRADIENT(cond, paddle::operators::CondOp, - paddle::operators::CondOpProtoAndCheckerMaker); diff --git a/paddle/fluid/operators/cond_op.h b/paddle/fluid/operators/cond_op.h deleted file mode 100644 index a04fae2182005d4eb08305e943449977bfb637f9..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/cond_op.h +++ /dev/null @@ -1,95 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once -#include -#include "glog/logging.h" -#include "paddle/fluid/framework/ddim.h" -#include "paddle/fluid/framework/eigen.h" -#include "paddle/fluid/framework/operator.h" -#include "paddle/fluid/framework/tensor.h" -#include "paddle/fluid/operators/net_op.h" - -namespace paddle { -namespace operators { - -/* - * @brief CondOp is a dynamic if-else Operator - * - * It has a input tensor named cond indicating which netop each instance will - * run. - * - * if cond == 1, it will run true_net, which is a NetOp. - * - * if cond == 0, it will run false_net, which is another NetOp. - */ -class CondOp : public framework::OperatorBase { - public: - CondOp(const std::string& type, const framework::VariableNameMap& inputs, - const framework::VariableNameMap& outputs, - const framework::AttributeMap& attrs) - : OperatorBase(type, inputs, outputs, attrs) { - sub_net_op_.resize(BRANCH_NUM); - } - - CondOp(const CondOp& o) - : framework::OperatorBase( - static_cast(o)) { - // TODO(yuyang18): Implement copy ctor well. 
- PADDLE_THROW("Not implemented"); - } - - framework::Scope& AddSubScope(const framework::Scope& scope) const; - std::vector& GetSubScopes( - const framework::Scope& scope) const; - - framework::LoDTensor& AddIndexTensor(const framework::Scope& scope) const; - std::vector& GetIndexTensors( - const framework::Scope& scope) const; - - void PrepareDataForSubnet(const framework::Scope& scope, - const platform::DeviceContext& dev_ctx) const; - void MergeDataFromSubnet(const framework::Scope& scope, - const platform::DeviceContext& dev_ctx) const; - - /* - * Set True Block - */ - void set_truenet(std::unique_ptr&& net) { - sub_net_op_[TRUE_BRANCH] = std::move(net); - } - - /* - * Set False Block - */ - void set_falsenet(std::unique_ptr&& net) { - sub_net_op_[FALSE_BRANCH] = std::move(net); - } - - private: - void RunImpl(const framework::Scope& scope, - const platform::Place& place) const override; - - private: - const int TRUE_BRANCH = 0; - const int FALSE_BRANCH = 1; - const int BRANCH_NUM = 2; - - // sub_net_op_[0]: subnet_t - // sub_net_op_[1]: subnet_f - std::vector> sub_net_op_; -}; - -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/conv_transpose_op.cc b/paddle/fluid/operators/conv_transpose_op.cc index b2a3cfc89f18eff24c941c664b1184b4485ab895..08f5939d42a41d235a94eff16cf2f558068d6aaa 100644 --- a/paddle/fluid/operators/conv_transpose_op.cc +++ b/paddle/fluid/operators/conv_transpose_op.cc @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/conv_transpose_op.h" +#include +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/conv_transpose_op.h b/paddle/fluid/operators/conv_transpose_op.h index d4e4b641ece9ed120904ded6f8baed65a2666213..bfc0177c2a0da1627fbca532764fdae8167b6b2a 100644 --- a/paddle/fluid/operators/conv_transpose_op.h +++ b/paddle/fluid/operators/conv_transpose_op.h @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once - +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/im2col.h" diff --git a/paddle/fluid/operators/crf_decoding_op.h b/paddle/fluid/operators/crf_decoding_op.h index 2b2a733fb9f162755e5c548fec617937d86689dd..3f5fab3b382bea97f43e4bc1b2cd436c956ba264 100644 --- a/paddle/fluid/operators/crf_decoding_op.h +++ b/paddle/fluid/operators/crf_decoding_op.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/crop_op.h b/paddle/fluid/operators/crop_op.h index c5ac6849789587f2f41588f79bd538f7b79a7478..f05c2e23284e3a24cf48442996f671ec6084c391 100644 --- a/paddle/fluid/operators/crop_op.h +++ b/paddle/fluid/operators/crop_op.h @@ -13,7 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #pragma once - +#include +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/strided_memcpy.h" diff --git a/paddle/fluid/operators/ctc_align_op.cu b/paddle/fluid/operators/ctc_align_op.cu index 54e0b1d9ad83c5f01f3f0dfbc2a95c642c0aaadc..bbad74e96d9c6c1be24639b63e472f18a599cfab 100644 --- a/paddle/fluid/operators/ctc_align_op.cu +++ b/paddle/fluid/operators/ctc_align_op.cu @@ -15,6 +15,7 @@ limitations under the License. */ #include #include #include +#include #include "paddle/fluid/operators/ctc_align_op.h" namespace paddle { diff --git a/paddle/fluid/operators/ctc_align_op.h b/paddle/fluid/operators/ctc_align_op.h index 70698d99589ae9e2e18ec8b1c1bb3bc8c7476131..9c5c6f5aa03632fe3079074d4b164f871fad634d 100644 --- a/paddle/fluid/operators/ctc_align_op.h +++ b/paddle/fluid/operators/ctc_align_op.h @@ -15,6 +15,7 @@ limitations under the License. */ #pragma once #include +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/detail/grpc_client.cc b/paddle/fluid/operators/detail/grpc_client.cc index 8bbfd1f15925992efdeaaffbbe7b350ffbcee889..661dfa69fe1580ff3890f12defcd124225be0c06 100644 --- a/paddle/fluid/operators/detail/grpc_client.cc +++ b/paddle/fluid/operators/detail/grpc_client.cc @@ -35,7 +35,8 @@ bool RPCClient::AsyncSendVariable(const std::string& ep, const framework::Scope* p_scope = &scope; const auto ch = GetChannel(ep_val); - framework::Async([var_name_val, p_ctx, ep_val, p_scope, time_out, ch, this] { + framework::AsyncIO([var_name_val, p_ctx, ep_val, p_scope, time_out, ch, + this] { auto* var = p_scope->FindVar(var_name_val); ::grpc::ByteBuffer req; @@ -65,9 +66,8 @@ bool RPCClient::AsyncSendVariable(const std::string& ep, } void ProcGetResponse(const VarHandle& var_h, - // const sendrecv::VariableMessage& ret_msg) { const ::grpc::ByteBuffer& ret_msg) { - framework::Variable* outvar = NULL; + framework::Variable* outvar = nullptr; DeserializeFromByteBuffer(ret_msg, *var_h.ctx, var_h.scope, &outvar); } @@ -90,7 +90,8 @@ bool RPCClient::AsyncGetVariable(const std::string& ep, const framework::Scope* p_scope = &scope; const auto ch = GetChannel(ep_val); - framework::Async([var_name_val, ep_val, p_scope, p_ctx, time_out, ch, this] { + framework::AsyncIO([var_name_val, ep_val, p_scope, p_ctx, time_out, ch, + this] { // prepare input sendrecv::VariableMessage req; req.set_varname(var_name_val); @@ -133,8 +134,8 @@ bool RPCClient::AsyncPrefetchVariable(const std::string& ep, const framework::Scope* p_scope = &scope; const auto ch = GetChannel(ep_val); - framework::Async([in_var_name_val, out_var_name_val, ep_val, p_scope, p_ctx, - time_out, ch, this] { + framework::AsyncIO([in_var_name_val, out_var_name_val, ep_val, p_scope, p_ctx, + time_out, ch, this] { auto* var = p_scope->FindVar(in_var_name_val); ::grpc::ByteBuffer req; @@ -197,7 +198,7 @@ bool RPCClient::Wait() { std::vector> waits(req_count_); for (int i = 0; i < req_count_; i++) { - waits[i] = framework::Async([i, &a, this] { a[i] = Proceed(); }); + waits[i] = framework::AsyncIO([i, &a, this] { a[i] = Proceed(); }); } for (int i = 0; i < req_count_; i++) { diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc index d5fc163bc25409e0607b149b61c6266b38119d9d..119e146e078e476b2768a8495ea63e468f952fd2 100644 --- a/paddle/fluid/operators/detail/grpc_server.cc +++ b/paddle/fluid/operators/detail/grpc_server.cc 
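The client-side switch from framework::Async to framework::AsyncIO in grpc_client.cc above moves the blocking gRPC lambdas onto the dedicated IO pool so they cannot starve compute work. The two entry points are drop-in equivalents at the call site (toy callbacks, for illustration only):

    // CPU-bound work stays on the default pool ...
    auto compute = paddle::framework::Async([] { /* kernels, math */ });
    // ... while work that may block on the network uses the IO pool.
    auto io = paddle::framework::AsyncIO([] { /* send/recv, Proceed() */ });
    compute.wait();
    io.wait();
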
@@ -161,6 +161,7 @@ class RequestPrefetch final : public RequestBase { ::grpc::ByteBuffer reply; std::string var_name = request_->OutVarname(); + VLOG(3) << "prefetch var " << var_name; auto var_desc = program_->Block(0).FindVar(var_name); framework::Scope* local_scope = &scope_->NewScope(); auto* var = local_scope->FindVar(var_name); @@ -216,10 +217,10 @@ void AsyncGRPCServer::RunSyncUpdate() { std::function prefetch_register = std::bind(&AsyncGRPCServer::TryToRegisterNewPrefetchOne, this); + // TODO(wuyi): Run these "HandleRequest" in thread pool t_send_.reset( new std::thread(std::bind(&AsyncGRPCServer::HandleRequest, this, cq_send_.get(), "cq_send", send_register))); - t_get_.reset( new std::thread(std::bind(&AsyncGRPCServer::HandleRequest, this, cq_get_.get(), "cq_get", get_register))); diff --git a/paddle/fluid/operators/detail/serde_test.cc b/paddle/fluid/operators/detail/serde_test.cc index f8cae6b26acf9d37ca286487065d70ede4c03120..cb5f89583436b059ac4d6509dac9f2e3868561aa 100644 --- a/paddle/fluid/operators/detail/serde_test.cc +++ b/paddle/fluid/operators/detail/serde_test.cc @@ -107,7 +107,7 @@ void RunSerdeTestSelectedRows(platform::Place place) { for (int i = 0; i < tensor_numel; ++i) { EXPECT_FLOAT_EQ(tensor_data2[i], 32.7); } - for (int64_t i = 0; i < rows2->size(); ++i) { + for (size_t i = 0; i < rows2->size(); ++i) { EXPECT_EQ(rows_data2[i], i); } EXPECT_EQ(slr2->height(), 1000); diff --git a/paddle/fluid/operators/elementwise_op.h b/paddle/fluid/operators/elementwise_op.h index f04d8d8fd82ed2336dff9c5b88808dc32de6630a..a33634ab2503f988a8a692682ddb238d4794a3c0 100644 --- a/paddle/fluid/operators/elementwise_op.h +++ b/paddle/fluid/operators/elementwise_op.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" @@ -106,18 +107,18 @@ information. However, the output only shares the LoD information with input $X$. protected: std::string comment_; - void Replace(std::string& src, std::string from, std::string to) { + void Replace(std::string* src, std::string from, std::string to) { std::size_t len_from = std::strlen(from.c_str()); std::size_t len_to = std::strlen(to.c_str()); - for (std::size_t pos = src.find(from); pos != std::string::npos; - pos = src.find(from, pos + len_to)) { - src.replace(pos, len_from, to); + for (std::size_t pos = src->find(from); pos != std::string::npos; + pos = src->find(from, pos + len_to)) { + src->replace(pos, len_from, to); } } void SetComment(std::string name, std::string equation) { - Replace(comment_, "{name}", name); - Replace(comment_, "{equation}", equation); + Replace(&comment_, "{name}", name); + Replace(&comment_, "{equation}", equation); } }; diff --git a/paddle/fluid/operators/gru_op.cc b/paddle/fluid/operators/gru_op.cc index 2a91dcbcd418fcd61445b7d744789bdeee11d2f2..2490b83b8c50ce4a68095be10d78a380174c1a3f 100644 --- a/paddle/fluid/operators/gru_op.cc +++ b/paddle/fluid/operators/gru_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
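The pointer-based Replace helper in elementwise_op.h above is a plain replace-all; advancing the search by the replacement's length avoids re-matching inside newly inserted text. A standalone sketch of the same logic, usable for quick verification outside the codebase:

    #include <cstring>
    #include <iostream>
    #include <string>

    // Mirrors the patched helper: replace every occurrence of `from` in *src.
    void Replace(std::string* src, std::string from, std::string to) {
      std::size_t len_from = std::strlen(from.c_str());
      std::size_t len_to = std::strlen(to.c_str());
      for (std::size_t pos = src->find(from); pos != std::string::npos;
           pos = src->find(from, pos + len_to)) {
        src->replace(pos, len_from, to);
      }
    }

    int main() {
      std::string comment = "Out = {name}({equation})";
      Replace(&comment, "{name}", "Add");
      Replace(&comment, "{equation}", "X + Y");
      std::cout << comment << "\n";  // Out = Add(X + Y)
    }
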
*/ #include "paddle/fluid/operators/gru_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/gru_op.h b/paddle/fluid/operators/gru_op.h index 0886bebc41d8b0f28745e88685f3954f86c823a1..1d5c291495c0f0c0d8da9ff6949888b4cbb6036d 100644 --- a/paddle/fluid/operators/gru_op.h +++ b/paddle/fluid/operators/gru_op.h @@ -13,15 +13,14 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once - +#include +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/detail/activation_functions.h" #include "paddle/fluid/operators/math/gru_compute.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/sequence2batch.h" -#include "paddle/fluid/framework/eigen.h" -#include "paddle/fluid/framework/op_registry.h" - namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/im2sequence_op.cc b/paddle/fluid/operators/im2sequence_op.cc index 048391549dd8df24cc215d04431c306ac4c7e5be..5b387d8d344dfc3475a537827acd9e125fe6693c 100644 --- a/paddle/fluid/operators/im2sequence_op.cc +++ b/paddle/fluid/operators/im2sequence_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/im2sequence_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/im2sequence_op.h b/paddle/fluid/operators/im2sequence_op.h index a6a83fefbc6266fa718dcad78b3a018526f124db..d792c68f784d8ffec0eb303a6ab9b59c9f121fa7 100644 --- a/paddle/fluid/operators/im2sequence_op.h +++ b/paddle/fluid/operators/im2sequence_op.h @@ -13,7 +13,7 @@ limitations under the License. */ #pragma once - +#include #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" diff --git a/paddle/fluid/operators/label_smooth_op.cc b/paddle/fluid/operators/label_smooth_op.cc index eef25f8a06ddb3311f3cfea21b64d8f7d7e58f24..c2a8c7f867a4483a7fda2f4336a64ab109ce86e8 100644 --- a/paddle/fluid/operators/label_smooth_op.cc +++ b/paddle/fluid/operators/label_smooth_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/label_smooth_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/linear_chain_crf_op.h b/paddle/fluid/operators/linear_chain_crf_op.h index 800a1303e1a427e7bd5e6c04354b8a5fbd816712..d5162bcd742c05980c89394b5d011bd078b61211 100644 --- a/paddle/fluid/operators/linear_chain_crf_op.h +++ b/paddle/fluid/operators/linear_chain_crf_op.h @@ -100,7 +100,7 @@ class LinearChainCRFOpKernel : public framework::OpKernel { auto x_row_max = EigenMatrix::From(emission_row_max); x_row_max.device(place) = x.maximum(Eigen::DSizes(1)) - .reshape(Eigen::DSizes(int(batch_size), 1)); + .reshape(Eigen::DSizes(static_cast(batch_size), 1)); auto x_exps = EigenMatrix::From(*emission_exps); x_exps.device(place) = diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 9188f2d989e601b7a97dedaf71f7080829cdb7c3..5d293665f0bcc098126ad3ec6c9bf34ff54c3b6f 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -13,7 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include +#include // NOLINT +#include #include "paddle/fluid/operators/listen_and_serv_op.h" @@ -88,8 +89,9 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, auto ins = Inputs("X"); auto fan_in = Attr("Fanin"); - auto *block = Attr(kOptimizeBlock); - auto *program = block->Program(); + auto *optimize_block = Attr(kOptimizeBlock); + auto *prefetch_block = Attr(kPrefetchBlock); + auto *program = optimize_block->Program(); size_t num_blocks = program->Size(); PADDLE_ENFORCE_GE(num_blocks, 2, "server program should have at least 2 blocks"); @@ -97,18 +99,25 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, framework::Executor executor(dev_place); std::vector block_list; for (size_t blkid = 1; blkid < num_blocks; ++blkid) { - block_list.push_back(blkid); + if (blkid != prefetch_block->ID()) { + block_list.push_back(blkid); + } } - auto prepared = executor.Prepare(*program, block_list); + auto optimize_prepared = executor.Prepare(*program, block_list); // Insert placeholder for block0 which holds current op itself. - prepared.insert(prepared.begin(), - std::shared_ptr(nullptr)); + optimize_prepared.insert( + optimize_prepared.begin(), + std::shared_ptr(nullptr)); rpc_service_->SetScope(&recv_scope); rpc_service_->SetDevCtx(&dev_ctx); // TODO(qiao) set proper fields for table lookup and update rpc_service_->SetExecutor(&executor); - rpc_service_->SetPrefetchBlkdId(0); + VLOG(3) << "prefetch block id is " << prefetch_block->ID(); + auto prefetch_prepared = executor.Prepare(*program, prefetch_block->ID()); + rpc_service_->SetPrefetchBlkdId(prefetch_block->ID()); + rpc_service_->SetPrefetchPreparedCtx(prefetch_prepared.get()); + prefetch_prepared.release(); rpc_service_->SetProgram(program); // start the server listening after all member initialized. 
server_thread_.reset(new std::thread(RunServer, rpc_service_)); @@ -166,16 +175,18 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, parallel_blkids.push_back(1); double ts = detail::GetTimestamp(); for (size_t blkid = 2; blkid < num_blocks; ++blkid) { - if (program->Block(blkid).Parent() != last_parent_blkid) { - ParallelExecuteBlocks(parallel_blkids, &executor, prepared, program, - &recv_scope); - parallel_blkids.clear(); - last_parent_blkid = program->Block(blkid).Parent(); + if (blkid != prefetch_block->ID()) { + if (program->Block(blkid).Parent() != last_parent_blkid) { + ParallelExecuteBlocks(parallel_blkids, &executor, optimize_prepared, + program, &recv_scope); + parallel_blkids.clear(); + last_parent_blkid = program->Block(blkid).Parent(); + } + parallel_blkids.push_back(blkid); } - parallel_blkids.push_back(blkid); } - ParallelExecuteBlocks(parallel_blkids, &executor, prepared, program, - &recv_scope); + ParallelExecuteBlocks(parallel_blkids, &executor, optimize_prepared, + program, &recv_scope); VLOG(2) << "run all blocks spent " << detail::GetTimestamp() - ts << "(ms)"; // Reset the received sparse variables, the sum operator would not @@ -211,6 +222,8 @@ from send_op and send back variables to recv_op. .AddCustomChecker([](const std::string &ip) { return !ip.empty(); }); AddAttr(kOptimizeBlock, "BlockID to run on server side."); + AddAttr(kPrefetchBlock, + "prefetch block to run on server side."); AddAttr("Fanin", "How many clients send to this server.") .SetDefault(1); } diff --git a/paddle/fluid/operators/listen_and_serv_op.h b/paddle/fluid/operators/listen_and_serv_op.h index 0da87afc961e896f04b4f0028bf9b17d5e992548..759b2a462ba5b938991aa86be9b9dc3e59fe3f7e 100644 --- a/paddle/fluid/operators/listen_and_serv_op.h +++ b/paddle/fluid/operators/listen_and_serv_op.h @@ -16,6 +16,7 @@ limitations under the License. */ #include #include +#include #include "paddle/fluid/framework/executor.h" #include "paddle/fluid/framework/lod_tensor.h" @@ -27,6 +28,7 @@ namespace paddle { namespace operators { constexpr char kOptimizeBlock[] = "OptimizeBlock"; +constexpr char kPrefetchBlock[] = "PrefetchBlock"; void RunServer(std::shared_ptr service); diff --git a/paddle/fluid/operators/logical_op.cc b/paddle/fluid/operators/logical_op.cc index 6a7db31cf36f31064259abeb0348e682be9f917c..41aa00ee8ac10e0776c066fc3c37f97b0dd40cc3 100644 --- a/paddle/fluid/operators/logical_op.cc +++ b/paddle/fluid/operators/logical_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/logical_op.h" +#include #include "paddle/fluid/framework/op_registry.h" namespace paddle { diff --git a/paddle/fluid/operators/lookup_table_op.cc b/paddle/fluid/operators/lookup_table_op.cc index bf33be310686640fa187a07cf46a157b7f433340..5e59bd1b178ad1803f6f70c5f3f9fd7af495ac3c 100644 --- a/paddle/fluid/operators/lookup_table_op.cc +++ b/paddle/fluid/operators/lookup_table_op.cc @@ -78,6 +78,9 @@ class LookupTableOpMaker : public framework::OpProtoAndCheckerMaker { "(boolean, default false) " "Sparse update.") .SetDefault(false); + AddAttr("is_distributed", + "(boolean, default false) distributed lookup table.") + .SetDefault(false); AddAttr("padding_idx", "(int64, default -1) " "If the value is -1, it makes no effect to lookup. 
" diff --git a/paddle/fluid/operators/lrn_op.cc b/paddle/fluid/operators/lrn_op.cc index cb1568398125bbb57da974096da527200c1e0975..553a06c3dcdbb9de43afcace75ebec7c5e819d4a 100644 --- a/paddle/fluid/operators/lrn_op.cc +++ b/paddle/fluid/operators/lrn_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/lrn_op.h" +#include #ifdef PADDLE_WITH_MKLDNN #include "paddle/fluid/platform/mkldnn_helper.h" #endif diff --git a/paddle/fluid/operators/lstm_op.cc b/paddle/fluid/operators/lstm_op.cc index d75537741ef1d13b61ad6e244b2bba1ae5509da5..e062d62c66c25e386c7643e310034bc1481ec43d 100644 --- a/paddle/fluid/operators/lstm_op.cc +++ b/paddle/fluid/operators/lstm_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/lstm_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lstm_op.h b/paddle/fluid/operators/lstm_op.h index 11f9f223b5d9a8091c51c93cee3f9c23b62e5573..a1ef0eb278dea7205cd8052bbe006b0ae4e3a466 100644 --- a/paddle/fluid/operators/lstm_op.h +++ b/paddle/fluid/operators/lstm_op.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/detail/activation_functions.h" #include "paddle/fluid/operators/math/lstm_compute.h" diff --git a/paddle/fluid/operators/lstm_unit_op.cu b/paddle/fluid/operators/lstm_unit_op.cu index 76245a1b5a9c8ba9c7ee7d7c03a95e2595a01591..acf094238fff92711edf00b4180266138362add1 100644 --- a/paddle/fluid/operators/lstm_unit_op.cu +++ b/paddle/fluid/operators/lstm_unit_op.cu @@ -18,6 +18,7 @@ https://github.com/caffe2/caffe2/blob/master/caffe2/operators/lstm_unit_op_gpu.c #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/cross_entropy_op.h" +#include "paddle/fluid/operators/lstm_unit_op.h" #include "paddle/fluid/platform/assert.h" #include "paddle/fluid/platform/hostdevice.h" diff --git a/paddle/fluid/operators/lstmp_op.cc b/paddle/fluid/operators/lstmp_op.cc index a881ef82ec3cefa826f5f0856cc4fc13c7d7afc0..82541517e122d5da2674b55561ba72af970a2567 100644 --- a/paddle/fluid/operators/lstmp_op.cc +++ b/paddle/fluid/operators/lstmp_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/lstmp_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lstmp_op.h b/paddle/fluid/operators/lstmp_op.h index dfa7f74d5116b4e3f1508f8bef94c598711e8124..172db548960135fbc1841cf58b73894d4f74d838 100644 --- a/paddle/fluid/operators/lstmp_op.h +++ b/paddle/fluid/operators/lstmp_op.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #pragma once +#include #include "paddle/fluid/operators/activation_op.h" #include "paddle/fluid/operators/math/detail/activation_functions.h" #include "paddle/fluid/operators/math/lstm_compute.h" diff --git a/paddle/fluid/operators/math/math_function.cu b/paddle/fluid/operators/math/math_function.cu index 82e12943148a806bae719c722944d6a9d5236b7c..9badf26c9bb80acad029be3d1b63377cef63d929 100644 --- a/paddle/fluid/operators/math/math_function.cu +++ b/paddle/fluid/operators/math/math_function.cu @@ -39,13 +39,14 @@ void gemm( cublasOperation_t cuTransB = (transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; - float h_alpha = static_cast(alpha); - float h_beta = static_cast(beta); - // TODO(kexinzhao): add processing code for compute capability < 53 case PADDLE_ENFORCE_GE(context.GetComputeCapability(), 53, "cublas fp16 gemm requires GPU compute capability >= 53"); +#if CUDA_VERSION >= 8000 + float h_alpha = static_cast(alpha); + float h_beta = static_cast(beta); + cublasGemmAlgo_t algo = CUBLAS_GEMM_DFALT; #if CUDA_VERSION >= 9000 if (context.GetComputeCapability() >= 70) { @@ -56,7 +57,7 @@ void gemm( PADDLE_ENFORCE(platform::dynload::cublasSetMathMode(context.cublas_handle(), CUBLAS_DEFAULT_MATH)); } -#endif +#endif // CUDA_VERSION >= 9000 // cublasHgemm does true FP16 computation which is slow for non-Volta // GPUs. So use cublasGemmEx instead which does pesudo FP16 computation: @@ -66,6 +67,18 @@ void gemm( context.cublas_handle(), cuTransB, cuTransA, N, M, K, &h_alpha, B, CUDA_R_16F, ldb, A, CUDA_R_16F, lda, &h_beta, C, CUDA_R_16F, N, CUDA_R_32F, algo)); +#else + // CUDA 7.5 does not support cublasGemmEx, hence we fall back to use hgemm + const half h_alpha = static_cast(alpha); + const half h_beta = static_cast(beta); + const half* h_A = reinterpret_cast(A); + const half* h_B = reinterpret_cast(B); + half* h_C = reinterpret_cast(C); + + PADDLE_ENFORCE(platform::dynload::cublasHgemm( + context.cublas_handle(), cuTransB, cuTransA, N, M, K, &h_alpha, h_B, ldb, + h_A, lda, &h_beta, h_C, N)); +#endif // CUDA_VERSION >= 8000 } template <> @@ -255,6 +268,7 @@ void batched_gemm( const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const float16 alpha, const float16* A, const float16* B, const float16 beta, float16* C, const int batchCount, const int strideA, const int strideB) { +#if CUDA_VERSION >= 8000 // Note that cublas follows fortran order, so the order is different from // the cblas convention. int lda = (transA == CblasNoTrans) ? K : M; @@ -275,9 +289,13 @@ void batched_gemm( // TODO(kexinzhao): add processing code for compute capability < 53 case PADDLE_ENFORCE_GE(context.GetComputeCapability(), 53, "cublas Hgemm requires GPU compute capability >= 53"); + PADDLE_ENFORCE(platform::dynload::cublasHgemmStridedBatched( context.cublas_handle(), cuTransB, cuTransA, N, M, K, &h_alpha, h_B, ldb, strideB, h_A, lda, strideA, &h_beta, h_C, ldc, strideC, batchCount)); +#else + PADDLE_ENFORCE(false, "HgemmStridedBatched is not supported on cuda <= 7.5"); +#endif } template <> @@ -286,6 +304,7 @@ void batched_gemm( const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, const int batchCount, const int strideA, const int strideB) { +#if CUDA_VERSION >= 8000 // Note that cublas follows fortran order, so the order is different from // the cblas convention. int lda = (transA == CblasNoTrans) ? 
K : M; @@ -300,6 +319,9 @@ void batched_gemm( PADDLE_ENFORCE(platform::dynload::cublasSgemmStridedBatched( context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, strideB, A, lda, strideA, &beta, C, ldc, strideC, batchCount)); +#else + PADDLE_ENFORCE(false, "SgemmStridedBatched is not supported on cuda <= 7.5"); +#endif } template <> @@ -308,6 +330,7 @@ void batched_gemm( const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C, const int batchCount, const int strideA, const int strideB) { +#if CUDA_VERSION >= 8000 // Note that cublas follows fortran order, so the order is different from // the cblas convention. int lda = (transA == CblasNoTrans) ? K : M; @@ -322,6 +345,9 @@ void batched_gemm( PADDLE_ENFORCE(platform::dynload::cublasDgemmStridedBatched( context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, strideB, A, lda, strideA, &beta, C, ldc, strideC, batchCount)); +#else + PADDLE_ENFORCE(false, "DgemmStridedBatched is not supported on cuda <= 7.5"); +#endif } template <> diff --git a/paddle/fluid/operators/matmul_op.cc b/paddle/fluid/operators/matmul_op.cc index 85855928521b8b4cc5e8746b0b5f841cc2587618..1f5255887391218b766aa23842e443c8b2ad080f 100644 --- a/paddle/fluid/operators/matmul_op.cc +++ b/paddle/fluid/operators/matmul_op.cc @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/matmul_op.h" +#include +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/matmul_op.h b/paddle/fluid/operators/matmul_op.h index 1cd8fe55dcbd23eae771550a363bf0a07e9bf585..f2e9cfdcdbf93326ae193776a7d5f6a324373603 100644 --- a/paddle/fluid/operators/matmul_op.h +++ b/paddle/fluid/operators/matmul_op.h @@ -13,7 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once - +#include +#include +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/matmul.h" diff --git a/paddle/fluid/operators/maxout_op.cc b/paddle/fluid/operators/maxout_op.cc index efaae7d5f2d20484d90f79d9e13ec2f5ed6e06c9..4e28d98834d27351be99106d6760eae46baf8938 100644 --- a/paddle/fluid/operators/maxout_op.cc +++ b/paddle/fluid/operators/maxout_op.cc @@ -13,6 +13,8 @@ * limitations under the License. */ #include "paddle/fluid/operators/maxout_op.h" +#include + namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/minus_op.cc b/paddle/fluid/operators/minus_op.cc index 7de9d94979fdc3f3352c556cc8b655ad4bc7e201..a302b24560e680076d62d02b422c6410467deb1d 100644 --- a/paddle/fluid/operators/minus_op.cc +++ b/paddle/fluid/operators/minus_op.cc @@ -13,7 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/minus_op.h" -#include "paddle/fluid/operators/net_op.h" + +#include +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/momentum_op.cu b/paddle/fluid/operators/momentum_op.cu index da4a6af298f61a20e60ff1b8358f30bb0aca2280..5eb9d9950248bb50bb823f071c7fff0ddcc47234 100644 --- a/paddle/fluid/operators/momentum_op.cu +++ b/paddle/fluid/operators/momentum_op.cu @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
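The CUDA_VERSION guards above route FP16 GEMM through cublasGemmEx on CUDA 8.0+ (FP16 in/out with FP32 accumulation) and fall back to true-FP16 cublasHgemm on CUDA 7.5, while the strided-batched kernels simply refuse to run on older toolkits. A compile-time dispatch sketch; CUDA_VERSION_STANDIN is a demo macro, since real builds get CUDA_VERSION from the toolkit headers:

#include <iostream>

// Demo macro only: real builds get CUDA_VERSION (e.g. 8000 for CUDA
// 8.0) from the CUDA headers, and the #if below picks the branch at
// compile time, exactly like the guards in math_function.cu.
#ifndef CUDA_VERSION_STANDIN
#define CUDA_VERSION_STANDIN 8000
#endif

void GemmFp16Path() {
#if CUDA_VERSION_STANDIN >= 8000
  // cublasGemmEx: FP16 in/out, FP32 accumulation ("pseudo FP16"),
  // faster than true FP16 arithmetic on pre-Volta GPUs.
  std::cout << "dispatch: cublasGemmEx (FP16 I/O, FP32 accumulate)\n";
#else
  // CUDA 7.5 has no cublasGemmEx; fall back to true-FP16 cublasHgemm.
  std::cout << "dispatch: cublasHgemm (true FP16 arithmetic)\n";
#endif
}

int main() { GemmFp16Path(); }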
*/ #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/momentum_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/mul_op.cc b/paddle/fluid/operators/mul_op.cc index 90af1e2d602ac039b4d98a69a889ff8b1b85ffc6..5038287527c70d376d8c8a1cc8e4cca0b563126a 100644 --- a/paddle/fluid/operators/mul_op.cc +++ b/paddle/fluid/operators/mul_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/mul_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/net_op.cc b/paddle/fluid/operators/net_op.cc deleted file mode 100644 index 0c2da744177b602246719d701257fc1b509ad81e..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/net_op.cc +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/fluid/operators/net_op.h" -#include -#include "paddle/fluid/framework/op_registry.h" - -namespace paddle { -namespace operators { - -const char NetOp::kAll[] = "all"; - -void NetOp::CompleteAddOp(bool calc) { - add_op_done_ = true; - if (!calc) return; - std::set input_set; - std::set output_set; - for (auto& op : ops_) { - for (auto& ipt : op->Inputs()) { - for (auto& var_name : ipt.second) { - // If input variable has been in output set, then it will be - // added into intermediate_outputs_. Otherwise, it will be - // added into input set. 
- if (Contains(output_set, var_name)) { - intermediate_outputs_.insert(var_name); - } else { - input_set.insert(var_name); - } - } - } - - for (auto& opt : op->Outputs()) { - for (auto& var_name : opt.second) { - output_set.insert(var_name); - } - } - } - auto& inputs = inputs_[kAll]; - inputs.reserve(input_set.size()); - std::copy(input_set.begin(), input_set.end(), std::back_inserter(inputs)); - auto& outputs = outputs_[kAll]; - outputs.reserve(output_set.size()); - std::copy(output_set.begin(), output_set.end(), std::back_inserter(outputs)); -} - -std::string NetOp::DebugStringEx(const framework::Scope* scope) const { - std::ostringstream os; - os << OperatorBase::DebugStringEx(scope) << std::endl; - for (auto& op : ops_) { - std::istringstream is(op->DebugStringEx(scope)); - for (std::string line; std::getline(is, line);) { - os << " " << line << std::endl; - } - } - return os.str(); -} - -bool NetOp::IsNetOp() const { return true; } - -std::vector NetOp::OutputVars(bool has_intermediate) const { - std::vector all; - for (auto& pair : this->outputs_) { - for (auto& var_name : pair.second) { - all.push_back(var_name); - } - } - if (has_intermediate) { - return all; - } - std::vector ret_val; - for (auto& each : all) { - if (!Contains(intermediate_outputs_, each)) { - ret_val.push_back(each); - } - } - return ret_val; -} - -NetOp::NetOp(const std::string& type, const framework::VariableNameMap& inputs, - const framework::VariableNameMap& outputs, - const framework::AttributeMap& attrs) - : framework::OperatorBase(type, inputs, outputs, attrs) {} - -std::unique_ptr NetOp::Clone() const { - PADDLE_ENFORCE( - add_op_done_, - "Must clone a sealed NetOp, invoke Net::CompleteAddOp before clone"); - return std::unique_ptr(new NetOp(*this)); -} - -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/net_op.h b/paddle/fluid/operators/net_op.h deleted file mode 100644 index cbf8820cf4991bc24893f13646364dea0955a128..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/net_op.h +++ /dev/null @@ -1,130 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include "paddle/fluid/framework/framework.pb.h" -#include "paddle/fluid/framework/op_registry.h" - -namespace paddle { -namespace operators { - -/** - * @brief Network is also a type of Operator - * - * It will manage the operators it has. - * - * Network is the container and controller of a set of operators. - - * A network object knows all Operators belonging to this network. Variables, - * which are inputs and outputs of these operators, are created and managed by a - * hierarchy of Scope objects. - * - * This is the base class of network, all the networks should implement the APIs - * it defines. 
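The deleted NetOp::CompleteAddOp() classified variables with a single rule: an input already produced by an earlier op in the net is an intermediate output, and anything else is an external input. A standalone reconstruction of that pass:

#include <iostream>
#include <set>
#include <string>
#include <vector>

struct Op {
  std::vector<std::string> inputs, outputs;
};

int main() {
  // Two chained ops: (x, w1) -> y, then (y, w2) -> z.
  std::vector<Op> ops = {{{"x", "w1"}, {"y"}}, {{"y", "w2"}, {"z"}}};
  std::set<std::string> inputs, outputs, intermediates;
  for (const auto& op : ops) {
    for (const auto& in : op.inputs) {
      if (outputs.count(in)) {
        intermediates.insert(in);  // produced upstream, consumed here
      } else {
        inputs.insert(in);  // must come from outside the net
      }
    }
    for (const auto& out : op.outputs) outputs.insert(out);
  }
  for (const auto& v : inputs) std::cout << "in:  " << v << "\n";
  for (const auto& v : intermediates) std::cout << "mid: " << v << "\n";
}

This prints w1, w2, x as inputs and y as the sole intermediate, matching the expectations in the deleted net_op_test.cc, where OutputVars(false) hides y and returns only z.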
- */ -class NetOp : public framework::OperatorBase { - public: - static const char kAll[]; - NetOp() - : framework::OperatorBase("plain_net", framework::VariableNameMap{}, - framework::VariableNameMap{}, - framework::AttributeMap{}) {} - - NetOp(const std::string& type, const framework::VariableNameMap& inputs, - const framework::VariableNameMap& outputs, - const framework::AttributeMap& attrs); - - NetOp(const NetOp& o) : framework::OperatorBase(o.type_, {}, {}, o.attrs_) { - this->ops_.reserve(o.ops_.size()); - std::transform( - o.ops_.begin(), o.ops_.end(), std::back_inserter(this->ops_), - [](const std::unique_ptr& op) { - return std::unique_ptr(op->Clone()); - }); - this->CompleteAddOp(); - } - - bool SupportGPU() const override { - for (auto& op : ops_) { - if (!op->SupportGPU()) { - return false; - } - } - return true; - } - - void AppendOp(const framework::OperatorBase& op) { AppendOp(op.Clone()); } - - /** - * @brief Add an operator by ptr - */ - void AppendOp(std::unique_ptr op) { - PADDLE_ENFORCE(!add_op_done_, - "Cannot AppendOp when this network is sealed"); - PADDLE_ENFORCE_NOT_NULL(op, "Cannot Insert Null op"); - ops_.push_back(std::move(op)); - } - - void InsertOp(size_t pos, std::unique_ptr op) { - PADDLE_ENFORCE(!add_op_done_, - "Cannot InsertOp when this network is sealed"); - PADDLE_ENFORCE_NOT_NULL(op, "Cannot Insert Null op"); - PADDLE_ENFORCE_LE(pos, ops_.size(), "Out of range"); - ops_.insert(ops_.begin() + pos, std::move(op)); - } - - void InsertOp(size_t pos, const framework::OperatorBase& op) { - InsertOp(pos, op.Clone()); - } - - void CompleteAddOp(bool calculate = true); - - std::string DebugStringEx( - const framework::Scope* scope = nullptr) const override; - - bool IsNetOp() const override; - std::vector OutputVars(bool has_intermediate) const override; - - std::unique_ptr Clone() const override; - - std::vector> ops_; - - private: - /** - * @brief Run the network. - * - * Run all the operators with the `scope`, if no scope is provided, default - * scope will be used instead. If no OpContext is provicded, default context - * will be used. - */ - void RunImpl(const framework::Scope& scope, - const platform::Place& place) const override { - for (auto& op : ops_) { - op->Run(scope, place); - } - } - - bool add_op_done_{false}; - std::set intermediate_outputs_; - - template - static bool Contains(T container, KeyType key) { - return container.find(key) != container.end(); - } -}; - -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/net_op_test.cc b/paddle/fluid/operators/net_op_test.cc deleted file mode 100644 index 3b5f57548585398c441fd8038ba8b053c27392cf..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/net_op_test.cc +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-#include "paddle/fluid/operators/net_op.h" - -#include - -namespace paddle { -namespace operators { -using Scope = framework::Scope; -using DeviceContext = platform::DeviceContext; - -static int run_cnt = 0; - -class TestOp : public framework::OperatorBase { - public: - using framework::OperatorBase::OperatorBase; - DEFINE_OP_CLONE_METHOD(TestOp); - - private: - void RunImpl(const Scope& scope, - const platform::Place& place) const override { - ++run_cnt; - } -}; - -template -void AssertSameVectorWithoutOrder(const std::vector& expected, - const std::vector& actual) { - ASSERT_EQ(expected.size(), actual.size()); - std::unordered_set expected_set; - for (auto& tmp : expected) { - expected_set.insert(tmp); - } - for (auto& act : actual) { - ASSERT_NE(expected_set.end(), expected_set.find(act)); - } -} - -TEST(OpKernel, all) { - auto net = std::make_shared(); - ASSERT_NE(net, nullptr); - - net->AppendOp(std::unique_ptr( - new TestOp("test", {{"X", {"x"}}, {"W", {"w1"}}, {"b", {"b1"}}}, - {{"Out", {"y"}}}, framework::AttributeMap{}))); - net->AppendOp(std::unique_ptr( - new TestOp("test", {{"X", {"y"}}, {"W", {"w2"}}, {"b", {"b2"}}}, - {{"Out", {"z"}}}, framework::AttributeMap{}))); - - net->CompleteAddOp(); - AssertSameVectorWithoutOrder({"x", "w1", "b1", "w2", "b2"}, - net->Inputs(NetOp::kAll)); - AssertSameVectorWithoutOrder({"y", "z"}, net->Outputs(NetOp::kAll)); - - auto final_outs = net->OutputVars(false); - - ASSERT_EQ(final_outs.size(), 1UL); - ASSERT_EQ(final_outs[0], "z"); -} - -TEST(NetOp, insert_op) { - NetOp net; - auto op1 = std::unique_ptr( - new framework::NOP("empty", {{"X", {"x"}}, {"W", {"w1"}}, {"b", {"b1"}}}, - {{"Out", {"y"}}}, framework::AttributeMap{})); - net.AppendOp(*op1); - net.InsertOp(0, *op1); - ASSERT_EQ(2UL, net.ops_.size()); - net.InsertOp(2, std::move(op1)); - ASSERT_EQ(3UL, net.ops_.size()); -} - -TEST(NetOp, Clone) { - NetOp net; - net.AppendOp(std::unique_ptr(new framework::NOP{ - "empty", framework::VariableNameMap{}, framework::VariableNameMap{}, - framework::AttributeMap{}})); - net.AppendOp(std::unique_ptr(new framework::NOP{ - "empty2", framework::VariableNameMap{}, framework::VariableNameMap{}, - framework::AttributeMap{}})); - net.CompleteAddOp(true); - auto new_net_op = net.Clone(); - ASSERT_NE(new_net_op, nullptr); - ASSERT_TRUE(new_net_op->IsNetOp()); - auto* new_net = static_cast(new_net_op.get()); - ASSERT_EQ(2UL, new_net->ops_.size()); - ASSERT_EQ(new_net->ops_[0]->Type(), "empty"); - ASSERT_EQ(new_net->ops_[1]->Type(), "empty2"); -} - -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/pad_op.h b/paddle/fluid/operators/pad_op.h index a36abe3789574cb64f05001e34d534cf352a60b2..c93c096575a30dd9344894ead4b81acc16930e21 100644 --- a/paddle/fluid/operators/pad_op.h +++ b/paddle/fluid/operators/pad_op.h @@ -14,6 +14,8 @@ limitations under the License. 
*/ #pragma once +#include +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" diff --git a/paddle/fluid/operators/pool_mkldnn_op.cc b/paddle/fluid/operators/pool_mkldnn_op.cc index c88578570c1acdecaa97dd8b12a702778fef2b7e..63eaaedcd5fc3df17902511dc02b25bf43ccd241 100644 --- a/paddle/fluid/operators/pool_mkldnn_op.cc +++ b/paddle/fluid/operators/pool_mkldnn_op.cc @@ -83,9 +83,11 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel { dev_ctx.SetBlob(key_pool_workspace_memory, workspace_memory); auto src_memory = - mkldnn::memory({src_md, mkldnn_engine}, (void*)input_data); + mkldnn::memory({src_md, mkldnn_engine}, + static_cast(const_cast(input_data))); auto dst_memory = - mkldnn::memory({dst_md, mkldnn_engine}, (void*)output_data); + mkldnn::memory({dst_md, mkldnn_engine}, + static_cast(const_cast(output_data))); auto pool_prim = mkldnn::pooling_forward(*pool_pd, src_memory, dst_memory, *workspace_memory); @@ -195,9 +197,11 @@ class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel { pool_bwd_desc, mkldnn_engine, *pool_pd); auto diff_src_memory = - mkldnn::memory({diff_src_md, mkldnn_engine}, (void*)in_x_grad_data); + mkldnn::memory({diff_src_md, mkldnn_engine}, + static_cast(const_cast(in_x_grad_data))); auto diff_dst_memory = - mkldnn::memory({diff_dst_md, mkldnn_engine}, (void*)out_grad_data); + mkldnn::memory({diff_dst_md, mkldnn_engine}, + static_cast(const_cast(out_grad_data))); auto bwd_prim = mkldnn::pooling_backward( pool_bwd_pd, diff_dst_memory, *workspace_memory, diff_src_memory); diff --git a/paddle/fluid/operators/pool_op.h b/paddle/fluid/operators/pool_op.h index 2fec50ef25e0d2621a87963acdf142d24970329d..a48127ea6983d3d4ea12ec4925f30af233002ef2 100644 --- a/paddle/fluid/operators/pool_op.h +++ b/paddle/fluid/operators/pool_op.h @@ -14,6 +14,8 @@ limitations under the License. */ #pragma once +#include +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/pool_with_index_op.h b/paddle/fluid/operators/pool_with_index_op.h index 83e7bd138ae25c6d3e09c3d01178d6887205bf98..b55fa76eae34c3179d40f31ed6a57d3ecbbaaccf 100644 --- a/paddle/fluid/operators/pool_with_index_op.h +++ b/paddle/fluid/operators/pool_with_index_op.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/prefetch_op.cc b/paddle/fluid/operators/prefetch_op.cc index 09ab7da663b5ef5f099b9f65b0df661ceea0d9e2..f9ae01ab5d2972d2a74b36ae6035985d1d874bb6 100644 --- a/paddle/fluid/operators/prefetch_op.cc +++ b/paddle/fluid/operators/prefetch_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
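The pool_mkldnn_op.cc changes above replace C-style (void*) casts with an explicit static_cast/const_cast chain, so the const-stripping step (which MKL-DNN's memory constructor forces, since it takes a non-const void*) is visible and greppable. The idiom in isolation:

#include <iostream>

int main() {
  const float data[2] = {1.f, 2.f};
  const float* input_data = data;  // like the kernel's input pointer
  // Step 1: const_cast strips constness; step 2: static_cast converts
  // the pointer type. Each dangerous step is now explicit.
  void* handle = static_cast<void*>(const_cast<float*>(input_data));
  std::cout << *static_cast<float*>(handle) << "\n";  // 1
}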
*/ -#include +#include // NOLINT #include #include "paddle/fluid/framework/data_type.h" @@ -50,8 +50,8 @@ class PrefetchOp : public framework::OperatorBase { for (size_t i = 0; i < ins.size(); i++) { if (NeedSend(scope, ins[i])) { - VLOG(3) << "sending " << ins[i] << " to " << epmap[i] << "to get " - << outs[i] << "back"; + VLOG(3) << "sending " << ins[i] << " to " << epmap[i] << " to get " + << outs[i] << " back"; rpc_client->AsyncPrefetchVariable(epmap[i], ctx, scope, ins[i], outs[i]); } else { @@ -71,7 +71,7 @@ class PrefetchOpMaker : public framework::OpProtoAndCheckerMaker { "(RPCClient) The RPC client object which will be" "initialized at most once."); AddOutput("Out", - "(SelectedRows) result " + "(LoDTensor) result " "to be fetched from parameter server") .AsDuplicable(); AddAttr>( diff --git a/paddle/fluid/operators/prelu_op.cc b/paddle/fluid/operators/prelu_op.cc index 447b854544b72043ea09c09c134af3a48c305561..8eaa12a4a6cfc09fd4e2c3642bc8825fe2af6d6b 100644 --- a/paddle/fluid/operators/prelu_op.cc +++ b/paddle/fluid/operators/prelu_op.cc @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/prelu_op.h" -#include "paddle/fluid/operators/net_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/prior_box_op.cc b/paddle/fluid/operators/prior_box_op.cc index 82e54139c8c1f42b1d8f74811a6793ec5c66473e..058b13eeb872aaa77a88da37db64a6d59fbdd1cf 100644 --- a/paddle/fluid/operators/prior_box_op.cc +++ b/paddle/fluid/operators/prior_box_op.cc @@ -45,7 +45,7 @@ class PriorBoxOp : public framework::OperatorWithKernel { bool flip = ctx->Attrs().Get("flip"); std::vector aspect_ratios_vec; - ExpandAspectRatios(aspect_ratios, flip, aspect_ratios_vec); + ExpandAspectRatios(aspect_ratios, flip, &aspect_ratios_vec); size_t num_priors = aspect_ratios_vec.size() * min_sizes.size(); if (max_sizes.size() > 0) { diff --git a/paddle/fluid/operators/prior_box_op.cu b/paddle/fluid/operators/prior_box_op.cu index 76bf2b3b7de7a24c80e927c16199f89c5b7fb794..0ea8909296f8f52d252b0ec258666cf32d69a8bb 100644 --- a/paddle/fluid/operators/prior_box_op.cu +++ b/paddle/fluid/operators/prior_box_op.cu @@ -96,7 +96,7 @@ class PriorBoxOpCUDAKernel : public framework::OpKernel { auto clip = ctx.Attr("clip"); std::vector aspect_ratios; - ExpandAspectRatios(input_aspect_ratio, flip, aspect_ratios); + ExpandAspectRatios(input_aspect_ratio, flip, &aspect_ratios); T step_w = static_cast(ctx.Attr("step_w")); T step_h = static_cast(ctx.Attr("step_h")); diff --git a/paddle/fluid/operators/prior_box_op.h b/paddle/fluid/operators/prior_box_op.h index 1e4a12aac1c5f1c3b7e2e1bc83170de9ad590fc3..1c62fd8d2c4d4e4deba4ca6442efbaff83e36c35 100644 --- a/paddle/fluid/operators/prior_box_op.h +++ b/paddle/fluid/operators/prior_box_op.h @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #pragma once +#include +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/transform.h" @@ -22,23 +24,23 @@ namespace operators { inline void ExpandAspectRatios(const std::vector& input_aspect_ratior, bool flip, - std::vector& output_aspect_ratior) { + std::vector* output_aspect_ratior) { constexpr float epsilon = 1e-6; - output_aspect_ratior.clear(); - output_aspect_ratior.push_back(1.0f); + output_aspect_ratior->clear(); + output_aspect_ratior->push_back(1.0f); for (size_t i = 0; i < input_aspect_ratior.size(); ++i) { float ar = input_aspect_ratior[i]; bool already_exist = false; - for (size_t j = 0; j < output_aspect_ratior.size(); ++j) { - if (fabs(ar - output_aspect_ratior[j]) < epsilon) { + for (size_t j = 0; j < output_aspect_ratior->size(); ++j) { + if (fabs(ar - output_aspect_ratior->at(j)) < epsilon) { already_exist = true; break; } } if (!already_exist) { - output_aspect_ratior.push_back(ar); + output_aspect_ratior->push_back(ar); if (flip) { - output_aspect_ratior.push_back(1.0f / ar); + output_aspect_ratior->push_back(1.0f / ar); } } } @@ -68,7 +70,7 @@ class PriorBoxOpKernel : public framework::OpKernel { auto clip = ctx.Attr("clip"); std::vector aspect_ratios; - ExpandAspectRatios(input_aspect_ratio, flip, aspect_ratios); + ExpandAspectRatios(input_aspect_ratio, flip, &aspect_ratios); T step_w = static_cast(ctx.Attr("step_w")); T step_h = static_cast(ctx.Attr("step_h")); diff --git a/paddle/fluid/operators/rank_loss_op.cc b/paddle/fluid/operators/rank_loss_op.cc index 767eef56861ea075ec2450b1456e7c5c807ce25d..a1127f11a75e54168ca9682a0189255d37ee8571 100644 --- a/paddle/fluid/operators/rank_loss_op.cc +++ b/paddle/fluid/operators/rank_loss_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
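A compilable reconstruction of the ExpandAspectRatios() above with its template arguments restored (float is assumed from the epsilon comparison; the angle brackets did not survive extraction). The output parameter is now a pointer, per the style rules applied throughout this series:

#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

inline void ExpandAspectRatios(const std::vector<float>& input_aspect_ratior,
                               bool flip,
                               std::vector<float>* output_aspect_ratior) {
  constexpr float epsilon = 1e-6;
  output_aspect_ratior->clear();
  output_aspect_ratior->push_back(1.0f);
  for (std::size_t i = 0; i < input_aspect_ratior.size(); ++i) {
    float ar = input_aspect_ratior[i];
    bool already_exist = false;  // skip (near-)duplicate ratios
    for (std::size_t j = 0; j < output_aspect_ratior->size(); ++j) {
      if (std::fabs(ar - output_aspect_ratior->at(j)) < epsilon) {
        already_exist = true;
        break;
      }
    }
    if (!already_exist) {
      output_aspect_ratior->push_back(ar);
      if (flip) {
        output_aspect_ratior->push_back(1.0f / ar);
      }
    }
  }
}

int main() {
  std::vector<float> out;
  ExpandAspectRatios({2.0f, 2.0f, 3.0f}, /*flip=*/true, &out);
  for (float ar : out) std::cout << ar << " ";  // 1 2 0.5 3 0.333333
  std::cout << "\n";
}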
*/ #include "paddle/fluid/operators/rank_loss_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/read_op.cc b/paddle/fluid/operators/read_op.cc index 2925b8a85da1b0d19672124e49c8fd22c8b4e6bf..bf02b9958927580608b95d6b8ecfddc7231a02d4 100644 --- a/paddle/fluid/operators/read_op.cc +++ b/paddle/fluid/operators/read_op.cc @@ -66,13 +66,7 @@ class ReadOp : public framework::OperatorBase { std::vector out_arg_names = Outputs("Out"); std::vector ins; reader->ReadNext(&ins); - if (ins.empty()) { - reader->ReInit(); - reader->ReadNext(&ins); - PADDLE_ENFORCE( - !ins.empty(), - "Reader can not read the next data even it has been re-initialized."); - } + PADDLE_ENFORCE(!ins.empty(), "There is no next data."); PADDLE_ENFORCE_EQ(ins.size(), out_arg_names.size()); for (size_t i = 0; i < ins.size(); ++i) { auto* out = diff --git a/paddle/fluid/operators/reader/CMakeLists.txt b/paddle/fluid/operators/reader/CMakeLists.txt index 6fa0195b9ae103418beb56cc4b0fa9ab59e93108..845528860f91d0b479bb3c4dbbe05e32c68dc16f 100644 --- a/paddle/fluid/operators/reader/CMakeLists.txt +++ b/paddle/fluid/operators/reader/CMakeLists.txt @@ -22,5 +22,6 @@ reader_library(create_batch_reader_op SRCS create_batch_reader_op.cc) reader_library(create_recordio_file_reader_op SRCS create_recordio_file_reader_op.cc) reader_library(create_double_buffer_reader_op SRCS create_double_buffer_reader_op.cc) reader_library(create_multi_pass_reader_op SRCS create_multi_pass_reader_op.cc) +reader_library(create_threaded_reader_op SRCS create_threaded_reader_op.cc) # Export local libraries to parent set(READER_LIBRARY ${LOCAL_READER_LIBS} PARENT_SCOPE) diff --git a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc index ed868786ab2a80efa42574ed4f579c633ce0becf..0b7c1d6af714558d35918dac62d92d9e0f86c970 100644 --- a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc +++ b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc @@ -33,28 +33,14 @@ static constexpr size_t kChannelSize = 0; // kCacheSize - 2 class DoubleBufferReader : public framework::DecoratedReader { public: - struct Item { - Item() : ctx_(nullptr) {} - Item(Item&& b) { - payloads_ = std::move(b.payloads_); - ctx_ = std::move(b.ctx_); - } - Item& operator=(Item&& b) { - payloads_ = std::move(b.payloads_); - ctx_ = std::move(b.ctx_); - return *this; - } - - std::vector payloads_; - platform::DeviceContext* ctx_; - }; - explicit DoubleBufferReader( ReaderBase* reader, platform::Place target_place = platform::CPUPlace()) : DecoratedReader(reader), place_(target_place) { + cpu_tensor_cache_.resize(kCacheSize); + gpu_tensor_cache_.resize(kCacheSize); #ifdef PADDLE_WITH_CUDA - for (size_t i = 0; i < kCacheSize; ++i) { - if (platform::is_gpu_place(place_)) { + if (platform::is_gpu_place(place_)) { + for (size_t i = 0; i < kCacheSize; ++i) { ctxs_.emplace_back(new platform::CUDADeviceContext( boost::get(place_))); } @@ -63,15 +49,16 @@ class DoubleBufferReader : public framework::DecoratedReader { StartPrefetcher(); } - bool HasNext() const override; void ReadNext(std::vector* out) override; void ReInit() override; ~DoubleBufferReader() { EndPrefetcher(); } private: + bool HasNext() const; + void StartPrefetcher() { - channel_ = framework::MakeChannel(kChannelSize); + channel_ = framework::MakeChannel(kChannelSize); prefetcher_ = std::thread([this] { PrefetchThreadFunc(); }); } @@ -87,8 +74,10 @@ class DoubleBufferReader : public 
framework::DecoratedReader { void PrefetchThreadFunc(); std::thread prefetcher_; - framework::Channel* channel_; + framework::Channel* channel_; platform::Place place_; + std::vector> cpu_tensor_cache_; + std::vector> gpu_tensor_cache_; std::vector> ctxs_; }; @@ -109,7 +98,9 @@ class CreateDoubleBufferReaderOp : public framework::OperatorBase { auto place_str = Attr("place"); platform::Place place; - if (place_str == "CPU") { + if (place_str == "AUTO") { + place = dev_place; + } else if (place_str == "CPU") { place = platform::CPUPlace(); } else { std::istringstream sin(place_str); @@ -140,28 +131,25 @@ class CreateDoubleBufferReaderOpMaker : public DecoratedReaderMakerBase { enum_range.insert(string::Sprintf("CUDA:%d", i)); } enum_range.insert("CPU"); - AddAttr("place", "The double buffer place, default is CPU") - .SetDefault("CPU") + enum_range.insert("AUTO"); + AddAttr("place", "The double buffer place") + .SetDefault("AUTO") .InEnum({enum_range}); } }; -bool DoubleBufferReader::HasNext() const { - while (!channel_->IsClosed() && !channel_->CanReceive()) { - } - return channel_->CanReceive(); -} - void DoubleBufferReader::ReadNext(std::vector* out) { - if (!HasNext()) { - PADDLE_THROW("There is no next data!"); - } - - Item batch; - channel_->Receive(&batch); - *out = batch.payloads_; - if (batch.ctx_) { - batch.ctx_->Wait(); + out->clear(); + if (HasNext()) { + size_t cached_tensor_id; + channel_->Receive(&cached_tensor_id); + if (platform::is_gpu_place(place_)) { + *out = gpu_tensor_cache_[cached_tensor_id]; + ctxs_[cached_tensor_id]->Wait(); + } else { + // CPU place + *out = cpu_tensor_cache_[cached_tensor_id]; + } } } @@ -171,40 +159,41 @@ void DoubleBufferReader::ReInit() { StartPrefetcher(); } +bool DoubleBufferReader::HasNext() const { + while (!channel_->IsClosed() && !channel_->CanReceive()) { + } + return channel_->CanReceive(); +} + void DoubleBufferReader::PrefetchThreadFunc() { VLOG(5) << "A new prefetch thread starts."; - std::vector> cpu_tensor_cache(kCacheSize); - std::vector> gpu_tensor_cache(kCacheSize); size_t cached_tensor_id = 0; - - while (reader_->HasNext()) { - Item batch; - auto& cpu_batch = cpu_tensor_cache[cached_tensor_id]; + while (true) { + auto& cpu_batch = cpu_tensor_cache_[cached_tensor_id]; reader_->ReadNext(&cpu_batch); + if (cpu_batch.empty()) { + // The underlying reader have no next data. + break; + } if (platform::is_gpu_place(place_)) { - auto& gpu_batch = gpu_tensor_cache[cached_tensor_id]; + auto& gpu_batch = gpu_tensor_cache_[cached_tensor_id]; auto* gpu_ctx = ctxs_[cached_tensor_id].get(); gpu_batch.resize(cpu_batch.size()); for (size_t i = 0; i < cpu_batch.size(); ++i) { framework::TensorCopy(cpu_batch[i], place_, *gpu_ctx, &gpu_batch[i]); gpu_batch[i].set_lod(cpu_batch[i].lod()); } - batch.payloads_ = gpu_batch; - batch.ctx_ = gpu_ctx; - } else { - // CPUPlace - batch.payloads_ = cpu_batch; } - ++cached_tensor_id; - cached_tensor_id %= kCacheSize; - try { - channel_->Send(&batch); + size_t tmp = cached_tensor_id; + channel_->Send(&tmp); } catch (paddle::platform::EnforceNotMet e) { VLOG(5) << "WARNING: The double buffer channel has been closed. 
The " "prefetch thread will terminate."; break; } + ++cached_tensor_id; + cached_tensor_id %= kCacheSize; } channel_->Close(); VLOG(5) << "Prefetch thread terminates."; diff --git a/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc b/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc index b72ccc77a3e1ec30fd817471d3ffd667974ae684..0573345ba502b6a9af35710840d5acf7634f332f 100644 --- a/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc +++ b/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc @@ -25,22 +25,12 @@ class MultiPassReader : public framework::DecoratedReader { : DecoratedReader(reader), pass_num_(pass_num), pass_count_(0) {} void ReadNext(std::vector* out) override { - if (!HasNext()) { - PADDLE_THROW("There is no next data!"); - } reader_->ReadNext(out); - } - - bool HasNext() const override { - if (reader_->HasNext()) { - return true; - } else { + if (out->empty()) { ++pass_count_; - if (pass_count_ >= pass_num_) { - return false; - } else { + if (pass_count_ < pass_num_) { reader_->ReInit(); - return true; + reader_->ReadNext(out); } } } diff --git a/paddle/fluid/operators/reader/create_random_data_generator_op.cc b/paddle/fluid/operators/reader/create_random_data_generator_op.cc index 95d8674c08b63e872926ff8708d0c734da33684c..d1cb8e47da70cab784858caea7e791151fc104dd 100644 --- a/paddle/fluid/operators/reader/create_random_data_generator_op.cc +++ b/paddle/fluid/operators/reader/create_random_data_generator_op.cc @@ -52,8 +52,6 @@ class RandomDataGenerator : public framework::ReaderBase { void ReInit() override { return; } - bool HasNext() const override { return true; } - private: float min_; float max_; @@ -74,7 +72,7 @@ class CreateRandomDataGeneratorOp : public framework::OperatorBase { const auto& ranks = Attr>("ranks"); PADDLE_ENFORCE(!shape_concat.empty() && !ranks.empty()); PADDLE_ENFORCE_EQ(std::accumulate(ranks.begin(), ranks.end(), 0), - int(shape_concat.size()), + static_cast(shape_concat.size()), "The accumulate of all ranks should be equal to the " "shape concat's length."); std::vector shapes = RestoreShapes(shape_concat, ranks); diff --git a/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc b/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc index adaa0b9e5f1ffcfbf3e9cd8fd060153575f270a6..2ae29725561769ebe6428002c9983246b8eec724 100644 --- a/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc +++ b/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include -#include #include "paddle/fluid/operators/reader/reader_op_registry.h" #include "paddle/fluid/recordio/scanner.h" @@ -35,17 +33,15 @@ class RecordIOFileReader : public framework::FileReader { LOG(INFO) << "Creating file reader" << filename; } - bool HasNext() const override { return scanner_.HasNext(); } - void ReInit() override { scanner_.Reset(); } protected: void ReadNextImpl(std::vector* out) override { if (ThreadSafe) { std::lock_guard guard(*mutex_); - *out = framework::ReadFromRecordIO(scanner_, dev_ctx_); + *out = framework::ReadFromRecordIO(&scanner_, dev_ctx_); } else { - *out = framework::ReadFromRecordIO(scanner_, dev_ctx_); + *out = framework::ReadFromRecordIO(&scanner_, dev_ctx_); } } @@ -66,7 +62,7 @@ class CreateRecordIOReaderOp : public framework::OperatorBase { const auto& ranks = Attr>("ranks"); PADDLE_ENFORCE(!shape_concat.empty() && !ranks.empty()); PADDLE_ENFORCE_EQ(std::accumulate(ranks.begin(), ranks.end(), 0), - int(shape_concat.size()), + static_cast(shape_concat.size()), "The accumulate of all ranks should be equal to the " "shape concat's length."); std::string filename = Attr("filename"); diff --git a/paddle/fluid/operators/reader/create_shuffle_reader_op.cc b/paddle/fluid/operators/reader/create_shuffle_reader_op.cc index b164ce232d6bea7b4ff0c67ee0a7dd83b14f61a2..13825d65913be95f4f444bd9d5271a036ec8b1e2 100644 --- a/paddle/fluid/operators/reader/create_shuffle_reader_op.cc +++ b/paddle/fluid/operators/reader/create_shuffle_reader_op.cc @@ -30,35 +30,33 @@ class ShuffleReader : public framework::DecoratedReader { std::random_device device; seed_ = device(); } - ReadIntoBuffers(); + ReloadBuffer(); } void ReadNext(std::vector* out) override { - if (!HasNext()) { - PADDLE_THROW("There is no next data!"); - } + out->clear(); if (iteration_pos_ >= buffer_.size()) { VLOG(10) << "Resetting shuffle buffer"; - ReadIntoBuffers(); + ReloadBuffer(); + if (buffer_.empty()) { + return; + } } *out = buffer_[iteration_pos_++]; } - bool HasNext() const override { - return iteration_pos_ < buffer_.size() || reader_->HasNext(); - } - private: - void ReadIntoBuffers() { + void ReloadBuffer() { buffer_.clear(); buffer_.reserve(buffer_size_); iteration_pos_ = 0; for (size_t i = 0; i < buffer_size_; ++i) { - if (!reader_->HasNext()) { + std::vector ins; + reader_->ReadNext(&ins); + if (ins.empty()) { break; } - buffer_.emplace_back(); - reader_->ReadNext(&buffer_.back()); + buffer_.emplace_back(ins); } std::mt19937 g(seed_); std::shuffle(buffer_.begin(), buffer_.end(), g); diff --git a/paddle/fluid/operators/reader/create_threaded_reader_op.cc b/paddle/fluid/operators/reader/create_threaded_reader_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..cbf709d5e734c0f2adf3735dc28043c1340349da --- /dev/null +++ b/paddle/fluid/operators/reader/create_threaded_reader_op.cc @@ -0,0 +1,94 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
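ShuffleReader above now refills on demand: ReloadBuffer() pulls up to buffer_size_ batches, stops early when the underlying reader runs dry, shuffles with a seeded std::mt19937, and ReadNext() serves positionally. The same logic on plain ints:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <random>
#include <vector>

int main() {
  std::vector<int> source = {1, 2, 3, 4, 5, 6, 7};
  std::size_t pos = 0;
  const std::size_t buffer_size = 3;
  const unsigned seed = 42;
  std::vector<int> buffer;
  std::size_t iteration_pos = 0;

  auto reload = [&] {
    buffer.clear();
    buffer.reserve(buffer_size);
    iteration_pos = 0;
    while (buffer.size() < buffer_size && pos < source.size()) {
      buffer.push_back(source[pos++]);  // underlying ReadNext()
    }
    std::mt19937 g(seed);
    std::shuffle(buffer.begin(), buffer.end(), g);
  };

  reload();
  while (!buffer.empty()) {
    if (iteration_pos >= buffer.size()) {
      reload();  // buffer drained: refill and reshuffle
      continue;
    }
    std::cout << buffer[iteration_pos++] << " ";
  }
  std::cout << "\n";  // items appear shuffled within each window of 3
}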
+ +#include "paddle/fluid/operators/detail/safe_ref.h" +#include "paddle/fluid/operators/reader/reader_op_registry.h" + +namespace paddle { +namespace operators { +namespace reader { + +class ThreadedReader : public framework::DecoratedReader { + public: + ThreadedReader(ReaderBase* reader, bool safe_mode) + : DecoratedReader(reader), safe_mode_(safe_mode) {} + + void ReadNext(std::vector* out) override { + std::lock_guard lock(mutex_); + reader_->ReadNext(out); + } + + void ReInit() override { + if (safe_mode_) { + PADDLE_THROW( + "ThreadedReader::ReInit() is disabled when 'safe_mode' is true."); + } + VLOG(5) << "ThreadedReader::ReInit() is invoked! It might be buggy in " + "multi-thread environment."; + reader_->ReInit(); + } + + private: + bool safe_mode_; + std::mutex mutex_; +}; + +class CreateThreadedReaderOp : public framework::OperatorBase { + public: + using framework::OperatorBase::OperatorBase; + + private: + void RunImpl(const framework::Scope& scope, + const platform::Place& dev_place) const override { + auto* out = detail::Ref(scope.FindVar(Output("Out"))) + .GetMutable(); + if (out->Get() != nullptr) { + return; + } + const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) + ->Get(); + bool safe_mode = Attr("safe_mode"); + out->Reset(new ThreadedReader(underlying_reader.Get(), safe_mode)); + } +}; + +class CreateThreadedReaderOpMaker : public DecoratedReaderMakerBase { + public: + CreateThreadedReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) + : DecoratedReaderMakerBase(op_proto, op_checker) { + AddAttr("safe_mode", + "When 'safe_mode' is true, 'ReInit()' is disabled to avoid " + "unexpected bugs in multi-thread environment.") + .SetDefault(true); + AddComment(R"DOC( + CreateThreadedReader Operator + + This operator creates a threaded reader. A threaded reader's + 'ReadNext()' can be invoked by several threads at the same + time. + When the attribute 'safe_mode' is true, the threaded reader's + 'ReInit()' is disabled to avoid unexpected bugs in multi-thread + environment. + )DOC"); + } +}; + +} // namespace reader +} // namespace operators +} // namespace paddle + +namespace reader = paddle::operators::reader; +REGISTER_DECORATED_READER_OPERATOR(create_threaded_reader, + reader::CreateThreadedReaderOp, + reader::CreateThreadedReaderOpMaker); diff --git a/paddle/fluid/operators/reader/open_files_op.cc b/paddle/fluid/operators/reader/open_files_op.cc index eacedeea8835d27b712b287824b9d30b03ebebbf..779dc8a6a0deb7792e0540071e3a2588102fa708 100644 --- a/paddle/fluid/operators/reader/open_files_op.cc +++ b/paddle/fluid/operators/reader/open_files_op.cc @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+#include // NOLINT + #include "paddle/fluid/framework/channel.h" #include "paddle/fluid/operators/reader/reader_op_registry.h" @@ -19,38 +21,23 @@ namespace paddle { namespace operators { namespace reader { -class MultipleReader : public framework::ReaderBase { +class MultiFileReader : public framework::ReaderBase { public: - class ThreadBufferMap { - public: - std::vector& operator[]( - const std::thread::id& thread_id) { - std::lock_guard lock(mutex_); - return buffer_[thread_id]; - } - - void Clear() { buffer_.clear(); } - - private: - std::mutex mutex_; - std::unordered_map> - buffer_; - }; - - MultipleReader(const std::vector& file_names, - const std::vector& dims, size_t thread_num) - : file_names_(file_names), dims_(dims) { + MultiFileReader(const std::vector& file_names, + const std::vector& dims, size_t thread_num, + size_t buffer_size) + : file_names_(file_names), dims_(dims), buffer_size_(buffer_size) { prefetchers_.resize(thread_num); StartNewScheduler(); } void ReadNext(std::vector* out) override; - bool HasNext() const override; void ReInit() override; - ~MultipleReader() { EndScheduler(); } + ~MultiFileReader() { EndScheduler(); } private: + bool HasNext(); void StartNewScheduler(); void EndScheduler(); void ScheduleThreadFunc(); @@ -60,39 +47,36 @@ class MultipleReader : public framework::ReaderBase { std::vector dims_; std::thread scheduler_; std::vector prefetchers_; + size_t buffer_size_; framework::Channel* waiting_file_idx_; framework::Channel* available_thread_idx_; framework::Channel>* buffer_; - mutable ThreadBufferMap thread_buffer_map_; }; -void MultipleReader::ReadNext(std::vector* out) { - if (!HasNext()) { - PADDLE_THROW("There is no next data!"); +void MultiFileReader::ReadNext(std::vector* out) { + out->clear(); + if (HasNext()) { + buffer_->Receive(out); } - auto& thread_local_buffer = thread_buffer_map_[std::this_thread::get_id()]; - *out = thread_local_buffer; - thread_local_buffer.clear(); -} - -bool MultipleReader::HasNext() const { - auto& thread_local_buffer = thread_buffer_map_[std::this_thread::get_id()]; - return thread_local_buffer.empty() ? 
buffer_->Receive(&thread_local_buffer) - : true; } -void MultipleReader::ReInit() { +void MultiFileReader::ReInit() { EndScheduler(); - thread_buffer_map_.Clear(); StartNewScheduler(); } -void MultipleReader::StartNewScheduler() { +bool MultiFileReader::HasNext() { + while (!buffer_->IsClosed() && !buffer_->CanReceive()) { + } + return buffer_->CanReceive(); +} + +void MultiFileReader::StartNewScheduler() { size_t thread_num = prefetchers_.size(); waiting_file_idx_ = framework::MakeChannel(file_names_.size()); available_thread_idx_ = framework::MakeChannel(thread_num); buffer_ = - framework::MakeChannel>(thread_num); + framework::MakeChannel>(buffer_size_); for (size_t i = 0; i < file_names_.size(); ++i) { waiting_file_idx_->Send(&i); @@ -105,7 +89,7 @@ void MultipleReader::StartNewScheduler() { scheduler_ = std::thread([this] { ScheduleThreadFunc(); }); } -void MultipleReader::EndScheduler() { +void MultiFileReader::EndScheduler() { available_thread_idx_->Close(); buffer_->Close(); waiting_file_idx_->Close(); @@ -117,8 +101,8 @@ void MultipleReader::EndScheduler() { delete waiting_file_idx_; } -void MultipleReader::ScheduleThreadFunc() { - VLOG(5) << "MultipleReader schedule thread starts."; +void MultiFileReader::ScheduleThreadFunc() { + VLOG(5) << "MultiFileReader schedule thread starts."; size_t completed_thread_num = 0; size_t thread_idx; while (available_thread_idx_->Receive(&thread_idx)) { @@ -150,17 +134,20 @@ void MultipleReader::ScheduleThreadFunc() { p.join(); } } - VLOG(5) << "MultipleReader schedule thread terminates."; + VLOG(5) << "MultiFileReader schedule thread terminates."; } -void MultipleReader::PrefetchThreadFunc(std::string file_name, - size_t thread_idx) { +void MultiFileReader::PrefetchThreadFunc(std::string file_name, + size_t thread_idx) { VLOG(5) << "The prefetch thread of file '" << file_name << "' starts."; std::unique_ptr reader = CreateReaderByFileName(file_name, dims_); - while (reader->HasNext()) { + while (true) { std::vector ins; reader->ReadNext(&ins); + if (ins.empty()) { + break; + } try { buffer_->Send(&ins); } catch (paddle::platform::EnforceNotMet e) { @@ -197,11 +184,13 @@ class OpenFilesOp : public framework::OperatorBase { const auto& file_names = Attr>("file_names"); PADDLE_ENFORCE(!file_names.empty(), "No file to be read!"); const size_t thread_num = Attr("thread_num"); + const size_t buffer_size = Attr("buffer_size"); auto* out = scope.FindVar(Output("Out")) ->template GetMutable(); - out->Reset(new MultipleReader( - file_names, RestoreShapes(shape_concat, ranks), thread_num)); + out->Reset(new MultiFileReader(file_names, + RestoreShapes(shape_concat, ranks), + thread_num, buffer_size)); } }; @@ -212,11 +201,12 @@ class OpenFilesOpMaker : public FileReaderMakerBase { AddAttr>("file_names", "Files to be read."); AddAttr("thread_num", "The maximal concurrent prefetch thread number.") .GreaterThan(0); + AddAttr("buffer_size", "The size of prefetch buffer.").GreaterThan(0); AddComment(R"DOC( OpenFiles Operator - An OpenFilesOp creates a MultipleReader, which is able to + An OpenFilesOp creates a MultiFileReader, which is able to read data multi-threaded from multiple files. )DOC"); } diff --git a/paddle/fluid/operators/recv_op.cc b/paddle/fluid/operators/recv_op.cc index 083c1fae5e2016ada6309aba78bdfa6ad7fef89c..a4dcf704a63ae3bad6567ddb042ea23513bccff7 100644 --- a/paddle/fluid/operators/recv_op.cc +++ b/paddle/fluid/operators/recv_op.cc @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
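MultiFileReader above fans file indices out to prefetch threads through channels and funnels every record into one shared buffer whose capacity is now the user-set buffer_size attribute. A queue-based model of that flow with fake in-memory files (the real reader consumes concurrently while a scheduler thread recycles idle prefetchers; this sketch joins the workers first to stay simple):

#include <atomic>
#include <cstddef>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

int main() {
  const std::vector<std::vector<int>> files = {{1, 2}, {3, 4}, {5}};
  std::atomic<std::size_t> next_file{0};  // stands in for waiting_file_idx_
  std::mutex mu;
  std::queue<int> buffer;                 // stands in for buffer_

  // Each prefetch thread claims the next unread file and pushes its
  // records into the shared buffer, as PrefetchThreadFunc() does.
  auto prefetch = [&] {
    std::size_t idx;
    while ((idx = next_file.fetch_add(1)) < files.size()) {
      for (int record : files[idx]) {
        std::lock_guard<std::mutex> lock(mu);
        buffer.push(record);
      }
    }
  };
  std::thread w1(prefetch), w2(prefetch);
  w1.join();
  w2.join();

  // ReadNext() side: drain the buffer.
  while (true) {
    std::lock_guard<std::mutex> lock(mu);
    if (buffer.empty()) break;
    std::cout << buffer.front() << " ";
    buffer.pop();
  }
  std::cout << "\n";
}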
See the License for the specific language governing permissions and limitations under the License. */ +#include // NOLINT #include #include "paddle/fluid/framework/data_type.h" @@ -19,7 +20,6 @@ limitations under the License. */ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" -#include #include "paddle/fluid/operators/detail/grpc_client.h" namespace paddle { diff --git a/paddle/fluid/operators/reshape_op.h b/paddle/fluid/operators/reshape_op.h index 807e5ad951b893a4c027a96d743f0606b70cf160..8320c257c9ab15efec29eabe99eca5b6f74c9e31 100644 --- a/paddle/fluid/operators/reshape_op.h +++ b/paddle/fluid/operators/reshape_op.h @@ -60,7 +60,7 @@ class ReshapeOp : public framework::OperatorWithKernel { static framework::DDim ValidateShape(const std::vector shape, const framework::DDim &in_dims) { const int64_t in_size = framework::product(in_dims); - // only one dimension canbe set to -1, whose size will be automatically + // only one dimension can be set to -1, whose size will be automatically // infered. const int64_t unk_dim_val = -1; const int64_t copy_dim_val = 0; @@ -119,13 +119,15 @@ class ReshapeKernel : public framework::OpKernel { auto *shape_tensor = ctx.Input("Shape"); framework::DDim out_dims = out->dims(); + if (shape_tensor) { auto *shape_data = shape_tensor->data(); + framework::Tensor cpu_shape_tensor; if (platform::is_gpu_place(ctx.GetPlace())) { - framework::Tensor cpu_shape_tensor; TensorCopy(*shape_tensor, platform::CPUPlace(), ctx.device_context(), &cpu_shape_tensor); shape_data = cpu_shape_tensor.data(); + ctx.device_context().Wait(); } auto shape = std::vector(shape_data, shape_data + shape_tensor->numel()); @@ -145,6 +147,7 @@ class ReshapeKernel : public framework::OpKernel { if (!inplace) { out->mutable_data(ctx.GetPlace()); framework::TensorCopy(*in, ctx.GetPlace(), ctx.device_context(), out); + ctx.device_context().Wait(); // TensorCopy will resize to in_dims. out->Resize(out_dims); } else { @@ -167,6 +170,7 @@ class ReshapeGradKernel : public framework::OpKernel { auto in_dims = d_x->dims(); if (!inplace) { framework::TensorCopy(*d_out, ctx.GetPlace(), ctx.device_context(), d_x); + ctx.device_context().Wait(); d_x->Resize(in_dims); } else { d_x->ShareDataWith(*d_out); diff --git a/paddle/fluid/operators/roi_pool_op.h b/paddle/fluid/operators/roi_pool_op.h index f38c5a3c0c9952b37f7db468ea00470a00b5ff6f..54e07490319cf1da749bd33449a7b51efd6c3d65 100644 --- a/paddle/fluid/operators/roi_pool_op.h +++ b/paddle/fluid/operators/roi_pool_op.h @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/scale_op.cc b/paddle/fluid/operators/scale_op.cc index b16d06df8d0f7f57a5ec2f2be9a2cbb12a8ba55d..1e938638c9182972a2ae2436166ff0aa49efd4be 100644 --- a/paddle/fluid/operators/scale_op.cc +++ b/paddle/fluid/operators/scale_op.cc @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
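A note on the ctx.device_context().Wait() calls added in reshape_op.h above: TensorCopy enqueues an asynchronous copy on the device stream, so the host must synchronize before reading the copied bytes, or before Resize()-ing a tensor whose copy is still in flight. The hazard in raw CUDA terms, as an illustrative sketch (stream and dev_shape are assumed to exist):

// Hedged sketch, not patch code.
int64_t host_shape[4];
cudaMemcpyAsync(host_shape, dev_shape, sizeof(host_shape),
                cudaMemcpyDeviceToHost, stream);
// Reading host_shape here would race with the in-flight copy.
cudaStreamSynchronize(stream);  // roughly what device_context().Wait() does
// Now host_shape is safe to read.
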
*/ #include "paddle/fluid/operators/scale_op.h" -#include "paddle/fluid/operators/net_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/scatter_op.cu b/paddle/fluid/operators/scatter_op.cu index ef7d700659d8d713715a10910baf739954ba0786..a70b9091727935ddcbb83dd5775729969f7d64e5 100644 --- a/paddle/fluid/operators/scatter_op.cu +++ b/paddle/fluid/operators/scatter_op.cu @@ -12,9 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "gather.cu.h" +#include "paddle/fluid/operators/gather.cu.h" #include "paddle/fluid/operators/gather_op.h" -#include "scatter.cu.h" +#include "paddle/fluid/operators/scatter.cu.h" +#include "paddle/fluid/operators/scatter_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/scatter_op.h b/paddle/fluid/operators/scatter_op.h index 2151d8a9240fc88966533f4a07d5cf56b6c1c3bc..d29947b55e751a3e7993f765198364f4debe2472 100644 --- a/paddle/fluid/operators/scatter_op.h +++ b/paddle/fluid/operators/scatter_op.h @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "gather.h" #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" -#include "scatter.h" +#include "paddle/fluid/operators/gather.h" +#include "paddle/fluid/operators/scatter.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/scatter_test.cc b/paddle/fluid/operators/scatter_test.cc index b67af3c3710eafc57b660a48e4c340d5eefe7e5b..750245153a7df6c4a7ce088038005dcab1685b5f 100644 --- a/paddle/fluid/operators/scatter_test.cc +++ b/paddle/fluid/operators/scatter_test.cc @@ -13,44 +13,48 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/scatter.h" -#include "paddle/fluid/framework/ddim.h" -#include "paddle/fluid/framework/tensor.h" -#include "paddle/fluid/platform/place.h" - #include #include #include +#include "paddle/fluid/framework/ddim.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/platform/place.h" TEST(scatter, ScatterUpdate) { - using namespace paddle::framework; - using namespace paddle::platform; - using namespace paddle::operators; + // using namespace paddle::framework; + // using namespace paddle::platform; + // using namespace paddle::operators; - Tensor* src = new Tensor(); - Tensor* index = new Tensor(); - Tensor* output = new Tensor(); + paddle::framework::Tensor* src = new paddle::framework::Tensor(); + paddle::framework::Tensor* index = new paddle::framework::Tensor(); + paddle::framework::Tensor* output = new paddle::framework::Tensor(); float* p_src = nullptr; int* p_index = nullptr; - p_src = src->mutable_data(make_ddim({1, 4}), CPUPlace()); - p_index = index->mutable_data(make_ddim({1}), CPUPlace()); + p_src = src->mutable_data(paddle::framework::make_ddim({1, 4}), + paddle::platform::CPUPlace()); + p_index = index->mutable_data(paddle::framework::make_ddim({1}), + paddle::platform::CPUPlace()); - for (size_t i = 0; i < 4; ++i) p_src[i] = float(i); + for (size_t i = 0; i < 4; ++i) p_src[i] = static_cast(i); p_index[0] = 1; - float* p_output = output->mutable_data(make_ddim({4, 4}), CPUPlace()); + float* p_output = output->mutable_data( + paddle::framework::make_ddim({4, 4}), paddle::platform::CPUPlace()); auto* cpu_place = new paddle::platform::CPUPlace(); paddle::platform::CPUDeviceContext ctx(*cpu_place); - ScatterAssign(ctx, *src, *index, output); + paddle::operators::ScatterAssign(ctx, *src, *index, output); - for (size_t i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], float(0)); - for (size_t i = 0; i < 4; ++i) EXPECT_EQ(output->data()[i], float(0)); - for (size_t i = 4; i < 8; ++i) EXPECT_EQ(p_output[i], float(i - 4)); + for (size_t i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], 0.0f); + for (size_t i = 0; i < 4; ++i) EXPECT_EQ(output->data()[i], 0.0f); + for (size_t i = 4; i < 8; ++i) { + EXPECT_EQ(p_output[i], static_cast(i - 4)); + } for (size_t i = 4; i < 8; ++i) - EXPECT_EQ(output->data()[i], float(i - 4)); - for (size_t i = 8; i < 16; ++i) EXPECT_EQ(p_output[i], float(0)); - for (size_t i = 8; i < 16; ++i) EXPECT_EQ(output->data()[i], float(0)); + EXPECT_EQ(output->data()[i], static_cast(i - 4)); + for (size_t i = 8; i < 16; ++i) EXPECT_EQ(p_output[i], 0.0f); + for (size_t i = 8; i < 16; ++i) EXPECT_EQ(output->data()[i], 0.0f); delete src; delete index; diff --git a/paddle/fluid/operators/send_barrier_op.cc b/paddle/fluid/operators/send_barrier_op.cc index 8d02a6f29177536562e38372eb0336424aa0a47c..12b844daaa33162b86b7daffa2e4c49785701662 100644 --- a/paddle/fluid/operators/send_barrier_op.cc +++ b/paddle/fluid/operators/send_barrier_op.cc @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include // NOLINT #include #include "paddle/fluid/framework/data_type.h" @@ -19,7 +20,6 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" -#include #include "paddle/fluid/operators/detail/grpc_client.h" namespace paddle { diff --git a/paddle/fluid/operators/send_op.cc b/paddle/fluid/operators/send_op.cc index d47f66de2161dce7ed162db4c2e23859e19596cb..82ff087d0a7a4b482aef842e618f593b17dca171 100644 --- a/paddle/fluid/operators/send_op.cc +++ b/paddle/fluid/operators/send_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include +#include // NOLINT #include #include "paddle/fluid/framework/data_type.h" diff --git a/paddle/fluid/operators/send_recv_op_test.cc b/paddle/fluid/operators/send_recv_op_test.cc index 542bc3fde2a3616807eea560be85fb42026d5825..3bf5d57809019d3ae469471c2ee2e7aac70b9faf 100644 --- a/paddle/fluid/operators/send_recv_op_test.cc +++ b/paddle/fluid/operators/send_recv_op_test.cc @@ -14,7 +14,7 @@ limitations under the License. */ #include #include -#include +#include // NOLINT #include "gtest/gtest.h" #include "paddle/fluid/framework/op_registry.h" @@ -37,11 +37,11 @@ namespace m = paddle::operators::math; std::unique_ptr listen_and_serv_op; int selected_port; -void InitTensorsInScope(f::Scope &scope, p::CPUPlace &place) { +void InitTensorsInScope(const p::CPUPlace &place, f::Scope *scope) { p::CPUDeviceContext ctx(place); for (int i = 0; i < 2; ++i) { auto var_name = paddle::string::Sprintf("x%d", i); - auto var = scope.Var(var_name); + auto var = scope->Var(var_name); auto tensor = var->GetMutable(); tensor->Resize({10, 10}); float *expect = tensor->mutable_data(place); @@ -50,20 +50,20 @@ void InitTensorsInScope(f::Scope &scope, p::CPUPlace &place) { } } - auto out_var = scope.Var("Out"); + auto out_var = scope->Var("Out"); auto out_tensor = out_var->GetMutable(); out_tensor->Resize({10, 10}); out_tensor->mutable_data(place); // allocate } -void InitSelectedRowsInScope(f::Scope &scope, p::CPUPlace &place) { +void InitSelectedRowsInScope(const p::CPUPlace &place, f::Scope *scope) { p::CPUDeviceContext ctx(place); int64_t height = 10; int64_t row_numel = 10; m::SetConstant set_one; // init x0 std::vector rows0{0, 4, 7}; - auto x0_var = scope.Var("x0"); + auto x0_var = scope->Var("x0"); auto x0 = x0_var->GetMutable(); x0->set_rows(rows0); x0->set_height(height); @@ -74,7 +74,7 @@ void InitSelectedRowsInScope(f::Scope &scope, p::CPUPlace &place) { // init x1 std::vector rows1{2, 9}; - auto x1_var = scope.Var("x1"); + auto x1_var = scope->Var("x1"); auto x1 = x1_var->GetMutable(); x1->set_rows(rows1); x1->set_height(height); @@ -83,7 +83,7 @@ void InitSelectedRowsInScope(f::Scope &scope, p::CPUPlace &place) { f::make_ddim({static_cast(rows1.size()), row_numel}), place); set_one(ctx, x1_value, 1.0); - auto out_var = scope.Var("Out"); + auto out_var = scope->Var("Out"); auto out = out_var->GetMutable(); auto out_value = out->mutable_value(); out->set_height(height); @@ -117,15 +117,16 @@ void StartServerNet(bool is_sparse) { f::Scope scope; p::CPUPlace place; if (is_sparse) { - InitSelectedRowsInScope(scope, place); + InitSelectedRowsInScope(place, &scope); } else { - InitTensorsInScope(scope, place); + InitTensorsInScope(place, &scope); } // sub program run in listen_and_serv_op, for simple test we use sum f::ProgramDesc program; const auto &root_block = program.Block(0); auto *optimize_block = program.AppendBlock(root_block); + auto *prefetch_block = 
program.AppendBlock(root_block); // X for server side tensors, RX for received tensers, must be of same shape. AddOp("sum", {{"X", {"x0", "x1"}}}, {{"Out", {"Out"}}}, {}, optimize_block); @@ -135,6 +136,7 @@ void StartServerNet(bool is_sparse) { attrs.insert({"ParamList", std::vector({"Out"})}); attrs.insert({"GradList", std::vector({"x1"})}); attrs.insert({"OptimizeBlock", optimize_block}); + attrs.insert({"PrefetchBlock", prefetch_block}); listen_and_serv_op = f::OpRegistry::CreateOp("listen_and_serv", {{"X", {"x1"}}}, {}, attrs); LOG(INFO) << "selected port before run " << selected_port; @@ -148,7 +150,7 @@ TEST(SendRecvOp, CPUDense) { // local net f::Scope scope; p::CPUPlace place; - InitTensorsInScope(scope, place); + InitTensorsInScope(place, &scope); // create rpc client var scope.Var("RPC_CLIENT_VAR"); @@ -191,7 +193,7 @@ TEST(SendRecvOp, CPUSparse) { f::Scope scope; p::CPUPlace place; p::CPUDeviceContext ctx(place); - InitSelectedRowsInScope(scope, place); + InitSelectedRowsInScope(place, &scope); scope.Var("RPC_CLIENT_VAR"); f::AttributeMap attrs; selected_port = static_cast( diff --git a/paddle/fluid/operators/send_recv_util.h b/paddle/fluid/operators/send_recv_util.h index 196f56f6340a75b599b8dd15957dfe6835f9bf59..113513eb6b327773ab4a1c062fb8a3f06fddfbca 100644 --- a/paddle/fluid/operators/send_recv_util.h +++ b/paddle/fluid/operators/send_recv_util.h @@ -12,6 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#pragma once +#include + namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/send_vars_op.cc b/paddle/fluid/operators/send_vars_op.cc index 2cbd9e2394800dc3b9c5be1163d16bbec435c533..56b3713d6af28d0787e114a672a503e86cbd85fd 100644 --- a/paddle/fluid/operators/send_vars_op.cc +++ b/paddle/fluid/operators/send_vars_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include +#include // NOLINT #include #include "paddle/fluid/framework/data_type.h" @@ -36,7 +36,7 @@ class SendVarsOp : public framework::OperatorBase { auto ins = Inputs("X"); std::vector epmap = Attr>("epmap"); - int sync_send = Attr("sync_sent"); + int sync_send = Attr("sync_send"); platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto& ctx = *pool.Get(place); diff --git a/paddle/fluid/operators/sequence_concat_op.h b/paddle/fluid/operators/sequence_concat_op.h index 9f04c4199130de3cead6f23ef111453ca752c0e3..71c9f45287c29628a2f2c8c649e9e5270317ef6a 100644 --- a/paddle/fluid/operators/sequence_concat_op.h +++ b/paddle/fluid/operators/sequence_concat_op.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/strided_memcpy.h" diff --git a/paddle/fluid/operators/sequence_conv_op.h b/paddle/fluid/operators/sequence_conv_op.h index ee48339c52e348e7b3060bbdd462177375aee9f5..b59504bb9893b720247841bdad5aa577992b7fb6 100644 --- a/paddle/fluid/operators/sequence_conv_op.h +++ b/paddle/fluid/operators/sequence_conv_op.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
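A note on the test refactor above: InitTensorsInScope and InitSelectedRowsInScope now take read-only inputs by const reference and the mutated Scope by pointer, following the Google C++ style rule, so the mutation is visible at every call site. In miniature (illustrative):

// old: two non-const references; nothing at the call site flags mutation
// void InitTensorsInScope(f::Scope &scope, p::CPUPlace &place);
// new: callers write InitTensorsInScope(place, &scope); the '&' marks
// which argument gets modified
void InitTensorsInScope(const p::CPUPlace &place, f::Scope *scope);
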
*/ #pragma once +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/context_project.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/sequence_erase_op.cc b/paddle/fluid/operators/sequence_erase_op.cc index 32b9d7f7c1528a365cd21122e4df0e3c1407a49e..73c0e89512972cda002bd902ee0c78b4b77d8502 100644 --- a/paddle/fluid/operators/sequence_erase_op.cc +++ b/paddle/fluid/operators/sequence_erase_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/sequence_erase_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_erase_op.h b/paddle/fluid/operators/sequence_erase_op.h index b490c34f543875f73e0862c08c25bcb57611e2f4..265390528a15aa060900276f98128d754fc907fe 100644 --- a/paddle/fluid/operators/sequence_erase_op.h +++ b/paddle/fluid/operators/sequence_erase_op.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/op_registry.h" namespace paddle { diff --git a/paddle/fluid/operators/sequence_expand_op.cc b/paddle/fluid/operators/sequence_expand_op.cc index 786fe63e7580ce16b946d5049a490eed2c3c6ced..ae52849162ae4d78cc69ddbb98f58059f55683cb 100644 --- a/paddle/fluid/operators/sequence_expand_op.cc +++ b/paddle/fluid/operators/sequence_expand_op.cc @@ -84,12 +84,11 @@ class SequenceExpandOp : public framework::OperatorWithKernel { } } out_dims[0] = out_first_dim; - ctx->SetOutputDim("Out", out_dims); } else { out_dims[0] = -1; - ctx->SetOutputDim("Out", out_dims); - ctx->ShareLoD("X", /*->*/ "Out"); } + ctx->SetOutputDim("Out", out_dims); + ctx->ShareLoD("X", /*->*/ "Out"); } }; diff --git a/paddle/fluid/operators/sequence_expand_op.cu b/paddle/fluid/operators/sequence_expand_op.cu index bb51bb2902eea797de3449fcb6c8b52b4f0e7fbf..c00765e5d59af068e5682b39ebace5f3d7a62250 100644 --- a/paddle/fluid/operators/sequence_expand_op.cu +++ b/paddle/fluid/operators/sequence_expand_op.cu @@ -12,8 +12,135 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
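Before the CUDA implementation that follows, it helps to pin down what sequence_expand computes. A naive CPU reference under the assumed semantics, with scalar features for brevity (sequence i of x is repeated ref_lod[i+1] - ref_lod[i] times; illustrative only):

#include <cstddef>
#include <vector>

std::vector<float> NaiveSeqExpand(const std::vector<float>& x,
                                  const std::vector<size_t>& x_lod,
                                  const std::vector<size_t>& ref_lod) {
  std::vector<float> out;
  for (size_t i = 1; i < ref_lod.size(); ++i) {
    size_t repeats = ref_lod[i] - ref_lod[i - 1];
    for (size_t r = 0; r < repeats; ++r) {
      // append sequence i of x once per repeat
      out.insert(out.end(), x.begin() + x_lod[i - 1], x.begin() + x_lod[i]);
    }
  }
  return out;
}
// x = [a, b, c], x_lod = [0, 1, 2, 3], ref_lod = [0, 2, 2, 5]
// -> repeats = [2, 0, 3], out = [a, a, c, c, c]
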
*/ -#define EIGEN_USE_GPU +#include #include "paddle/fluid/operators/sequence_expand_op.h" +#include "paddle/fluid/platform/cuda_helper.h" + +namespace paddle { +namespace operators { + +using LoDTensor = framework::LoDTensor; + +template +__global__ void sequence_expand_kernel(const T* x_data, const size_t* x_lod, + const size_t* ref_lod, + const size_t* offset, + const size_t lod_size, + /* default=1, + the instance length*/ + const int x_item_length, T* out_data) { + int bid = blockIdx.x; + if (bid >= lod_size - 1) return; + + int x_item_count = x_lod[bid + 1] - x_lod[bid]; + int repeats = ref_lod[bid + 1] - ref_lod[bid]; + int out_offset = static_cast(offset[bid]); + int x_offset = x_lod[bid]; + for (int tid_z = threadIdx.z; tid_z < repeats; tid_z += blockDim.z) { + for (int tid_y = threadIdx.y; tid_y < x_item_count; tid_y += blockDim.y) { + for (int tid_x = threadIdx.x; tid_x < x_item_length; + tid_x += blockDim.x) { + out_data[(out_offset + tid_z * x_item_count + tid_y) * x_item_length + + tid_x] = x_data[(x_offset + tid_y) * x_item_length + tid_x]; + } + } + } +} + +template +__global__ void sequence_expand_grad_kernel( + const T* dout_data, const size_t* ref_lod, const size_t* dx_lod, + const size_t* offset, const size_t lod_size, + /* default=1, + the instance length*/ + const int x_item_length, T* dx_data) { + int bid = blockIdx.x; + if (bid >= lod_size - 1) return; + int x_item_count = dx_lod[bid + 1] - dx_lod[bid]; + int repeats = ref_lod[bid + 1] - ref_lod[bid]; + int out_offset = static_cast(offset[bid]); + int x_offset = dx_lod[bid]; + + for (int tid_z = threadIdx.z; tid_z < repeats; tid_z += blockDim.z) { + for (int tid_y = threadIdx.y; tid_y < x_item_count; tid_y += blockDim.y) { + for (int tid_x = threadIdx.x; tid_x < x_item_length; + tid_x += blockDim.x) { + platform::CudaAtomicAdd( + &dx_data[(x_offset + tid_y) * x_item_length + tid_x], + dout_data[(out_offset + tid_z * x_item_count + tid_y) * + x_item_length + + tid_x]); + } + } + } +} + +void GetOutputOffset(const framework::Vector& x_lod, + const framework::Vector& ref_lod, + framework::Vector* out_offset) { + size_t offset = 0; + int lod_size = static_cast(x_lod.size()); + for (int i = 0; i < static_cast(x_lod.size()); ++i) { + (*out_offset)[i] = offset; + if (i < lod_size - 1) { + offset += (ref_lod[i + 1] - ref_lod[i]) * (x_lod[i + 1] - x_lod[i]); + } + } +} + +template +struct SequenceExpandFunctor { + void operator()( + const platform::CUDADeviceContext& context, const LoDTensor& x, + const framework::Vector& x_lod, /*expand source lod*/ + const framework::Vector& ref_lod, /*expand referenced lod*/ + LoDTensor* out) { + int x_item_length = x.numel() / x.dims()[0]; + framework::Vector out_offset(x_lod.size()); + GetOutputOffset(x_lod, ref_lod, &out_offset); + + int thread_x = std::min(32, std::max(static_cast(ref_lod.size()), 16)); + int thread_y = 16; + int thread_z = 1024 / thread_x / thread_y; + int block_x = static_cast(ref_lod.size()); + dim3 block_size(thread_x, thread_y, thread_z); + dim3 grid_size(block_x, 1); + + sequence_expand_kernel<<>>( + x.data(), x_lod.CUDAData(context.GetPlace()), + ref_lod.CUDAData(context.GetPlace()), + out_offset.CUDAData(context.GetPlace()), x_lod.size(), x_item_length, + out->mutable_data(context.GetPlace())); + } +}; + +template +struct SequenceExpandGradFunctor { + void operator()(const platform::CUDADeviceContext& context, + const LoDTensor& dout, + const framework::Vector& x_lod, /*expand source lod*/ + const framework::Vector& ref_lod, /*expand based lod*/ + LoDTensor* 
dx) { + int x_item_length = framework::product(dx->dims()) / dx->dims()[0]; + framework::Vector out_offset(x_lod.size()); + GetOutputOffset(x_lod, ref_lod, &out_offset); + + int thread_x = std::min(32, std::max(static_cast(ref_lod.size()), 16)); + int thread_y = 16; + int thread_z = 1024 / thread_x / thread_y; + int block_x = static_cast(ref_lod.size()); + dim3 block_size(thread_x, thread_y, thread_z); + dim3 grid_size(block_x, 1); + sequence_expand_grad_kernel<<>>( + dout.data(), ref_lod.CUDAData(context.GetPlace()), + x_lod.CUDAData(context.GetPlace()), + out_offset.CUDAData(context.GetPlace()), ref_lod.size(), x_item_length, + dx->mutable_data(context.GetPlace())); + } +}; + +} // namespace operators +} // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/sequence_expand_op.h b/paddle/fluid/operators/sequence_expand_op.h index db7d8bd6821fabd9714a160970558291ec47197f..d62c387c3eebf9df0ab532f4e891da006f239468 100644 --- a/paddle/fluid/operators/sequence_expand_op.h +++ b/paddle/fluid/operators/sequence_expand_op.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include // std::iota #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/memcpy.h" @@ -26,6 +27,57 @@ template using EigenMatrix = framework::EigenMatrix; +template +struct SequenceExpandFunctor { + void operator()( + const DeviceContext& ctx, const LoDTensor& x, + const framework::Vector& x_lod, /*expand source lod*/ + const framework::Vector& ref_lod, /*expand referenced lod*/ + LoDTensor* out); +}; + +template +struct SequenceExpandGradFunctor { + void operator()( + const DeviceContext& ctx, const LoDTensor& dout, + const framework::Vector& x_lod, /*expand source lod*/ + const framework::Vector& ref_lod, /*expand referenced lod*/ + LoDTensor* dx); +}; + +template +struct SequenceExpandFunctor { + void operator()( + const platform::CPUDeviceContext& context, const LoDTensor& x, + const framework::Vector& x_lod, /*expand source lod*/ + const framework::Vector& ref_lod, /*expand referenced lod*/ + LoDTensor* out) { + int out_offset = 0; + auto& eigen_place = *context.eigen_device(); + for (size_t i = 1; i < ref_lod.size(); ++i) { + int repeat_num = ref_lod[i] - ref_lod[i - 1]; + int x_start = x_lod[i - 1]; + int x_end = x_lod[i]; + int x_seq_len = x_end - x_start; + if (repeat_num > 0) { + auto x_sub_tensor = x.Slice(x_start, x_end); + x_sub_tensor.Resize({1, x_sub_tensor.numel()}); + int out_start = out_offset; + if (out->lod().size() == 1) { + out_start = out->lod()[0][out_offset]; + } + auto out_sub_tensor = + out->Slice(out_start, out_start + x_seq_len * repeat_num); + out_sub_tensor.Resize({repeat_num, x_sub_tensor.dims()[1]}); + EigenMatrix::From(out_sub_tensor).device(eigen_place) = + EigenMatrix::From(x_sub_tensor) + .broadcast(Eigen::array({{repeat_num, 1}})); + } + out_offset += repeat_num; + } + } +}; + template class SequenceExpandKernel : public framework::OpKernel { public: @@ -47,45 +99,36 @@ class SequenceExpandKernel : public framework::OpKernel { return; } - auto& out_lod = *out->mutable_lod(); + // x lod level is at most 1. 
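// (Worked example of the out_lod computation below, illustrative: with
//  y_lod[ref_level] = {0, 2, 2, 5} and x_lod[0] = {0, 1, 3, 4}, the repeat
//  counts are {2, 0, 3} and the sequence lengths {1, 2, 1}, so out_lod is
//  built up as {0, 1, 2, 3, 4, 5}: two copies of the length-1 first
//  sequence, no copies of the second, three copies of the third.)
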
+ framework::Vector out_lod; if (x_lod.size() == 1) { - out_lod.resize(1); - out_lod[0] = {0}; - } - - int out_offset = 0; - auto& eigen_place = - *context.template device_context().eigen_device(); - for (size_t i = 1; i < y_lod[ref_level].size(); ++i) { - int repeat_num = y_lod[ref_level][i] - y_lod[ref_level][i - 1]; - int x_start = i - 1; - int x_end = i; - if (x_lod.size() == 1) { - x_start = x_lod[0][i - 1]; - x_end = x_lod[0][i]; - } - int x_seq_len = x_end - x_start; - if (repeat_num > 0) { - auto x_sub_tensor = x->Slice(x_start, x_end); - x_sub_tensor.Resize({1, x_sub_tensor.numel()}); - int out_start = out_offset; - if (x_lod.size() == 1) { - out_start = out_lod[0][out_offset]; - } - auto out_sub_tensor = - out->Slice(out_start, out_start + x_seq_len * repeat_num); - out_sub_tensor.Resize({repeat_num, x_sub_tensor.dims()[1]}); - EigenMatrix::From(out_sub_tensor).device(eigen_place) = - EigenMatrix::From(x_sub_tensor) - .broadcast(Eigen::array({{repeat_num, 1}})); - } - for (int j = 0; j < repeat_num; ++j) { - if (x_lod.size() == 1) { - out_lod[0].push_back(out_lod[0].back() + x_seq_len); + out_lod.push_back(0); + int out_offset = 0; + for (size_t i = 1; i < y_lod[ref_level].size(); ++i) { + int repeat_num = y_lod[ref_level][i] - y_lod[ref_level][i - 1]; + int x_start = x_lod[0][i - 1]; + int x_end = x_lod[0][i]; + int x_seq_len = x_end - x_start; + for (int j = 0; j < repeat_num; ++j) { + out_lod.push_back(out_lod.back() + x_seq_len); + out_offset++; } - out_offset++; } + // write lod to out if x has lod + auto& ref_lod = *out->mutable_lod(); + ref_lod[0] = out_lod; } + framework::Vector ref_x_lod; + if (x->lod().size() == 1) { + ref_x_lod = x->lod()[0]; + } else { + // x_lod doesn't has lod, use fake x lod, level = 0 + ref_x_lod.resize(x->dims()[0] + 1); + std::iota(ref_x_lod.begin(), ref_x_lod.end(), 0); + } + SequenceExpandFunctor functor; + functor(context.template device_context(), *x, ref_x_lod, + y_lod[ref_level], out); } }; @@ -101,6 +144,36 @@ class SequenceExpandKernel : public framework::OpKernel { * Grad(X).lod = Input(X).lod * * */ +template +struct SequenceExpandGradFunctor { + void operator()( + const platform::CPUDeviceContext& context, const LoDTensor& dout, + const framework::Vector& x_lod, /*expand source lod*/ + const framework::Vector& ref_lod, /*expand referenced lod*/ + LoDTensor* dx) { + math::SetConstant set_zero; + set_zero(context, dx, static_cast(0)); + + int dout_offset = 0; + for (size_t i = 1; i < ref_lod.size(); ++i) { + int repeat_num = ref_lod[i] - ref_lod[i - 1]; + if (repeat_num > 0) { + int x_start = x_lod[i - 1]; + int x_end = x_lod[i]; + int x_seq_len = x_end - x_start; + auto dx_sub = dx->Slice(x_start, x_end); + dx_sub.Resize(flatten_to_1d(dx_sub.dims())); + int dout_end = dout_offset + repeat_num * x_seq_len; + auto dout_sub = dout.Slice(dout_offset, dout_end); + dout_sub.Resize({repeat_num, dx_sub.dims()[0]}); + math::ColwiseSum col_sum; + col_sum(context, dout_sub, &dx_sub); + dout_offset += repeat_num * x_seq_len; + } + } + } +}; + template class SequenceExpandGradKernel : public framework::OpKernel { public: @@ -114,43 +187,26 @@ class SequenceExpandGradKernel : public framework::OpKernel { g_x->mutable_data(context.GetPlace()); g_x->set_lod(x->lod()); - auto& x_lod = x->lod(); auto& y_lod = y->lod(); - if (ref_level == -1) ref_level = y_lod.size() - 1; - // just copy the gradient if (y_lod[ref_level].size() <= 1) { framework::TensorCopy(*g_out, context.GetPlace(), g_x); return; } - auto& dev_ctx = context.template device_context(); - 
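// (The grad path here implements the reduction dual to the expansion:
//  every repeated copy of sequence i feeds the same dx rows, so dx is the
//  column-wise sum over repeat_num corresponding dout rows. Reference loop
//  under the same assumed row-major layout, illustrative only:
//
//    for (size_t r = 0; r < repeats; ++r)
//      for (int j = 0; j < x_seq_len * width; ++j)
//        dx[x_start * width + j] +=
//            dout[(dout_offset + r * x_seq_len) * width + j];
//
//  which is what the ColwiseSum over the resized dout slice computes.)
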
- math::SetConstant set_zero; - set_zero(dev_ctx, g_x, static_cast(0)); - - int g_out_offset = 0; - for (size_t i = 1; i < y_lod[ref_level].size(); ++i) { - int repeat_num = y_lod[ref_level][i] - y_lod[ref_level][i - 1]; - if (repeat_num > 0) { - int x_start = i - 1; - int x_end = i; - if (x_lod.size() == 1) { - x_start = x_lod[0][i - 1]; - x_end = x_lod[0][i]; - } - int x_seq_len = x_end - x_start; - auto g_x_sub = g_x->Slice(x_start, x_end); - g_x_sub.Resize(flatten_to_1d(g_x_sub.dims())); - int g_out_end = g_out_offset + repeat_num * x_seq_len; - auto g_out_sub = g_out->Slice(g_out_offset, g_out_end); - g_out_sub.Resize({repeat_num, g_x_sub.dims()[0]}); - math::ColwiseSum col_sum; - col_sum(dev_ctx, g_out_sub, &g_x_sub); - g_out_offset += repeat_num * x_seq_len; - } + framework::Vector ref_x_lod; + framework::Vector ref_lod = y_lod[ref_level]; + if (x->lod().size() == 1) { + ref_x_lod = x->lod()[0]; + } else { + // x_lod doesn't has lod, use fake x lod, level = 0 + ref_x_lod.resize(x->dims()[0] + 1); + std::iota(ref_x_lod.begin(), ref_x_lod.end(), 0); } + SequenceExpandGradFunctor functor; + functor(context.template device_context(), *g_out, ref_x_lod, + ref_lod, g_x); } }; diff --git a/paddle/fluid/operators/sequence_pool_op.cc b/paddle/fluid/operators/sequence_pool_op.cc index 3d4d54a3a3f292d34b1a7645a0db4bdd3208ba6d..933c8c26239d49221819a583f999389ed6fb6cb6 100644 --- a/paddle/fluid/operators/sequence_pool_op.cc +++ b/paddle/fluid/operators/sequence_pool_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/sequence_pool_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_pool_op.h b/paddle/fluid/operators/sequence_pool_op.h index c58d677c92b7a20eb54dc5f9a447566e91bdc3d4..2aa20792f24305a106c500a3d7a6e3d363bc31d8 100644 --- a/paddle/fluid/operators/sequence_pool_op.h +++ b/paddle/fluid/operators/sequence_pool_op.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/sequence_softmax_op.cc b/paddle/fluid/operators/sequence_softmax_op.cc index e8b4df04286d327f568f4c43886f9fcf89cc4a88..d2c1317bef95deca36f7f4198407f5350a1be035 100644 --- a/paddle/fluid/operators/sequence_softmax_op.cc +++ b/paddle/fluid/operators/sequence_softmax_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/sequence_softmax_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sgd_op.cc b/paddle/fluid/operators/sgd_op.cc index 074fa9e00f2ec531f324ff10113d95144687d500..06cb0550ad7d4ad0241a4f439ea9ac16d9714c38 100644 --- a/paddle/fluid/operators/sgd_op.cc +++ b/paddle/fluid/operators/sgd_op.cc @@ -35,8 +35,8 @@ class SGDOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, "Learning rate should have 1 element"); auto param_dim = ctx->GetInputDim("Param"); - // TODO(qijun): check dimensions of Param and Grad at complie - // and run time. + // TODO(qijun): check dimensions of Param and Grad at compile + // and runtime. 
ctx->SetOutputDim("ParamOut", param_dim); } diff --git a/paddle/fluid/operators/sgd_op.h b/paddle/fluid/operators/sgd_op.h index 8d2bdf75903b4958e14605781f65c5a214cb5300..cfc8793e1e05a7d4fa9207ae77a664b391b9a986 100644 --- a/paddle/fluid/operators/sgd_op.h +++ b/paddle/fluid/operators/sgd_op.h @@ -65,7 +65,8 @@ class SGDOpKernel : public framework::OpKernel { auto &grad_rows = grad->rows(); size_t grad_row_numel = grad_value.numel() / grad_rows.size(); - PADDLE_ENFORCE_EQ(grad_row_numel, param_out->numel() / grad_height); + PADDLE_ENFORCE_EQ(static_cast(grad_row_numel), + param_out->numel() / grad_height); auto *grad_data = grad_value.data(); auto *out_data = param_out->data(); @@ -73,7 +74,7 @@ class SGDOpKernel : public framework::OpKernel { for (size_t i = 0; i < grad_rows.size(); i++) { PADDLE_ENFORCE(grad_rows[i] < grad_height, "Input rows index should less than height"); - for (int64_t j = 0; j < grad_row_numel; j++) { + for (size_t j = 0; j < grad_row_numel; j++) { out_data[grad_rows[i] * grad_row_numel + j] -= lr[0] * grad_data[i * grad_row_numel + j]; } @@ -107,7 +108,7 @@ class SGDOpKernel : public framework::OpKernel { PADDLE_ENFORCE(grad.rows()[i] < grad.height(), "Input rows index should less than height"); int64_t id_index = param.index(grad.rows()[i]); - for (int64_t j = 0; j < grad_row_width; j++) { + for (size_t j = 0; j < grad_row_width; j++) { out_data[id_index * grad_row_width + j] -= lr[0] * grad_data[i * grad_row_width + j]; } diff --git a/paddle/fluid/operators/softmax_mkldnn_op.cc b/paddle/fluid/operators/softmax_mkldnn_op.cc index cf0244e8662e827a90d8472a097315680579ff6d..dc2f1763446b2aaf72b20c72e8e37ec920abd120 100644 --- a/paddle/fluid/operators/softmax_mkldnn_op.cc +++ b/paddle/fluid/operators/softmax_mkldnn_op.cc @@ -12,12 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
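The SelectedRows branch touched in sgd_op.h above performs a row-sparse SGD step: the gradient stores only the touched rows plus their indices, and only those rows of the dense parameter are updated. As a reference loop (names are illustrative, not the kernel's):

// param is [height, row_width]; grad_rows lists the touched row indices.
for (size_t i = 0; i < grad_rows.size(); ++i) {
  const int64_t row = grad_rows[i];
  for (size_t j = 0; j < row_width; ++j) {
    param[row * row_width + j] -= lr * grad[i * row_width + j];
  }
}
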
*/ +#include #include "mkldnn.hpp" #include "paddle/fluid/operators/softmax_op.h" #include "paddle/fluid/platform/mkldnn_helper.h" -#include - namespace paddle { namespace operators { @@ -63,9 +62,11 @@ class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel { softmax_md, 1 /*dim: C*/); // create memory primitives auto softmax_src_memory = - memory({softmax_md, mkldnn_engine}, (void*)input_data); + memory({softmax_md, mkldnn_engine}, + static_cast(const_cast(input_data))); auto softmax_dst_memory = - memory({softmax_md, mkldnn_engine}, (void*)output_data); + memory({softmax_md, mkldnn_engine}, + static_cast(const_cast(output_data))); auto softmax_prim_desc = softmax_forward::primitive_desc(softmax_desc, mkldnn_engine); auto softmax = softmax_forward(softmax_prim_desc, softmax_src_memory, diff --git a/paddle/fluid/operators/split_ids_op.cc b/paddle/fluid/operators/split_ids_op.cc index a54f8a2878c8606e6b487552324d1e7dfa94b9b8..a53cbc8ac5199061dafdc7f4cf560b9e4fc577ab 100644 --- a/paddle/fluid/operators/split_ids_op.cc +++ b/paddle/fluid/operators/split_ids_op.cc @@ -48,11 +48,11 @@ class SplitIdsOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasOutputs("Out"), "SplitIdsOp must has output Out."); auto ids_var_type = ctx->GetInputsVarType("Ids").front(); - PADDLE_ENFORCE_EQ(ids_var_type, framework::proto::VarType::LOD_TENSOR); - auto ids_dims = ctx->GetInputDim("Ids"); - PADDLE_ENFORCE_EQ(ids_dims.size(), 2); - PADDLE_ENFORCE_EQ(ids_dims[1], 1); + if (ids_var_type == framework::proto::VarType::LOD_TENSOR) { + PADDLE_ENFORCE_EQ(ids_dims.size(), 2); + PADDLE_ENFORCE_EQ(ids_dims[1], 1); + } } }; @@ -60,8 +60,9 @@ class SplitIdsOpInferVarType : public framework::VarTypeInference { public: void operator()(const framework::OpDesc &op_desc, framework::BlockDesc *block) const override { + auto *input_var = block->Var(op_desc.Input("Ids")[0]); for (auto &out_var : op_desc.Output("Out")) { - block->Var(out_var)->SetType(framework::proto::VarType::LOD_TENSOR); + block->Var(out_var)->SetType(input_var->GetType()); } } }; @@ -73,4 +74,5 @@ namespace ops = paddle::operators; REGISTER_OPERATOR(split_ids, ops::SplitIdsOp, ops::SplitIdsOpMaker, ops::SplitIdsOpInferVarType); REGISTER_OP_CPU_KERNEL( - split_ids, ops::SplitIdsOpKernel); + split_ids, ops::SplitIdsOpKernel, + ops::SplitIdsOpKernel); diff --git a/paddle/fluid/operators/split_ids_op.h b/paddle/fluid/operators/split_ids_op.h index d36ed398ebce661a62ca92696b0089b5289d5b1c..d263426e073d95ad6d584c7370baf596587a993d 100644 --- a/paddle/fluid/operators/split_ids_op.h +++ b/paddle/fluid/operators/split_ids_op.h @@ -24,35 +24,65 @@ namespace operators { template class SplitIdsOpKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& ctx) const override { + void Compute(const framework::ExecutionContext &ctx) const override { auto place = ctx.GetPlace(); if (!platform::is_cpu_place(place)) { PADDLE_THROW("SplitIds do not support GPU kernel"); } - auto& ids_dims = ctx.Input("Ids")->dims(); - const T* ids = ctx.Input("Ids")->data(); - auto outs = ctx.MultiOutput("Out"); - const size_t shard_num = outs.size(); + const auto *ids_var = ctx.InputVar("Ids"); + if (ids_var->IsType()) { + const auto &ids_dims = ctx.Input("Ids")->dims(); + const T *ids = ctx.Input("Ids")->data(); + auto outs = ctx.MultiOutput("Out"); + const size_t shard_num = outs.size(); - std::vector> out_ids; - out_ids.resize(outs.size()); + std::vector> out_ids; + out_ids.resize(outs.size()); - // split id by their shard_num. 
- for (int i = 0; i < ids_dims[0]; ++i) { - T id = ids[i]; - size_t shard_id = static_cast(id) % shard_num; - out_ids[shard_id].push_back(id); - } + // split id by their shard_num. + for (int i = 0; i < ids_dims[0]; ++i) { + T id = ids[i]; + size_t shard_id = static_cast(id) % shard_num; + out_ids[shard_id].push_back(id); + } + + // create tensor for each shard and send to parameter server + for (size_t i = 0; i < out_ids.size(); ++i) { + auto *shard_t = outs[i]; + std::vector ids = out_ids[i]; + auto *shard_data = shard_t->mutable_data( + framework::make_ddim({static_cast(ids.size()), 1}), place); + for (size_t i = 0; i < ids.size(); ++i) { + shard_data[i] = ids[i]; + } + } + } else if (ids_var->IsType()) { + const auto *ids_selected_rows = ctx.Input("Ids"); + auto &ids_dims = ids_selected_rows->value().dims(); + PADDLE_ENFORCE_EQ(ids_dims[0], + static_cast(ids_selected_rows->rows().size()), + ""); + const T *ids = ids_selected_rows->value().data(); + const auto &ids_rows = ids_selected_rows->rows(); + auto outs = ctx.MultiOutput("Out"); + const size_t shard_num = outs.size(); + // get rows for outputs + for (auto &id : ids_rows) { + size_t shard_id = static_cast(id) % shard_num; + outs[shard_id]->mutable_rows()->push_back(id); + } - // create tensor for each shard and send to parameter server - for (size_t i = 0; i < out_ids.size(); ++i) { - auto* shard_t = outs[i]; - std::vector ids = out_ids[i]; - auto* shard_data = shard_t->mutable_data( - framework::make_ddim({static_cast(ids.size()), 1}), place); - for (size_t i = 0; i < ids.size(); ++i) { - shard_data[i] = ids[i]; + int64_t row_width = ids_dims[1]; + for (auto &out : outs) { + out->set_height(ids_selected_rows->height()); + framework::DDim ddim = framework::make_ddim( + {static_cast(out->rows().size()), row_width}); + T *output = out->mutable_value()->mutable_data(ddim, place); + for (int64_t i = 0; i < ddim[0]; ++i) { + memcpy(output + i * row_width, ids + out->rows()[i] * row_width, + row_width * sizeof(T)); + } } } } diff --git a/paddle/fluid/operators/split_op.cc b/paddle/fluid/operators/split_op.cc index dffac772f11bee2fa3dcdf469a86adc57369b54d..e745509ec8c1f2ec305d7d4aabfdd43d847124b5 100644 --- a/paddle/fluid/operators/split_op.cc +++ b/paddle/fluid/operators/split_op.cc @@ -13,7 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/split_op.h" -#include "paddle/fluid/operators/net_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/split_op.h b/paddle/fluid/operators/split_op.h index ae8562c0c503fec13dff61e04845ba0832848f5f..e2c41f44ab3ea3c42837974dae749278c9356ba5 100644 --- a/paddle/fluid/operators/split_op.h +++ b/paddle/fluid/operators/split_op.h @@ -14,7 +14,7 @@ limitations under the License. 
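The split_ids kernel above, which now accepts both LoDTensor and SelectedRows inputs, shards by id % shard_num; this must agree with how the lookup table was partitioned across parameter servers. A standalone sketch of the sharding itself (illustrative):

#include <cstdint>
#include <vector>

std::vector<std::vector<int64_t>> ShardIds(const std::vector<int64_t>& ids,
                                           size_t shard_num) {
  std::vector<std::vector<int64_t>> shards(shard_num);
  for (const int64_t id : ids) {
    // id -> shard (id mod shard_num), matching the kernel's rule
    shards[static_cast<size_t>(id) % shard_num].push_back(id);
  }
  return shards;
}
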
*/ #pragma once -#include +#include // NOLINT #include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/strided_memcpy.h" diff --git a/paddle/fluid/operators/strided_memcpy.h b/paddle/fluid/operators/strided_memcpy.h index 22c1db82e9f5aff6aa9a311cd1093b33fa7e6db9..7a10218e1556698f3e0a1828db5de8851dd1c90b 100644 --- a/paddle/fluid/operators/strided_memcpy.h +++ b/paddle/fluid/operators/strided_memcpy.h @@ -37,8 +37,8 @@ inline void StridedMemcpy(const platform::DeviceContext& dev_ctx, const T* src, const framework::DDim& src_stride, const framework::DDim& dst_dim, const framework::DDim& dst_stride, T* dst) { - using namespace detail; - StridedCopyDimVisitor func(dev_ctx, src, src_stride, dst_stride, dst); + paddle::operators::detail::StridedCopyDimVisitor func( + dev_ctx, src, src_stride, dst_stride, dst); boost::apply_visitor(func, dst_dim); } diff --git a/paddle/fluid/operators/sum_op.cc b/paddle/fluid/operators/sum_op.cc index 9061e137bd1c789d34665729c48c1c2ea9525c8e..108f26fafe7af76eaa613d77ed77748ee43ea234 100644 --- a/paddle/fluid/operators/sum_op.cc +++ b/paddle/fluid/operators/sum_op.cc @@ -10,9 +10,11 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/sum_op.h" + #include #include #include + #include "paddle/fluid/framework/var_type_inference.h" #include "paddle/fluid/operators/detail/safe_ref.h" @@ -37,7 +39,10 @@ class SumOp : public framework::OperatorWithKernel { auto x_dims = ctx->GetInputsDim("X"); size_t N = x_dims.size(); - PADDLE_ENFORCE_GT(N, 1, "Input tensors count should > 1."); + PADDLE_ENFORCE_GT(N, 0, "Input tensors count should > 0."); + if (N == 1) { + VLOG(3) << "Warning: sum have only one input, may waste memory"; + } framework::DDim in_dim({0}); for (auto& x_dim : x_dims) { diff --git a/paddle/fluid/operators/top_k_op.cu b/paddle/fluid/operators/top_k_op.cu index bfd26c2f2294f954adc81a1719650c46372098c4..d7f4d383ce0d9e1ff42fc12c96aaf0ceb532e5db 100644 --- a/paddle/fluid/operators/top_k_op.cu +++ b/paddle/fluid/operators/top_k_op.cu @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/top_k_op.h" #include "paddle/fluid/platform/assert.h" namespace paddle { @@ -133,71 +134,71 @@ __device__ __forceinline__ void GetTopK(Pair topk[], const T* val, int* col, } template -__device__ __forceinline__ void ThreadGetTopK(Pair topk[], int& beam, +__device__ __forceinline__ void ThreadGetTopK(Pair topk[], int* beam, int beam_size, const T* src, - bool& firstStep, bool& is_empty, - Pair& max, int dim, + bool* firstStep, bool* is_empty, + Pair* max, int dim, const int tid) { - if (beam > 0) { - int length = beam < beam_size ? beam : beam_size; - if (firstStep) { - firstStep = false; + if (*beam > 0) { + int length = (*beam) < beam_size ? 
*beam : beam_size; + if (*firstStep) { + *firstStep = false; GetTopK(topk, src, tid, dim, length); } else { for (int k = 0; k < MaxLength; k++) { - if (k < MaxLength - beam) { - topk[k] = topk[k + beam]; + if (k < MaxLength - (*beam)) { + topk[k] = topk[k + *beam]; } else { topk[k].set(-INFINITY, -1); } } - if (!is_empty) { - GetTopK(topk + MaxLength - beam, src, tid, dim, max, + if (!(*is_empty)) { + GetTopK(topk + MaxLength - *beam, src, tid, dim, *max, length); } } - max = topk[MaxLength - 1]; - if (max.v == -1) is_empty = true; - beam = 0; + *max = topk[MaxLength - 1]; + if ((*max).v == -1) *is_empty = true; + *beam = 0; } } template -__device__ __forceinline__ void ThreadGetTopK(Pair topk[], int& beam, +__device__ __forceinline__ void ThreadGetTopK(Pair topk[], int* beam, int beam_size, const T* val, - int* col, bool& firstStep, - bool& is_empty, Pair& max, + int* col, bool* firstStep, + bool* is_empty, Pair* max, int dim, const int tid) { - if (beam > 0) { - int length = beam < beam_size ? beam : beam_size; - if (firstStep) { - firstStep = false; + if (*beam > 0) { + int length = (*beam) < beam_size ? *beam : beam_size; + if (*firstStep) { + *firstStep = false; GetTopK(topk, val, col, tid, dim, length); } else { for (int k = 0; k < MaxLength; k++) { - if (k < MaxLength - beam) { - topk[k] = topk[k + beam]; + if (k < MaxLength - *beam) { + topk[k] = topk[k + *beam]; } else { topk[k].set(-INFINITY, -1); } } - if (!is_empty) { - GetTopK(topk + MaxLength - beam, val, col, tid, dim, max, + if (!(*is_empty)) { + GetTopK(topk + MaxLength - *beam, val, col, tid, dim, max, length); } } - max = topk[MaxLength - 1]; - if (max.v == -1) is_empty = true; - beam = 0; + *max = topk[MaxLength - 1]; + if ((*max).v == -1) *is_empty = true; + *beam = 0; } } template __device__ __forceinline__ void BlockReduce(Pair* sh_topk, int* maxid, Pair topk[], T** topVal, - int64_t** topIds, int& beam, int& k, + int64_t** topIds, int* beam, int* k, const int tid, const int warp) { while (true) { __syncthreads(); @@ -225,17 +226,17 @@ __device__ __forceinline__ void BlockReduce(Pair* sh_topk, int* maxid, (*topVal)++; (*topIds)++; } - if (tid == maxid[0]) beam++; - if (--k == 0) break; + if (tid == maxid[0]) (*beam)++; + if (--(*k) == 0) break; __syncthreads(); if (tid == maxid[0]) { - if (beam < MaxLength) { - sh_topk[tid] = topk[beam]; + if (*beam < MaxLength) { + sh_topk[tid] = topk[*beam]; } } if (maxid[0] / 32 == warp) { - if (__shfl(beam, (maxid[0]) % 32, 32) == MaxLength) break; + if (__shfl(*beam, (maxid[0]) % 32, 32) == MaxLength) break; } } } @@ -268,13 +269,13 @@ __global__ void KeMatrixTopK(T* output, int output_stride, int64_t* indices, topk[k].set(-INFINITY, -1); } while (k) { - ThreadGetTopK(topk, beam, k, - src + blockIdx.x * lds, firststep, - is_empty, max, dim, tid); + ThreadGetTopK(topk, &beam, k, + src + blockIdx.x * lds, &firststep, + &is_empty, &max, dim, tid); sh_topk[tid] = topk[0]; BlockReduce(sh_topk, maxid, topk, &output, - &indices, beam, k, tid, warp); + &indices, &beam, &k, tid, warp); } } @@ -308,9 +309,9 @@ class TopkOpCUDAKernel : public framework::OpKernel { KeMatrixTopK<<< grid, threads, 0, reinterpret_cast( ctx.device_context()) - .stream()>>>(output_data, output->dims()[1], - indices_data, input_data, - input_width, input_width, int(k)); + .stream()>>>( + output_data, output->dims()[1], indices_data, input_data, input_width, + input_width, static_cast(k)); } }; diff --git a/paddle/fluid/operators/uniform_random_op.cc b/paddle/fluid/operators/uniform_random_op.cc index 
87699362b2b5a14750a01345098ec5e6cc9be115..acaefaacdaa593c090d81084fdc1b3665314833f 100644 --- a/paddle/fluid/operators/uniform_random_op.cc +++ b/paddle/fluid/operators/uniform_random_op.cc @@ -24,7 +24,19 @@ template class CPUUniformRandomKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - auto* tensor = ctx.Output("Out"); + framework::Tensor* tensor = nullptr; + auto out_var = ctx.OutputVar("Out"); + if (out_var->IsType()) { + tensor = out_var->GetMutable(); + } else if (out_var->IsType()) { + auto shape = ctx.Attr>("shape"); + tensor = out_var->GetMutable()->mutable_value(); + tensor->Resize(framework::make_ddim(shape)); + } else { + PADDLE_THROW( + "uniform_random_op's output only" + "supports SelectedRows and Tensor"); + } T* data = tensor->mutable_data(ctx.GetPlace()); unsigned int seed = static_cast(ctx.Attr("seed")); std::minstd_rand engine; diff --git a/paddle/fluid/operators/uniform_random_op.cu b/paddle/fluid/operators/uniform_random_op.cu index 1232cd1eb332441b12e59a34b2c2f75669925fd0..e1c7323a30233f4ec4f60e46aa6088ee6d8601b7 100644 --- a/paddle/fluid/operators/uniform_random_op.cu +++ b/paddle/fluid/operators/uniform_random_op.cu @@ -43,7 +43,19 @@ template class GPUUniformRandomKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* tensor = context.Output("Out"); + framework::Tensor* tensor = nullptr; + auto out_var = context.OutputVar("Out"); + if (out_var->IsType()) { + tensor = out_var->GetMutable(); + } else if (out_var->IsType()) { + auto shape = context.Attr>("shape"); + tensor = out_var->GetMutable()->mutable_value(); + tensor->Resize(framework::make_ddim(shape)); + } else { + PADDLE_THROW( + "uniform_random_op's output only" + "supports SelectedRows and Tensor"); + } T* data = tensor->mutable_data(context.GetPlace()); unsigned int seed = static_cast(context.Attr("seed")); if (seed == 0) { diff --git a/paddle/fluid/platform/cuda_helper.h b/paddle/fluid/platform/cuda_helper.h index 881d611d4ac26f992036f639097815aff625227b..8758af0804ae08fec6fa64d7387f197f046ce20e 100644 --- a/paddle/fluid/platform/cuda_helper.h +++ b/paddle/fluid/platform/cuda_helper.h @@ -33,22 +33,26 @@ constexpr int PADDLE_CUDA_NUM_THREADS = 512; USE_CUDA_ATOMIC(Add, float); USE_CUDA_ATOMIC(Add, int); USE_CUDA_ATOMIC(Add, unsigned int); -USE_CUDA_ATOMIC(Add, unsigned long long int); +// CUDA API uses unsigned long long int, we cannot use uint64_t here. +// It because unsigned long long int is not necessarily uint64_t +USE_CUDA_ATOMIC(Add, unsigned long long int); // NOLINT CUDA_ATOMIC_WRAPPER(Add, int64_t) { - static_assert(sizeof(int64_t) == sizeof(long long int), + // Here, we check long long int must be int64_t. 
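// (Why the cast below is safe: CUDA's atomicAdd has an overload for
//  unsigned long long int but none for a signed 64-bit type. With
//  two's-complement representation, signed and unsigned 64-bit addition
//  produce the same bit pattern, so reinterpreting the address is sound;
//  the static_assert guards the size assumption.)
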
+ static_assert(sizeof(int64_t) == sizeof(long long int), // NOLINT "long long should be int64"); - return CudaAtomicAdd(reinterpret_cast(address), - static_cast(val)); + return CudaAtomicAdd( + reinterpret_cast(address), // NOLINT + static_cast(val)); // NOLINT } #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 600 USE_CUDA_ATOMIC(Add, double); #else CUDA_ATOMIC_WRAPPER(Add, double) { - unsigned long long int* address_as_ull = - reinterpret_cast(address); - unsigned long long int old = *address_as_ull, assumed; + unsigned long long int* address_as_ull = // NOLINT + reinterpret_cast(address); // NOLINT + unsigned long long int old = *address_as_ull, assumed; // NOLINT do { assumed = old; diff --git a/paddle/fluid/platform/cuda_profiler.h b/paddle/fluid/platform/cuda_profiler.h index ebd6aebd7688549c6fb14466cfa461b90a9fdde0..41d7c121469edd24c67b4288793cb95159fd4b62 100644 --- a/paddle/fluid/platform/cuda_profiler.h +++ b/paddle/fluid/platform/cuda_profiler.h @@ -11,12 +11,13 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once + #include -#include -#include -#include + +#include + +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/details/device_ptr_cast.h b/paddle/fluid/platform/details/cuda_transform_iterator_cast.h similarity index 50% rename from paddle/fluid/platform/details/device_ptr_cast.h rename to paddle/fluid/platform/details/cuda_transform_iterator_cast.h index 1c502a19c056c7fe434e68d568a0f59bf6315b95..06afc44c257bbeb0729323e1a42e1eead23ff075 100644 --- a/paddle/fluid/platform/details/device_ptr_cast.h +++ b/paddle/fluid/platform/details/cuda_transform_iterator_cast.h @@ -18,16 +18,22 @@ limitations under the License. */ #error device_ptr_cast must be include by .cu file #endif -#include +#include // For std::remove_pointer and std::is_pointer. + +#include "thrust/device_ptr.h" namespace paddle { namespace platform { namespace details { + +// PointerToThrustDevicePtr has two speicalizations, one casts a (CUDA +// device) pointer into thrust::device_ptr, the other keeps rest types +// un-casted. template -struct DevicePtrCast; +struct PointerToThrustDevicePtr; template -struct DevicePtrCast { +struct PointerToThrustDevicePtr { using ELEM = typename std::remove_pointer::type; using RTYPE = thrust::device_ptr; @@ -37,17 +43,26 @@ struct DevicePtrCast { }; template -struct DevicePtrCast { +struct PointerToThrustDevicePtr { using RTYPE = T; inline RTYPE operator()(RTYPE it) const { return it; } }; -// Cast T to thrust::device_ptr if T is a pointer. -// Otherwise, e.g., T is a iterator, return T itself. +// CastToCUDATransformIterator casts a pointer to thrust::device_ptr +// so it could be used as the iterator of thrust::transform. It +// doesn't cast other types. +// +// We need CastToCUDATransformIterator because it is often that we +// want to use device memory pointers as transform iterators, e.g., to +// transform a block of float32 to float16. In this case, we want +// CastToCUDATransformIterator to cast float16/32 pointers to +// thrust::device_ptr, otherwise they cannot work as the iterator +// required by thrust::transform. At the same time, we don't want to +// cast thrust::device_ptr to thrust::device_ptr repeatedly. 
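// Typical use of the cast, illustrative (CastFunctor and the raw device
// pointers in/out are assumed, not defined in this patch):
//
//   thrust::transform(thrust::cuda::par.on(stream),
//                     CastToCUDATransformIterator(in),
//                     CastToCUDATransformIterator(in + n),
//                     CastToCUDATransformIterator(out), CastFunctor());
//
// The cast tags raw pointers as thrust::device_ptr so they dispatch as
// device-side iterators; iterator arguments pass through unchanged.
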
template -auto DevPtrCast(T t) -> - typename DevicePtrCast::value>::RTYPE { - DevicePtrCast::value> cast; +auto CastToCUDATransformIterator(T t) -> + typename PointerToThrustDevicePtr::value>::RTYPE { + PointerToThrustDevicePtr::value> cast; return cast(t); } diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc index f03165fae5ca16c5c263ce0683af7ec56e6a3766..1f733d71bdfb777d4a2f316a5fefc3c874879862 100644 --- a/paddle/fluid/platform/device_context.cc +++ b/paddle/fluid/platform/device_context.cc @@ -175,7 +175,7 @@ CUDADeviceContext::~CUDADeviceContext() { Place CUDADeviceContext::GetPlace() const { return place_; } void CUDADeviceContext::Wait() const { - std::lock_guard guard(mutex_); + std::lock_guard guard(mutex_); PADDLE_ENFORCE(cudaStreamSynchronize(stream_)); PADDLE_ENFORCE(cudaGetLastError()); } diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h index b17558337914e0ca8fdba283edf4024d94e85f0f..a9c1984616bc731e0557f2cb89282423aa9c3bac 100644 --- a/paddle/fluid/platform/device_context.h +++ b/paddle/fluid/platform/device_context.h @@ -98,13 +98,20 @@ class CUDADeviceContext : public DeviceContext { /*! \brief Return cuda stream in the device context. */ cudaStream_t stream() const; + template + void RecordEvent(cudaEvent_t ev, Callback callback) { + std::lock_guard guard(mutex_); + callback(); + PADDLE_ENFORCE(cudaEventRecord(ev, stream_)); + } + private: CUDAPlace place_; std::unique_ptr eigen_device_; std::unique_ptr eigen_stream_; - mutable std::mutex mutex_; + mutable std::recursive_mutex mutex_; cudaStream_t stream_; cudnnHandle_t cudnn_handle_; cublasHandle_t cublas_handle_; diff --git a/paddle/fluid/platform/dynload/CMakeLists.txt b/paddle/fluid/platform/dynload/CMakeLists.txt index 84dac2937de02b3374156ebc83e19dac9f9a3e7a..b93b925a72a55442c105e4280a3580f4ea5b93a1 100644 --- a/paddle/fluid/platform/dynload/CMakeLists.txt +++ b/paddle/fluid/platform/dynload/CMakeLists.txt @@ -1,6 +1,11 @@ cc_library(dynamic_loader SRCS dynamic_loader.cc DEPS glog gflags enforce) list(APPEND CUDA_SRCS cublas.cc cudnn.cc curand.cc nccl.cc) +if (WITH_TENSORRT) + list(APPEND CUDA_SRCS tensorrt.cc) +endif() + + configure_file(cupti_lib_path.h.in ${CMAKE_CURRENT_BINARY_DIR}/cupti_lib_path.h) if (CUPTI_FOUND) list(APPEND CUDA_SRCS cupti.cc) diff --git a/paddle/fluid/platform/dynload/cublas.cc b/paddle/fluid/platform/dynload/cublas.cc index eb541579a136de2a84ecc9773e0c312b405f7e86..361d3439b844e9f68d3fba0a0e41ec457118a4a9 100644 --- a/paddle/fluid/platform/dynload/cublas.cc +++ b/paddle/fluid/platform/dynload/cublas.cc @@ -28,6 +28,10 @@ CUBLAS_BLAS_ROUTINE_EACH(DEFINE_WRAP); CUBLAS_BLAS_ROUTINE_EACH_R2(DEFINE_WRAP); #endif +#ifdef CUBLAS_BLAS_ROUTINE_EACH_R3 +CUBLAS_BLAS_ROUTINE_EACH_R3(DEFINE_WRAP); +#endif + } // namespace dynload } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/dynload/cublas.h b/paddle/fluid/platform/dynload/cublas.h index a41018d350e89881888d5e31089c2b9ecd76f6c0..1ab55d6b9bf8fdbd14c9c2bd978e3e99dba3e73e 100644 --- a/paddle/fluid/platform/dynload/cublas.h +++ b/paddle/fluid/platform/dynload/cublas.h @@ -71,7 +71,6 @@ extern void *cublas_dso_handle; __macro(cublasDgemm_v2); \ __macro(cublasHgemm); \ __macro(cublasSgemmEx); \ - __macro(cublasGemmEx); \ __macro(cublasSgeam_v2); \ __macro(cublasDgeam_v2); \ __macro(cublasCreate_v2); \ @@ -83,11 +82,6 @@ extern void *cublas_dso_handle; __macro(cublasDgemmBatched); \ __macro(cublasCgemmBatched); \ 
__macro(cublasZgemmBatched); \ - __macro(cublasSgemmStridedBatched); \ - __macro(cublasDgemmStridedBatched); \ - __macro(cublasCgemmStridedBatched); \ - __macro(cublasZgemmStridedBatched); \ - __macro(cublasHgemmStridedBatched); \ __macro(cublasSgetrfBatched); \ __macro(cublasSgetriBatched); \ __macro(cublasDgetrfBatched); \ @@ -95,10 +89,24 @@ extern void *cublas_dso_handle; CUBLAS_BLAS_ROUTINE_EACH(DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP) +// APIs available after CUDA 8.0 +#if CUDA_VERSION >= 8000 +#define CUBLAS_BLAS_ROUTINE_EACH_R2(__macro) \ + __macro(cublasGemmEx); \ + __macro(cublasSgemmStridedBatched); \ + __macro(cublasDgemmStridedBatched); \ + __macro(cublasCgemmStridedBatched); \ + __macro(cublasZgemmStridedBatched); \ + __macro(cublasHgemmStridedBatched); + +CUBLAS_BLAS_ROUTINE_EACH_R2(DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP) +#endif + // APIs available after CUDA 9.0 #if CUDA_VERSION >= 9000 -#define CUBLAS_BLAS_ROUTINE_EACH_R2(__macro) __macro(cublasSetMathMode); -CUBLAS_BLAS_ROUTINE_EACH_R2(DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP) +#define CUBLAS_BLAS_ROUTINE_EACH_R3(__macro) __macro(cublasSetMathMode); + +CUBLAS_BLAS_ROUTINE_EACH_R3(DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP) #endif #undef DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP diff --git a/paddle/fluid/platform/dynload/dynamic_loader.cc b/paddle/fluid/platform/dynload/dynamic_loader.cc index 3c1ccc7445ed27c711ab250aa223c66ae0da45dc..19c01dc5a968c7e1d2b0f15cf9a0e8427004e58b 100644 --- a/paddle/fluid/platform/dynload/dynamic_loader.cc +++ b/paddle/fluid/platform/dynload/dynamic_loader.cc @@ -45,6 +45,10 @@ DEFINE_string(nccl_dir, "", DEFINE_string(cupti_dir, "", "Specify path for loading cupti.so."); +DEFINE_string( + tensorrt_dir, "", + "Specify path for loading tensorrt library, such as libnvinfer.so."); + namespace paddle { namespace platform { namespace dynload { @@ -194,6 +198,14 @@ void* GetNCCLDsoHandle() { #endif } +void* GetTensorRtDsoHandle() { +#if defined(__APPLE__) || defined(__OSX__) + return GetDsoHandleFromSearchPath(FLAGS_tensorrt_dir, "libnvinfer.dylib"); +#else + return GetDsoHandleFromSearchPath(FLAGS_tensorrt_dir, "libnvinfer.so"); +#endif +} + } // namespace dynload } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/dynload/dynamic_loader.h b/paddle/fluid/platform/dynload/dynamic_loader.h index 4c85093a43e0e8d75b64c5b29d1ec68db1b44909..0de3559b6088086cb52c254535b6ec42da7dd724 100644 --- a/paddle/fluid/platform/dynload/dynamic_loader.h +++ b/paddle/fluid/platform/dynload/dynamic_loader.h @@ -25,6 +25,7 @@ void* GetCurandDsoHandle(); void* GetWarpCTCDsoHandle(); void* GetLapackDsoHandle(); void* GetNCCLDsoHandle(); +void* GetTensorRtDsoHandle(); } // namespace dynload } // namespace platform diff --git a/paddle/fluid/platform/dynload/tensorrt.cc b/paddle/fluid/platform/dynload/tensorrt.cc new file mode 100644 index 0000000000000000000000000000000000000000..f3c8e27944ca9b6419de87d752df3a83751039b1 --- /dev/null +++ b/paddle/fluid/platform/dynload/tensorrt.cc @@ -0,0 +1,30 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/fluid/platform/dynload/tensorrt.h" + +namespace paddle { +namespace platform { +namespace dynload { + +std::once_flag tensorrt_dso_flag; +void *tensorrt_dso_handle; + +#define DEFINE_WRAP(__name) DynLoad__##__name __name + +TENSORRT_RAND_ROUTINE_EACH(DEFINE_WRAP); + +} // namespace dynload +} // namespace platform +} // namespace paddle diff --git a/paddle/fluid/platform/dynload/tensorrt.h b/paddle/fluid/platform/dynload/tensorrt.h new file mode 100644 index 0000000000000000000000000000000000000000..f584a49da0fefe0b064b5fb55b01ec132225ce5e --- /dev/null +++ b/paddle/fluid/platform/dynload/tensorrt.h @@ -0,0 +1,69 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ +#pragma once + +#include <NvInfer.h> +#include <dlfcn.h> + +#include <mutex> // NOLINT + +#include "paddle/fluid/platform/dynload/dynamic_loader.h" +#include "paddle/fluid/platform/enforce.h" + +namespace paddle { +namespace platform { +namespace dynload { + +extern std::once_flag tensorrt_dso_flag; +extern void* tensorrt_dso_handle; + +#ifdef PADDLE_USE_DSO + +#define DECLARE_DYNAMIC_LOAD_TENSORRT_WRAP(__name) \ + struct DynLoad__##__name { \ + template <typename... Args> \ + auto operator()(Args... args) -> decltype(__name(args...)) { \ + using tensorrt_func = decltype(__name(args...)) (*)(Args...); \ + std::call_once(tensorrt_dso_flag, []() { \ + tensorrt_dso_handle = \ + paddle::platform::dynload::GetTensorRtDsoHandle(); \ + PADDLE_ENFORCE(tensorrt_dso_handle, "load tensorrt so failed"); \ + }); \ + void* p_##__name = dlsym(tensorrt_dso_handle, #__name); \ + PADDLE_ENFORCE(p_##__name, "load %s failed", #__name); \ + return reinterpret_cast<tensorrt_func>(p_##__name)(args...); \ + } \ + }; \ + extern DynLoad__##__name __name + +#else +#define DECLARE_DYNAMIC_LOAD_TENSORRT_WRAP(__name) \ + struct DynLoad__##__name { \ + template <typename... Args> \ + tensorrtResult_t operator()(Args...
args) { \ + return __name(args...); \ + } \ + }; \ + extern DynLoad__##__name __name +#endif + +#define TENSORRT_RAND_ROUTINE_EACH(__macro) \ + __macro(createInferBuilder_INTERNAL); \ + __macro(createInferRuntime_INTERNAL); + +TENSORRT_RAND_ROUTINE_EACH(DECLARE_DYNAMIC_LOAD_TENSORRT_WRAP) + +} // namespace dynload +} // namespace platform +} // namespace paddle diff --git a/paddle/fluid/platform/float16.h b/paddle/fluid/platform/float16.h index 673e1bcae4af6d039bc969f1de6e4bcab3748cb5..ffd183af68514dbb1a8b3de39000c9ca3f56ddc3 100644 --- a/paddle/fluid/platform/float16.h +++ b/paddle/fluid/platform/float16.h @@ -873,6 +873,11 @@ HOSTDEVICE inline bool(isfinite)(const float16& a) { return !((isnan)(a)) && !((isinf)(a)); } +inline std::ostream& operator<<(std::ostream& os, const float16& a) { + os << static_cast<float>(a); + return os; +} + } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/float16_test.cc b/paddle/fluid/platform/float16_test.cc index d60aecf96c8828a5656f81fd3602cfb2e66990cf..a589e32b61a9b6a44bdc4529eee715d987d6922c 100644 --- a/paddle/fluid/platform/float16_test.cc +++ b/paddle/fluid/platform/float16_test.cc @@ -141,5 +141,10 @@ TEST(float16, lod_tensor_cpu) { } } +TEST(float16, print) { + float16 a = float16(1.0f); + std::cout << a << std::endl; +} + } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/nccl_helper.h b/paddle/fluid/platform/nccl_helper.h index 29990043206509e4192bfff84832f09ef127d9dd..ca9ab2c7aecff47924f0198802d710b7661f5576 100644 --- a/paddle/fluid/platform/nccl_helper.h +++ b/paddle/fluid/platform/nccl_helper.h @@ -14,8 +14,9 @@ #pragma once -#include <thread> +#include <thread> // NOLINT #include <typeindex> +#include <vector> #include "paddle/fluid/platform/dynload/nccl.h" #include "paddle/fluid/platform/enforce.h" @@ -29,6 +30,8 @@ inline ncclDataType_t ToNCCLDataType(std::type_index type) { return ncclDouble; } else if (type == typeid(int)) { // NOLINT return ncclInt; + } else if (type == typeid(int64_t)) { // NOLINT + return ncclInt64; } else { PADDLE_THROW("Not supported"); } @@ -58,7 +61,7 @@ struct NCCLContext { ncclComm_t comm_; explicit NCCLContext(int dev_id) - : ctx_(new CUDADeviceContext(CUDAPlace(dev_id))) {} + : ctx_(new CUDADeviceContext(CUDAPlace(dev_id))), comm_{nullptr} {} cudaStream_t stream() const { return ctx_->stream(); } @@ -66,23 +69,23 @@ struct NCCLContext { return boost::get<platform::CUDAPlace>(ctx_->GetPlace()).device; } - static void InitNCCLContext(std::unordered_map<int, NCCLContext> &contexts, + static void InitNCCLContext(std::unordered_map<int, NCCLContext> *contexts, const std::vector<platform::Place> &places) { std::vector<ncclComm_t> comms; std::vector<int> devs; - comms.resize(contexts.size()); - devs.reserve(contexts.size()); + comms.resize(contexts->size()); + devs.reserve(contexts->size()); for (auto &p : places) { devs.push_back(boost::get<platform::CUDAPlace>(p).device); } PADDLE_ENFORCE(platform::dynload::ncclCommInitAll( - &comms[0], static_cast<int>(contexts.size()), &devs[0])); + &comms[0], static_cast<int>(contexts->size()), &devs[0])); int i = 0; for (auto &dev_id : devs) { - contexts.at(dev_id).comm_ = comms[i++]; + contexts->at(dev_id).comm_ = comms[i++]; } } }; @@ -91,7 +94,8 @@ struct NCCLContextMap { std::unordered_map<int, NCCLContext> contexts_; std::vector<int> order_; - NCCLContextMap(const std::vector<platform::Place> &places) { + explicit NCCLContextMap(const std::vector<platform::Place> &places) { + PADDLE_ENFORCE(!places.empty()); order_.reserve(places.size()); for (auto &p : places) { int dev_id = boost::get<platform::CUDAPlace>(p).device; @@ -102,15 +106,17 @@ struct NCCLContextMap { order_.size(), contexts_.size(), "NCCL Context Map does not support two or more of the
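// Illustrative expansion (not part of the patch): for
// __name == createInferBuilder_INTERNAL, the PADDLE_USE_DSO branch of
// DECLARE_DYNAMIC_LOAD_TENSORRT_WRAP above generates, in essence
// (PADDLE_ENFORCE checks elided):
//
//   struct DynLoad__createInferBuilder_INTERNAL {
//     template <typename... Args>
//     auto operator()(Args... args)
//         -> decltype(createInferBuilder_INTERNAL(args...)) {
//       using func_t =
//           decltype(createInferBuilder_INTERNAL(args...)) (*)(Args...);
//       std::call_once(tensorrt_dso_flag, []() {
//         // dlopen() libnvinfer once per process.
//         tensorrt_dso_handle =
//             paddle::platform::dynload::GetTensorRtDsoHandle();
//       });
//       void* p = dlsym(tensorrt_dso_handle, "createInferBuilder_INTERNAL");
//       return reinterpret_cast<func_t>(p)(args...);  // forward the call
//     }
//   };
//   extern DynLoad__createInferBuilder_INTERNAL createInferBuilder_INTERNAL;
//
// Call sites therefore use the symbol as if it were statically linked, while
// the library is resolved lazily on first use.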
same device"); - std::vector comms; - comms.resize(order_.size()); + if (places.size() > 1) { + std::vector comms; + comms.resize(order_.size()); - PADDLE_ENFORCE(platform::dynload::ncclCommInitAll( - &comms[0], static_cast(order_.size()), &order_[0])); + PADDLE_ENFORCE(platform::dynload::ncclCommInitAll( + &comms[0], static_cast(order_.size()), &order_[0])); - int i = 0; - for (auto &dev_id : order_) { - contexts_.at(dev_id).comm_ = comms[i++]; + int i = 0; + for (auto &dev_id : order_) { + contexts_.at(dev_id).comm_ = comms[i++]; + } } } diff --git a/paddle/fluid/platform/transform.h b/paddle/fluid/platform/transform.h index 917c48b47f8d70cd821d45dfbc6bafa494710ffa..7877d3e41c1c993662f5d91b263cbcb71db74c36 100644 --- a/paddle/fluid/platform/transform.h +++ b/paddle/fluid/platform/transform.h @@ -14,29 +14,44 @@ limitations under the License. */ #pragma once +#include +#include + #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/hostdevice.h" #include "paddle/fluid/platform/place.h" -#include -#include #ifdef __NVCC__ #include #include -#include "paddle/fluid/platform/details/device_ptr_cast.h" +#include "paddle/fluid/platform/details/cuda_transform_iterator_cast.h" #endif namespace paddle { namespace platform { -// Transform on host or device. It provides the same API in std library. +// Transform applys a unary or a binary functor on each element in a +// range defined by a pair of iterators. +// +// - The specialization for CPU calls std::transform. +// - The specialization for CUDA calls thrust::tranform. +// +// NOTE: We need to define InputIter and OutputIter defined as +// different types, because the InputIter points op's inputs and +// OutputIter pints to op's outputs. +// +// NOTE: We don't assume that InputIter to be const InputType* and +// OutputIter to be OutputType*, because we might use a iterator +// class, paddle::fluid::operators::RowwiseTRansformIterator. template struct Transform { + // The unary version. template void operator()(const DeviceContext& context, InputIter first, InputIter last, OutputIter result, UnaryOperation op); + // The binary version. template void operator()(const DeviceContext& context, InputIter1 first1, @@ -70,8 +85,9 @@ struct Transform { auto place = context.GetPlace(); PADDLE_ENFORCE(is_gpu_place(place), "It must use GPU place."); thrust::transform(thrust::cuda::par.on(context.stream()), - details::DevPtrCast(first), details::DevPtrCast(last), - details::DevPtrCast(result), op); + details::CastToCUDATransformIterator(first), + details::CastToCUDATransformIterator(last), + details::CastToCUDATransformIterator(result), op); } template { auto place = context.GetPlace(); PADDLE_ENFORCE(is_gpu_place(place), "It must use GPU place."); thrust::transform(thrust::cuda::par.on(context.stream()), - details::DevPtrCast(first1), details::DevPtrCast(last1), - details::DevPtrCast(first2), details::DevPtrCast(result), - op); + details::CastToCUDATransformIterator(first1), + details::CastToCUDATransformIterator(last1), + details::CastToCUDATransformIterator(first2), + details::CastToCUDATransformIterator(result), op); } }; #endif diff --git a/paddle/fluid/platform/transform_test.cu b/paddle/fluid/platform/transform_test.cu index 7b5cfd8f43473dc6bc784e98bd26fdd9e0ba9994..f65d1f60100edc85ba9745ed36f26a0ed160d80f 100644 --- a/paddle/fluid/platform/transform_test.cu +++ b/paddle/fluid/platform/transform_test.cu @@ -18,11 +18,12 @@ limitations under the License. 
*/ #include "paddle/fluid/platform/hostdevice.h" #include "paddle/fluid/platform/transform.h" +namespace { + template class Scale { public: explicit Scale(const T& scale) : scale_(scale) {} - HOSTDEVICE T operator()(const T& a) const { return a * scale_; } private: @@ -35,11 +36,23 @@ class Multiply { HOSTDEVICE T operator()(const T& a, const T& b) const { return a * b; } }; +} // namespace + +using paddle::memory::Alloc; +using paddle::memory::Free; +using paddle::memory::Copy; + +using paddle::platform::CPUPlace; +using paddle::platform::CUDAPlace; +using paddle::platform::CPUDeviceContext; +using paddle::platform::CUDADeviceContext; + +using paddle::platform::Transform; + TEST(Transform, CPUUnary) { - using namespace paddle::platform; CPUDeviceContext ctx; float buf[4] = {0.1, 0.2, 0.3, 0.4}; - Transform trans; + Transform trans; trans(ctx, buf, buf + 4, buf, Scale(10)); for (int i = 0; i < 4; ++i) { ASSERT_NEAR(buf[i], static_cast(i + 1), 1e-5); @@ -47,14 +60,12 @@ TEST(Transform, CPUUnary) { } TEST(Transform, GPUUnary) { - using namespace paddle::platform; - using namespace paddle::memory; CUDAPlace gpu0(0); CUDADeviceContext ctx(gpu0); float cpu_buf[4] = {0.1, 0.2, 0.3, 0.4}; float* gpu_buf = static_cast(Alloc(gpu0, sizeof(float) * 4)); Copy(gpu0, gpu_buf, CPUPlace(), cpu_buf, sizeof(cpu_buf), ctx.stream()); - Transform trans; + Transform trans; trans(ctx, gpu_buf, gpu_buf + 4, gpu_buf, Scale(10)); ctx.Wait(); Copy(CPUPlace(), cpu_buf, gpu0, gpu_buf, sizeof(cpu_buf), ctx.stream()); @@ -65,10 +76,8 @@ TEST(Transform, GPUUnary) { } TEST(Transform, CPUBinary) { - using namespace paddle::platform; - using namespace paddle::memory; int buf[4] = {1, 2, 3, 4}; - Transform trans; + Transform trans; CPUDeviceContext ctx; trans(ctx, buf, buf + 4, buf, buf, Multiply()); for (int i = 0; i < 4; ++i) { @@ -77,14 +86,12 @@ TEST(Transform, CPUBinary) { } TEST(Transform, GPUBinary) { - using namespace paddle::platform; - using namespace paddle::memory; int buf[4] = {1, 2, 3, 4}; CUDAPlace gpu0(0); CUDADeviceContext ctx(gpu0); int* gpu_buf = static_cast(Alloc(gpu0, sizeof(buf))); Copy(gpu0, gpu_buf, CPUPlace(), buf, sizeof(buf), ctx.stream()); - Transform trans; + Transform trans; trans(ctx, gpu_buf, gpu_buf + 4, gpu_buf, gpu_buf, Multiply()); ctx.Wait(); Copy(CPUPlace(), buf, gpu0, gpu_buf, sizeof(buf), ctx.stream()); diff --git a/paddle/fluid/platform/variant.h b/paddle/fluid/platform/variant.h index 05ca33137de8db5291c8e38fc03457d05092cea8..45f60fc9d76560b133fa06198a24c7eaccc24088 100644 --- a/paddle/fluid/platform/variant.h +++ b/paddle/fluid/platform/variant.h @@ -14,29 +14,25 @@ limitations under the License. */ #pragma once -#ifdef __CUDACC__ -#ifdef __CUDACC_VER_MAJOR__ -// CUDA 9 define `__CUDACC_VER__` as a warning message, manually define -// __CUDACC_VER__ instead. +// Boost 1.41.0 requires __CUDACC_VER__, but in CUDA 9 __CUDACC_VER__ +// is removed, so we have to manually define __CUDACC_VER__ instead. 
+// For details, please refer to +// https://github.com/PaddlePaddle/Paddle/issues/6626 +#if defined(__CUDACC__) && defined(__CUDACC_VER_MAJOR__) #undef __CUDACC_VER__ - -#define __CUDACC_VER__ \ - (__CUDACC_VER_MAJOR__ * 10000 + __CUDACC_VER_MINOR__ * 100 + \ - __CUDACC_VER_BUILD__) -#endif - +#define __CUDACC_VER__ \ + __CUDACC_VER_BUILD__ + __CUDACC_VER_MAJOR__ * 10000 + \ + __CUDACC_VER_MINOR__ * 100 #endif -#include <boost/config.hpp> +#include "boost/config.hpp" -#ifdef PADDLE_WITH_CUDA - -// Because boost's variadic templates has bug on nvcc, boost will disable -// variadic template support when GPU enabled on nvcc. -// Define BOOST_NO_CXX11_VARIADIC_TEMPLATES on gcc/clang to generate same -// function symbols. -// +// Because Boost 1.41.0's variadic templates have a bug on nvcc, Boost +// disables variadic template support in NVCC mode. Define +// BOOST_NO_CXX11_VARIADIC_TEMPLATES on gcc/clang to generate the same +// function symbols. For details, see +// https://github.com/PaddlePaddle/Paddle/issues/3386 +#ifdef PADDLE_WITH_CUDA #ifndef BOOST_NO_CXX11_VARIADIC_TEMPLATES #define BOOST_NO_CXX11_VARIADIC_TEMPLATES #endif diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt index 884289a7fda65f9713392ec459219b4c89271e73..4fef351c2118e43697606c90a616cd870e78cd77 100644 --- a/paddle/fluid/pybind/CMakeLists.txt +++ b/paddle/fluid/pybind/CMakeLists.txt @@ -2,13 +2,13 @@ if(WITH_PYTHON) if(WITH_AMD_GPU) hip_library(paddle_pybind SHARED SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc - DEPS pybind python backward proto_desc memory executor prune init profiler feed_fetch_method + DEPS pybind python proto_desc memory executor prune init profiler feed_fetch_method parallel_executor ${GLOB_OP_LIB}) else() cc_library(paddle_pybind SHARED SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc - DEPS pybind python backward proto_desc memory executor prune init profiler feed_fetch_method + DEPS pybind python proto_desc memory executor prune init profiler feed_fetch_method parallel_executor ${GLOB_OP_LIB}) if(NOT APPLE AND NOT ANDROID) diff --git a/paddle/fluid/pybind/protobuf.cc b/paddle/fluid/pybind/protobuf.cc index 2fe829036386086075a7f6ad0b9348a9e8c5e85a..93533e5c9d88a9113d4d3eacb01901a8c14b6324 100644 --- a/paddle/fluid/pybind/protobuf.cc +++ b/paddle/fluid/pybind/protobuf.cc @@ -18,7 +18,6 @@ limitations under the License.
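// Worked example (illustration, not part of the patch): for nvcc 9.0.176 the
// redefinition above evaluates to
//   __CUDACC_VER__ = 176 + 9 * 10000 + 0 * 100 = 90176
// i.e. the same MAJOR * 10000 + MINOR * 100 + BUILD encoding that the
// pre-CUDA-9 __CUDACC_VER__ macro produced, which is what Boost 1.41.0's
// version checks expect to compare against.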
*/ #include #include -#include "paddle/fluid/framework/backward.h" #include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/framework/op_desc.h" #include "paddle/fluid/framework/program_desc.h" @@ -125,23 +124,6 @@ void BindProgramDesc(pybind11::module *m) { }) .def("append_block", &pd::ProgramDesc::AppendBlock, pybind11::return_value_policy::reference) - .def("append_backward", - [](pd::ProgramDesc &program_desc, const pd::VarDesc &target, - const std::unordered_set &no_grad_vars) { - pd::ParamGradInfoMap param_grad_map = - AppendBackward(program_desc, target, no_grad_vars); - std::unordered_map< - std::string, std::tuple> - retv; - for (auto it = param_grad_map.begin(); it != param_grad_map.end(); - ++it) { - const auto &grad_info = it->second; - retv[it->first] = std::make_tuple( - grad_info.name_, grad_info.block_idx_, grad_info.op_idx_); - } - return retv; - }) .def("block", &pd::ProgramDesc::MutableBlock, pybind11::return_value_policy::reference) .def("num_blocks", &pd::ProgramDesc::Size) diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 392404045578489014f2283b885c388d5a4586cf..a1e8ff6399f0812773a7bb753c90e4400b1763d9 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -20,9 +20,6 @@ limitations under the License. */ #include #include -#include "paddle/fluid/pybind/protobuf.h" - -#include "paddle/fluid/framework/backward.h" #include "paddle/fluid/framework/channel.h" #include "paddle/fluid/framework/executor.h" #include "paddle/fluid/framework/feed_fetch_method.h" @@ -31,18 +28,18 @@ limitations under the License. */ #include "paddle/fluid/framework/lod_rank_table.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/lod_tensor_array.h" +#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/parallel_executor.h" #include "paddle/fluid/framework/prune.h" #include "paddle/fluid/framework/reader.h" #include "paddle/fluid/framework/selected_rows.h" -#include "paddle/fluid/operators/cond_op.h" -#include "paddle/fluid/operators/net_op.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/place.h" #include "paddle/fluid/platform/profiler.h" #include "paddle/fluid/pybind/const_value.h" #include "paddle/fluid/pybind/exception.h" -#include "paddle/fluid/pybind/pybind.h" +#include "paddle/fluid/pybind/protobuf.h" +#include "paddle/fluid/pybind/pybind.h" // NOLINT #include "paddle/fluid/pybind/recordio.h" #include "paddle/fluid/pybind/tensor_py.h" @@ -239,11 +236,6 @@ All parameter, weight, gradient are variables in Paddle. }, py::return_value_policy::reference) #endif - .def("get_net", - [](Variable &self) -> operators::NetOp * { - return self.GetMutable(); - }, - py::return_value_policy::reference) .def("get_reader", [](Variable &self) -> framework::ReaderHolder * { PADDLE_ENFORCE(self.IsType()); @@ -252,7 +244,6 @@ All parameter, weight, gradient are variables in Paddle. py::return_value_policy::reference); py::class_(m, "Reader", "") - .def("has_next", &framework::ReaderHolder::HasNext) .def("reset", &framework::ReaderHolder::ReInit); py::class_(m, "Scope", "") @@ -389,11 +380,6 @@ All parameter, weight, gradient are variables in Paddle. 
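# Illustrative sketch (not part of the patch): with ProgramDesc.append_backward
# removed from the C++ binding above, gradient ops are appended from the
# Python side instead. A minimal example, assuming the usual fluid setup:
#
#   import paddle.fluid as fluid
#
#   x = fluid.layers.data(name='x', shape=[13], dtype='float32')
#   y = fluid.layers.data(name='y', shape=[1], dtype='float32')
#   y_predict = fluid.layers.fc(input=x, size=1)
#   cost = fluid.layers.square_error_cost(input=y_predict, label=y)
#   avg_cost = fluid.layers.mean(cost)
#   # Returns a list of (parameter, gradient) pairs; optimizers call this
#   # internally from minimize().
#   params_grads = fluid.backward.append_backward(loss=avg_cost)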
desc.InitializationErrorString()); return OpRegistry::CreateOp(desc); }) - .def("backward", - [](const OperatorBase &forwardOp, - const std::unordered_set &no_grad_vars) { - return Backward(forwardOp, no_grad_vars).release(); - }) .def("run", [](OperatorBase &self, const Scope &scope, const platform::CPUPlace &place) { self.Run(scope, place); }) @@ -421,42 +407,6 @@ All parameter, weight, gradient are variables in Paddle. [](const OperatorBase &op) { return op.OutputVars(false); }) .def("support_gpu", &OperatorBase::SupportGPU); - py::class_(m, "Net") - .def_static("create", - []() -> operators::NetOp * { - auto *retv = new operators::NetOp; - retv->SetType("plain_net"); - return retv; - }) - .def("append_op", [](operators::NetOp &self, - const OperatorBase &op) { self.AppendOp(op); }) - .def("complete_add_op", &operators::NetOp::CompleteAddOp) - .def("complete_add_op", [](std::shared_ptr &self) { - self->CompleteAddOp(); - }); - - // cond_op - py::class_(m, "CondOp") - .def_static("create", - [](py::bytes protobin) -> operators::CondOp * { - proto::OpDesc desc; - PADDLE_ENFORCE(desc.ParsePartialFromString(protobin), - "Cannot parse user input to OpDesc"); - PADDLE_ENFORCE(desc.IsInitialized(), - "User OpDesc is not initialized, reason %s", - desc.InitializationErrorString()); - auto cond_op = OpRegistry::CreateOp(desc); - return static_cast(cond_op.release()); - }) - .def("set_truenet", - [](operators::CondOp &self, const operators::NetOp &net) -> void { - self.set_truenet(net.Clone()); - }) - .def("set_falsenet", - [](operators::CondOp &self, const operators::NetOp &net) -> void { - self.set_falsenet(net.Clone()); - }); - py::class_(m, "Executor") .def(py::init()) .def("run", @@ -554,6 +504,7 @@ All parameter, weight, gradient are variables in Paddle. 
bcast_vars, main_program, loss_var_name, scope, local_scopes, allow_op_delay); }) + .def("bcast_params", &ParallelExecutor::BCastParamsToGPUs) .def("local_scopes", [](ParallelExecutor &self) -> std::vector * { return &self.GetLocalScopes(); diff --git a/paddle/fluid/pybind/recordio.cc b/paddle/fluid/pybind/recordio.cc index 0644d91425af1a1ac9363b1dec9e317689331fcb..330d104e0a774d905e463566f85bd2e64a080190 100644 --- a/paddle/fluid/pybind/recordio.cc +++ b/paddle/fluid/pybind/recordio.cc @@ -39,7 +39,7 @@ class RecordIOWriter { void CompleteAppendTensor() { auto& ctx = *platform::DeviceContextPool::Instance().Get(platform::CPUPlace()); - framework::WriteToRecordIO(writer_, tensors_, ctx); + framework::WriteToRecordIO(&writer_, tensors_, ctx); tensors_.clear(); } diff --git a/paddle/fluid/recordio/chunk.cc b/paddle/fluid/recordio/chunk.cc index e7ebbba452c5c37113f0962e459da65c66b70873..82d9aa601cf450b8f90573d6c582bb12ced7a48a 100644 --- a/paddle/fluid/recordio/chunk.cc +++ b/paddle/fluid/recordio/chunk.cc @@ -14,13 +14,13 @@ #include "paddle/fluid/recordio/chunk.h" +#include #include #include #include #include "paddle/fluid/platform/enforce.h" -#include "snappy_stream/include/snappystream.hpp" -#include "zlib/include/zlib.h" +#include "snappystream.hpp" namespace paddle { namespace recordio { diff --git a/paddle/fluid/recordio/chunk_test.cc b/paddle/fluid/recordio/chunk_test.cc index 98ca99b9a018db2da9aa563741ff3cf30461c4ce..5177475c016097d9a118aa79f855672354b3ef53 100644 --- a/paddle/fluid/recordio/chunk_test.cc +++ b/paddle/fluid/recordio/chunk_test.cc @@ -43,5 +43,5 @@ TEST(Chunk, Compressor) { ch.Clear(); ch.Parse(ss); - ASSERT_EQ(ch.NumBytes(), 18); + ASSERT_EQ(ch.NumBytes(), 18ul); } diff --git a/paddle/fluid/recordio/header.cc b/paddle/fluid/recordio/header.cc index ed09d58f6a3e2dba50bf4407c0463480575b248e..c4822329a43a79adc81f0b0cf145b22661ac6f50 100644 --- a/paddle/fluid/recordio/header.cc +++ b/paddle/fluid/recordio/header.cc @@ -13,6 +13,9 @@ // limitations under the License. #include "paddle/fluid/recordio/header.h" + +#include + #include "paddle/fluid/platform/enforce.h" namespace paddle { diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index 4885b74e6c6644704cff01dbf49975d6e87ce0c4..2b2a904974f3756576fb47851400e344c9357c57 100755 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -198,7 +198,7 @@ EOF # run paddle version to install python packages first RUN apt-get update &&\ ${NCCL_DEPS}\ - apt-get install -y wget python-pip dmidecode python-tk && pip install -U pip && \ + apt-get install -y wget python-pip dmidecode python-tk && pip install -U pip==9.0.3 && \ pip install /*.whl; apt-get install -f -y && \ apt-get clean -y && \ rm -f /*.whl && \ @@ -231,7 +231,7 @@ function gen_fluid_inference_lib() { Deploying fluid inference library ... 
======================================== EOF - make inference_lib_dist + make -j `nproc` inference_lib_dist fi } diff --git a/paddle/utils/DynamicLoader.cpp b/paddle/utils/DynamicLoader.cpp index 5604a90038b06d2c1a4d9db70e4185cddfd25d3e..9ac4a56c6e300d299467630b39a32567af72cf40 100644 --- a/paddle/utils/DynamicLoader.cpp +++ b/paddle/utils/DynamicLoader.cpp @@ -32,6 +32,8 @@ DEFINE_string(warpctc_dir, "", "Specify path for loading libwarpctc.so."); DEFINE_string(lapack_dir, "", "Specify path for loading liblapack.so."); +DEFINE_string(tensorrt_dir, "", "Specify path for loading libnvinfer.so."); + static inline std::string join(const std::string& part1, const std::string& part2) { // directory separator @@ -157,3 +159,12 @@ void GetLapackDsoHandle(void** dso_handle) { GetDsoHandleFromSearchPath(FLAGS_lapack_dir, "liblapacke.so", dso_handle); #endif } + +void GetTensorRtDsoHandle(void** dso_handle) { +#if defined(__APPLE__) || defined(__OSX__) + GetDsoHandleFromSearchPath( + FLAGS_tensorrt_dir, "libnvinfer.dylib", dso_handle); +#else + GetDsoHandleFromSearchPath(FLAGS_tensorrt_dir, "libnvinfer.so", dso_handle); +#endif +} diff --git a/paddle/utils/DynamicLoader.h b/paddle/utils/DynamicLoader.h index 2e5ff76a06152b6a12818f06baaeaa6a69726ba8..02f519de4b3988fb6aca323aaa1751ee2c4bd738 100644 --- a/paddle/utils/DynamicLoader.h +++ b/paddle/utils/DynamicLoader.h @@ -58,3 +58,11 @@ void GetWarpCTCDsoHandle(void** dso_handle); * */ void GetLapackDsoHandle(void** dso_handle); + +/** + * @brief load the DSO of tensorrt + * + * @param **dso_handle dso handler + * + */ +void GetTensorRtDsoHandle(void** dso_handle); diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 7cbd7f22bf2968b29dc0665e893101b892808b5e..c7c0812fe2238d48903aa4c75bb8f1e9ecdb16c9 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -1,5 +1,5 @@ file(GLOB UTILS_PY_FILES . ./paddle/utils/*.py) -file(GLOB_RECURSE FLUID_PY_FILES ./paddle/fluid/ *.py) +file(GLOB_RECURSE FLUID_PY_FILES ./paddle/fluid/*.py) set(PY_FILES paddle/__init__.py ${UTILS_PY_FILES} ${FLUID_PY_FILES}) @@ -7,7 +7,7 @@ set(PY_FILES paddle/__init__.py if(NOT WITH_FLUID_ONLY) file(GLOB TRAINER_PY_FILES . ./paddle/trainer/*.py) file(GLOB HELPERS_PY_FILES . 
./paddle/trainer_config_helpers/*.py) - file(GLOB_RECURSE V2_PY_FILES ./paddle/v2/ *.py) + file(GLOB_RECURSE V2_PY_FILES ./paddle/v2/*.py) set(PY_FILES ${PY_FILES} ${TRAINER_PY_FILES} ${HELPERS_PY_FILES} @@ -55,7 +55,7 @@ add_custom_target(copy_paddle_pybind ALL DEPENDS ${PADDLE_BINARY_DIR}/python/pad add_custom_command(OUTPUT ${PADDLE_PYTHON_BUILD_DIR}/.timestamp COMMAND touch stub.cc - COMMAND ${CMAKE_COMMAND} -E copy_directory ${PADDLE_SOURCE_DIR}/python/paddle ${PADDLE_BINARY_DIR}/python/paddle + COMMAND cp -r ${PADDLE_SOURCE_DIR}/python/paddle ${PADDLE_BINARY_DIR}/python COMMAND cp -r ${PADDLE_SOURCE_DIR}/paddle/py_paddle ${PADDLE_BINARY_DIR}/python/ COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_PYTHON_BUILD_DIR}/.timestamp diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py index a5a3884750cce8cf19b92f1e5f131b50a18d3c97..f757411b853bacb9e03fc42fa2ef6593c3cde00f 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/fluid/__init__.py @@ -29,6 +29,7 @@ import optimizer import backward import regularizer import average +import metrics from param_attr import ParamAttr, WeightNormParamAttr from data_feeder import DataFeeder from core import LoDTensor, CPUPlace, CUDAPlace, CUDAPinnedPlace diff --git a/python/paddle/fluid/average.py b/python/paddle/fluid/average.py index ded6eb085968343fcdc9f6e4b8353c08408df426..6abe8233b07c484494848c566e9898600a7d8f5c 100644 --- a/python/paddle/fluid/average.py +++ b/python/paddle/fluid/average.py @@ -13,6 +13,7 @@ # limitations under the License. import numpy as np +import warnings """ Class of all kinds of Average. @@ -22,6 +23,8 @@ import numpy as np wrappers of Python functions. """ +__all__ = ["WeightedAverage"] + def _is_number_(var): return isinstance(var, int) or isinstance(var, float) or (isinstance( @@ -34,6 +37,9 @@ def _is_number_or_matrix_(var): class WeightedAverage(object): def __init__(self): + warnings.warn( + "The %s is deprecated, please use fluid.metrics.Accuracy instead." % + (self.__class__.__name__), Warning) self.reset() def reset(self): diff --git a/python/paddle/fluid/distribute_transpiler.py b/python/paddle/fluid/distribute_transpiler.py index 0ec3ebc7e3dba6e4cf89c8a76622761d210276cf..aa15392d7e4901e8ee23ad5b4370542232adc2a5 100644 --- a/python/paddle/fluid/distribute_transpiler.py +++ b/python/paddle/fluid/distribute_transpiler.py @@ -13,14 +13,17 @@ # limitations under the License. from __future__ import print_function -import framework -from framework import Program, default_main_program, default_startup_program, Parameter, Variable -import optimizer -from layer_helper import LayerHelper -import distributed_splitter as splitter + import math + +import distributed_splitter as splitter +import framework +from framework import Program, default_main_program, Variable from . import core -import debuger + +LOOKUP_TABLE_TYPE = "lookup_table" +LOOKUP_TABLE_GRAD_TYPE = "lookup_table_grad" +RPC_CLIENT_VAR_NAME = "RPC_CLIENT_VAR" class VarBlock: @@ -35,9 +38,9 @@ class VarBlock: class UnionFind(object): - """ Union-find data struct. + """ Union-find data structure. - Union-find is a data struct that keeps track of a set of elements partitioned + Union-find is a data structure that keeps track of a set of elements partitioned into a number of disjoint (non-overlapping) subsets. 
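# Illustrative sketch (not part of the patch): replacing the deprecated
# WeightedAverage above with the new metrics API; `batch_acc` and `batch_size`
# are assumed to be values fetched from the executor each step:
#
#   accuracy = fluid.metrics.Accuracy()
#   accuracy.reset()
#   for batch_acc, batch_size in ...:   # training loop elided
#       accuracy.update(value=batch_acc, weight=batch_size)
#   pass_acc = accuracy.eval()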
Reference: @@ -185,19 +188,66 @@ class DistributeTranspiler: assert (callable(split_method)) if program is None: program = default_main_program() - self.program = program - self.trainers = trainers + self.origin_program = program + self.trainer_num = trainers self.optimize_ops = optimize_ops # TODO(typhoonzero): currently trainer_id is fetched from cluster system # like Kubernetes, we should port this to use etcd later when developing # fluid distributed training with fault-tolerance. self.trainer_id = trainer_id pserver_endpoints = pservers.split(",") + self.pserver_endpoints = pserver_endpoints + + # process lookup_table_op + # 1. check that all lookup_table_ops are distributed + # 2. check that all lookup_table_ops share the same table. + distributed_lookup_table_ops = [] + # only one distributed lookup_table is supported for now + self.table_name = None + for op in program.global_block().ops: + if op.type == LOOKUP_TABLE_TYPE: + if op.attrs['is_distributed'] is True: + if self.table_name is None: + self.table_name = op.input("W")[0] + if self.table_name != op.input("W")[0]: + raise RuntimeError("all distributed lookup_table_ops" + " should have only one table") + distributed_lookup_table_ops.append(op) + else: + if self.table_name is not None: + assert op.input("W")[0] != self.table_name + + self.has_distributed_lookup_table = len( + distributed_lookup_table_ops) > 0 # step1: For large parameters and gradients, split them into smaller # blocks. param_list = [pg[0] for pg in params_grads] grad_list = [pg[1] for pg in params_grads] + + if self.has_distributed_lookup_table: + param_list = [ + param for param in param_list if param.name != self.table_name + ] + grad_list = [ + grad for grad in grad_list + if grad.name != framework.grad_var_name(self.table_name) + ] + self.table_param_grad = [ + param_grad for param_grad in params_grads + if param_grad[0].name == self.table_name + ][0] + table_grad_var = self.table_param_grad[1] + self.table_grad_list = [ + program.global_block().create_var( + name="%s.trainer_%d.pserver_%d" % + (table_grad_var.name, trainer_id, index), + type=table_grad_var.type, + shape=table_grad_var.shape, + dtype=table_grad_var.dtype) + for index in range(len(self.pserver_endpoints)) + ] + grad_blocks = split_dense_variable(grad_list, len(pserver_endpoints)) param_blocks = split_dense_variable(param_list, len(pserver_endpoints)) # step2: Create new vars for the parameters and gradients blocks and @@ -229,7 +279,7 @@ class DistributeTranspiler: self.param_grad_ep_mapping[ep]["grads"].append(grad) rpc_client_var = program.global_block().create_var( - name="RPC_CLIENT_VAR", + name=RPC_CLIENT_VAR_NAME, persistable=True, type=core.VarDesc.VarType.RAW) @@ -252,12 +302,19 @@ class DistributeTranspiler: outputs={"Out": [orig_param]}, attrs={"axis": 0}) + if self.has_distributed_lookup_table: + self._replace_lookup_table_op_with_prefetch(program, rpc_client_var, + eplist) + self._split_table_grad_and_add_send_vars(program, rpc_client_var, + pserver_endpoints) + def get_trainer_program(self): # remove optimize ops and add a send op to main_program - self.program.global_block().delete_ops(self.optimize_ops) + self.origin_program.global_block().delete_ops(self.optimize_ops) + self.origin_program.sync_with_cpp() # FIXME(typhoonzero): serializing once fixes an error that occurs when cloning.
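# Illustrative usage (not part of the patch): how the renamed fields above are
# exercised end to end. Endpoints are made up; optimize_ops/params_grads come
# from Optimizer.minimize(); `training_role` and `current_endpoint` would
# normally be read from the environment:
#
#   t = fluid.DistributeTranspiler()
#   t.transpile(optimize_ops=optimize_ops, params_grads=params_grads,
#               trainer_id=0, program=fluid.default_main_program(),
#               pservers="127.0.0.1:6174,127.0.0.1:6175", trainers=2)
#   if training_role == "PSERVER":
#       prog = t.get_pserver_program(current_endpoint)  # runs listen_and_serv
#   else:
#       prog = t.get_trainer_program()  # optimize ops removed; send ops added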
- self.program.__str__() - return self.program + self.origin_program.__str__() + return self.origin_program def get_pserver_program(self, endpoint): """ @@ -293,8 +350,8 @@ class DistributeTranspiler: type=v.type, dtype=v.dtype, shape=v.shape) - if self.trainers > 1: - for trainer_id in xrange(self.trainers): + if self.trainer_num > 1: + for trainer_id in xrange(self.trainer_num): var = pserver_program.global_block().create_var( name="%s.trainer_%d" % (orig_var_name, trainer_id), persistable=False, @@ -308,7 +365,7 @@ class DistributeTranspiler: # step3 optimize_block = pserver_program.create_block(0) # step 4 - # Create a union-find data struct from optimize ops, + # Create a union-find data structure from optimize ops, # If two ops are connected, we could add these two ops # into one set. ufind = self._create_ufind(self.optimize_ops) @@ -383,6 +440,23 @@ class DistributeTranspiler: # __append_optimize_op__(glb_op, optimize_block) # break + # process distributed lookup_table + prefetch_block = None + if self.has_distributed_lookup_table: + pserver_index = self.pserver_endpoints.index(endpoint) + self._create_table_optimize_block(pserver_index, pserver_program, + append_block) + prefetch_block = self._create_prefetch_block( + pserver_index, pserver_program, optimize_block) + + # NOTE: if has_distributed_lookup_table is False, then prefetch_block will + # not be executed, so it's safe to use optimize_block to hold the place + if self.has_distributed_lookup_table: + assert prefetch_block is not None + else: + assert prefetch_block is None + prefetch_block = pserver_program.global_block() + # step5 append the listen_and_serv op pserver_program.global_block().append_op( type="listen_and_serv", @@ -391,8 +465,10 @@ class DistributeTranspiler: attrs={ "OptimizeBlock": optimize_block, "endpoint": endpoint, - "Fanin": self.trainers + "Fanin": self.trainer_num, + "PrefetchBlock": prefetch_block }) + pserver_program.sync_with_cpp() return pserver_program @@ -450,6 +526,197 @@ class DistributeTranspiler: attrs=op.attrs) return s_prog + # transpiler function for dis lookup_table + def _replace_lookup_table_op_with_prefetch(self, program, rpc_client_var, + eplist): + # 1. 
replace lookup_table_op with split_ids_op -> prefetch_op -> sum_op + self.prefetch_input_vars = None + self.prefetch_output_vars = None + + continue_search_lookup_table_op = True + while continue_search_lookup_table_op: + continue_search_lookup_table_op = False + all_ops = program.global_block().ops + for op in all_ops: + if op.type == LOOKUP_TABLE_TYPE: + continue_search_lookup_table_op = True + + op_index = list(all_ops).index(op) + ids_name = op.input("Ids") + out_name = op.output("Out") + + if self.prefetch_input_vars is None: + ids_var = program.global_block().vars[ids_name[0]] + self.prefetch_input_vars = self.create_splited_vars( + source_var=ids_var, + block=program.global_block(), + tag="_prefetch_in_") + if self.prefetch_output_vars is None: + out_var = program.global_block().vars[out_name[0]] + self.prefetch_output_vars = self.create_splited_vars( + source_var=out_var, + block=program.global_block(), + tag="_prefetch_out_") + + # insert split_ids_op + program.global_block().insert_op( + index=op_index, + type="split_ids", + inputs={ + 'Ids': [ + program.global_block().vars[varname] + for varname in ids_name + ] + }, + outputs={"Out": self.prefetch_input_vars}) + + # insert prefetch_op + program.global_block().insert_op( + index=op_index + 1, + type="prefetch", + inputs={'X': self.prefetch_input_vars}, + outputs={ + "Out": self.prefetch_output_vars, + "RPCClient": rpc_client_var + }, + attrs={"epmap": eplist}) + + # insert concat_op + program.global_block().insert_op( + index=op_index + 2, + type="concat", + inputs={'X': self.prefetch_output_vars}, + outputs={ + "Out": [ + program.global_block().vars[varname] + for varname in out_name + ] + }, + attrs={"axis": 0}) + + # delete lookup_table_op + program.global_block().delete_ops([op]) + program.sync_with_cpp() + # break for loop + break + + def _split_table_grad_and_add_send_vars(self, program, rpc_client_var, + pserver_endpoints): + # 2. 
add split_ids_op and send_vars_op to send gradient to pservers + # there should only be one table_name + all_ops = program.global_block().ops + table_grad_name = framework.grad_var_name(self.table_name) + for op in all_ops: + if table_grad_name in op.output_arg_names: + op_index = list(all_ops).index(op) + # insert split_ids_op + program.global_block().insert_op( + index=op_index + 1, + type="split_ids", + inputs={ + 'Ids': [program.global_block().vars[table_grad_name]] + }, + outputs={"Out": self.table_grad_list}) + program.global_block().insert_op( + index=op_index + 2, + type="send_vars", + inputs={'X': self.table_grad_list}, + outputs={"RPCClient": rpc_client_var}, + attrs={"sync_send": True, + "epmap": pserver_endpoints}) + break + + def _create_prefetch_block(self, pserver_index, pserver_program, + optimize_block): + # STEP: create prefetch block + table_var = pserver_program.global_block().vars[self.table_name] + prefetch_block = pserver_program.create_block(optimize_block.idx) + trainer_ids = self.prefetch_input_vars[pserver_index] + pserver_ids = pserver_program.global_block().create_var( + name=trainer_ids.name, + type=trainer_ids.type, + shape=trainer_ids.shape, + dtype=trainer_ids.dtype) + trainer_out = self.prefetch_output_vars[pserver_index] + pserver_out = pserver_program.global_block().create_var( + name=trainer_out.name, + type=trainer_out.type, + shape=trainer_out.shape, + dtype=trainer_out.dtype) + prefetch_block.append_op( + type=LOOKUP_TABLE_TYPE, + inputs={'Ids': pserver_ids, + "W": table_var}, + outputs={"Out": pserver_out}, + attrs={ + "is_sparse": True, # has no effect on lookup_table op + "is_distributed": True, + "padding_idx": -1 + }) + return prefetch_block + + def _create_table_optimize_block(self, pserver_index, pserver_program, + append_block): + def _clone_var(block, var, persistable=True): + assert isinstance(var, Variable) + return block.create_var( + name=var.name, + shape=var.shape, + dtype=var.dtype, + type=var.type, + persistable=persistable) + + # STEP: create table optimize block + # create table param and grad var in pserver program + param_var = _clone_var( + pserver_program.global_block(), + self.origin_program.global_block().vars[self.table_name]) + grad_var = _clone_var( + pserver_program.global_block(), + self.origin_program.global_block().vars[framework.grad_var_name( + self.table_name)], + persistable=False) + + # create grad vars in pserver program + table_grad_var = self.table_param_grad[1] + table_grad_list = [ + pserver_program.global_block().create_var( + name="%s.trainer_%d.pserver_%d" % + (table_grad_var.name, index, pserver_index), + type=table_grad_var.type, + shape=table_grad_var.shape, + dtype=table_grad_var.dtype) for index in range(self.trainer_num) + ] + + # create table optimize block in pserver program + table_opt_op = [ + op for op in self.optimize_ops + if op.input("Param")[0] == self.table_name + ][0] + table_opt_block = pserver_program.create_block(append_block.idx) + # only support sgd now + assert table_opt_op.type == "sgd" + + # append sum op for table_grad_list + table_opt_block.append_op( + type="sum", + inputs={"X": table_grad_list}, + outputs={"Out": [grad_var]}) + + lr_var = pserver_program.global_block().vars[table_opt_op.input( + "LearningRate")[0]] + inputs = { + "Param": [param_var], + "Grad": [grad_var], + "LearningRate": [lr_var] + } + outputs = {"ParamOut": [param_var]} + table_opt_block.append_op( + type=table_opt_op.type, + inputs=inputs, + outputs=outputs, + attrs=table_opt_op.attrs) + # 
====================== private transpiler functions ===================== def _create_vars_from_blocklist(self, program, @@ -511,7 +778,17 @@ class DistributeTranspiler: program.global_block().sync_with_cpp() return var_mapping - def _clone_var(self, block, var): + def create_splited_vars(self, source_var, block, tag): + return [ + block.create_var( + name=str(source_var.name + tag + str(index)), + type=source_var.type, + shape=source_var.shape, + dtype=source_var.dtype) + for index in range(len(self.pserver_endpoints)) + ] + + def _clone_var(self, block, var, persistable=True): assert isinstance(var, Variable) return block.create_var( name=var.name, @@ -519,12 +796,12 @@ class DistributeTranspiler: dtype=var.dtype, type=var.type, lod_level=var.lod_level, - persistable=True) + persistable=persistable) def _append_split_op(self, program, gradblocks): # Split variables that need to be split and append respective ops add_suffix = False - if self.trainers > 1: + if self.trainer_num > 1: add_suffix = True var_mapping = self._create_vars_from_blocklist( program, gradblocks, add_trainer_suffix=add_suffix) @@ -615,9 +892,9 @@ class DistributeTranspiler: return merged_var = \ pserver_block.vars[self._orig_varname(grad_block.name)] - if self.trainers > 1: + if self.trainer_num > 1: vars2merge = [] - for i in xrange(self.trainers): + for i in xrange(self.trainer_num): per_trainer_name = "%s.trainer_%d" % \ (self._orig_varname(grad_block.name), i) vars2merge.append(pserver_block.vars[per_trainer_name]) @@ -632,7 +909,7 @@ class DistributeTranspiler: type="scale", inputs={"X": merged_var}, outputs={"Out": merged_var}, - attrs={"scale": 1.0 / float(self.trainers)}) + attrs={"scale": 1.0 / float(self.trainer_num)}) new_inputs[key] = merged_var elif key == "Param": # param is already created on global program @@ -668,7 +945,7 @@ class DistributeTranspiler: new_shape = None if key in ["Param", "Grad", "LearningRate"]: continue - var = self.program.global_block().vars[opt_op.input(key)[0]] + var = self.origin_program.global_block().vars[opt_op.input(key)[0]] # update accumulator variable shape param_shape = new_inputs["Param"].shape new_shape = self._get_optimizer_input_shape(opt_op.type, key, @@ -681,8 +958,8 @@ class DistributeTranspiler: new_inputs[key] = tmpvar # change output's ParamOut variable - outputs = self._get_output_map_from_op(self.program.global_block().vars, - opt_op) + outputs = self._get_output_map_from_op( + self.origin_program.global_block().vars, opt_op) outputs["ParamOut"] = new_inputs["Param"] optimize_block.append_op( @@ -694,8 +971,8 @@ class DistributeTranspiler: def _append_pserver_non_opt_ops(self, optimize_block, opt_op): program = optimize_block.program # Append the ops for parameters that do not need to be optimized/updated - inputs = self._get_input_map_from_op(self.program.global_block().vars, - opt_op) + inputs = self._get_input_map_from_op( + self.origin_program.global_block().vars, opt_op) for varlist in inputs.itervalues(): if not isinstance(varlist, list): varlist = [varlist] @@ -708,8 +985,8 @@ class DistributeTranspiler: dtype=var.dtype, shape=var.shape) - outputs = self._get_output_map_from_op(self.program.global_block().vars, - opt_op) + outputs = self._get_output_map_from_op( + self.origin_program.global_block().vars, opt_op) for varlist in outputs.itervalues(): if not isinstance(varlist, list): @@ -782,7 +1059,6 @@ class DistributeTranspiler: if same_or_split_var(n, param) and n != param: return True return False - return False def _get_input_map_from_op(self, 
varmap, op): """Returns a dict from op input name to the vars in varmap.""" @@ -820,7 +1096,7 @@ class DistributeTranspiler: find_ops = [] # find ops whose output is the lr var - block = self.program.global_block() + block = self.origin_program.global_block() for op in block.ops: if set(op.output_arg_names) & lr_vars: find_ops.append(op) @@ -839,4 +1115,6 @@ class DistributeTranspiler: for op2 in find_ops: if ufind.is_connected(op1, op2): lr_ops.append(op1) + # we only need to append the op once + break return lr_ops diff --git a/python/paddle/fluid/evaluator.py b/python/paddle/fluid/evaluator.py index 19e5b61b0b32aba3fe1e7805704a3740e3854fc8..13475025b5c2a759779066f9d511ed8a786118d5 100644 --- a/python/paddle/fluid/evaluator.py +++ b/python/paddle/fluid/evaluator.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import warnings import numpy as np import layers @@ -59,6 +60,9 @@ class Evaluator(object): """ def __init__(self, name, **kwargs): + warnings.warn( + "The %s is deprecated, because maintaining a modified program inside the evaluator easily causes bugs; please use fluid.metrics.%s instead." + % (self.__class__.__name__, self.__class__.__name__), Warning) self.states = [] self.metrics = [] self.helper = LayerHelper(name, **kwargs) diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 33cf6918178ff746a6b130af0e23a69de0f532fe..4b841ef31dcb67ab660475cf6e231fd8a4ae83d6 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -818,6 +818,11 @@ class Block(object): del self.vars[name] self.sync_with_cpp() + def remove_var(self, name): + self.sync_with_cpp() + self.desc.remove_var(name) + del self.vars[name] + def create_parameter(self, *args, **kwargs): global_block = self.program.global_block() param = Parameter(global_block, *args, **kwargs) @@ -838,6 +843,11 @@ class Block(object): self.ops.insert(index, op) return op + def remove_op(self, index): + self.sync_with_cpp() + self.desc.remove_op(index, index + 1) + del self.ops[index] + def delete_ops(self, ops): # remove from cpp # FIXME(typhoonzero): remove only the first occurrence.
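# Illustrative sketch (not part of the patch) of the new Block surgery
# helpers above; the variable name is hypothetical and indices follow the
# block.ops order:
#
#   block = fluid.default_main_program().global_block()
#   block.remove_op(3)            # drops the op at index 3
#   block.remove_var("tmp_var")   # drops a variable by name
#
# Both call sync_with_cpp() first so the Python-side state matches the C++
# desc before mutating it.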
@@ -846,6 +856,7 @@ class Block(object): end = list(self.ops).index(ops[-1]) except Exception, e: raise e + self.desc.remove_op(start, end + 1) def slice_ops(self, start, end): @@ -1108,24 +1119,6 @@ class Program(object): def current_block(self): return self.blocks[self.current_block_idx] - def append_backward(self, target, no_grad_set=None): - """ - return map(param_name -> (grad_name, block_index, op_index)) - """ - assert isinstance(target, Variable) - if no_grad_set is None: - no_grad_set = set() - try: - param_to_grad_info = self.desc.append_backward(target.desc, - no_grad_set) - except Exception as e: - raise core.EnforceNotMet( - str(e) + "\nCurrent protobuf is\n{0}".format( - self.to_string(False))) - - self.sync_with_cpp() - return param_to_grad_info - def create_block(self, parent_idx=None): new_block_idx = len(self.blocks) parent = self.current_block() if parent_idx is None else self.block( @@ -1190,6 +1183,8 @@ class Parameter(Variable): self.gradient_clip_attr = kwargs.get('gradient_clip_attr', None) + self.do_model_average = kwargs.get('do_model_average', None) + def __str__(self): return self.to_string(True) @@ -1210,7 +1205,7 @@ class Parameter(Variable): if with_details: res_str = Variable.to_string(self, throw_on_error, True) additional_attr = ("trainable", "optimize_attr", "regularizer", - "gradient_clip_attr") + "gradient_clip_attr", "do_model_average") for attr_name in additional_attr: res_str += "%s: %s\n" % (attr_name, str(getattr(self, attr_name))) diff --git a/python/paddle/fluid/initializer.py b/python/paddle/fluid/initializer.py index 927f1e625a579737b98e60683d8d9ed90d5e7e03..4e132ed26183eaa5e572128e679cdbffd42e5a42 100644 --- a/python/paddle/fluid/initializer.py +++ b/python/paddle/fluid/initializer.py @@ -18,7 +18,8 @@ import contextlib __all__ = [ 'Constant', 'Uniform', 'Normal', 'Xavier', 'force_init_on_cpu', - 'init_on_cpu' + 'init_on_cpu', 'ConstantInitializer', 'UniformInitializer', + 'NormalInitializer', 'XavierInitializer' ] _force_init_on_cpu_ = False diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index 969398bda4cfd0b2f5e39f45d34a1da9b216901f..e7d6c4e2521bee133c4794ed1db669b02fc2152b 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -21,8 +21,7 @@ from ..executor import global_scope __all__ = [ 'data', 'BlockGuardServ', 'ListenAndServ', 'Send', 'open_recordio_file', - 'open_files', 'read_file', 'create_shuffle_reader', - 'create_double_buffer_reader', 'create_multi_pass_reader' + 'open_files', 'read_file', 'shuffle', 'double_buffer' ] @@ -237,13 +236,9 @@ def monkey_patch_reader_methods(reader): var = scope.find_var(reader.name) return var.get_reader() - def eof(): - return not __get_reader__().has_next() - def reset(): return __get_reader__().reset() - reader.eof = eof reader.reset = reset reader.stop_gradient = True reader.persistable = True @@ -283,7 +278,42 @@ def _copy_reader_create_op_(block, op): return new_op -def open_recordio_file(filename, shapes, lod_levels, dtypes): +def open_recordio_file(filename, + shapes, + lod_levels, + dtypes, + pass_num=1, + for_parallel=False): + """ + Open a RecordIO file + + This layer takes a RecordIO file to read from and returns a Reader Variable. + Via the Reader Variable, we can get data from the given RecordIO file. + + Args: + filename(str): The RecordIO file's name. + shapes(list): List of tuples declaring the data shapes. + lod_levels(list): List of ints declaring the data lod_level.
+ dtypes(list): List of strs declaring the data type. + pass_num(int): Number of passes to run. + for_parallel(Bool): Set it to True if you are going to run + subsequent operators in parallel. + + Returns: + Variable: A Reader Variable via which we can get RecordIO file data. + + Examples: + .. code-block:: python + + reader = fluid.layers.io.open_recordio_file( + filename='./data.recordio', + shapes=[(3,224,224), (1)], + lod_levels=[0, 0], + dtypes=['float32', 'int64']) + + # Via the reader, we can use 'read_file' layer to get data: + image, label = fluid.layers.read_file(reader) + """ dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes] shape_concat = [] ranks = [] @@ -310,10 +340,63 @@ def open_recordio_file(filename, shapes, lod_levels, dtypes): startup_var.persistable = True main_prog_var = _copy_reader_var_(default_main_program().current_block(), startup_var) + + if pass_num > 1: + main_prog_var = multi_pass(reader=main_prog_var, pass_num=pass_num) + + if for_parallel: + main_prog_var = parallel(reader=main_prog_var) + return monkey_patch_reader_methods(main_prog_var) -def open_files(filenames, thread_num, shapes, lod_levels, dtypes): +def open_files(filenames, + shapes, + lod_levels, + dtypes, + thread_num, + buffer_size=None, + pass_num=1, + for_parallel=False): + """ + Open files + + This layer takes a list of files to read from and returns a Reader Variable. + Via the Reader Variable, we can get data from the given files. All files must + have name suffixes to indicate their formats, e.g., '*.recordio'. + + Args: + filenames(list): The list of file names. + shapes(list): List of tuples declaring the data shapes. + lod_levels(list): List of ints declaring the data lod_level. + dtypes(list): List of strs declaring the data type. + thread_num(int): The maximal number of concurrent prefetch threads. + buffer_size(int): The size of the prefetch buffer. + pass_num(int): Number of passes to run. + for_parallel(Bool): Set it to True if you are going to run + subsequent operators in parallel. + + Returns: + Variable: A Reader Variable via which we can get file data. + + Examples: + ..
code-block:: python + + reader = fluid.layers.io.open_files(filenames=['./data1.recordio', + './data2.recordio'], + shapes=[(3,224,224), (1)], + lod_levels=[0, 0], + dtypes=['float32', 'int64'], + thread_num=2, + buffer_size=2) + + # Via the reader, we can use 'read_file' layer to get data: + image, label = fluid.layers.io.read_file(reader) + """ + if buffer_size is None: + buffer_size = thread_num + if isinstance(filenames, basestring): + filenames = [filenames] dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes] shape_concat = [] ranks = [] @@ -322,29 +405,36 @@ def open_files(filenames, thread_num, shapes, lod_levels, dtypes): shape_concat.extend(shape) ranks.append(len(shape)) - var_name = unique_name('multiple_reader') - + multi_file_reader_name = unique_name('multi_file_reader') startup_blk = default_startup_program().current_block() - startup_var = startup_blk.create_var(name=var_name) + startup_reader = startup_blk.create_var(name=multi_file_reader_name) startup_blk.append_op( type='open_files', - outputs={'Out': [startup_var]}, + outputs={'Out': [startup_reader]}, attrs={ 'shape_concat': shape_concat, 'lod_levels': lod_levels, 'ranks': ranks, 'file_names': filenames, - 'thread_num': thread_num + 'thread_num': thread_num, + 'buffer_size': buffer_size }) - startup_var.desc.set_dtypes(dtypes) - startup_var.persistable = True - main_prog_var = _copy_reader_var_(default_main_program().current_block(), - startup_var) - return monkey_patch_reader_methods(main_prog_var) + startup_reader.desc.set_dtypes(dtypes) + startup_reader.persistable = True + main_prog_reader = _copy_reader_var_(default_main_program().current_block(), + startup_reader) + if pass_num > 1: + main_prog_reader = multi_pass( + reader=main_prog_reader, pass_num=pass_num) + + if for_parallel: + main_prog_reader = parallel(reader=main_prog_reader) + + return monkey_patch_reader_methods(main_prog_reader) -def __create_decorated_reader__(op_type, reader, attrs): +def __create_shared_decorated_reader__(op_type, reader, attrs): var_name = unique_name(op_type) startup_blk = default_startup_program().current_block() startup_var = startup_blk.create_var(name=var_name) @@ -360,22 +450,41 @@ def __create_decorated_reader__(op_type, reader, attrs): return monkey_patch_reader_methods(main_prog_var) -def create_shuffle_reader(reader, buffer_size): - return __create_decorated_reader__('create_shuffle_reader', reader, - {'buffer_size': int(buffer_size)}) +def __create_unshared_decorated_reader__(op_type, reader, attrs): + new_reader_name = unique_name(op_type) + main_blk = default_main_program().current_block() + new_reader = main_blk.create_var(name=new_reader_name) + main_blk.append_op( + type=op_type, + inputs={'UnderlyingReader': reader}, + outputs={'Out': [new_reader]}, + attrs=attrs) + new_reader.persistable = True + new_reader.stop_gradient = True + return monkey_patch_reader_methods(new_reader) + + +def shuffle(reader, buffer_size): + return __create_unshared_decorated_reader__( + 'create_shuffle_reader', reader, {'buffer_size': int(buffer_size)}) -def create_double_buffer_reader(reader, place=None): +def double_buffer(reader, place=None): attrs = dict() if place is not None: attrs['place'] = str(place).upper() - return __create_decorated_reader__('create_double_buffer_reader', reader, - attrs) + return __create_unshared_decorated_reader__('create_double_buffer_reader', + reader, attrs) + + +def multi_pass(reader, pass_num): + return __create_shared_decorated_reader__( + 'create_multi_pass_reader', reader, {'pass_num': 
int(pass_num)}) -def create_multi_pass_reader(reader, pass_num): - return __create_decorated_reader__('create_multi_pass_reader', reader, - {'pass_num': int(pass_num)}) +def parallel(reader): + return __create_shared_decorated_reader__('create_threaded_reader', reader, + {}) def read_file(file_obj): diff --git a/python/paddle/fluid/layers/metric.py b/python/paddle/fluid/layers/metric.py index 3d9157ad4ef9381b70b4007c5bdca91f1482b427..f66dccfa2d040ea0a9d29daeaa1d2da640525959 100644 --- a/python/paddle/fluid/layers/metric.py +++ b/python/paddle/fluid/layers/metric.py @@ -15,12 +15,13 @@ All layers just related to metric. """ +import warnings from ..layer_helper import LayerHelper from ..initializer import Normal, Constant from ..framework import Variable from ..param_attr import ParamAttr -__all__ = ['accuracy'] +__all__ = ['accuracy', 'auc'] def accuracy(input, label, k=1, correct=None, total=None): @@ -55,3 +56,37 @@ def accuracy(input, label, k=1, correct=None, total=None): "Total": [total], }) return acc_out + + +def auc(input, label, curve='ROC', num_thresholds=200): + warnings.warn( + "This interface is not recommended: fluid.layers.auc computes the AUC \ + of every minibatch, but cannot aggregate those values into the AUC of a \ + whole pass, because the pass AUC cannot be recovered from a weighted \ + average of minibatch AUC values. Please use fluid.metrics.Auc instead, \ + which computes the AUC natively in Python and can report both \ + per-minibatch and per-pass values.", Warning) + helper = LayerHelper("auc", **locals()) + topk_out = helper.create_tmp_variable(dtype=input.dtype) + topk_indices = helper.create_tmp_variable(dtype="int64") + helper.append_op( + type="top_k", + inputs={"X": [input]}, + outputs={"Out": [topk_out], + "Indices": [topk_indices]}, + attrs={"k": 1}) + auc_out = helper.create_tmp_variable(dtype="float32") + helper.append_op( + type="auc", + inputs={ + "Out": [topk_out], + "Indices": [topk_indices], + "Label": [label] + }, + attrs={"curve": curve, + "num_thresholds": num_thresholds}, + outputs={"AUC": [auc_out], }) + return auc_out diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index d2e7d58524bfb11627b6acb36ef873c41b348f0f..bba8b64bd88c3edc6eda110dde38c0ced50439f6 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -77,6 +77,7 @@ __all__ = [ 'lod_reset', 'lrn', 'pad', + 'label_smooth', ] @@ -218,6 +219,7 @@ def fc(input, def embedding(input, size, is_sparse=False, + is_distributed=False, padding_idx=None, param_attr=None, dtype='float32'): @@ -268,8 +270,11 @@ def embedding(input, inputs={'Ids': input, 'W': w}, outputs={'Out': tmp}, - attrs={'is_sparse': is_sparse, - 'padding_idx': padding_idx}) + attrs={ + 'is_sparse': is_sparse, + 'is_distributed': is_distributed, + 'padding_idx': padding_idx + }) return tmp @@ -1516,7 +1521,8 @@ def batch_norm(input, in_place=False, name=None, moving_mean_name=None, - moving_variance_name=None): + moving_variance_name=None, + do_model_average_for_mean_and_var=False): """ This function helps create an operator to implement the BatchNorm layer using the configurations from the input parameters.
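# Illustrative usage (not part of the patch): per-minibatch metrics from the
# layers above; `predict` and `label` are assumed to come from a network:
#
#   acc = fluid.layers.accuracy(input=predict, label=label, k=1)
#   batch_auc = fluid.layers.auc(input=predict, label=label,
#                                curve='ROC', num_thresholds=200)
#
# For a whole-pass AUC, accumulate minibatch predictions in fluid.metrics.Auc
# instead, as the deprecation warning recommends.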
@@ -1547,7 +1553,10 @@ def batch_norm(input,
 
     mean = helper.create_parameter(
         attr=ParamAttr(
-            name=moving_mean_name, initializer=Constant(0.0), trainable=False),
+            name=moving_mean_name,
+            initializer=Constant(0.0),
+            trainable=False,
+            do_model_average=do_model_average_for_mean_and_var),
         shape=param_shape,
         dtype=input.dtype)
     mean.stop_gradient = True
@@ -1556,7 +1565,8 @@ def batch_norm(input,
         attr=ParamAttr(
             name=moving_variance_name,
             initializer=Constant(1.0),
-            trainable=False),
+            trainable=False,
+            do_model_average=do_model_average_for_mean_and_var),
         shape=param_shape,
         dtype=input.dtype)
     variance.stop_gradient = True
@@ -3374,14 +3384,14 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=True, name=None):
     Here are some examples to explain it.
 
     1. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
-    is [6, 8], the reshape operator will transform x into a 2-D tensor with
+    is [6, 8], the reshape operator will transform x into a 2-D tensor with
     shape [6, 8] and leaving x's data unchanged.
 
     2. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
     specified is [2, 3, -1, 2], the reshape operator will transform x into a
     4-D tensor with shape [2, 3, 4, 2] and leaving x's data unchanged. In this
-    case, one dimension of the target shape is set to -1, the value of this
-    dimension is inferred from the total element number of x and remaining
+    case, one dimension of the target shape is set to -1, the value of this
+    dimension is inferred from the total element number of x and remaining
     dimensions.
 
     3. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
@@ -3615,7 +3625,7 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None):
 def pad(x, paddings, pad_value=0., name=None):
     """
     Pads a tensor with a constant value given by :attr:`pad_value`, and the
-    padded width is specified by :attr:`paddings`.
+    padded width is specified by :attr:`paddings`.
 
     Specifically, the number of values padded before the contents of :attr:`x`
     in dimension :attr:`i` is indicated by :attr:`paddings[i]`, and the number
@@ -3643,7 +3653,7 @@ def pad(x, paddings, pad_value=0., name=None):
         x (Variable): The input tensor variable.
         paddings (list): A list of integers. Its elements specify the padded
                          width before and after for each dimension in turn.
-                         The length of :attr:paddings must be
+                         The length of :attr:paddings must be
                          :math:`rank(x) \\times 2`.
         pad_value (float): The constant value used to pad.
         name(str|None): A name for this layer(optional). If set None, the layer
@@ -3669,3 +3679,68 @@ def pad(x, paddings, pad_value=0., name=None):
         attrs={'paddings': paddings,
                'pad_value': float(pad_value)})
     return out
+
+
+def label_smooth(label,
+                 prior_dist=None,
+                 epsilon=0.1,
+                 dtype="float32",
+                 name=None):
+    """
+    Label smoothing is a mechanism to regularize the classifier layer and is
+    called label-smoothing regularization (LSR).
+
+    Label smoothing is proposed to encourage the model to be less confident,
+    since optimizing the log-likelihood of the correct label directly may
+    cause overfitting and reduce the ability of the model to adapt. Label
+    smoothing replaces the ground-truth label :math:`y` with the weighted sum
+    of itself and some fixed distribution :math:`\mu`. For class :math:`k`,
+    i.e.
+
+    .. math::
+
+        \\tilde{y}_k = (1 - \epsilon) * y_k + \epsilon * \mu_k,
+
+    where :math:`1 - \epsilon` and :math:`\epsilon` are the weights
+    respectively, and :math:`\\tilde{y}_k` is the smoothed label. Usually,
+    the uniform distribution is used for :math:`\mu`.
+
+    See more details about label smoothing in https://arxiv.org/abs/1512.00567.
+
+    Args:
+        label(Variable): The input variable containing the label data. The
+                         label data should use one-hot representation.
+        prior_dist(Variable): The prior distribution to be used to smooth
+                              labels. If not provided, a uniform distribution
+                              is used. The shape of :attr:`prior_dist` should
+                              be :math:`(1, class\_num)`.
+        epsilon(float): The weight used to mix up the original ground-truth
+                        distribution and the fixed distribution.
+        dtype(np.dtype|core.VarDesc.VarType|str): The data type: float32,
+                                                  float64, int, etc.
+        name(str|None): A name for this layer(optional). If set None, the layer
+                        will be named automatically.
+
+    Returns:
+        Variable: The tensor variable containing the smoothed labels.
+
+    Examples:
+        .. code-block:: python
+
+            label = layers.data(name="label", shape=[1], dtype="float32")
+            one_hot_label = layers.one_hot(input=label, depth=10)
+            smooth_label = layers.label_smooth(
+                label=one_hot_label, epsilon=0.1, dtype="float32")
+    """
+    if epsilon > 1. or epsilon < 0.:
+        raise ValueError("The value of epsilon must be between 0 and 1.")
+    helper = LayerHelper("label_smooth", **locals())
+    label.stop_gradient = True
+    smooth_label = helper.create_tmp_variable(dtype)
+    helper.append_op(
+        type="label_smooth",
+        inputs={"X": label,
+                "PriorDist": prior_dist} if prior_dist else {"X": label},
+        outputs={"Out": smooth_label},
+        attrs={"epsilon": float(epsilon)})
+    return smooth_label
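The formula is easy to check numerically. A small numpy sketch with epsilon = 0.1, a one-hot label over four classes, and the uniform prior mu_k = 1/4:

    import numpy as np

    epsilon = 0.1
    y = np.array([0., 0., 1., 0.])   # one-hot ground truth
    mu = np.full(4, 1. / 4)          # uniform prior distribution
    y_smooth = (1. - epsilon) * y + epsilon * mu
    print(y_smooth)                  # -> [0.025 0.025 0.925 0.025]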
+ """ self.op_size = len(self._ops) op_node_connections = [(i, i + 1) for i in range(self.op_size - 1)] self._add_connections(op_node_connections) @@ -82,15 +90,14 @@ class ControlFlowGraph(object): self._live_out[i].add(new_name) def _reach_fixed_point(self, live_in, live_out): + """Check if the liveness set has stablized.""" if len(live_in) != len(self._live_in): return False if len(live_out) != len(self._live_out): return False for i in range(self.op_size): - if live_in[i] != self._live_in[i]: - return False - for i in range(self.op_size): - if live_out[i] != self._live_out[i]: + if (live_in[i] != self._live_in[i] or + live_out[i] != self._live_out[i]): return False return True @@ -98,6 +105,8 @@ class ControlFlowGraph(object): self._build_graph() live_in = defaultdict(set) live_out = defaultdict(set) + # Repeatedly apply liveness updates until the algorithm stablize + # on a complete set live input vars and live output vars. while True: for i in range(self.op_size, 0, -1): live_in[i] = set(self._live_in[i]) @@ -141,6 +150,8 @@ class ControlFlowGraph(object): return False return True + # TODO(panyx0718): This needs to be less hacky. It seems memory optimization + # doesn't consider vars copied between cpu and gpu. def _update_skip_opt_set(self): for i in range(self.op_size): op = self._ops[i] @@ -154,7 +165,7 @@ class ControlFlowGraph(object): bwd_id = 0 for i in range(self.op_size): op = self._ops[i] - if op.type() in sub_block_ops: + if op.type() in SUB_BLOCK_OPS: continue block_desc = op.block() is_forward = i < self._forward_num @@ -177,13 +188,15 @@ class ControlFlowGraph(object): def compare_shape(x_shape, cache_shape, opt_level): if opt_level == 0: return x_shape == cache_shape - if opt_level == 1: + elif opt_level == 1: if (x_shape[0] == -1) ^ (cache_shape[0] == -1): return False x_size = abs(reduce(lambda x, y: x * y, x_shape)) cache_size = abs(reduce(lambda x, y: x * y, cache_shape)) if x_size <= cache_size: return True + else: + raise ValueError("only support opt_level 0 or 1.") return False self._dataflow_analyze() @@ -191,10 +204,9 @@ class ControlFlowGraph(object): self.pool = [] for i in range(self.op_size): op = self._ops[i] - if op.type() in sub_block_ops: + if op.type() in SUB_BLOCK_OPS: continue block_desc = op.block() - self.current_block_desc = block_desc is_forward = i < self._forward_num if self.pool: defs_can_optimize = filter( @@ -211,37 +223,40 @@ class ControlFlowGraph(object): for index, cache_pair in enumerate(self.pool): cache_var = cache_pair[0] cache_shape = cache_pair[1] - if compare_shape(x_shape, cache_shape, level): - if self._has_var(block_desc, cache_var, is_forward): - x_dtype = self._find_var(block_desc, x, - is_forward).dtype() - cache_dtype = self._find_var( - block_desc, cache_var, is_forward).dtype() - # TODO(qijun): actually, we should compare dtype_to_size[x_dtype] - # and dtype_to_size[cache_dtype] - if x_dtype == cache_dtype: - if PRINT_LOG: - print( - ("Hit Cache !!!! 
-                                        ("Hit Cache !!!! cache pool index "
-                                         "is %d, var name is %s, "
-                                         "cached var name is %s, "
-                                         "var shape is %s ") %
-                                        (index, x, cache_var,
-                                         str(cache_shape)))
-                                self.pool.pop(index)
-                                if x == cache_var:
-                                    break
-                                _rename_arg_(
-                                    self._ops, x, cache_var, begin_idx=i)
-                                self._program.block(block_desc.id).var(
-                                    str(x)).desc = self._find_var(
-                                        block_desc, cache_var, is_forward)
-                                self._update_graph(
-                                    x, cache_var, begin_idx=i)
-                                break
-
-            in_diff, out_diff = self._get_diff(self._live_in[i],
-                                               self._live_out[i])
+                    if not compare_shape(x_shape, cache_shape, level):
+                        continue
+
+                    if not self._has_var(block_desc, cache_var, is_forward):
+                        continue
+
+                    x_dtype = self._find_var(block_desc, x,
+                                             is_forward).dtype()
+                    cache_dtype = self._find_var(block_desc, cache_var,
+                                                 is_forward).dtype()
+                    # TODO(qijun): actually, we should compare
+                    # dtype_to_size[x_dtype] and dtype_to_size[cache_dtype]
+                    if x_dtype != cache_dtype:
+                        continue
+
+                    if PRINT_LOG:
+                        print(("Hit Cache !!!! cache pool index "
+                               "is %d, var name is %s, "
+                               "cached var name is %s, "
+                               "var shape is %s ") % (index, x, cache_var,
+                                                      str(cache_shape)))
+                    self.pool.pop(index)
+                    if x == cache_var:
+                        break
+                    # Rename the var to the cache var, which already has
+                    # memory allocated, in order to reuse that memory.
+                    _rename_arg_(self._ops, x, cache_var, begin_idx=i)
+                    self._program.block(block_desc.id).var(str(
+                        x)).desc = self._find_var(block_desc, cache_var,
+                                                  is_forward)
+                    self._update_graph(x, cache_var, begin_idx=i)
+                    break
+
+            in_diff, _ = self._get_diff(self._live_in[i], self._live_out[i])
             can_optimize = filter(
                 lambda x: self._check_var_validity(block_desc, x, is_forward),
                 in_diff)
@@ -252,6 +267,19 @@ class ControlFlowGraph(object):
 
 
 def _process_sub_block_pair(pdesc, sub_block_pair):
+    """Creates a list of tuples, each of which tracks the info of a subblock.
+
+    Note: this function doesn't handle nested subblocks yet.
+    TODO(panyx0718): assert in case nested subblocks happen.
+
+    :param pdesc: ProgramDesc.
+    :param sub_block_pair: A list of op pairs. Each op pair is the forward
+        op and backward op. The ops in the list are special in that they
+        contain a subblock of ops.
+    :return: A list of tuples, each tuple is (all ops in a subblock pair
+        including forward and backward, number of forward ops,
+        all output arg names of the ops in the subblock pairs).
+    """
     ops_list = []
     block_desc = pdesc.block(0)
     op_size = block_desc.op_size()
@@ -308,6 +336,11 @@ def _process_sub_block_pair(pdesc, sub_block_pair):
 
 
 def _get_cfgs(input_program):
+    """Process each block and create a ControlFlowGraph for each of them.
+
+    :param input_program: Program object.
+    :return: A list of ControlFlowGraph, each corresponding to a block.
+    """
     ops_list = []
     pdesc = input_program.get_desc()
     block_desc = pdesc.block(0)
@@ -316,11 +349,8 @@ def _get_cfgs(input_program):
     ops_list.append(
         ([block_desc.op(i) for i in range(op_size)], op_size, set()))
 
-    sub_block_pair = [("while", "while_grad"), ("parallel_do",
-                                                "parallel_do_grad"),
-                      ("conditional_block", "conditional_block_grad")]
-
-    ops_list.extend(_process_sub_block_pair(pdesc, sub_block_pair))
+    # Only process one level of nested subblocks.
+    ops_list.extend(_process_sub_block_pair(pdesc, SUB_BLOCK_PAIR))
 
     cfgs = [
         ControlFlowGraph(input_program, ops, forward_num, skip_opt)
@@ -330,6 +360,17 @@ def _get_cfgs(input_program):
 
 
 def memory_optimize(input_program, print_log=False, level=0):
+    """Optimize memory by reusing var memory.
+
+    Note: it does not support a subblock nested inside another subblock.
+
+    :param input_program: Input Program.
+    :param print_log: whether to print debug log.
+    :param level: If level=0, a var is reused only when the shapes are
+        completely equal; if level=1, it may also be reused when its number
+        of elements does not exceed that of the cached var.
+    :return: None; the program is modified in place.
+    """
+    if level != 0 and level != 1:
+        raise ValueError("only support opt_level 0 or 1.")
     global PRINT_LOG
     PRINT_LOG = print_log
     cfgs = _get_cfgs(input_program)
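As a usage sketch (assuming the network has already been built and append_backward has run on the default main program), the transpiler rewrites the program in place before the executor uses it:

    import paddle.fluid as fluid

    # ... define the network and optimizer on the default programs ...
    # Let dead vars donate their memory to later vars of a matching shape.
    fluid.memory_optimize(fluid.default_main_program(), print_log=False, level=0)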
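A brief sketch of composing metrics; note that the sub-metrics still receive their update calls individually, since CompositeMetric only fans out eval:

    import paddle.fluid as fluid

    composite = fluid.metrics.CompositeMetric()
    acc = fluid.metrics.Accuracy()
    chunk = fluid.metrics.ChunkEvaluator()
    composite.add_metric(acc)
    composite.add_metric(chunk)
    # ... per minibatch: acc.update(...), chunk.update(...) ...
    accuracy_value, chunk_scores = composite.eval()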
+ """ + + def __init__(self, name=None, **kwargs): + super(CompositeMetric, self).__init__(name, kwargs) + self._metrics = [] + + def add_metric(self, metric): + if not isinstance(metric, MetricBase): + raise ValueError("SubMetric should be inherit from MetricBase.") + self._metrics.append(metric) + + def eval(self): + ans = [] + for m in self._metrics: + ans.append(m.eval()) + return ans + + +class Accuracy(MetricBase): + """ + Accumulate the accuracy from minibatches and compute the average accuracy + for every pass. + + Args: + name: the metrics name + + Example: + minibatch_accuracy = fluid.layers.accuracy(pred, label) + accuracy_evaluator = fluid.metrics.Accuracy() + for epoch in PASS_NUM: + accuracy_evaluator.reset() + for data in batches: + loss = exe.run(fetch_list=[cost, minibatch_accuracy]) + accuracy_evaluator.update(value=minibatch_accuracy, weight=batches) + accuracy = accuracy_evaluator.eval() + """ + + def __init__(self, name=None): + super(Accuracy, self).__init__(name) + self.value = .0 + self.weight = .0 + + def update(self, value, weight): + if not _is_number_or_matrix_(value): + raise ValueError( + "The 'value' must be a number(int, float) or a numpy ndarray.") + if not _is_number_(weight): + raise ValueError("The 'weight' must be a number(int, float).") + self.value += value * weight + self.weight += weight + + def eval(self): + if self.weight == 0: + raise ValueError( + "There is no data in Accuracy Metrics. Please check layers.accuracy output has added to Accuracy." + ) + return self.value / self.weight + + +class ChunkEvaluator(MetricBase): + """ + Accumulate counter numbers output by chunk_eval from mini-batches and + compute the precision recall and F1-score using the accumulated counter + numbers. + """ + + def __init__(self, name=None): + super(ChunkEvaluator, self).__init__(name) + self.num_infer_chunks = 0 + self.num_label_chunks = 0 + self.num_correct_chunks = 0 + + def update(self, num_infer_chunks, num_label_chunks, num_correct_chunks): + if not _is_number_or_matrix_(num_infer_chunks): + raise ValueError( + "The 'num_infer_chunks' must be a number(int, float) or a numpy ndarray." + ) + if not _is_number_or_matrix_(num_label_chunks): + raise ValueError( + "The 'num_label_chunks' must be a number(int, float) or a numpy ndarray." + ) + if not _is_number_or_matrix_(num_correct_chunks): + raise ValueError( + "The 'num_correct_chunks' must be a number(int, float) or a numpy ndarray." + ) + self.num_infer_chunks += num_infer_chunks + self.num_label_chunks += num_label_chunks + self.num_correct_chunks += num_correct_chunks + + def eval(self): + precision = float( + self.num_correct_chunks + ) / self.num_infer_chunks if self.num_infer_chunks else 0 + recall = float(self.num_correct_chunks + ) / self.num_label_chunks if self.num_label_chunks else 0 + f1_score = float(2 * precision * recall) / ( + precision + recall) if self.num_correct_chunks else 0 + return precision, recall, f1_score + + +class EditDistance(MetricBase): + """ + Accumulate edit distance sum and sequence number from mini-batches and + compute the average edit_distance and instance error of all batches. 
+
+
+class EditDistance(MetricBase):
+    """
+    Accumulate the edit distance sum and sequence number from mini-batches and
+    compute the average edit distance and instance error of all batches.
+
+    Args:
+        name: the metric name
+
+    Example:
+        edit_distance_metrics = fluid.layers.edit_distance(input, label)
+        distance_evaluator = fluid.metrics.EditDistance()
+        for pass_id in range(PASS_NUM):
+            distance_evaluator.reset()
+            for data in batches:
+                outs = exe.run(fetch_list=[cost] + list(edit_distance_metrics))
+                distance_evaluator.update(*outs[1:])
+            distance, instance_error = distance_evaluator.eval()
+
+        In the above example:
+        'distance' is the average edit distance over a pass.
+        'instance_error' is the instance error rate over a pass.
+    """
+
+    def __init__(self, name=None):
+        super(EditDistance, self).__init__(name)
+        self.total_distance = .0
+        self.seq_num = 0
+        self.instance_error = 0
+
+    def update(self, distances, seq_num):
+        if not _is_numpy_(distances):
+            raise ValueError("The 'distances' must be a numpy ndarray.")
+        if not _is_number_(seq_num):
+            raise ValueError("The 'seq_num' must be a number(int, float).")
+        seq_right_count = np.sum(distances == 0)
+        total_distance = np.sum(distances)
+        self.seq_num += seq_num
+        self.instance_error += seq_num - seq_right_count
+        self.total_distance += total_distance
+
+    def eval(self):
+        if self.seq_num == 0:
+            raise ValueError(
+                "There is no data in the EditDistance metric. Please check that "
+                "the layers.edit_distance output has been added to EditDistance."
+            )
+        avg_distance = self.total_distance / self.seq_num
+        avg_instance_error = self.instance_error / self.seq_num
+        return avg_distance, avg_instance_error
+
+
+class DetectionMAP(MetricBase):
+    """
+    Calculate the detection mean average precision (mAP).
+
+    TODO (Dang Qingqing): update the following doc.
+    The general steps are as follows:
+    1. calculate the true positives and false positives according to the input
+       of detection and labels.
+    2. calculate the mAP value, supporting two versions: '11 point' and
+       'integral'.
+
+    Please get more information from the following articles:
+      https://sanchom.wordpress.com/tag/average-precision/
+      https://arxiv.org/abs/1512.02325
+    """
+
+    def __init__(self, name=None):
+        super(DetectionMAP, self).__init__(name)
+        # the current map value
+        self.value = .0
+        # the accumulated weight, required by update() and eval()
+        self.weight = .0
+
+    def update(self, value, weight):
+        if not _is_number_or_matrix_(value):
+            raise ValueError(
+                "The 'value' must be a number(int, float) or a numpy ndarray.")
+        if not _is_number_(weight):
+            raise ValueError("The 'weight' must be a number(int, float).")
+        self.value += value
+        self.weight += weight
+
+    def eval(self):
+        if self.weight == 0:
+            raise ValueError(
+                "There is no data in the DetectionMAP metric. "
+                "Please check that the layers.detection_map output has been "
+                "added to DetectionMAP.")
+        return self.value / self.weight
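A usage sketch in the same spirit as the other metrics; the per-minibatch mAP fetch and the feeder are assumptions about the surrounding program:

    map_evaluator = fluid.metrics.DetectionMAP()
    for data in batches:
        batch_map, = exe.run(feed=feeder.feed(data), fetch_list=[batch_map_var])
        map_evaluator.update(value=batch_map, weight=len(data))
    mean_average_precision = map_evaluator.eval()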
+
+
+class Auc(MetricBase):
+    """
+    AUC metric, which adapts to binary classification.
+    Note that this metric computes the value natively in Python.
+    If speed is a concern, please use fluid.layers.auc instead.
+
+    The `auc` function creates four local variables, `true_positives`,
+    `true_negatives`, `false_positives` and `false_negatives` that are used to
+    compute the AUC. To discretize the AUC curve, a linearly spaced set of
+    thresholds is used to compute pairs of recall and precision values. The
+    area under the ROC-curve is therefore computed using the height of the
+    recall values by the false positive rate, while the area under the
+    PR-curve is computed using the height of the precision values by the
+    recall.
+
+    Args:
+        name: metric name
+        curve: Specifies the name of the curve to be computed, 'ROC' [default]
+               or 'PR' for the Precision-Recall-curve.
+        num_thresholds: The number of thresholds to use when discretizing the
+                        ROC curve.
+
+    NOTE: only the ROC curve type is implemented in Python for now.
+    """
+
+    def __init__(self, name, curve='ROC', num_thresholds=200):
+        super(Auc, self).__init__(name)
+        self._curve = curve
+        self._num_thresholds = num_thresholds
+        self._epsilon = 1e-6
+        # accumulated TP/FN/TN/FP counts per threshold; must start from zero
+        self.tp_list = np.zeros((num_thresholds, ))
+        self.fn_list = np.zeros((num_thresholds, ))
+        self.tn_list = np.zeros((num_thresholds, ))
+        self.fp_list = np.zeros((num_thresholds, ))
+
+    def update(self, labels, predictions, axis=1):
+        if not _is_numpy_(labels):
+            raise ValueError("The 'labels' must be a numpy ndarray.")
+        if not _is_numpy_(predictions):
+            raise ValueError("The 'predictions' must be a numpy ndarray.")
+
+        num_thresholds = self._num_thresholds
+        kepsilon = 1e-7  # to account for floating point imprecisions
+        thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
+                      for i in range(num_thresholds - 2)]
+        thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
+
+        # calculate the TP, FN, TN, FP counts
+        for idx_thresh, thresh in enumerate(thresholds):
+            tp, fn, tn, fp = 0, 0, 0, 0
+            for i, lbl in enumerate(labels):
+                if lbl:
+                    if predictions[i, 0] >= thresh:
+                        tp += 1
+                    else:
+                        fn += 1
+                else:
+                    if predictions[i, 0] >= thresh:
+                        fp += 1
+                    else:
+                        tn += 1
+            self.tp_list[idx_thresh] += tp
+            self.fn_list[idx_thresh] += fn
+            self.tn_list[idx_thresh] += tn
+            self.fp_list[idx_thresh] += fp
+
+    def eval(self):
+        epsilon = self._epsilon
+        num_thresholds = self._num_thresholds
+        tpr = (self.tp_list.astype("float32") + epsilon) / (
+            self.tp_list + self.fn_list + epsilon)
+        fpr = self.fp_list.astype("float32") / (
+            self.fp_list + self.tn_list + epsilon)
+        # rec is reserved for the PR curve, which is not implemented yet.
+        rec = (self.tp_list.astype("float32") + epsilon) / (
+            self.tp_list + self.fp_list + epsilon)
+
+        x = fpr[:num_thresholds - 1] - fpr[1:]
+        y = (tpr[:num_thresholds - 1] + tpr[1:]) / 2.0
+        auc_value = np.sum(x * y)
+        return auc_value
diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index 180575c35dc6e115e11cccf9fff9fb2d3cd7e9a6..36503cac6d5391821b977d90e6b77c4df7e3b564 100644
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+import re
 from collections import defaultdict
 from paddle.fluid.framework import Program
 import framework
@@ -818,8 +818,8 @@ class ModelAverage(Optimizer):
     min_average_window, max_average_window and current update times.
 
     Args:
-        params_grads: A list of parameter-grad variable pairs.
         average_window_rate: The rate of average window.
+        params_grads: A list of parameter-grad variable pairs.
         min_average_window: The minimum size of average window.
         max_average_window: The maximum size of average window.
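With average_window_rate now the first argument and params_grads optional, a usage sketch might look as follows; running apply_program/restore_program directly is an assumption based on the attributes built above, not a documented API:

    import paddle.fluid as fluid

    sgd = fluid.optimizer.SGD(learning_rate=0.01)
    sgd.minimize(avg_cost)
    model_average = fluid.optimizer.ModelAverage(
        0.15, min_average_window=10000, max_average_window=20000)

    # ... train for some iterations ...
    exe.run(model_average.apply_program)    # swap in the averaged parameters
    # ... run evaluation ...
    exe.run(model_average.restore_program)  # switch back to the raw parameters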
@@ -840,8 +840,8 @@ class ModelAverage(Optimizer): """ def __init__(self, - params_grads, average_window_rate, + params_grads=None, min_average_window=10000, max_average_window=10000, **kwargs): @@ -849,24 +849,37 @@ class ModelAverage(Optimizer): self.average_window = average_window_rate self.min_average_window = min_average_window self.max_average_window = max_average_window - self.params_grads = params_grads + + self.params_grads = [] if params_grads is None else params_grads + params = {} + for param, grad in self.params_grads: + if param.do_model_average != False: + params[param.name] = (param, grad) + for param in framework.default_main_program().global_block( + ).all_parameters(): + if param.name not in params and param.do_model_average != False: + grad = param.block.create_var( + name=unique_name.generate(".".join([param.name, 'tmp'])), + dtype=param.dtype, + persistable=False, + stop_gradient=True) + params[param.name] = (param, grad) + self.params_grads = params.values() + for param, grad in self.params_grads: - if grad is not None: - self._append_average_accumulate_op(param) + self._append_average_accumulate_op(param) self.apply_program = Program() block = self.apply_program.global_block() with program_guard(main_program=self.apply_program): for param_grad in self.params_grads: - if param_grad[1] is not None: - self._add_average_apply_op(block, param_grad) + self._add_average_apply_op(block, param_grad) self.restore_program = Program() block = self.restore_program.global_block() with program_guard(main_program=self.restore_program): for param_grad in self.params_grads: - if param_grad[1] is not None: - self._add_average_restore_op(block, param_grad) + self._add_average_restore_op(block, param_grad) def _add_average_apply_op(self, block, param_grad): param = block.clone_variable(param_grad[0]) diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py index 24dfa6144ae9584f1678e662716da123352430dd..5ce2aa1fc4d0b275b502af0f97e4a0f83e85de5b 100644 --- a/python/paddle/fluid/parallel_executor.py +++ b/python/paddle/fluid/parallel_executor.py @@ -100,9 +100,11 @@ class ParallelExecutor(object): local_scopes = share_vars_from.executor.local_scopes( ) if share_vars_from else [] - persistable_vars = [ + self.persistable_vars = [ v.name - for v in filter(lambda var: var.persistable, main.list_vars()) + for v in filter(lambda var: \ + var.persistable and var.type != core.VarDesc.VarType.RAW, + main.list_vars()) ] self.executor = core.ParallelExecutor( @@ -113,7 +115,7 @@ class ParallelExecutor(object): p.name for p in main.global_block().iter_parameters() if not p.stop_gradient ]), - set(persistable_vars), + set(self.persistable_vars), main.desc, loss_name if loss_name else '', scope, @@ -143,3 +145,6 @@ class ParallelExecutor(object): self.executor.run(fetch_list, fetch_var_name, feed_tensor_dict) arr = self.scope.find_var(fetch_var_name).get_lod_tensor_array() return [arr[i] for i in range(len(arr))] + + def bcast_params(self): + self.executor.bcast_params(set(self.persistable_vars)) diff --git a/python/paddle/fluid/param_attr.py b/python/paddle/fluid/param_attr.py index 255cd2104325afa31449cbd3875499a7c5d7f572..1c6970441bccdc1c1221503256c30c83502bd123 100644 --- a/python/paddle/fluid/param_attr.py +++ b/python/paddle/fluid/param_attr.py @@ -28,13 +28,15 @@ class ParamAttr(object): learning_rate=1.0, regularizer=None, trainable=True, - gradient_clip=None): + gradient_clip=None, + do_model_average=None): self.name = name self.initializer = initializer 
self.learning_rate = learning_rate self.regularizer = regularizer self.trainable = trainable self.gradient_clip = gradient_clip + self.model_average = do_model_average def set_default_initializer(self, initializer): if initializer is None: @@ -80,7 +82,8 @@ class ParamAttr(object): }, 'regularizer': self.regularizer, 'trainable': self.trainable, - 'gradient_clip_attr': self.gradient_clip + 'gradient_clip_attr': self.gradient_clip, + 'model_average': self.model_average } if with_initializer: kwargs['initializer'] = self.initializer @@ -90,7 +93,7 @@ class ParamAttr(object): class WeightNormParamAttr(ParamAttr): """ Used for weight normalization. Any field in ParamAttr can also be set here. - Besides, an extra field dim can be set to indicate the dimension except + Besides, an extra field dim can be set to indicate the dimension except which to normalize. """ # List to record the parameters reparameterized by weight normalization. diff --git a/python/paddle/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/fluid/tests/book/test_label_semantic_roles.py index c0a6df831acbfe2654a5941cf95c91343992ef13..d9cd76952e31f8185512ab45f9f3ab2ce7d9da48 100644 --- a/python/paddle/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/fluid/tests/book/test_label_semantic_roles.py @@ -12,17 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +import contextlib import math - import numpy as np +import os +import time +import unittest + import paddle import paddle.dataset.conll05 as conll05 import paddle.fluid as fluid -from paddle.fluid.initializer import init_on_cpu -import contextlib -import time -import unittest -import os word_dict, verb_dict, label_dict = conll05.get_dict() word_dict_len = len(word_dict) @@ -37,7 +36,7 @@ depth = 8 mix_hidden_lr = 1e-3 IS_SPARSE = True -PASS_NUM = 10 +PASS_NUM = 100 BATCH_SIZE = 10 embedding_name = 'emb' @@ -77,7 +76,8 @@ def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, emb_layers.append(mark_embedding) hidden_0_layers = [ - fluid.layers.fc(input=emb, size=hidden_dim) for emb in emb_layers + fluid.layers.fc(input=emb, size=hidden_dim, act='tanh') + for emb in emb_layers ] hidden_0 = fluid.layers.sums(input=hidden_0_layers) @@ -94,8 +94,8 @@ def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, for i in range(1, depth): mix_hidden = fluid.layers.sums(input=[ - fluid.layers.fc(input=input_tmp[0], size=hidden_dim), - fluid.layers.fc(input=input_tmp[1], size=hidden_dim) + fluid.layers.fc(input=input_tmp[0], size=hidden_dim, act='tanh'), + fluid.layers.fc(input=input_tmp[1], size=hidden_dim, act='tanh') ]) lstm = fluid.layers.dynamic_lstm( @@ -109,8 +109,8 @@ def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, input_tmp = [mix_hidden, lstm] feature_out = fluid.layers.sums(input=[ - fluid.layers.fc(input=input_tmp[0], size=label_dict_len), - fluid.layers.fc(input=input_tmp[1], size=label_dict_len) + fluid.layers.fc(input=input_tmp[0], size=label_dict_len, act='tanh'), + fluid.layers.fc(input=input_tmp[1], size=label_dict_len, act='tanh') ]) return feature_out @@ -171,7 +171,7 @@ def train(use_cuda, save_dirname=None, is_local=True): # check other optimizers and check why out will be NAN sgd_optimizer = fluid.optimizer.SGD( learning_rate=fluid.layers.exponential_decay( - learning_rate=0.0001, + learning_rate=0.01, decay_steps=100000, decay_rate=0.5, staircase=True)) @@ -233,7 +233,7 @@ def train(use_cuda, save_dirname=None, 
is_local=True): print("second per batch: " + str((time.time( ) - start_time) / batch_id)) # Set the threshold low to speed up the CI test - if float(pass_precision) > 0.05: + if float(pass_precision) > 0.01: if save_dirname is not None: # TODO(liuyiqun): Change the target to crf_decode fluid.io.save_inference_model(save_dirname, [ diff --git a/python/paddle/fluid/tests/book/test_recognize_digits.py b/python/paddle/fluid/tests/book/test_recognize_digits.py index e4997b4069f60ff4382b4254bc026ae8ae29b345..5ec6890c1b0dabd2804a92071b63c9610299e67c 100644 --- a/python/paddle/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/fluid/tests/book/test_recognize_digits.py @@ -157,7 +157,6 @@ def train(nn_type, for ip in pserver_ips.split(","): eplist.append(':'.join([ip, port])) pserver_endpoints = ",".join(eplist) # ip:port,ip:port... - pserver_endpoints = os.getenv("PSERVERS") trainers = int(os.getenv("TRAINERS")) current_endpoint = os.getenv("POD_IP") + ":" + port trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt index f10ef9b63412ecf74471f4fb94eb91ac72d5f8f9..356c3e64b3d03b520a1bec5b5e0174e1d8ee23e8 100644 --- a/python/paddle/fluid/tests/unittests/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt @@ -1,6 +1,12 @@ file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") +# The fully connected test is removed whe the WITH_MKLDNN flag is OFF +# Because the fully connected layer has only one kernel (MKLDNN) +if(NOT WITH_MKLDNN) + list(REMOVE_ITEM TEST_OPS test_fc_op) +endif(NOT WITH_MKLDNN) + if(NOT WITH_DISTRIBUTE) list(REMOVE_ITEM TEST_OPS test_recv_op) endif(NOT WITH_DISTRIBUTE) @@ -28,6 +34,8 @@ function(py_test_modules TARGET_NAME) endif() endfunction() +list(REMOVE_ITEM TEST_OPS test_sequence_expand) + # test time consuming OPs in a separate process for expliot parallism list(REMOVE_ITEM TEST_OPS test_parallel_executor) list(REMOVE_ITEM TEST_OPS test_warpctc_op) @@ -64,6 +72,8 @@ else() endforeach(TEST_OP) endif(WITH_FAST_BUNDLE_TEST) +# +py_test_modules(test_sequence_expand MODULES test_sequence_expand) # tests with high overhead py_test_modules(test_parallel_executor MODULES test_parallel_executor) py_test_modules(test_warpctc_op MODULES test_warpctc_op ENVS FLAGS_warpctc_dir=${WARPCTC_LIB_DIR}) diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py index 10aa63e18a6eeaa44e5b12f7532998dca2bc5e9f..7ecf9a1459ffc9740ae8c12df3902163ee689f59 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py @@ -14,23 +14,13 @@ import unittest import numpy as np -from op_test import OpTest import paddle.fluid.core as core from paddle.fluid.op import Operator +import paddle.fluid as fluid +from op_test import OpTest from paddle.fluid.framework import grad_var_name -def get_backward_op(scope, op, no_grad_set): - backward_op = core.Operator.backward(op, no_grad_set) - for input in backward_op.input_vars(): - var = scope.var(input) - var.get_tensor() - for output in backward_op.output_vars(): - var = scope.var(output) - var.get_tensor() - return backward_op - - def _reference_testing(x, scale, offset, mean, var, epsilon, data_format): x_shape = x.shape if len(x_shape) == 2: @@ -64,11 +54,6 @@ def _reference_testing(x, scale, offset, mean, var, 
epsilon, data_format): def _reference_training(x, scale, offset, epsilon, data_format): x_shape = x.shape - if len(x_shape) == 2: - if data_format == "NCHW": - x = np.reshape(x, (x.shape[0], x.shape[1], 1, 1)) - else: - x = np.reshape(x, (x.shape[0], 1, 1, x.shape[1])) if data_format == "NCHW": n, c, h, w = x.shape @@ -88,8 +73,6 @@ def _reference_training(x, scale, offset, epsilon, data_format): offset_tile = np.reshape(offset, (1, c, 1, 1)) offset_tile = np.reshape(offset_tile, (1, c, 1, 1)) y = normalized * scale_tile + offset_tile - if len(x_shape) == 2: - y = np.reshape(y, (y.shape[0], y.shape[1])) return y, mean, var elif data_format == "NHWC": x_square = x * x @@ -100,59 +83,42 @@ def _reference_training(x, scale, offset, epsilon, data_format): var = x_square_sum / element_count - mean * mean normalized = (x - mean) / np.sqrt(var + epsilon) y = normalized * scale + offset - if len(x_shape) == 2: - y = np.reshape(y, x_shape) return y, mean, var else: raise ValueError("Unknown data order.") -def _reference_grad(x, grad_y, scale, mean, var, epsilon, data_format): +def _reference_grad(x, y_grad, scale, mean, var, epsilon, data_format): # Use the following formulas to calculate gradients: # grad_scale = # sum(grad_y * (x - mean)) * rsqrt(var + epsilon) # # grad_offset = sum(output_y) # - # grad_x = + # x_grad = # 1/N * scale * rsqrt(var + epsilon) * (N * grad_y - sum(grad_y) - # (x - mean) * sum(grad_y * (x - mean)) / (var + epsilon)) # transfer from (N, C, H, W) to (N, H, W, C) to simplify computation - x_shape = x.shape - - if len(x_shape) == 2: - if data_format == "NCHW": - x = np.reshape(x, (x.shape[0], x.shape[1], 1, 1)) - grad_y = np.reshape(grad_y, - (grad_y.shape[0], grad_y.shape[1], 1, 1)) - else: - x = np.reshape(x, (x.shape[0], 1, 1, x.shape[1])) - grad_y = np.reshape(grad_y, - (grad_y.shape[0], 1, 1, grad_y.shape[1])) - if data_format == "NCHW": x = np.transpose(x, (0, 2, 3, 1)) - grad_y = np.transpose(grad_y, (0, 2, 3, 1)) + y_grad = np.transpose(y_grad, (0, 2, 3, 1)) - # raise ValueError("data_format must be NHWC, got %s." 
% data_format) - grad_x = scale * (grad_y - np.mean( - grad_y, axis=(0, 1, 2)) - (x - mean) * np.mean( - grad_y * (x - mean), axis=(0, 1, 2)) / + x_grad = scale * (y_grad - np.mean( + y_grad, axis=(0, 1, 2)) - (x - mean) * np.mean( + y_grad * (x - mean), axis=(0, 1, 2)) / (var + epsilon)) / np.sqrt(var + epsilon) - grad_scale = np.sum(grad_y * (x - mean) / np.sqrt(var + epsilon), + grad_scale = np.sum(y_grad * (x - mean) / np.sqrt(var + epsilon), axis=(0, 1, 2)) - grad_offset = np.sum(grad_y, axis=(0, 1, 2)) + grad_offset = np.sum(y_grad, axis=(0, 1, 2)) # transfer back to N, C, H, W if data_format == "NCHW": - grad_x = np.transpose(grad_x, (0, 3, 1, 2)) + x_grad = np.transpose(x_grad, (0, 3, 1, 2)) x = np.transpose(x, (0, 3, 1, 2)) - grad_y = np.transpose(grad_y, (0, 3, 1, 2)) + y_grad = np.transpose(y_grad, (0, 3, 1, 2)) - if len(x_shape) == 2: - grad_x = np.reshape(grad_x, x_shape) - return grad_x, grad_scale, grad_offset + return x_grad, grad_scale, grad_offset def create_or_get_tensor(scope, var_name, var, place): @@ -186,7 +152,7 @@ def set_output_grad(scope, outputs, place, feed_dict=None): __set_tensor__(output, data) -class TestBatchNormOpInference(OpTest): +class TestBatchNormOpInference(unittest.TestCase): def setUp(self): self.dtype = np.float32 @@ -304,231 +270,121 @@ class TestFP16BatchNormOpInference(TestBatchNormOpInference): self.check_with_place(place, data_format, self.dtype, [2, 3]) -class TestBatchNormOpTraining(OpTest): +class TestBatchNormOpTraining(unittest.TestCase): def __assert_close(self, tensor, np_array, msg, atol=1e-4): + if not np.allclose(np.array(tensor), np_array, atol=atol): + import pdb + pdb.set_trace() self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg) - def test_python_testing(self): - data_format = "NHWC" - epsilon = 0.00001 - - n, h, w, c = 2, 3, 4, 5 - x_shape = [n, h, w, c] - scale_shape = [c] - - x_val = np.random.random_sample(x_shape).astype(np.float32) - scale_val = np.random.random_sample(scale_shape).astype(np.float32) - bias_val = np.random.random_sample(scale_shape).astype(np.float32) - - mean = np.zeros(scale_shape).astype(np.float32) - variance = np.ones(scale_shape).astype(np.float32) - - y_out = _reference_testing(x_val, scale_val, bias_val, mean, variance, - epsilon, "NHWC") - - # running N, C, H, W case - # should produce the same results - x_shape2 = [n, c, h, w] - x_val2 = np.transpose(x_val, (0, 3, 1, 2)) - y_out2 = _reference_testing(x_val2, scale_val, bias_val, mean, variance, - epsilon, "NCHW") - - # transfer (N, C, H, W) back to (N, H, W, C) - y_out2_trans = np.transpose(y_out2, (0, 2, 3, 1)) - self.__assert_close(y_out, y_out2_trans, "inference output") - print 'python: NHWC, NCHW, inference checking passed' - - def test_python_training(self): - data_format = "NHWC" - epsilon = 0.00001 - momentum = 0.9 - - # N, H, W, C: 2, 3, 4, 2 - n, h, w, c = 2, 3, 4, 5 - x_shape = [n, h, w, c] - scale_shape = [c] - - x_val = np.random.random_sample(x_shape).astype(np.float32) - scale_val = np.random.random_sample(scale_shape).astype(np.float32) - bias_val = np.random.random_sample(scale_shape).astype(np.float32) - - mean = np.zeros(scale_shape).astype(np.float32) - variance = np.ones(scale_shape).astype(np.float32) - - # run forward - y_out, saved_mean, var_ref = _reference_training( - x_val, scale_val, bias_val, epsilon, "NHWC") - - # - mean_out = saved_mean * (1. - momentum) + momentum * mean - variance_out = var_ref * (1. - momentum) + momentum * variance - saved_variance = 1. 
/ np.sqrt(var_ref + epsilon) - - # running N, C, H, W case - # should produce the same results - x_shape2 = [n, c, h, w] - x_val2 = np.transpose(x_val, (0, 3, 1, 2)) - y_out2, saved_mean2, var_ref2 = _reference_training( - x_val2, scale_val, bias_val, epsilon, "NCHW") - - self.__assert_close(saved_mean, saved_mean2, "batch mean") - self.__assert_close(var_ref, var_ref2, "batch variance") - - # transfer (N, C, H, W) back to (N, H, W, C) - y_out2_trans = np.transpose(y_out2, (0, 2, 3, 1)) - self.__assert_close(y_out, y_out2_trans, "batch output") - print 'python: NHWC, NCHW, forward checking passed' - - # test backward now - # NHWC - self.y_grad = np.random.random_sample(x_shape).astype(np.float32) - y_grad = self.y_grad - # y_grad = np.ones(x_shape).astype(np.float32) - x_grad_ref, scale_grad_ref, bias_grad_ref = _reference_grad( - x_val, y_grad, scale_val, saved_mean, var_ref, epsilon, "NHWC") - - # NCHW - y_grad2 = np.transpose(y_grad, (0, 3, 1, 2)) - # y_grad2 = np.ones(x_shape2).astype(np.float32) - x_grad_ref2, scale_grad_ref2, bias_grad_ref2 = _reference_grad( - x_val2, y_grad2, scale_val, saved_mean2, var_ref2, epsilon, "NCHW") - - self.__assert_close(scale_grad_ref, scale_grad_ref2, "scale gradient") - self.__assert_close(bias_grad_ref, bias_grad_ref2, "bias gradient") - - x_grad_transpose = np.transpose(x_grad_ref2, (0, 2, 3, 1)) - self.__assert_close(x_grad_ref, x_grad_transpose, "x gradient") - print 'python: NHWC, NCHW, backward checking passed' - def test_forward_backward(self): def test_with_place(place, data_layout, shape): # attr epsilon = 0.00001 momentum = 0.9 - - if len(shape) == 2: - x_shape = shape - c = shape[1] + if data_layout == "NCHW": + n, c, h, w = shape[0], shape[1], shape[2], shape[3] else: - # n, h, w, c = 2, 3, 4, 2 n, h, w, c = shape[0], shape[1], shape[2], shape[3] - if data_format == "NHWC": - x_shape = [n, h, w, c] - elif data_format == "NCHW": - x_shape = [n, c, h, w] - else: - raise ValueError("Unknown data type.") scale_shape = [c] - x_val = np.random.random_sample(x_shape).astype(np.float32) - scale_val = np.random.random_sample(scale_shape).astype(np.float32) - bias_val = np.random.random_sample(scale_shape).astype(np.float32) - + np.random.seed(123) + x = np.random.random_sample(shape).astype(np.float32) + scale = np.random.random_sample(scale_shape).astype(np.float32) + bias = np.random.random_sample(scale_shape).astype(np.float32) mean = np.zeros(scale_shape).astype(np.float32) variance = np.ones(scale_shape).astype(np.float32) # run forward - y_out, saved_mean, var_ref = _reference_training( - x_val, scale_val, bias_val, epsilon, data_format) - - # update moving mean and variance + y, saved_mean, var_ref = _reference_training(x, scale, bias, + epsilon, data_layout) mean_out = saved_mean * (1. - momentum) + momentum * mean variance_out = var_ref * (1. - momentum) + momentum * variance saved_variance = 1. / np.sqrt(var_ref + epsilon) - - # for gradient test - # y_grad = np.ones(x_shape).astype(np.float32) - y_grad = np.zeros(x_shape).astype(np.float32) - if len(y_grad.shape) == 2: - y_grad[0, 0] = 1. - else: - y_grad[0, 0, 0, 0] = 1. 
- # y_grad = np.random.random_sample(x_shape).astype(np.float32) - x_grad_ref, scale_grad_ref, bias_grad_ref = _reference_grad( - x_val, y_grad, scale_val, saved_mean, var_ref, epsilon, - data_format) - - scope = core.Scope() - - # create input - x_tensor = create_or_get_tensor(scope, "x_val", x_val, place) - scale_tensor = create_or_get_tensor(scope, "scale_val", scale_val, - place) - bias_tensor = create_or_get_tensor(scope, "bias_val", bias_val, - place) - mean_tensor = create_or_get_tensor(scope, "mean", mean, place) - variance_tensor = create_or_get_tensor(scope, "variance", variance, - place) - - # create output - y_tensor = create_or_get_tensor(scope, "y_out", None, place) - saved_mean_tensor = create_or_get_tensor(scope, "saved_mean", None, - place) - saved_variance_tensor = create_or_get_tensor( - scope, "saved_variance", None, place) - mean_out_tensor = mean_tensor - variance_out_tensor = variance_tensor - - batch_norm_op = Operator( - "batch_norm", - # inputs - X="x_val", - Scale="scale_val", - Bias="bias_val", - Mean="mean", - Variance="variance", - # outputs - Y="y_out", - MeanOut="mean", - VarianceOut="variance", - SavedMean="saved_mean", - SavedVariance="saved_variance", - # attrs - is_test=False, - data_layout=data_layout, - momentum=momentum, - epsilon=epsilon) - - batch_norm_op.run(scope, place) - - # check forward result - self.__assert_close(y_tensor, y_out, "y_out") - self.__assert_close(saved_mean_tensor, saved_mean, "saved_mean") - self.__assert_close(saved_variance_tensor, saved_variance, - "saved_variance") - self.__assert_close(mean_out_tensor, mean_out, "mean_out") - if isinstance(place, core.CUDAPlace): - atol = 5e-2 - else: - atol = 1e-4 - self.__assert_close(variance_out_tensor, variance_out, - "variance_out", atol) - print "op test forward passed: ", str(place), data_layout - # run backward - batch_norm_op_grad = get_backward_op(scope, batch_norm_op, set()) - set_output_grad( - scope, - ["y_out", "mean", "variance", "saved_mean", "saved_variance"], - place, - feed_dict={"y_out": y_grad}) - batch_norm_op_grad.run(scope, place) - - x_grad_tensor = create_or_get_tensor(scope, - grad_var_name("x_val"), None, - place) - scale_grad_tensor = create_or_get_tensor(scope, - grad_var_name("scale_val"), - None, place) - bias_grad_tensor = create_or_get_tensor(scope, - grad_var_name("bias_val"), - None, place) + y_grad = np.random.random_sample(shape).astype(np.float32) + x_grad, scale_grad, bias_grad = _reference_grad( + x, y_grad, scale, saved_mean, var_ref, epsilon, data_format) + + var_dict = locals() + var_dict['y@GRAD'] = y_grad + + var_names = [ + 'x', 'scale', 'bias', 'mean', 'variance', 'y', 'saved_mean', + 'saved_variance' + ] + ground_truth = {name: var_dict[name] for name in var_names} + + program = fluid.Program() + with fluid.program_guard(program): + block = program.global_block() + for name in ground_truth: + block.create_var( + name=name, + dtype='float32', + shape=ground_truth[name].shape) + bn_op = block.append_op( + type="batch_norm", + inputs={ + "X": block.var('x'), + "Scale": block.var('scale'), + "Bias": block.var('bias'), + "Mean": block.var('mean'), + "Variance": block.var('variance') + }, + outputs={ + "Y": block.var('y'), + "MeanOut": block.var('mean'), # share the same memory + "VarianceOut": + block.var('variance'), # share the same memory + "SavedMean": block.var('saved_mean'), + "SavedVariance": block.var('saved_variance') + }, + attrs={ + "momentum": momentum, + "epsilon": epsilon, + "is_test": False, + "data_layout": data_layout + }) + 
block.create_var(name='y@GRAD', dtype='float32', shape=y.shape) + + # generate backward op_desc + grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc( + bn_op.desc, set(), []) + grad_op_desc = grad_op_desc_list[0] + new_op_desc = block.desc.append_op() + new_op_desc.copy_from(grad_op_desc) + for var_name in grad_op_desc.output_arg_names(): + block.desc.var(var_name.encode("ascii")) + grad_op_desc.infer_var_type(block.desc) + grad_op_desc.infer_shape(block.desc) + for arg in grad_op_desc.output_arg_names(): + grad_var = block.desc.find_var(arg.encode("ascii")) + grad_var.set_dtype(core.VarDesc.VarType.FP32) + + exe = fluid.Executor(place) + out = exe.run( + program, + feed={ + name: var_dict[name] + for name in + ['x', 'scale', 'bias', 'mean', 'variance', 'y@GRAD'] + }, + fetch_list=[ + 'y', 'mean', 'variance', 'saved_mean', 'saved_variance', + 'x@GRAD', 'scale@GRAD', 'bias@GRAD' + ]) + + self.__assert_close(y, out[0], "y") + self.__assert_close(mean_out, out[1], "mean") + self.__assert_close(variance_out, out[2], "variance", 1e-3) + self.__assert_close(saved_mean, out[3], "saved_mean") + self.__assert_close(saved_variance, out[4], "saved_variance", 1e-3) + self.__assert_close(x_grad, out[5], "x_grad") + self.__assert_close(scale_grad, out[6], "scale_grad") + self.__assert_close(bias_grad, out[7], "bias_grad") - # check gradient output - self.__assert_close(x_grad_tensor, x_grad_ref, "x_grad") - self.__assert_close(scale_grad_tensor, scale_grad_ref, "scale_grad") - self.__assert_close(bias_grad_tensor, bias_grad_ref, "bias_grad") - print "op test backward passed: ", str(place), data_layout + print "op test forward passed: ", str(place), data_layout places = [core.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"): @@ -537,7 +393,6 @@ class TestBatchNormOpTraining(OpTest): for place in places: for data_format in ["NCHW", "NHWC"]: test_with_place(place, data_format, [2, 3, 4, 5]) - test_with_place(place, data_format, [2, 3]) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_cond_op.py b/python/paddle/fluid/tests/unittests/test_cond_op.py deleted file mode 100644 index 66fbae961a2701e79da5222ae2689108335c4065..0000000000000000000000000000000000000000 --- a/python/paddle/fluid/tests/unittests/test_cond_op.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import paddle.fluid.core as core -import unittest -import numpy as np -from paddle.fluid.op import Operator, CondOp - - -class PySimpleCond(object): - ''' - A simple implementation of dynamic if-else based on numpy - ''' - - def __init__(self): - array = [1] * 10 - for i in range(1, 10, 2): - array[i] = 0 - self.cond = np.array(array) - self.x = np.ones(shape=(10, 1)).astype("float32") - - def forward(self): - self.index_t = np.where(self.cond == 1) - self.index_f = np.where(self.cond == 0) - y_t = self.x[self.index_t] - y_f = self.x[self.index_f] - y_t = y_t * 2. 
- y_f = y_f * (-2.) - output = np.zeros(shape=(10, 1)) - output[self.index_t] = y_t - output[self.index_f] = y_f - return output - - -class PySimpleCondTest(unittest.TestCase): - def setUp(self): - self.condnn = PySimpleCond() - - def test_forward(self): - output = self.condnn.forward() - - -def create_tensor(scope, name, shape, np_data): - tensor = scope.var(name).get_tensor() - tensor.set_dims(shape) - tensor.set(np_data, core.CPUPlace()) - return tensor - - -class TestCondOp(unittest.TestCase): - ''' - Test CondOp - - equation: - cond = [True, False, True, False, ...] - y[index_t] = x[index_t] * 2. - y[index_f] = x[index_f] * -2. - outputs: - y - ''' - - def setUp(self): - self.py_cond = PySimpleCond() - - def forward(self): - self.scope = core.Scope() - self.create_global_variables() - self.create_cond_op() - self.create_sub_net() - self.condop.run(self.scope, core.CPUPlace()) - return np.array(self.scope.find_var("Out").get_tensor()) - - def create_global_variables(self): - x_np_data = self.py_cond.x - create_tensor(self.scope, "X", [10, 1], x_np_data) - cond_np_data = self.py_cond.cond.astype("int32") - create_tensor(self.scope, "cond", [10, 1], cond_np_data) - self.scope.var("SubScopes") - self.scope.var("IndexTensors") - self.scope.var("Out") - - def create_cond_op(self): - self.condop = CondOp( - Cond="cond", - Xs=["X"], - Outs=["Out"], - SubScopes="SubScopes", - IndexTensors="IndexTensors") - - def create_sub_net(self): - truenet = core.Net.create() - scale_op_t = Operator("scale", X='X', Out='Out', scale=2.) - truenet.append_op(scale_op_t) - truenet.complete_add_op(True) - self.condop.set_truenet(truenet) - - falsenet = core.Net.create() - scale_op_t = Operator("scale", X='X', Out='Out', scale=-2.) - falsenet.append_op(scale_op_t) - falsenet.complete_add_op(True) - self.condop.set_falsenet(falsenet) - - def test_forward(self): - print 'test cond op forward' - pd_output = self.forward() - py_output = self.py_cond.forward() - print 'pd_output', pd_output - print - print 'py_output', py_output - self.assertEqual(pd_output.shape, py_output.shape) - print 'test passed' - return 0 - - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_layer_norm_op.py b/python/paddle/fluid/tests/unittests/test_layer_norm_op.py index 8c67e45b7fc997012af5f678f21271ad8b220edc..69365db4d104a1b69916a605534eff83e242289f 100644 --- a/python/paddle/fluid/tests/unittests/test_layer_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_layer_norm_op.py @@ -15,10 +15,8 @@ import unittest import numpy as np from operator import mul -from op_test import OpTest import paddle.fluid.core as core -from paddle.fluid.op import Operator -from paddle.fluid.framework import grad_var_name +import paddle.fluid as fluid np.random.random(123) @@ -70,161 +68,93 @@ def _reference_layer_norm_grad(x, grad_y, scale, mean, var, begin_norm_axis=1): return grad_x, d_scale, d_bias -def get_backward_op(scope, op, no_grad_set): - backward_op = core.Operator.backward(op, no_grad_set) - for input in backward_op.input_vars(): - var = scope.var(input) - var.get_tensor() - for output in backward_op.output_vars(): - var = scope.var(output) - var.get_tensor() - return backward_op - - -def create_or_get_tensor(scope, var_name, var, place): - tensor = scope.var(var_name).get_tensor() - if var is not None: - assert isinstance(var, np.ndarray) - tensor.set_lod([[]]) - tensor.set_dims(var.shape) - tensor.set(var, place) - return tensor - - -def set_output_grad(scope, outputs, place, 
feed_dict=None): - def __set_tensor__(name, data=None): - out_tensor = scope.find_var(name).get_tensor() - grad_tensor = scope.var(grad_var_name(name)).get_tensor() - out_dtype = out_tensor.dtype() - if data is None: - if out_dtype == core.VarDesc.VarType.FP64: - data = np.ones(out_tensor.shape(), dtype=np.float64) - elif out_dtype == core.VarDesc.VarType.FP32: - data = np.ones(out_tensor.shape(), dtype=np.float32) - else: - raise ValueError("Not supported data type " + str(out_dtype)) - grad_tensor.set(data, place) - - for output in outputs: - data = None - if output in feed_dict: - data = feed_dict[output] - __set_tensor__(output, data) - - -class TestLayerNormdOp(OpTest): +class TestLayerNormdOp(unittest.TestCase): def __assert_close(self, tensor, np_array, msg, atol=1e-4): self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg) - def __assert_grad_close(self, - tensor, - np_array, - name, - place, - max_relative_error=0.02): - a = np.array(tensor) - b = np_array - abs_a = np.abs(a) - abs_a[abs_a < 1e-5] = 1 - - diff_mat = np.abs(a - b) / abs_a - max_diff = np.max(diff_mat) - - def err_msg(): - offset = np.argmax(diff_mat > max_relative_error) - return ("%s Variable %s max gradient diff %f over limit %f, " - "the first error element is %d, %f, %f") % ( - "Gradient Check On %s" % str(place), name, max_diff, - max_relative_error, offset, a.flatten()[offset], - b.flatten()[offset]) - - self.assertLessEqual(max_diff, max_relative_error, err_msg()) - def check_forward_backward(self, shape, begin_norm_axis): - def test_with_place(place, shape, begin_norm_axis=1): - # setUp - assert begin_norm_axis > 0 and begin_norm_axis < len( - shape), 'begin_norm_axis must be between 0 and len(shape)-1.' + def test_with_place(place, shape, begin_norm_axis): # attr epsilon = 0.00001 x_shape = shape D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1) scale_shape = [D] - x_val = np.random.random_sample(x_shape).astype(np.float32) - scale_val = np.random.random_sample(scale_shape).astype(np.float32) - bias_val = np.random.random_sample(scale_shape).astype(np.float32) + np.random.seed(123) + x = np.random.random_sample(x_shape).astype(np.float32) + scale = np.random.random_sample(scale_shape).astype(np.float32) + bias = np.random.random_sample(scale_shape).astype(np.float32) y_grad = np.random.random_sample(x_shape).astype(np.float32) - # run forward - y_out, saved_mean, var_ref = _reference_layer_norm_naive( - x_val, scale_val, bias_val, epsilon, begin_norm_axis) - naive_fw = {"Y": y_out, "Mean": saved_mean, "Variance": var_ref} - - # get gradient - x_grad_ref, scale_grad_ref, bias_grad_ref = _reference_layer_norm_grad( - x_val, y_grad, scale_val, saved_mean, var_ref, begin_norm_axis) - naive_grad = { - "X": x_grad_ref, - "Scale": scale_grad_ref, - "Bias": bias_grad_ref - } - - scope = core.Scope() - - # create input - input_map = {"X": x_val, "Scale": scale_val, "Bias": bias_val} - for i_name in input_map: - create_or_get_tensor(scope, i_name, input_map[i_name], place) - - # create output - output_map = {"Y": None, "Mean": None, "Variance": None} - output_tensor = {} - for o_name in output_map: - output_tensor[o_name] = create_or_get_tensor( - scope, o_name, output_map[o_name], place) - - layer_norm_op = Operator( - "layer_norm", - # inputs - X="X", - Scale="Scale", - Bias="Bias", - # outputs - Y="Y", - Mean="Mean", - Variance="Variance", - # attrs - epsilon=epsilon, - begin_norm_axis=begin_norm_axis) - - layer_norm_op.run(scope, place) - - # check forward result - atol = 5e-2 if 
isinstance(place, core.CUDAPlace) else 1e-4 - for o_tensor in output_tensor: - self.__assert_close(output_tensor[o_tensor], naive_fw[o_tensor], - o_tensor, atol) - - # run backward - layer_norm_op_grad = get_backward_op(scope, layer_norm_op, set()) - set_output_grad( - scope, ["Y", "Mean", "Variance"], - place, - feed_dict={"Y": y_grad}) - layer_norm_op_grad.run(scope, place) - - # get output - grad_tensor = {} - for o_name in naive_grad: - grad_tensor[o_name] = x_ = create_or_get_tensor( - scope, grad_var_name(o_name), None, place) - - # check gradient output - for o_grad in naive_grad: - self.__assert_grad_close(grad_tensor[o_grad], - naive_grad[o_grad], o_grad + "@GRAD", - place) + # reference forward & backward + y, mean, variance = _reference_layer_norm_naive( + x, scale, bias, epsilon, begin_norm_axis) + x_grad, scale_grad, bias_grad = _reference_layer_norm_grad( + x, y_grad, scale, mean, variance, begin_norm_axis) + + var_dict = locals() + var_dict['y@GRAD'] = y_grad + var_names = [ + 'x', 'scale', 'bias', 'mean', 'variance', 'y', 'y@GRAD' + ] + ground_truth = {name: var_dict[name] for name in var_names} + + program = fluid.Program() + with fluid.program_guard(program): + block = program.global_block() + for name in ground_truth: + block.create_var( + name=name, + dtype='float32', + shape=ground_truth[name].shape) + layer_norm_op = block.append_op( + type="layer_norm", + inputs={ + "X": block.var('x'), + "Scale": block.var('scale'), + "Bias": block.var('bias'), + }, + outputs={ + "Y": block.var('y'), + "Mean": block.var('mean'), # share the same memory + "Variance": + block.var('variance'), # share the same memory + }, + attrs={ + "epsilon": epsilon, + "begin_norm_axis": begin_norm_axis + }) + + # generate backward op_desc + grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc( + layer_norm_op.desc, set(), []) + grad_op_desc = grad_op_desc_list[0] + new_op_desc = block.desc.append_op() + new_op_desc.copy_from(grad_op_desc) + for var_name in grad_op_desc.output_arg_names(): + block.desc.var(var_name.encode("ascii")) + grad_op_desc.infer_var_type(block.desc) + grad_op_desc.infer_shape(block.desc) + for arg in grad_op_desc.output_arg_names(): + grad_var = block.desc.find_var(arg.encode("ascii")) + grad_var.set_dtype(core.VarDesc.VarType.FP32) + + exe = fluid.Executor(place) + out = exe.run(program, + feed={ + name: var_dict[name] + for name in ['x', 'scale', 'bias', 'y@GRAD'] + }, + fetch_list=[ + 'y', 'mean', 'variance', 'x@GRAD', + 'scale@GRAD', 'bias@GRAD' + ]) + self.__assert_close(y, out[0], "y") + self.__assert_close(mean, out[1], "mean") + self.__assert_close(variance, out[2], "variance", 1e-3) + self.__assert_close(x_grad, out[3], "x_grad") + self.__assert_close(scale_grad, out[4], "scale_grad", 1e-3) + self.__assert_close(bias_grad, out[5], "bias_grad") places = [core.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu("layer_norm"): @@ -237,15 +167,6 @@ class TestLayerNormdOp(OpTest): self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=1) self.check_forward_backward(shape=[2, 3, 4, 5], begin_norm_axis=3) - def test_check_forward_backward_with_scale(self): - pass # TODO(zcd) - - def test_check_forward_backward_with_bias(self): - pass # TODO(zcd) - - def test_check_forward_backward(self): - pass # TODO(zcd) - if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 
2179826d81f715d6d280aea28a76f919330dd644..a1be2d671ddc5c689b16319fcf5bf12dca5dde7e 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -32,7 +32,6 @@ class TestBook(unittest.TestCase): cost = layers.square_error_cost(input=y_predict, label=y) avg_cost = layers.mean(cost) self.assertIsNotNone(avg_cost) - program.append_backward(avg_cost) print(str(program)) @@ -94,8 +93,6 @@ class TestBook(unittest.TestCase): cost = layers.cross_entropy(input=predict, label=label) avg_cost = layers.mean(cost) - program.append_backward(avg_cost) - print(str(program)) def test_word_embedding(self): @@ -343,6 +340,16 @@ class TestBook(unittest.TestCase): print(layers.lod_reset(x=x, y=y)) print(str(program)) + def test_label_smooth(self): + program = Program() + with program_guard(program): + label = layers.data(name="label", shape=[1], dtype="float32") + one_hot_label = layers.one_hot(input=label, depth=10) + smooth_label = layers.label_smooth( + label=one_hot_label, epsilon=0.1, dtype="float32") + self.assertIsNotNone(smooth_label) + print(str(program)) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_multiple_reader.py b/python/paddle/fluid/tests/unittests/test_multi_file_reader.py similarity index 91% rename from python/paddle/fluid/tests/unittests/test_multiple_reader.py rename to python/paddle/fluid/tests/unittests/test_multi_file_reader.py index a60a5d6c4af2b6b3652d0fe2089018b9403eee25..5dc41e54d6158787eb966333c894e378b5c706d0 100644 --- a/python/paddle/fluid/tests/unittests/test_multiple_reader.py +++ b/python/paddle/fluid/tests/unittests/test_multi_file_reader.py @@ -61,8 +61,12 @@ class TestMultipleReader(unittest.TestCase): exe.run(fluid.default_startup_program()) batch_count = 0 - while not data_files.eof(): - img_val, = exe.run(fetch_list=[img]) + while True: + try: + img_val, = exe.run(fetch_list=[img]) + except fluid.core.EnforceNotMet as ex: + self.assertIn("There is no next data.", ex.message) + break batch_count += 1 self.assertLessEqual(img_val.shape[0], self.batch_size) data_files.reset() diff --git a/python/paddle/fluid/tests/unittests/test_multi_pass_reader.py b/python/paddle/fluid/tests/unittests/test_multi_pass_reader.py index 0b7a29075939a548320185947b5afa7261029d49..1471843ded7a42432a84a9fad76bb97dcf7fb9c2 100644 --- a/python/paddle/fluid/tests/unittests/test_multi_pass_reader.py +++ b/python/paddle/fluid/tests/unittests/test_multi_pass_reader.py @@ -44,7 +44,7 @@ class TestMultipleReader(unittest.TestCase): shapes=[(-1, 784), (-1, 1)], lod_levels=[0, 0], dtypes=['float32', 'int64']) - data_file = fluid.layers.create_multi_pass_reader( + data_file = fluid.layers.io.multi_pass( reader=data_file, pass_num=self.pass_num) img, label = fluid.layers.read_file(data_file) @@ -57,8 +57,12 @@ class TestMultipleReader(unittest.TestCase): exe.run(fluid.default_startup_program()) batch_count = 0 - while not data_file.eof(): - img_val, = exe.run(fetch_list=[img]) + while True: + try: + img_val, = exe.run(fetch_list=[img]) + except fluid.core.EnforceNotMet as ex: + self.assertIn("There is no next data.", ex.message) + break batch_count += 1 self.assertLessEqual(img_val.shape[0], self.batch_size) data_file.reset() diff --git a/python/paddle/fluid/tests/unittests/test_net.py b/python/paddle/fluid/tests/unittests/test_net.py deleted file mode 100644 index ae1699d647d7c0adab36200fb07bde12085053c1..0000000000000000000000000000000000000000 --- 
a/python/paddle/fluid/tests/unittests/test_net.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import paddle.fluid.core as core -from paddle.fluid.op import Operator -import unittest - - -def fc(X, W, Y): - ret_v = core.Net.create() - - ret_v.append_op(Operator("mul", X="X", Y="W", Out="pre_activation")) - ret_v.append_op(Operator("sigmoid", X="pre_activation", Out=Y)) - ret_v.complete_add_op(True) - return ret_v - - -class TestNet(unittest.TestCase): - def test_net_all(self): - net = core.Net.create() - op1 = Operator("sum", X=["X", "Y"], Out="Out") - net.append_op(op1) - - net2 = core.Net.create() - net2.append_op(fc(X="X", W="w", Y="fc.out")) - net2.complete_add_op(True) - net.append_op(net2) - net.complete_add_op(True) - - expected = ''' -Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]}. - Op(sum), inputs:{X[X, Y]}, outputs:{Out[Out]}. - Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}. - Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}. - Op(mul), inputs:{X[X], Y[W]}, outputs:{Out[pre_activation]}. - Op(sigmoid), inputs:{X[pre_activation]}, outputs:{Out[fc.out]}. -''' - self.assertEqual(expected, "\n" + str(net)) - - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor.py b/python/paddle/fluid/tests/unittests/test_parallel_executor.py index 8401716db88ef3dda68644a052d78b4476c9fdc7..83d22fd799eea55eedb58f93421b275985edb50b 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor.py @@ -26,11 +26,14 @@ def simple_fc_net(use_feed): img = fluid.layers.data(name='image', shape=[784], dtype='float32') label = fluid.layers.data(name='label', shape=[1], dtype='int64') else: - reader = fluid.layers.open_recordio_file( - filename='./mnist.recordio', + reader = fluid.layers.open_files( + filenames=['./mnist.recordio'], shapes=[[-1, 784], [-1, 1]], lod_levels=[0, 0], - dtypes=['float32', 'int64']) + dtypes=['float32', 'int64'], + thread_num=1, + for_parallel=True) + reader = fluid.layers.io.double_buffer(reader) img, label = fluid.layers.read_file(reader) hidden = img for _ in xrange(4): @@ -51,11 +54,14 @@ def fc_with_batchnorm(use_feed): img = fluid.layers.data(name='image', shape=[784], dtype='float32') label = fluid.layers.data(name='label', shape=[1], dtype='int64') else: - reader = fluid.layers.open_recordio_file( - filename='./mnist.recordio', + reader = fluid.layers.open_files( + filenames=['mnist.recordio'], shapes=[[-1, 784], [-1, 1]], lod_levels=[0, 0], - dtypes=['float32', 'int64']) + dtypes=['float32', 'int64'], + thread_num=1, + for_parallel=True) + reader = fluid.layers.io.double_buffer(reader) img, label = fluid.layers.read_file(reader) hidden = img @@ -467,7 +473,7 @@ class ParallelExecutorTestingDuringTraining(unittest.TestCase): loss = simple_fc_net(True) 
test_program = main.clone(for_test=True) - opt = fluid.optimizer.SGD(learning_rate=0.0001) + opt = fluid.optimizer.SGD(learning_rate=0.001) opt.minimize(loss) batch_size = 32 @@ -494,4 +500,153 @@ class ParallelExecutorTestingDuringTraining(unittest.TestCase): train_loss, = train_exe.run([loss.name], feed_dict=feed_dict) train_loss = numpy.array(train_loss) - self.assertTrue(numpy.allclose(train_loss, test_loss)) + self.assertTrue( + numpy.allclose( + train_loss, test_loss, atol=1e-8), + "Train loss: " + str(train_loss) + "\n Test loss:" + + str(test_loss)) + + +import paddle.dataset.conll05 as conll05 +import paddle.fluid as fluid + +word_dict, verb_dict, label_dict = conll05.get_dict() +word_dict_len = len(word_dict) +label_dict_len = len(label_dict) +pred_dict_len = len(verb_dict) +mark_dict_len = 2 +word_dim = 32 +mark_dim = 5 +hidden_dim = 512 +depth = 8 +mix_hidden_lr = 1e-3 +embedding_name = 'emb' + + +def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, + **ignored): + # 8 features + predicate_embedding = fluid.layers.embedding( + input=predicate, + size=[pred_dict_len, word_dim], + dtype='float32', + param_attr='vemb') + + mark_embedding = fluid.layers.embedding( + input=mark, size=[mark_dict_len, mark_dim], dtype='float32') + + word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] + emb_layers = [ + fluid.layers.embedding( + size=[word_dict_len, word_dim], + input=x, + param_attr=fluid.ParamAttr( + name=embedding_name, trainable=False)) for x in word_input + ] + emb_layers.append(predicate_embedding) + emb_layers.append(mark_embedding) + + hidden_0_layers = [ + fluid.layers.fc(input=emb, size=hidden_dim, act='tanh') + for emb in emb_layers + ] + + hidden_0 = fluid.layers.sums(input=hidden_0_layers) + + lstm_0 = fluid.layers.dynamic_lstm( + input=hidden_0, + size=hidden_dim, + candidate_activation='relu', + gate_activation='sigmoid', + cell_activation='sigmoid') + + # stack L-LSTM and R-LSTM with direct edges + input_tmp = [hidden_0, lstm_0] + + for i in range(1, depth): + mix_hidden = fluid.layers.sums(input=[ + fluid.layers.fc(input=input_tmp[0], size=hidden_dim, act='tanh'), + fluid.layers.fc(input=input_tmp[1], size=hidden_dim, act='tanh') + ]) + + lstm = fluid.layers.dynamic_lstm( + input=mix_hidden, + size=hidden_dim, + candidate_activation='relu', + gate_activation='sigmoid', + cell_activation='sigmoid', + is_reverse=((i % 2) == 1)) + + input_tmp = [mix_hidden, lstm] + + feature_out = fluid.layers.sums(input=[ + fluid.layers.fc(input=input_tmp[0], size=label_dict_len, act='tanh'), + fluid.layers.fc(input=input_tmp[1], size=label_dict_len, act='tanh') + ]) + + return feature_out + + +class TestCRFModel(unittest.TestCase): + def test_all(self): + main = fluid.Program() + startup = fluid.Program() + with fluid.program_guard(main, startup): + word = fluid.layers.data( + name='word_data', shape=[1], dtype='int64', lod_level=1) + predicate = fluid.layers.data( + name='verb_data', shape=[1], dtype='int64', lod_level=1) + ctx_n2 = fluid.layers.data( + name='ctx_n2_data', shape=[1], dtype='int64', lod_level=1) + ctx_n1 = fluid.layers.data( + name='ctx_n1_data', shape=[1], dtype='int64', lod_level=1) + ctx_0 = fluid.layers.data( + name='ctx_0_data', shape=[1], dtype='int64', lod_level=1) + ctx_p1 = fluid.layers.data( + name='ctx_p1_data', shape=[1], dtype='int64', lod_level=1) + ctx_p2 = fluid.layers.data( + name='ctx_p2_data', shape=[1], dtype='int64', lod_level=1) + mark = fluid.layers.data( + name='mark_data', shape=[1], dtype='int64', lod_level=1) + 
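# db_lstm binds its eight feature inputs by name and swallows the rest of locals() via its **ignored parameter +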
feature_out = db_lstm(**locals()) + target = fluid.layers.data( + name='target', shape=[1], dtype='int64', lod_level=1) + crf_cost = fluid.layers.linear_chain_crf( + input=feature_out, + label=target, + param_attr=fluid.ParamAttr( + name='crfw', learning_rate=1e-1)) + avg_cost = fluid.layers.mean(crf_cost) + + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=fluid.layers.exponential_decay( + learning_rate=0.01, + decay_steps=100000, + decay_rate=0.5, + staircase=True)) + sgd_optimizer.minimize(avg_cost) + + train_data = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.conll05.test(), buf_size=8192), + batch_size=16) + + place = fluid.CUDAPlace(0) + exe = fluid.Executor(place) + exe.run(startup) + + pe = fluid.ParallelExecutor(use_cuda=True, loss_name=avg_cost.name) + + feeder = fluid.DataFeeder( + feed_list=[ + word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, + mark, target + ], + place=fluid.CPUPlace()) + + data = train_data() + for i in xrange(10): + cur_batch = next(data) + print map(numpy.array, + pe.run(feed_dict=feeder.feed(cur_batch), + fetch_list=[avg_cost.name]))[0] diff --git a/python/paddle/fluid/tests/unittests/test_program.py b/python/paddle/fluid/tests/unittests/test_program.py index 87a2195f0d5c7fd355ea01a3c8f60908b33d4b9d..c51a48239330621d8e008415f81361616467cabf 100644 --- a/python/paddle/fluid/tests/unittests/test_program.py +++ b/python/paddle/fluid/tests/unittests/test_program.py @@ -87,57 +87,6 @@ class TestProgram(unittest.TestCase): print(prog) print(prog_restored) - def test_append_backward(self): - prog = Program() - block = prog.global_block() - - mul_x = block.create_var( - dtype="float32", shape=[5, 10], lod_level=0, name="mul.x") - mul_y = block.create_var( - dtype="float32", shape=[10, 8], lod_level=0, name="mul.y") - mul_out = block.create_var( - dtype="float32", shape=[5, 8], lod_level=0, name="mul.out") - mul_op = block.append_op( - type="mul", - inputs={"X": [mul_x], - "Y": mul_y}, - outputs={"Out": [mul_out]}, - attrs={"x_num_col_dims": 1}) - - add_y = block.create_var( - dtype="float32", shape=[5, 8], lod_level=0, name="add.y") - add_out = block.create_var( - dtype="float32", shape=[5, 8], lod_level=0, name="add.out") - add_op = block.append_op( - type="elementwise_add", - inputs={"X": mul_out, - "Y": add_y}, - outputs={"Out": add_out}, - attrs={"x_num_col_dims": 1}) - mean_out = block.create_var( - dtype="float32", shape=[1], lod_level=0, name="mean.out") - block.append_op( - type="mean", inputs={"X": add_out}, outputs={"Out": mean_out}) - - self.assertEqual(mul_op.idx, 0) - self.assertEqual(add_op.idx, 1) - param_to_grad = prog.append_backward(mean_out, set()) - - for var_name in ("mul.x", "mul.y", "mul.out", "add.y", "add.out", - "mean.out"): - self.assertEqual(param_to_grad[var_name][0], - grad_var_name(var_name)) - self.assertEqual(param_to_grad[var_name][1], 0) - - expect_ops = [ - "mul", "elementwise_add", "mean", "fill_constant", "mean_grad", - "elementwise_add_grad", "mul_grad" - ] - actual_ops = [] - for op in block.ops: - actual_ops.append(op.type) - self.assertEqual(actual_ops, expect_ops) - def test_program_clone_with_parameter(self): main_program = Program() startup_program = Program() diff --git a/python/paddle/fluid/tests/unittests/test_protobuf_descs.py b/python/paddle/fluid/tests/unittests/test_protobuf_descs.py index f98a8bbc68a4315df3ae761f2e52b8f11cb620c6..3f9059fb5b31cd009c068ccddc9a8938adae5772 100644 --- a/python/paddle/fluid/tests/unittests/test_protobuf_descs.py +++ 
b/python/paddle/fluid/tests/unittests/test_protobuf_descs.py @@ -201,24 +201,6 @@ class TestBlockDesc(unittest.TestCase): op1.set_type("test") op2.set_type("test") - var0 = block.var("var0") - var1 = block.var("var1") - var2 = block.var("var2") - var3 = block.var("var3") - var4 = block.var("var4") - var5 = block.var("var5") - - op0.set_input("X", ["var0"]) - op0.set_output("Y", ["var0"]) - op1.set_input("X", ["var1", "var2"]) - op1.set_output("Y", ["var3", "var4"]) - op2.set_input("X", ["var1"]) - op2.set_output("Y", ["var4", "var5"]) - - program.sync_with_cpp() - - # remove op1, its input var2 and output var3 will be removed at the same time, - # but its input var1 and output var4 will not be removed since they are used for op2. block.remove_op(1, 2) program.sync_with_cpp() @@ -226,8 +208,6 @@ class TestBlockDesc(unittest.TestCase): for idx in xrange(0, block.op_size()): all_ops.append(block.op(idx)) self.assertEqual(all_ops, [op0, op2]) - all_vars = block.all_vars() - self.assertEqual(set(all_vars), {var0, var1, var4, var5}) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_recordio_reader.py b/python/paddle/fluid/tests/unittests/test_recordio_reader.py index 24a0074d9b9621d902d12eb8cb29d9b65be22ed3..7c8e7f634fdd3ee3f056a95df774402a7c29e906 100644 --- a/python/paddle/fluid/tests/unittests/test_recordio_reader.py +++ b/python/paddle/fluid/tests/unittests/test_recordio_reader.py @@ -65,8 +65,13 @@ class TestRecordIO(unittest.TestCase): # train a pass batch_id = 0 - while not data_file.eof(): - tmp, = exe.run(fetch_list=[avg_loss]) + while True: + try: + tmp, = exe.run(fetch_list=[avg_loss]) + except fluid.core.EnforceNotMet as ex: + self.assertIn("There is no next data.", ex.message) + break + avg_loss_np.append(tmp) batch_id += 1 data_file.reset() @@ -74,8 +79,8 @@ class TestRecordIO(unittest.TestCase): self.assertLess(avg_loss_np[-1], avg_loss_np[0]) def test_shuffle_reader(self): - self.test_main(decorator_callback=lambda reader: fluid.layers.create_shuffle_reader(reader, buffer_size=200)) + self.test_main(decorator_callback=lambda reader: fluid.layers.io.shuffle(reader, buffer_size=200)) def test_double_buffer_reader(self): - self.test_main(decorator_callback=lambda reader: fluid.layers.create_double_buffer_reader(reader, + self.test_main(decorator_callback=lambda reader: fluid.layers.io.double_buffer(reader, place='cuda:0' if fluid.core.is_compiled_with_cuda() else 'cpu')) diff --git a/python/paddle/fluid/tests/unittests/test_sequence_expand.py b/python/paddle/fluid/tests/unittests/test_sequence_expand.py index 7feb509c4d6f5768552fc2515081f7e68f420967..4c8ec1426c6e103498af544ea5928ec630707d46 100644 --- a/python/paddle/fluid/tests/unittests/test_sequence_expand.py +++ b/python/paddle/fluid/tests/unittests/test_sequence_expand.py @@ -47,8 +47,10 @@ class TestSequenceExpand(OpTest): x_len = x_idx[i] - x_idx[i - 1] if repeat_num > 0: x_sub = x_data[x_idx[i - 1]:x_idx[i], :] - x_sub = np.repeat(x_sub, repeat_num, axis=0) - out = np.vstack((out, x_sub)) + stacked_x_sub = x_sub + for r in range(repeat_num - 1): + stacked_x_sub = np.vstack((stacked_x_sub, x_sub)) + out = np.vstack((out, stacked_x_sub)) if x_lod is not None: for j in xrange(repeat_num): out_lod[0].append(out_lod[0][-1] + x_len) @@ -101,11 +103,11 @@ class TestSequenceExpandCase3(TestSequenceExpand): class TestSequenceExpandCase4(TestSequenceExpand): def set_data(self): - data = [0.1, 0.3, 0.2, 0.15, 0.25, 0.2, 0.15, 0.25, 0.1, 0.3] + data = np.random.uniform(0.1, 1, [5 * 2, 1]) x_data = 
np.array(data).reshape([5, 2]).astype('float32') x_lod = [[0, 2, 5]] - y_data = np.random.uniform(0.1, 1, [2, 1]).astype('float32') - y_lod = [[0, 1, 2], [0, 1, 2]] + y_data = np.random.uniform(0.1, 1, [3, 1]).astype('float32') + y_lod = [[0, 1, 3], [0, 1, 3]] self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)} diff --git a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py index 75ff85a55fc4fd504ecd032e17f7e189c17192fb..346a949b6e7c96b5535f5e65ddbada11e110a0a7 100644 --- a/python/paddle/fluid/tests/unittests/test_uniform_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_uniform_random_op.py @@ -15,6 +15,16 @@ import unittest import numpy as np from op_test import OpTest +import paddle.fluid.core as core +from paddle.fluid.op import Operator + + +def output_hist(out): + hist, _ = np.histogram(out, range=(-5, 10)) + hist = hist.astype("float32") + hist /= float(out.size) + prob = 0.1 * np.ones((10)) + return hist, prob class TestUniformRandomOp(OpTest): @@ -33,11 +43,37 @@ class TestUniformRandomOp(OpTest): self.check_output_customized(self.verify_output) def verify_output(self, outs): - tensor = outs[0] - hist, _ = np.histogram(outs[0], range=(-5, 10)) - hist = hist.astype("float32") - hist /= float(outs[0].size) - prob = 0.1 * np.ones((10)) + hist, prob = output_hist(np.array(outs[0])) + self.assertTrue( + np.allclose( + hist, prob, rtol=0, atol=0.01), "hist: " + str(hist)) + + +class TestUniformRandomOpSelectedRows(unittest.TestCase): + def get_places(self): + places = [core.CPUPlace()] + if core.is_compiled_with_cuda(): + places.append(core.CUDAPlace(0)) + return places + + def test_check_output(self): + for place in self.get_places(): + self.check_with_place(place) + + def check_with_place(self, place): + scope = core.Scope() + out = scope.var("X").get_selected_rows() + + op = Operator( + "uniform_random", + Out="X", + shape=[4, 784], + min=-5.0, + max=10.0, + seed=10) + op.run(scope, place) + self.assertEqual(out.get_tensor().shape(), [4, 784]) + hist, prob = output_hist(np.array(out.get_tensor())) self.assertTrue( np.allclose( hist, prob, rtol=0, atol=0.01), "hist: " + str(hist)) diff --git a/python/paddle/v2/reader/__init__.py b/python/paddle/v2/reader/__init__.py index 3b059735a924d58714cd88a761eb83143f1192d6..12efdc4a0fec83fed57bdcbf687aaec69d13ba91 100644 --- a/python/paddle/v2/reader/__init__.py +++ b/python/paddle/v2/reader/__init__.py @@ -50,7 +50,7 @@ An example implementation for single item data reader creator: def reader(): while True: yield numpy.random.uniform(-1, 1, size=width*height) - return reader + return reader An example implementation for multiple item data reader creator: @@ -60,7 +60,7 @@ An example implementation for multiple item data reader creator: def reader(): while True: yield numpy.random.uniform(-1, 1, size=width*height), label - return reader + return reader TODO(yuyang18): Should we add whole design doc here? diff --git a/tools/aws_benchmarking/README.md b/tools/aws_benchmarking/README.md new file mode 100644 index 0000000000000000000000000000000000000000..837fcbb8512bce027ecd09a7f39b806151e9154b --- /dev/null +++ b/tools/aws_benchmarking/README.md @@ -0,0 +1,160 @@ +# AWS benchmark testing tool +This is an automation tool for deploying paddlepaddle benchmark testing to AWS. + +## Features + + - subnet creation to fit just the amount of ec2 instances required. 
+ - pserver and trainer ec2 instances allocation, and instance state verification
+ - nvidia-docker ready for GPU training
+ - Instances and network element garbage collection when a task is accomplished or an error occurs
+ - Test logs are collected in real time
+ - Web service for checking logs or tearing down the testing setup
+ - No testing code change needed
+ - Lots of optional configuration options
+
+ ## Usage
+
+ ### Prerequisites
+
+ - You have a working AWS account
+ - You have the [AWS Command Line Interface](https://aws.amazon.com/cli/) installed
+ - Your AWS CLI is bound to an account which has the `AmazonEC2FullAccess` permission, and it is set as the default credential.
+ - You have a key pair created and the pem file downloaded.
+ - You have a default VPC in the region you want to run the test.
+ - You have a Security Group created for the VPC mentioned above, which allows port 22 and the port you want to expose your control web service on (5436 by default)
+ - If your test is supposed to run on a GPU machine, especially a multi-card GPU machine (p2, p3 series), you might need to contact Amazon to raise the limit, which otherwise allows no more than 1 GPU instance at a time.
+
+ ### Start a benchmark test
+
+#### Create training image
+
+*What to expect in this step:*
+
+*You will have your training logic packed together with the Paddle runtime into a docker image that AWS instances can pick up for training.*
+
+The training Python script and the PaddlePaddle runtime are supposed to be packed into one docker image. Use a PaddlePaddle production image as the base image and create the training image with a Dockerfile like the following:
+
+```Dockerfile
+FROM paddlepaddle/paddle:latest-gpu
+
+ENV HOME /root
+COPY ./ /root/
+WORKDIR /root
+RUN pip install -r /root/requirements.txt
+ENTRYPOINT ["python", "my_training.py"]
+```
+
+***Please Note***
+Training nodes will run your `ENTRYPOINT` script with the following environment variables:
+
+ - `TASK_NAME`: unique name to identify this training process.
+ - `TRAINING_ROLE`: current node's role in this training process, either "PSERVER" or "TRAINER"
+ - `PSERVER_HOSTS`: comma separated list of pserver end points, e.g. "192.168.1.2:5436,192.168.1.3:5436"
+ - `PSERVERS`: same as above
+ - `TRAINERS`: trainer count
+ - `SERVER_ENDPOINT`: current server end point if the node role is a pserver
+ - `TRAINER_INDEX`: an integer to identify the index of the current trainer if the node role is a trainer.
+ - `PADDLE_INIT_TRAINER_ID`: same as above
+
+ Now that we have a working distributed training script that takes advantage of the node environment variables, and a Dockerfile to generate the training image, run the following command:
+
+ ```bash
+ docker build -t myreponame/paddle_benchmark .
+ ```
+
+ Now you have the image built and tagged with `myreponame/paddle_benchmark`. Let's push it to Docker Hub so that it can be picked up by our AWS instances.
+
+ ```bash
+ docker push myreponame/paddle_benchmark
+ ```
+
+#### Create instances and start training
+
+*What to expect in this step*
+
+*You will be asked to provide some basic settings to configure your training, and this tool will get your training started and monitored.*
+
+Now let's start the training process:
+
+```bash
+docker run -i -v $HOME/.aws:/root/.aws -v :/root/.pem \
+putcn/paddle_aws_client \
+--action create \
+--key_name \
+--security_group_id \
+--docker_image myreponame/paddle_benchmark \
+--pserver_count 2 \
+--trainer_count 2
+```
+
+Now just wait until you see this:
+```
+master server finished init process, visit http://XXX:XXX/status to check master log
+```
+That means you can turn off your laptop: your cluster is creating instances, starting the training process, collecting logs, and will eventually shut all pservers and trainers down when training is finished.
+
+#### Post creation operations
+
+To access the master log:
+
+```bash
+docker run -i -v $HOME/.aws:/root/.aws \
+putcn/paddle_aws_client \
+--action status \
+--master_server_public_ip \
+--master_server_port
+```
+
+To tear down the training setup:
+
+```bash
+docker run -i -v $HOME/.aws:/root/.aws \
+putcn/paddle_aws_client \
+--action cleanup \
+--master_server_public_ip \
+--master_server_port
+```
+
+To retrieve training logs:
+TBD
+
+### Tech details
+
+*What to expect in this step*
+
+*You will understand what is happening behind the scenes: how to check the training log, how to tear down the training on the fly, etc.*
+
+Let's understand what happens under the hood when you run the command above on your laptop.
+
+![alt](diagram.png)
+
+There are 4 roles in the figure above:
+ - client: your laptop
+ - master: talks to the AWS API server to create/tear down instances, and monitors the training process
+ - AWS API server: the one who actually creates and manages instances
+ - pservers and trainers: training instances
+
+When you run the `docker run` command above, what it actually does is ask the AWS API service to create a subnet (step 1) and a master instance (step 2), and pass along all the parameters the client collected or generated (step 3). The master is kept at a minimum hardware config to keep the running cost low.
+
+Then, when the master is up and running, it asks the AWS API server to create the heavy-lifting training instances, which are expensive to run (step 4). The master starts the training process as soon as they are done initializing (step 5).
+
+Meanwhile, the master exposes a web service for the client to check the training log or even tear the training setup down with a web service call.
+
+If you are creating the training with the client docker container while also monitoring your AWS dashboard, you will initially see an instance tagged with `ROLE=MASTER` and `TASK_NAME=_master` start, then you will see several instances tagged with `ROLE=PSERVER` and `ROLE=TRAINER` start.
+When the training is finished, pservers and trainers will be terminated. All their logs are kept in the master node's docker environment.
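+
+Those logs can also be pulled programmatically. As a quick sketch (not part of this tool), the snippet below uses Python's `requests` package against the master services listed next; `MASTER_HOST` and the port are placeholders for the address the client prints out:
+
+```python
+import requests
+
+# hypothetical master address: substitute your master_server_public_ip and port
+master = "http://MASTER_HOST:5436"
+
+# GET /logs returns a newline-separated list of known log file names
+for name in requests.get(master + "/logs").text.splitlines():
+    # GET /log/<file name> returns the content of that one log
+    print(requests.get(master + "/log/" + name).text)
+```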
+ +Master exposes 4 major services: + + - GET `/status`: return master log + - GET `/logs`: return list of log file names + - GET `/log/`: return a particular log by log file name + - POST `/cleanup`: teardown the whole setup + + +### Parameters + +TBD, please refer to client/cluster_launcher.py for now + +### Trouble shooting + +TBD diff --git a/tools/aws_benchmarking/client/Dockerfile b/tools/aws_benchmarking/client/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..812c5d4bce0adff404577ce6b5fd3f0f4a91118c --- /dev/null +++ b/tools/aws_benchmarking/client/Dockerfile @@ -0,0 +1,7 @@ +FROM python:2.7.14-stretch + +ENV HOME /root +COPY ./ /root/ +WORKDIR /root +RUN pip install -r /root/requirements.txt +ENTRYPOINT ["python", "cluster_launcher.py"] \ No newline at end of file diff --git a/tools/aws_benchmarking/client/cluster_launcher.py b/tools/aws_benchmarking/client/cluster_launcher.py new file mode 100644 index 0000000000000000000000000000000000000000..594378ff8fc0744a4b11b1c11e2e3b270be7aed0 --- /dev/null +++ b/tools/aws_benchmarking/client/cluster_launcher.py @@ -0,0 +1,407 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import os +import time +import math +import logging +import copy + +import netaddr +import boto3 +import namesgenerator +import paramiko +from scp import SCPClient +import requests + + +def str2bool(v): + if v.lower() in ('yes', 'true', 't', 'y', '1'): + return True + elif v.lower() in ('no', 'false', 'f', 'n', '0'): + return False + else: + raise argparse.ArgumentTypeError('Boolean value expected.') + + +parser = argparse.ArgumentParser(description=__doc__) +parser.add_argument( + '--key_name', type=str, default="", help="required, key pair name") +parser.add_argument( + '--security_group_id', + type=str, + default="", + help="required, the security group id associated with your VPC") + +parser.add_argument( + '--vpc_id', + type=str, + default="", + help="The VPC in which you wish to run test") +parser.add_argument( + '--subnet_id', + type=str, + default="", + help="The Subnet_id in which you wish to run test") + +parser.add_argument( + '--pserver_instance_type', + type=str, + default="c5.2xlarge", + help="your pserver instance type, c5.2xlarge by default") +parser.add_argument( + '--trainer_instance_type', + type=str, + default="p2.8xlarge", + help="your trainer instance type, p2.8xlarge by default") + +parser.add_argument( + '--task_name', + type=str, + default="", + help="the name you want to identify your job") +parser.add_argument( + '--pserver_image_id', + type=str, + default="ami-da2c1cbf", + help="ami id for system image, default one has nvidia-docker ready, \ + use ami-1ae93962 for us-east-2") + +parser.add_argument( + '--pserver_command', type=str, default="", help="pserver start command") + +parser.add_argument( + '--trainer_image_id', + type=str, + default="ami-da2c1cbf", + help="ami id for system image, default one has nvidia-docker ready, \ + 
use ami-1ae93962 for us-west-2") + +parser.add_argument( + '--trainer_command', type=str, default="", help="trainer start command") + +parser.add_argument( + '--availability_zone', + type=str, + default="us-east-2a", + help="aws zone id to place ec2 instances") + +parser.add_argument( + '--trainer_count', type=int, default=1, help="Trainer count") + +parser.add_argument( + '--pserver_count', type=int, default=1, help="Pserver count") + +parser.add_argument( + '--action', type=str, default="create", help="create|cleanup|status") + +parser.add_argument('--pem_path', type=str, help="private key file") + +parser.add_argument( + '--pserver_port', type=str, default="5436", help="pserver port") + +parser.add_argument( + '--docker_image', type=str, default="busybox", help="training docker image") + +parser.add_argument( + '--master_server_port', type=int, default=5436, help="master server port") + +parser.add_argument( + '--master_server_public_ip', type=str, help="master server public ip") + +parser.add_argument( + '--master_docker_image', + type=str, + default="putcn/paddle_aws_master:latest", + help="master docker image id") + +parser.add_argument( + '--no_clean_up', + type=str2bool, + default=False, + help="whether to clean up after training") + +args = parser.parse_args() + +logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') + +ec2client = boto3.client('ec2') + + +def print_arguments(): + print('----------- Configuration Arguments -----------') + for arg, value in sorted(vars(args).iteritems()): + print('%s: %s' % (arg, value)) + print('------------------------------------------------') + + +def create_subnet(): + # if no vpc id provided, list vpcs + logging.info("start creating subnet") + if not args.vpc_id: + logging.info("no vpc provided, trying to find the default one") + vpcs_desc = ec2client.describe_vpcs( + Filters=[{ + "Name": "isDefault", + "Values": ["true", ] + }], ) + if len(vpcs_desc["Vpcs"]) == 0: + raise ValueError('No default VPC') + args.vpc_id = vpcs_desc["Vpcs"][0]["VpcId"] + vpc_cidrBlock = vpcs_desc["Vpcs"][0]["CidrBlock"] + + logging.info("default vpc fount with id %s and CidrBlock %s" % + (args.vpc_id, vpc_cidrBlock)) + + if not vpc_cidrBlock: + logging.info("trying to find cidrblock for vpc") + vpcs_desc = ec2client.describe_vpcs( + Filters=[{ + "Name": "vpc-id", + "Values": [args.vpc_id, ], + }], ) + if len(vpcs_desc["Vpcs"]) == 0: + raise ValueError('No VPC found') + vpc_cidrBlock = vpcs_desc["Vpcs"][0]["CidrBlock"] + logging.info("cidrblock for vpc is %s" % vpc_cidrBlock) + + # list subnets in vpc in order to create a new one + + logging.info("trying to find ip blocks for new subnet") + subnets_desc = ec2client.describe_subnets( + Filters=[{ + "Name": "vpc-id", + "Values": [args.vpc_id, ], + }], ) + + ips_taken = [] + for subnet_dec in subnets_desc["Subnets"]: + ips_taken.append(subnet_dec["CidrBlock"]) + + ip_blocks_avaliable = netaddr.IPSet( + [vpc_cidrBlock]) ^ netaddr.IPSet(ips_taken) + # adding 10 addresses as buffer + cidr_prefix = 32 - math.ceil( + math.log(args.pserver_count + args.trainer_count + 10, 2)) + if cidr_prefix <= 16: + raise ValueError('Too many nodes to fit in current VPC') + + for ipnetwork in ip_blocks_avaliable.iter_cidrs(): + try: + subnet_cidr = ipnetwork.subnet(int(cidr_prefix)).next() + logging.info("subnet ip block found %s" % (subnet_cidr)) + break + except Exception: + pass + + if not subnet_cidr: + raise ValueError( + 'No avaliable subnet to fit required nodes in current VPC') + + logging.info("trying to create 
subnet") + subnet_desc = ec2client.create_subnet( + CidrBlock=str(subnet_cidr), + VpcId=args.vpc_id, + AvailabilityZone=args.availability_zone) + + subnet_id = subnet_desc["Subnet"]["SubnetId"] + + subnet_waiter = ec2client.get_waiter('subnet_available') + # sleep for 1s before checking its state + time.sleep(1) + subnet_waiter.wait(SubnetIds=[subnet_id, ]) + + logging.info("subnet created") + + logging.info("adding tags to newly created subnet") + ec2client.create_tags( + Resources=[subnet_id, ], + Tags=[{ + "Key": "Task_name", + 'Value': args.task_name + }]) + return subnet_id + + +def run_instances(image_id, instance_type, count=1, role="MASTER", cmd=""): + response = ec2client.run_instances( + ImageId=image_id, + InstanceType=instance_type, + MaxCount=count, + MinCount=count, + UserData=cmd, + DryRun=False, + InstanceInitiatedShutdownBehavior="stop", + KeyName=args.key_name, + Placement={'AvailabilityZone': args.availability_zone}, + NetworkInterfaces=[{ + 'DeviceIndex': 0, + 'SubnetId': args.subnet_id, + "AssociatePublicIpAddress": True, + 'Groups': args.security_group_ids + }], + TagSpecifications=[{ + 'ResourceType': "instance", + 'Tags': [{ + "Key": 'Task_name', + "Value": args.task_name + "_master" + }, { + "Key": 'Role', + "Value": role + }] + }]) + + instance_ids = [] + for instance in response["Instances"]: + instance_ids.append(instance["InstanceId"]) + + if len(instance_ids) > 0: + logging.info(str(len(instance_ids)) + " instance(s) created") + else: + logging.info("no instance created") + #create waiter to make sure it's running + + logging.info("waiting for instance to become accessible") + waiter = ec2client.get_waiter('instance_status_ok') + waiter.wait( + Filters=[{ + "Name": "instance-status.status", + "Values": ["ok"] + }, { + "Name": "instance-status.reachability", + "Values": ["passed"] + }, { + "Name": "instance-state-name", + "Values": ["running"] + }], + InstanceIds=instance_ids) + + instances_response = ec2client.describe_instances(InstanceIds=instance_ids) + + return instances_response["Reservations"][0]["Instances"] + + +def generate_task_name(): + return namesgenerator.get_random_name() + + +def init_args(): + + if not args.task_name: + args.task_name = generate_task_name() + logging.info("task name generated %s" % (args.task_name)) + + if not args.pem_path: + args.pem_path = os.path.expanduser("~") + "/" + args.key_name + ".pem" + if args.security_group_id: + args.security_group_ids = (args.security_group_id, ) + + +def create(): + + init_args() + + # create subnet + if not args.subnet_id: + args.subnet_id = create_subnet() + + # create master node + + master_instance_response = run_instances( + image_id="ami-7a05351f", instance_type="t2.nano") + + logging.info("master server started") + + args.master_server_public_ip = master_instance_response[0][ + "PublicIpAddress"] + args.master_server_ip = master_instance_response[0]["PrivateIpAddress"] + + logging.info("master server started, master_ip=%s, task_name=%s" % + (args.master_server_public_ip, args.task_name)) + + # cp config file and pems to master node + + ssh_key = paramiko.RSAKey.from_private_key_file(args.pem_path) + ssh_client = paramiko.SSHClient() + ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh_client.connect( + hostname=args.master_server_public_ip, username="ubuntu", pkey=ssh_key) + + with SCPClient(ssh_client.get_transport()) as scp: + scp.put(os.path.expanduser("~") + "/" + ".aws", + recursive=True, + remote_path='/home/ubuntu/') + scp.put(args.pem_path, + 
remote_path='/home/ubuntu/' + args.key_name + ".pem")
+
+    logging.info("credentials and pem copied to master")
+
+    # set arguments and start docker
+    kick_off_cmd = "docker run -d -v /home/ubuntu/.aws:/root/.aws/"
+    kick_off_cmd += " -v /home/ubuntu/" + args.key_name + ".pem:/root/" + args.key_name + ".pem"
+    kick_off_cmd += " -v /home/ubuntu/logs/:/root/logs/"
+    kick_off_cmd += " -p " + str(args.master_server_port) + ":" + str(
+        args.master_server_port)
+    kick_off_cmd += " " + args.master_docker_image
+
+    args_to_pass = copy.copy(args)
+    args_to_pass.action = "serve"
+    del args_to_pass.pem_path
+    del args_to_pass.security_group_ids
+    del args_to_pass.master_docker_image
+    del args_to_pass.master_server_public_ip
+    for arg, value in sorted(vars(args_to_pass).iteritems()):
+        if value:
+            kick_off_cmd += ' --%s %s' % (arg, value)
+
+    logging.info(kick_off_cmd)
+    stdin, stdout, stderr = ssh_client.exec_command(command=kick_off_cmd)
+    return_code = stdout.channel.recv_exit_status()
+    logging.info(return_code)
+    if return_code != 0:
+        raise Exception("Error while kicking off master")
+
+    logging.info(
+        "master server finished init process, visit %s to check master log" %
+        (get_master_web_url("/status")))
+
+
+def cleanup():
+    print requests.post(get_master_web_url("/cleanup")).text
+
+
+def status():
+    # the master only routes GET for /status; a POST would hit its 404 handler
+    print requests.get(get_master_web_url("/status")).text
+
+
+def get_master_web_url(path):
+    return "http://" + args.master_server_public_ip + ":" + str(
+        args.master_server_port) + path
+
+
+if __name__ == "__main__":
+    print_arguments()
+    if args.action == "create":
+        if not args.key_name or not args.security_group_id:
+            raise ValueError("key_name and security_group_id are required")
+        create()
+    elif args.action == "cleanup":
+        if not args.master_server_public_ip:
+            raise ValueError("master_server_public_ip is required")
+        cleanup()
+    elif args.action == "status":
+        if not args.master_server_public_ip:
+            raise ValueError("master_server_public_ip is required")
+        status()
diff --git a/tools/aws_benchmarking/client/requirements.txt b/tools/aws_benchmarking/client/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9454801f2025671cfd1a2c3b71cf4c2ac07cb8fb
--- /dev/null
+++ b/tools/aws_benchmarking/client/requirements.txt
@@ -0,0 +1,6 @@
+netaddr==0.7.19
+boto3==1.6.21
+namesgenerator==0.3
+paramiko==2.4.1
+scp
+requests
diff --git a/tools/aws_benchmarking/diagram.png b/tools/aws_benchmarking/diagram.png
new file mode 100644
index 0000000000000000000000000000000000000000..b97909c5fe78b59d0e636ff73c2ed3e63a0be722
Binary files /dev/null and b/tools/aws_benchmarking/diagram.png differ
diff --git a/tools/aws_benchmarking/server/Dockerfile b/tools/aws_benchmarking/server/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..333523abcdb6fbe7dc01bbaf7d32ce1d8e866028
--- /dev/null
+++ b/tools/aws_benchmarking/server/Dockerfile
@@ -0,0 +1,7 @@
+FROM python:2.7.14-stretch
+
+ENV HOME /root
+COPY ./ /root/
+WORKDIR /root
+RUN pip install -r /root/requirements.txt
+ENTRYPOINT ["python", "cluster_master.py"]
\ No newline at end of file
diff --git a/tools/aws_benchmarking/server/cluster_master.py b/tools/aws_benchmarking/server/cluster_master.py
new file mode 100644
index 0000000000000000000000000000000000000000..21f85a5fc43e951897eb6b785367630abda722c0
--- /dev/null
+++ b/tools/aws_benchmarking/server/cluster_master.py
@@ -0,0 +1,673 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import os +import json +import math +import time +import threading +import logging + +import netaddr +import boto3 +import namesgenerator +import paramiko + +from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer + + +# You must have aws_access_key_id, aws_secret_access_key, region set in +# ~/.aws/credentials and ~/.aws/config +def str2bool(v): + if v.lower() in ('yes', 'true', 't', 'y', '1'): + return True + elif v.lower() in ('no', 'false', 'f', 'n', '0'): + return False + else: + raise argparse.ArgumentTypeError('Boolean value expected.') + + +parser = argparse.ArgumentParser(description=__doc__) +parser.add_argument( + '--key_name', type=str, default="", help="required, key pair name") +parser.add_argument( + '--security_group_id', + type=str, + default="", + help="required, the security group id associated with your VPC") + +parser.add_argument( + '--vpc_id', + type=str, + default="", + help="The VPC in which you wish to run test") +parser.add_argument( + '--subnet_id', + type=str, + default="", + help="The Subnet_id in which you wish to run test") + +parser.add_argument( + '--pserver_instance_type', + type=str, + default="c5.2xlarge", + help="your pserver instance type, c5.2xlarge by default") +parser.add_argument( + '--trainer_instance_type', + type=str, + default="p2.8xlarge", + help="your trainer instance type, p2.8xlarge by default") + +parser.add_argument( + '--task_name', + type=str, + default="", + help="the name you want to identify your job") +parser.add_argument( + '--pserver_image_id', + type=str, + default="ami-da2c1cbf", + help="ami id for system image, default one has nvidia-docker ready, use ami-1ae93962 for us-east-2" +) +parser.add_argument( + '--trainer_image_id', + type=str, + default="ami-da2c1cbf", + help="ami id for system image, default one has nvidia-docker ready, use ami-1ae93962 for us-west-2" +) + +parser.add_argument( + '--availability_zone', + type=str, + default="us-east-2a", + help="aws zone id to place ec2 instances") + +parser.add_argument( + '--trainer_count', type=int, default=1, help="Trainer count") + +parser.add_argument( + '--pserver_count', type=int, default=1, help="Pserver count") + +parser.add_argument( + '--pserver_bash_file', + type=str, + default=os.path.join(os.path.dirname(__file__), "pserver.sh.template"), + help="pserver bash file path") + +parser.add_argument( + '--pserver_command', type=str, default="", help="pserver start command") + +parser.add_argument( + '--trainer_bash_file', + type=str, + default=os.path.join(os.path.dirname(__file__), "trainer.sh.template"), + help="trainer bash file path") + +parser.add_argument( + '--trainer_command', type=str, default="", help="trainer start command") + +parser.add_argument( + '--action', type=str, default="serve", help="create|cleanup|serve") + +parser.add_argument('--pem_path', type=str, help="private key file") + +parser.add_argument( + '--pserver_port', type=str, default="5436", help="pserver port") + 
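+# the docker_image and master_server_* flags below are substituted into the
+# pserver/trainer bash templates (as DOCKER_IMAGE and MASTER_ENDPOINT) when
+# the master kicks off training containers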
+parser.add_argument( + '--docker_image', type=str, default="busybox", help="training docker image") + +parser.add_argument( + '--master_server_port', type=int, default=5436, help="master server port") + +parser.add_argument( + '--master_server_ip', type=str, default="", help="master server private ip") + +parser.add_argument( + '--no_clean_up', + type=str2bool, + default=False, + help="whether to clean up after training") + +args = parser.parse_args() + +ec2client = boto3.client('ec2') + +args.log_path = os.path.join(os.path.dirname(__file__), "logs/") + +logging.basicConfig( + filename=args.log_path + 'master.log', + level=logging.INFO, + format='%(asctime)s %(message)s') + +log_files = ["master.log"] + + +def create_subnet(): + # if no vpc id provided, list vpcs + logging.info("start creating subnet") + if not args.vpc_id: + logging.info("no vpc provided, trying to find the default one") + vpcs_desc = ec2client.describe_vpcs( + Filters=[{ + "Name": "isDefault", + "Values": ["true", ] + }], ) + if len(vpcs_desc["Vpcs"]) == 0: + raise ValueError('No default VPC') + args.vpc_id = vpcs_desc["Vpcs"][0]["VpcId"] + vpc_cidrBlock = vpcs_desc["Vpcs"][0]["CidrBlock"] + + logging.info("default vpc fount with id %s and CidrBlock %s" % + (args.vpc_id, vpc_cidrBlock)) + + if not vpc_cidrBlock: + logging.info("trying to find cidrblock for vpc") + vpcs_desc = ec2client.describe_vpcs( + Filters=[{ + "Name": "vpc-id", + "Values": [args.vpc_id, ], + }], ) + if len(vpcs_desc["Vpcs"]) == 0: + raise ValueError('No VPC found') + vpc_cidrBlock = vpcs_desc["Vpcs"][0]["CidrBlock"] + logging.info("cidrblock for vpc is %s" % vpc_cidrBlock) + + # list subnets in vpc in order to create a new one + + logging.info("trying to find ip blocks for new subnet") + subnets_desc = ec2client.describe_subnets( + Filters=[{ + "Name": "vpc-id", + "Values": [args.vpc_id, ], + }], ) + + ips_taken = [] + for subnet_dec in subnets_desc["Subnets"]: + ips_taken.append(subnet_dec["CidrBlock"]) + + ip_blocks_avaliable = netaddr.IPSet( + [vpc_cidrBlock]) ^ netaddr.IPSet(ips_taken) + # adding 10 addresses as buffer + cidr_prefix = 32 - math.ceil( + math.log(args.pserver_count + args.trainer_count + 10, 2)) + if cidr_prefix <= 16: + raise ValueError('Too many nodes to fit in current VPC') + + for ipnetwork in ip_blocks_avaliable.iter_cidrs(): + try: + subnet_cidr = ipnetwork.subnet(int(cidr_prefix)).next() + logging.info("subnet ip block found %s" % (subnet_cidr)) + break + except Exception: + pass + + if not subnet_cidr: + raise ValueError( + 'No avaliable subnet to fit required nodes in current VPC') + + logging.info("trying to create subnet") + subnet_desc = ec2client.create_subnet( + CidrBlock=str(subnet_cidr), + VpcId=args.vpc_id, + AvailabilityZone=args.availability_zone) + + subnet_id = subnet_desc["Subnet"]["SubnetId"] + + subnet_waiter = ec2client.get_waiter('subnet_available') + # sleep for 1s before checking its state + time.sleep(1) + subnet_waiter.wait(SubnetIds=[subnet_id, ]) + + logging.info("subnet created") + + logging.info("adding tags to newly created subnet") + ec2client.create_tags( + Resources=[subnet_id, ], + Tags=[{ + "Key": "Task_name", + 'Value': args.task_name + }]) + return subnet_id + + +def generate_task_name(): + return namesgenerator.get_random_name() + + +def script_to_str(file_path): + if not file_path: + return "echo $PSERVER_HOSTS" + file = open(file_path, 'r') + text = file.read().strip() + file.close() + return text + + +def run_instances(image_id, instance_type, count, role, cmd=""): + response = 
ec2client.run_instances( + ImageId=image_id, + InstanceType=instance_type, + MaxCount=count, + MinCount=count, + UserData=cmd, + DryRun=False, + InstanceInitiatedShutdownBehavior="stop", + KeyName=args.key_name, + Placement={'AvailabilityZone': args.availability_zone}, + NetworkInterfaces=[{ + 'DeviceIndex': 0, + 'SubnetId': args.subnet_id, + "AssociatePublicIpAddress": True, + 'Groups': args.security_group_ids + }], + TagSpecifications=[{ + 'ResourceType': "instance", + 'Tags': [{ + "Key": 'Task_name', + "Value": args.task_name + }, { + "Key": 'Role', + "Value": role + }] + }]) + + instance_ids = [] + for instance in response["Instances"]: + instance_ids.append(instance["InstanceId"]) + + if len(instance_ids) > 0: + logging.info(str(len(instance_ids)) + " instance(s) created") + else: + logging.info("no instance created") + #create waiter to make sure it's running + + logging.info("waiting for instance to become accessible") + waiter = ec2client.get_waiter('instance_status_ok') + waiter.wait( + Filters=[{ + "Name": "instance-status.status", + "Values": ["ok"] + }, { + "Name": "instance-status.reachability", + "Values": ["passed"] + }, { + "Name": "instance-state-name", + "Values": ["running"] + }], + InstanceIds=instance_ids) + + instances_response = ec2client.describe_instances(InstanceIds=instance_ids) + + return instances_response["Reservations"][0]["Instances"] + + +def create_pservers(): + try: + return run_instances( + image_id=args.pserver_image_id, + instance_type=args.pserver_instance_type, + count=args.pserver_count, + role="PSERVER", ) + except Exception: + logging.exception("error while trying to create pservers") + cleanup(args.task_name) + + +def log_to_file(source, filename): + if not filename in log_files: + log_files.append(filename) + with open(args.log_path + filename, "a") as log_file: + for line in iter(source.readline, ""): + log_file.write(line) + + +def create_trainers(kickoff_cmd, pserver_endpoints_str): + def create_and_start_trainer(trainer_index): + logging.info("trainer " + str(trainer_index) + " is starting") + + instance_response = run_instances( + image_id=args.trainer_image_id, + instance_type=args.trainer_instance_type, + count=1, + role="TRAINER", )[0] + trainer_ip = instance_response["PrivateIpAddress"] + + logging.info("trainer " + str(trainer_index) + " started") + + ssh_key = paramiko.RSAKey.from_private_key_file(args.pem_path) + ssh_client = paramiko.SSHClient() + ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh_client.connect(hostname=trainer_ip, username="ubuntu", pkey=ssh_key) + + logging.info("trainer " + str(trainer_index) + + " terminal connected via ssh") + + cmd = kickoff_cmd.format( + PSERVER_HOSTS=pserver_endpoints_str, + DOCKER_IMAGE=args.docker_image, + TRAINER_INDEX=str(trainer_index), + TASK_NAME=args.task_name, + TRAINER_COUNT=args.trainer_count, + COMMAND=args.trainer_command, + MASTER_ENDPOINT=args.master_server_ip + ":" + + str(args.master_server_port)) + logging.info(cmd) + + stdin, stdout, stderr = ssh_client.exec_command(command=cmd) + + # read and save output log + + logging.info("trainer " + str(trainer_index) + + " command executed, keep fetching log") + + stdout_thread = threading.Thread( + target=log_to_file, + args=( + stdout, + "trainer_" + str(trainer_index) + ".log", )) + stderr_thread = threading.Thread( + target=log_to_file, + args=( + stderr, + "trainer_" + str(trainer_index) + "_err.log", )) + stdout_thread.start() + stderr_thread.start() + + stdout_thread.join() + stderr_thread.join() + + 
# the joins above return only after the remote docker run exits;
+        # the exit status then tells us whether this trainer finished cleanly
+        return_code = stdout.channel.recv_exit_status()
+        if return_code != 0:
+            trainer_create_results[trainer_index] = {'has_error': True}
+            raise ValueError("trainer didn't finish with exit code 0")
+
+        ssh_client.close()
+
+    # start trainer instances and run the kickoff command, one thread per trainer
+
+    trainer_threads = []
+    trainer_create_results = {}
+    try:
+        for i in xrange(args.trainer_count):
+            logging.info("starting thread for trainer " + str(i))
+            trainer_thread = threading.Thread(
+                target=create_and_start_trainer, args=(i, ))
+            trainer_thread.start()
+            trainer_threads.append(trainer_thread)
+
+        for trainer_thread in trainer_threads:
+            trainer_thread.join()
+
+        for result in trainer_create_results.values():
+            if result["has_error"]:
+                logging.error(
+                    "error during trainer starting or training, destroying the whole cluster"
+                )
+                cleanup(args.task_name)
+                break
+
+        logging.info("all trainers stopped")
+    except Exception:
+        logging.info(
+            "Training exception, cleaning up resources, please check log for more info"
+        )
+    finally:
+        cleanup(args.task_name)
+
+
+def cleanup(task_name):
+    if args.no_clean_up:
+        logging.info("no clean up option set, going to leave the setup running")
+        return
+    # shut down all ec2 instances
+    print("going to clean up " + task_name + " instances")
+    instances_response = ec2client.describe_instances(Filters=[{
+        "Name": "tag:Task_name",
+        "Values": [task_name]
+    }])
+
+    instance_ids = []
+    if len(instances_response["Reservations"]) > 0:
+        for reservation in instances_response["Reservations"]:
+            for instance in reservation["Instances"]:
+                instance_ids.append(instance["InstanceId"])
+
+        ec2client.terminate_instances(InstanceIds=instance_ids)
+
+        instance_termination_waiter = ec2client.get_waiter(
+            'instance_terminated')
+        instance_termination_waiter.wait(InstanceIds=instance_ids)
+
+    # delete the subnet created
+
+    subnet = ec2client.describe_subnets(Filters=[{
+        "Name": "tag:Task_name",
+        "Values": [task_name]
+    }])
+
+    if len(subnet["Subnets"]) > 0:
+        ec2client.delete_subnet(SubnetId=subnet["Subnets"][0]["SubnetId"])
+        # no subnet delete waiter, just leave it.
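+    # by this point every resource tagged with this Task_name is gone:
+    # instances first (terminated and waited on), then the subnet itself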
+ logging.info("Clearnup done") + return + + +def kickoff_pserver(host, pserver_endpoints_str): + try: + ssh_key = paramiko.RSAKey.from_private_key_file(args.pem_path) + ssh_client = paramiko.SSHClient() + ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh_client.connect(hostname=host, username="ubuntu", pkey=ssh_key) + cmd = (script_to_str(args.pserver_bash_file)).format( + PSERVER_HOSTS=pserver_endpoints_str, + DOCKER_IMAGE=args.docker_image, + PSERVER_PORT=args.pserver_port, + TASK_NAME=args.task_name, + COMMAND=args.pserver_command, + TRAINER_COUNT=args.trainer_count, + TRAINER_INDEX=0, + # there is no way to use 0.0.0.0:port to start pserver + # has to docker --network="host" with host ip to make this work + SERVER_ENDPOINT=host + ":" + str(args.pserver_port), + MASTER_ENDPOINT=args.master_server_ip + ":" + + str(args.master_server_port)) + logging.info(cmd) + stdin, stdout, stderr = ssh_client.exec_command(command=cmd) + + stdout_thread = threading.Thread( + target=log_to_file, args=( + stdout, + "pserver_" + host + ".log", )) + stderr_thread = threading.Thread( + target=log_to_file, args=( + stderr, + "pserver_" + host + "_err.log", )) + stdout_thread.start() + stderr_thread.start() + + stdout_thread.join() + stderr_thread.join() + + return_code = stdout.channel.recv_exit_status() + logging.info(return_code) + if return_code != 0: + raise Exception("Error while kicking off pserver training process") + except Exception: + logging.exception("Error while kicking off pserver training process") + cleanup(args.task_name) + finally: + ssh_client.close() + + +def init_args(): + + if not args.task_name: + args.task_name = generate_task_name() + logging.info("task name generated %s" % (args.task_name)) + + if not args.pem_path: + args.pem_path = os.path.expanduser("~") + "/" + args.key_name + ".pem" + if args.security_group_id: + args.security_group_ids = (args.security_group_id, ) + + args.trainers_job_done_count = 0 + + +def create_cluster(): + + if not args.subnet_id: + logging.info("creating subnet for this task") + args.subnet_id = create_subnet() + logging.info("subnet %s created" % (args.subnet_id)) + + logging.info("creating pservers") + pserver_create_response = create_pservers() + logging.info("pserver created, collecting pserver ips") + + pserver_endpoints = [] + for pserver in pserver_create_response: + pserver_endpoints.append(pserver["NetworkInterfaces"][0][ + "PrivateIpAddress"] + ":" + args.pserver_port) + + pserver_endpoints_str = ",".join(pserver_endpoints) + + logging.info("kicking off pserver training process") + pserver_threads = [] + for pserver in pserver_create_response: + pserver_thread = threading.Thread( + target=kickoff_pserver, + args=(pserver["PrivateIpAddress"], pserver_endpoints_str)) + pserver_thread.start() + pserver_threads.append(pserver_thread) + + logging.info("all pserver training process started") + + logging.info("creating trainers and kicking off trainer training process") + create_trainers( + kickoff_cmd=script_to_str(args.trainer_bash_file), + pserver_endpoints_str=pserver_endpoints_str) + + for pserver_thread in pserver_threads: + pserver_thread.join() + + logging.info("all process ended") + + +def start_server(args): + class S(BaseHTTPRequestHandler): + def _set_headers(self): + self.send_response(200) + self.send_header('Content-type', 'text/text') + self.end_headers() + + def do_HEAD(self): + self._set_headers() + + def do_404(self): + self.send_response(404) + self.send_header('Content-type', 'text/text') + 
+def start_server(args):
+    class S(BaseHTTPRequestHandler):
+        def _set_headers(self):
+            self.send_response(200)
+            self.send_header('Content-type', 'text/text')
+            self.end_headers()
+
+        def do_HEAD(self):
+            self._set_headers()
+
+        def do_404(self):
+            self.send_response(404)
+            self.send_header('Content-type', 'text/text')
+            self.end_headers()
+            logging.info("Received invalid request " + self.path)
+            self.wfile.write("NO ACTION FOUND")
+
+        def do_GET(self):
+            request_path = self.path
+            if request_path == "/status" or request_path == "/master_logs":
+                self._set_headers()
+                logging.info("Received request to return status")
+                with open(args.log_path + "master.log", "r") as logfile:
+                    self.wfile.write(logfile.read().strip())
+            elif request_path == "/list_logs" or request_path == "/logs":
+                self._set_headers()
+                self.wfile.write("\n".join(log_files))
+            elif "/log/" in request_path:
+                self._set_headers()
+                log_file_path = request_path.replace("/log/", "")
+                logging.info("requested log file path is " + args.log_path +
+                             log_file_path)
+                with open(args.log_path + log_file_path, "r") as logfile:
+                    self.wfile.write(logfile.read().strip())
+            else:
+                self.do_404()
+
+        def do_POST(self):
+            request_path = self.path
+
+            if request_path == "/save_data":
+                self._set_headers()
+                logging.info("Received request to save data")
+                self.wfile.write("DATA SAVED!")
+                content_length = int(self.headers['Content-Length'])
+                post_data = self.rfile.read(content_length)
+                if args.task_name:
+                    with open(args.task_name + ".txt", "a") as text_file:
+                        text_file.write(post_data + "\n")
+
+            elif request_path == "/cleanup":
+                self._set_headers()
+                logging.info("Received request to clean up the cluster")
+                cleanup(args.task_name)
+                self.wfile.write("cleanup in progress")
+
+            else:
+                self.do_404()
+
+    server_address = ('', args.master_server_port)
+    httpd = HTTPServer(server_address, S)
+    logging.info("HTTP server is starting")
+    httpd.serve_forever()
+
+
+def print_arguments():
+    logging.info('----------- Configuration Arguments -----------')
+    for arg, value in sorted(vars(args).iteritems()):
+        logging.info('%s: %s' % (arg, value))
+    logging.info('------------------------------------------------')
+
+
+if __name__ == "__main__":
+    print_arguments()
+    if args.action == "create":
+        logging.info("going to create cluster")
+        if not args.key_name or not args.security_group_id:
+            raise ValueError("key_name and security_group_id are required")
+        init_args()
+        create_cluster()
+    elif args.action == "cleanup":
+        logging.info("going to cleanup cluster")
+        if not args.task_name:
+            raise ValueError("task_name is required")
+        cleanup(args.task_name)
+    elif args.action == "serve":
+        # serve mode
+        if not args.master_server_ip:
+            raise ValueError(
+                "No master server ip set, please run with --action create")
+
+        logging.info("going to start serve and create cluster")
+
+        init_args()
+
+        logging.info("starting server in another thread")
+        server_thread = threading.Thread(target=start_server, args=(args, ))
+        server_thread.start()
+
+        create_cluster()
+        server_thread.join()
+    elif args.action == "test":
+        start_server(args)
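Once the master is serving, the endpoints defined in do_GET/do_POST above can be driven from any machine that can reach it. A sketch using urllib2 against a hypothetical master address; note that /cleanup is handled in do_POST, so a request body (here empty) must be supplied to force a POST:

    import urllib2

    master = "http://10.0.0.10:5436"  # hypothetical master ip and port

    print(urllib2.urlopen(master + "/status").read())   # master log
    print(urllib2.urlopen(master + "/logs").read())     # list of log files
    print(urllib2.urlopen(master + "/log/trainer_0.log").read())
    print(urllib2.urlopen(master + "/cleanup", data="").read())  # tear down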
"TRAINER_COUNT={TRAINER_COUNT}" -e "TRAINERS={TRAINER_COUNT}" -e "PSERVER_HOSTS={PSERVER_HOSTS}" -e "PSERVERS={PSERVER_HOSTS}" {DOCKER_IMAGE} {COMMAND} --device CPU \ No newline at end of file diff --git a/tools/aws_benchmarking/server/requirements.txt b/tools/aws_benchmarking/server/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..5c523854f28b0a6f024fba2b2f344b53ba967a2f --- /dev/null +++ b/tools/aws_benchmarking/server/requirements.txt @@ -0,0 +1,4 @@ +netaddr==0.7.19 +boto3==1.6.21 +namesgenerator==0.3 +paramiko==2.4.1 diff --git a/tools/aws_benchmarking/server/trainer.sh.template b/tools/aws_benchmarking/server/trainer.sh.template new file mode 100644 index 0000000000000000000000000000000000000000..a4b2876b08cdf05e90e50589f897d74ca5f90443 --- /dev/null +++ b/tools/aws_benchmarking/server/trainer.sh.template @@ -0,0 +1,2 @@ +#!/bin/bash +nvidia-docker run --network="host" -i -e "MASTER_ENDPOINT={MASTER_ENDPOINT}" -e "TASK_NAME={TASK_NAME}" -e "TRAINER_COUNT={TRAINER_COUNT}" -e "TRAINERS={TRAINER_COUNT}" -e "TRAINER_INDEX={TRAINER_INDEX}" -e "PADDLE_INIT_TRAINER_ID={TRAINER_INDEX}" -e "TRAINING_ROLE=TRAINER" -e "PSERVER_HOSTS={PSERVER_HOSTS}" -e "PSERVERS={PSERVER_HOSTS}" {DOCKER_IMAGE} {COMMAND} --device GPU \ No newline at end of file