diff --git a/CMakeLists.txt b/CMakeLists.txt index c86889c05c8cf0d521dce9adbf3e918ba91729a1..1e11f86d0ee836f65e69c8398fb26c3b6a1070f6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -36,6 +36,7 @@ include(simd) ################################ Configurations ####################################### option(WITH_GPU "Compile PaddlePaddle with NVIDIA GPU" ${CUDA_FOUND}) +option(WITH_AMD_GPU "Compile PaddlePaddle with AMD GPU" OFF) option(WITH_AVX "Compile PaddlePaddle with AVX intrinsics" ${AVX_FOUND}) option(WITH_MKL "Compile PaddlePaddle with MKL support." ${AVX_FOUND}) option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON) @@ -53,7 +54,7 @@ option(COVERALLS_UPLOAD "Package code coverage data to coveralls" OFF) option(ON_TRAVIS "Exclude special unit test on Travis CI" OFF) option(WITH_C_API "Compile PaddlePaddle with C-API(Prediction)" OFF) # TODO: Only compile PaddlePaddle fluid version by WITH_FLUID option. -option(WITH_FLUID "Compile PaddlePaddle fluid only(TODO)" ON) +option(WITH_FLUID "Compile PaddlePaddle fluid only(TODO)" OFF) option(WITH_GOLANG "Compile PaddlePaddle with GOLANG" OFF) option(GLIDE_INSTALL "Download and install go dependencies " ON) option(USE_NNPACK "Compile PaddlePaddle with NNPACK library" OFF) @@ -180,6 +181,11 @@ if(WITH_GPU) include(cuda) endif(WITH_GPU) +if(WITH_AMD_GPU) + find_package(HIP) + include(hip) +endif(WITH_AMD_GPU) + if(WITH_MKLML) list(APPEND EXTERNAL_LIBS ${MKLML_IOMP_LIB}) endif() diff --git a/Dockerfile b/Dockerfile index 60e76c7f2ede6beaca11659020d5991a75d5b741..fbec88c7966d6ea93495519843d6cda63f622661 100644 --- a/Dockerfile +++ b/Dockerfile @@ -53,10 +53,14 @@ RUN localedef -i en_US -f UTF-8 en_US.UTF-8 # FIXME: due to temporary ipykernel dependency issue, specify ipykernel jupyter # version util jupyter fixes this issue. + +# Pin sphinx to version 1.5.6 and remove the -U option from [pip install -U +# sphinx-rtd-theme]: the -U option would upgrade sphinx to the newest version +# (1.7.1 for now), which breaks the documentation build.
RUN pip install --upgrade pip && \ pip install -U wheel && \ - pip install -U docopt PyYAML sphinx && \ - pip install -U sphinx-rtd-theme==0.1.9 recommonmark + pip install -U docopt PyYAML sphinx==1.5.6 && \ + pip install sphinx-rtd-theme==0.1.9 recommonmark RUN pip install pre-commit 'ipython==5.3.0' && \ pip install 'ipykernel==4.6.0' 'jupyter==1.0.0' && \ diff --git a/benchmark/cluster/vgg16/vgg16_fluid.py b/benchmark/cluster/vgg16/vgg16_fluid.py index 786f224608f7d41c438411de0e09fedbcf2264b8..8b29227cfab2a36d5b9f6d17b837b33da8d2a92e 100644 --- a/benchmark/cluster/vgg16/vgg16_fluid.py +++ b/benchmark/cluster/vgg16/vgg16_fluid.py @@ -18,12 +18,13 @@ import sys import time import numpy as np import paddle.v2 as paddle -import paddle.v2.fluid as fluid -import paddle.v2.fluid.core as core -import paddle.v2.fluid.profiler as profiler +import paddle.fluid as fluid +import paddle.fluid.core as core +import paddle.fluid.profiler as profiler import argparse import functools import os +from paddle.fluid import debuger def str2bool(v): @@ -182,28 +183,27 @@ def main(): start_time = time.time() num_samples = 0 train_pass_acc.reset() - with profiler.profiler("CPU", 'total') as prof: - for batch_id, data in enumerate(train_reader()): - ts = time.time() - img_data = np.array( - map(lambda x: x[0].reshape(data_shape), data)).astype( - "float32") - y_data = np.array(map(lambda x: x[1], data)).astype("int64") - y_data = y_data.reshape([-1, 1]) - - loss, acc, b_size = exe.run( - trainer_prog, - feed={"pixel": img_data, - "label": y_data}, - fetch_list=[avg_cost, batch_acc, batch_size]) - iters += 1 - num_samples += len(data) - train_pass_acc.add(value=acc, weight=b_size) - print( - "Pass = %d, Iters = %d, Loss = %f, Accuracy = %f, Speed = %.2f img/s" - % (pass_id, iters, loss, acc, - len(data) / (time.time() - ts)) - ) # The accuracy is the accumulation of batches, but not the current batch. + for batch_id, data in enumerate(train_reader()): + ts = time.time() + img_data = np.array( + map(lambda x: x[0].reshape(data_shape), data)).astype( + "float32") + y_data = np.array(map(lambda x: x[1], data)).astype("int64") + y_data = y_data.reshape([-1, 1]) + + loss, acc, b_size = exe.run( + trainer_prog, + feed={"pixel": img_data, + "label": y_data}, + fetch_list=[avg_cost, batch_acc, batch_size]) + iters += 1 + num_samples += len(data) + train_pass_acc.add(value=acc, weight=b_size) + print( + "Pass = %d, Iters = %d, Loss = %f, Accuracy = %f, Speed = %.2f img/s" + % (pass_id, iters, loss, acc, + len(data) / (time.time() - ts)) + ) # The accuracy is the accumulation of batches, but not the current batch. 
pass_elapsed = time.time() - start_time pass_train_acc = train_pass_acc.eval() @@ -254,9 +254,7 @@ def main(): pserver_prog = t.get_pserver_program(current_endpoint) pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) - print("starting server side startup") exe.run(pserver_startup) - print("starting parameter server...") exe.run(pserver_prog) elif training_role == "TRAINER": # Parameter initialization diff --git a/benchmark/cluster/vgg16/vgg16_tf.py b/benchmark/cluster/vgg16/vgg16_tf.py index 996df0e314b867ea8de618dfd3977f490fbe8372..2d220478acae46566760209dbc012cff316946aa 100644 --- a/benchmark/cluster/vgg16/vgg16_tf.py +++ b/benchmark/cluster/vgg16/vgg16_tf.py @@ -292,14 +292,18 @@ def run_benchmark(cluster_spec, server): return np.mean(test_accs) config = tf.ConfigProto( - intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) + intra_op_parallelism_threads=1, + inter_op_parallelism_threads=1, + log_device_placement=True) config.gpu_options.allow_growth = True hooks = [tf.train.StopAtStepHook(last_step=1000000)] with tf.train.MonitoredTrainingSession( - master=server.target, is_chief=(args.task_index == 0), - hooks=hooks) as sess: + master=server.target, + is_chief=(args.task_index == 0), + hooks=hooks, + config=config) as sess: iters, num_samples, start_time = 0, 0, 0.0 for pass_id in range(args.num_passes): # train diff --git a/cmake/configure.cmake b/cmake/configure.cmake index 0f76f55270592c5625a9624b33f4c0f82efdc627..f726405c4773994f6ca6509e5218750805b03995 100644 --- a/cmake/configure.cmake +++ b/cmake/configure.cmake @@ -57,11 +57,7 @@ if(NOT WITH_GOLANG) add_definitions(-DPADDLE_WITHOUT_GOLANG) endif(NOT WITH_GOLANG) -if(NOT WITH_GPU) - add_definitions(-DHPPL_STUB_FUNC) - - list(APPEND CMAKE_CXX_SOURCE_FILE_EXTENSIONS cu) -else() +if(WITH_GPU) add_definitions(-DPADDLE_WITH_CUDA) FIND_PACKAGE(CUDA REQUIRED) @@ -84,7 +80,14 @@ else() # Include cuda and cudnn include_directories(${CUDNN_INCLUDE_DIR}) include_directories(${CUDA_TOOLKIT_INCLUDE}) -endif(NOT WITH_GPU) +elseif(WITH_AMD_GPU) + add_definitions(-DPADDLE_WITH_HIP) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D__HIP_PLATFORM_HCC__") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D__HIP_PLATFORM_HCC__") +else() + add_definitions(-DHPPL_STUB_FUNC) + list(APPEND CMAKE_CXX_SOURCE_FILE_EXTENSIONS cu) +endif() if (WITH_MKLML AND MKLML_IOMP_LIB) message(STATUS "Enable Intel OpenMP with ${MKLML_IOMP_LIB}") diff --git a/cmake/external/boost.cmake b/cmake/external/boost.cmake index d9cd264b49d546c35a2c57a82ead83ea654b60ae..10662fc96704685f030a5d76c6857d4bc20a63d9 100644 --- a/cmake/external/boost.cmake +++ b/cmake/external/boost.cmake @@ -24,7 +24,7 @@ set(BOOST_PROJECT "extern_boost") # So we use 1.41.0 here. set(BOOST_VER "1.41.0") set(BOOST_TAR "boost_1_41_0") -set(BOOST_URL "http://paddlepaddledeps.s3-website-us-west-1.amazonaws.com/${BOOST_TAR}.tar.gz") +set(BOOST_URL "http://paddlepaddledeps.bj.bcebos.com/${BOOST_TAR}.tar.gz") set(BOOST_SOURCES_DIR ${THIRD_PARTY_PATH}/boost) set(BOOST_DOWNLOAD_DIR "${BOOST_SOURCES_DIR}/src/${BOOST_PROJECT}") set(BOOST_INCLUDE_DIR "${BOOST_DOWNLOAD_DIR}/${BOOST_TAR}" CACHE PATH "boost include directory." 
FORCE) diff --git a/cmake/external/eigen.cmake b/cmake/external/eigen.cmake index 6a701e076c95372f903a09d35d4208ee73bd584c..73d70c34dce8bedd9e62519c207e5be3dcf7dba3 100644 --- a/cmake/external/eigen.cmake +++ b/cmake/external/eigen.cmake @@ -4,18 +4,33 @@ SET(EIGEN_SOURCE_DIR ${THIRD_PARTY_PATH}/eigen3) SET(EIGEN_INCLUDE_DIR ${EIGEN_SOURCE_DIR}/src/extern_eigen3) INCLUDE_DIRECTORIES(${EIGEN_INCLUDE_DIR}) -ExternalProject_Add( - extern_eigen3 - ${EXTERNAL_PROJECT_LOG_ARGS} - GIT_REPOSITORY "https://github.com/RLovelett/eigen.git" - GIT_TAG 70661066beef694cadf6c304d0d07e0758825c10 - PREFIX ${EIGEN_SOURCE_DIR} - UPDATE_COMMAND "" - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND "" - TEST_COMMAND "" -) +if(WITH_AMD_GPU) + ExternalProject_Add( + extern_eigen3 + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY "https://github.com/sabreshao/hipeigen.git" + GIT_TAG 0cba03ff9f8f9f70bbd92ac5857b031aa8fed6f9 + PREFIX ${EIGEN_SOURCE_DIR} + UPDATE_COMMAND "" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" + ) +else() + ExternalProject_Add( + extern_eigen3 + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY "https://github.com/RLovelett/eigen.git" + GIT_TAG 70661066beef694cadf6c304d0d07e0758825c10 + PREFIX ${EIGEN_SOURCE_DIR} + UPDATE_COMMAND "" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" + ) +endif() if (${CMAKE_VERSION} VERSION_LESS "3.3.0") set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/eigen3_dummy.c) diff --git a/cmake/generic.cmake b/cmake/generic.cmake index 471e3929069d0d28105404b4f0f6baa303faf0e0..c749c97f13649fe8432091414b56f7d0ea8ace8b 100644 --- a/cmake/generic.cmake +++ b/cmake/generic.cmake @@ -317,6 +317,82 @@ function(nv_test TARGET_NAME) endif() endfunction(nv_test) +function(hip_library TARGET_NAME) + if (WITH_AMD_GPU) + set(options STATIC static SHARED shared) + set(oneValueArgs "") + set(multiValueArgs SRCS DEPS) + cmake_parse_arguments(hip_library "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + set(_sources ${hip_library_SRCS}) + HIP_PREPARE_TARGET_COMMANDS(${TARGET_NAME} OBJ _generated_files _source_files ${_sources} HIPCC_OPTIONS ${_hipcc_options} HCC_OPTIONS ${_hcc_options} NVCC_OPTIONS ${_nvcc_options}) + if(_source_files) + list(REMOVE_ITEM _sources ${_source_files}) + endif() + if(hip_library_SRCS) + if (hip_library_SHARED OR hip_library_shared) # build *.so + add_library(${TARGET_NAME} SHARED ${_cmake_options} ${_generated_files} ${_sources}) + set_target_properties(${TARGET_NAME} PROPERTIES LINKER_LANGUAGE HIP) + else() + add_library(${TARGET_NAME} STATIC ${_cmake_options} ${_generated_files} ${_sources}) + set_target_properties(${TARGET_NAME} PROPERTIES LINKER_LANGUAGE CXX) + target_link_libraries(${TARGET_NAME} /opt/rocm/hip/lib/libhip_hcc.so /opt/rocm/hip/lib/libhip_device.a) + find_fluid_modules(${TARGET_NAME}) + endif() + if (hip_library_DEPS) + add_dependencies(${TARGET_NAME} ${hip_library_DEPS}) + target_link_libraries(${TARGET_NAME} ${hip_library_DEPS}) + endif() + # cpplint code style + foreach(source_file ${hip_library_SRCS}) + string(REGEX REPLACE "\\.[^.]*$" "" source ${source_file}) + if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${source}.h) + list(APPEND hip_library_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/${source}.h) + endif() + endforeach() + add_style_check_target(${TARGET_NAME} ${hip_library_SRCS} ${hip_library_HEADERS}) + else(hip_library_SRCS) + if (hip_library_DEPS) + merge_static_libs(${TARGET_NAME} ${hip_library_DEPS}) + else() + message(FATAL "Please specify source file or library 
in hip_library.") + endif() + endif(hip_library_SRCS) + endif() +endfunction(hip_library) + +function(hip_binary TARGET_NAME) + if (WITH_AMD_GPU) + set(options "") + set(oneValueArgs "") + set(multiValueArgs SRCS DEPS) + cmake_parse_arguments(hip_binary "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + hip_add_executable(${TARGET_NAME} ${hip_binary_SRCS}) + if(hip_binary_DEPS) + target_link_libraries(${TARGET_NAME} ${hip_binary_DEPS}) + add_dependencies(${TARGET_NAME} ${hip_binary_DEPS}) + endif() + endif() +endfunction(hip_binary) + +function(hip_test TARGET_NAME) + if (WITH_AMD_GPU AND WITH_TESTING) + set(options "") + set(oneValueArgs "") + set(multiValueArgs SRCS DEPS) + cmake_parse_arguments(hip_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + set(_sources ${hip_test_SRCS}) + HIP_PREPARE_TARGET_COMMANDS(${TARGET_NAME} OBJ _generated_files _source_files ${_sources} HIPCC_OPTIONS ${_hipcc_options} HCC_OPTIONS ${_hcc_options} NVCC_OPTIONS ${_nvcc_options}) + if(_source_files) + list(REMOVE_ITEM _sources ${_source_files}) + endif() + add_executable(${TARGET_NAME} ${_cmake_options} ${_generated_files} ${_sources}) + set_target_properties(${TARGET_NAME} PROPERTIES LINKER_LANGUAGE HIP) + target_link_libraries(${TARGET_NAME} ${hip_test_DEPS} paddle_gtest_main paddle_memory gtest gflags) + add_dependencies(${TARGET_NAME} ${hip_test_DEPS} paddle_gtest_main paddle_memory gtest gflags) + add_test(${TARGET_NAME} ${TARGET_NAME}) + endif() +endfunction(hip_test) + function(go_library TARGET_NAME) set(options STATIC static SHARED shared) set(oneValueArgs "") set(multiValueArgs SRCS DEPS) diff --git a/cmake/hip.cmake b/cmake/hip.cmake new file mode 100644 index 0000000000000000000000000000000000000000..bfe491bd6b7602959d3dd60bd06c67993593cc9b --- /dev/null +++ b/cmake/hip.cmake @@ -0,0 +1,43 @@ +if(NOT WITH_AMD_GPU) + return() +endif() + +include_directories("/opt/rocm/include") +include_directories("/opt/rocm/hipblas/include") +include_directories("/opt/rocm/hiprand/include") +include_directories("/opt/rocm/rocrand/include") +include_directories("/opt/rocm/rccl/include") +include_directories("/opt/rocm/thrust") + +list(APPEND EXTERNAL_LIBS "-L/opt/rocm/lib/ -lhip_hcc") + +set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} -fPIC -DPADDLE_WITH_HIP -std=c++14" ) + +if(WITH_DSO) + set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} -DPADDLE_USE_DSO") +endif(WITH_DSO) + +if(WITH_DOUBLE) + set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} -DPADDLE_TYPE_DOUBLE") +endif(WITH_DOUBLE) + +if(WITH_TESTING) + set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} -DPADDLE_WITH_TESTING") +endif(WITH_TESTING) + +if(CMAKE_BUILD_TYPE STREQUAL "Debug") + list(APPEND HIP_HCC_FLAGS ${CMAKE_CXX_FLAGS_DEBUG}) +elseif(CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo") + list(APPEND HIP_HCC_FLAGS ${CMAKE_CXX_FLAGS_RELWITHDEBINFO}) +elseif(CMAKE_BUILD_TYPE STREQUAL "MinSizeRel") + list(APPEND HIP_HCC_FLAGS ${CMAKE_CXX_FLAGS_MINSIZEREL}) +endif() + +if("x${HCC_HOME}" STREQUAL "x") + set(HCC_HOME "/opt/rocm/hcc") +endif() + +set(CMAKE_HIP_LINK_EXECUTABLE "${HIP_HIPCC_CMAKE_LINKER_HELPER} ${HCC_HOME} <FLAGS> <CMAKE_CXX_LINK_FLAGS> <LINK_FLAGS> <OBJECTS> -o <TARGET> <LINK_LIBRARIES>") +set(CMAKE_HIP_CREATE_SHARED_LIBRARY "${HIP_HIPCC_CMAKE_LINKER_HELPER} ${HCC_HOME} <CMAKE_CXX_LINK_FLAGS> <LINK_FLAGS> <OBJECTS> -o <TARGET> <LINK_LIBRARIES> -shared") +set(CMAKE_HIP_CREATE_SHARED_MODULE "${HIP_HIPCC_CMAKE_LINKER_HELPER} ${HCC_HOME} <CMAKE_CXX_LINK_FLAGS> <LINK_FLAGS> <OBJECTS> -o <TARGET> <LINK_LIBRARIES> -shared") + diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt index da67701ec1af57df742dce105990cffa40f45d7c..a9b27933a5307aabeaf150aeb859e869197229f5 100644 --- a/doc/CMakeLists.txt +++ b/doc/CMakeLists.txt @@ -1 +1,2 @@ add_subdirectory(v2) +add_subdirectory(fluid) diff --git a/doc/design/cpp_data_feeding.md
b/doc/design/cpp_data_feeding.md deleted file mode 100644 index 22c2a925eb8c5e1dd8451e1d3cba261ce471ec51..0000000000000000000000000000000000000000 --- a/doc/design/cpp_data_feeding.md +++ /dev/null @@ -1,78 +0,0 @@ -# C++ Data Feeding - -In training with Paddle V2 API, data feeding wholly dependents on Python code. To get rid of the Python environment and achieve the goal of "wrapping the whole training by a while loop op" in Paddle Fluid, a C++ data feeding mechanism is required. - -In this document we show the fundamental design of C++ data feeding process, which includes the data reading, shuffling and batching. - -## Reader - -A new concept named 'Reader' is introduced. `Reader` is a series of inherited classes which can be hold by our `Variable` and they are used to read or process file data. - - -### `ReaderBase` - -`ReaderBase` is the abstract base class of all readers. It defines the all readers' interfaces. - -```cpp -class ReaderBase { - public: - explicit ReaderBase(const std::vector<DDim>& shapes) : shapes_(shapes) { - PADDLE_ENFORCE(!shapes_.empty()); - } - // Read the next batch of data. (A 'batch' can be only one instance) - // If the next batch doesn't exist, the '*out' will be an empty std::vector. - virtual void ReadNext(std::vector<LoDTensor>* out) = 0; - - // Reinitialize the reader and read the file from the begin. - virtual void ReInit() = 0; - - // Get a certain read in data's shape. - DDim shape(size_t idx) const; - // Get shapes of all read in data. - std::vector<DDim> shapes() const { return shapes_; } - // Set shapes of read in data. - void set_shapes(const std::vector<DDim>& shapes) { shapes_ = shapes; } - - virtual ~ReaderBase() {} - - protected: - std::vector<DDim> shapes_; -}; -``` - -### `FileReader` and `DecoratedReader` - -These two classes are derived from the `ReaderBase` and will further be derived by respective specific readers. That is to say, in our design, there are two kinds of readers: file readers and decorated readers. A file reader reads from a file of some specific format, and yield only one instance of data at a time. e.g. RecordIO reader, jpg reader, .... A decorated reader takes another reader(both file reader and decorated reader are OK) as its 'underlying reader'. It gets data from its underlying reader, does some process on them(shuffling, or batching), then yields processed data. The output data of a decorated reader can be a single instance or a batch. `ShuffleReader` and `BatchReader` are both decorated readers. - -All the readers share exactly the same interfaces defined in `ReaderBase`. So they can be decorated for more than one time: We can **shuffle** a reader's outputs and then **batch** the shuffle outputs. The interface consistency also allows related ops use readers without knowing what they are exactly. - - -### `ReaderHolder` - -Different readers belong to different class types. It leads to a problem: How can we drop them into `Variable`s and fetch them out by a unified method? For example, if a Variable holds a `BatchReader`, we can not get it by the following code: - -```cpp -var->Get<ReaderBase>("batch_reader"); -``` - -we have to write: - -```cpp -var->Get<BatchReader>("batch_reader"); -``` - -This requires each time getting a reader from a variable we must know the reader's type exactly. It is nearly impossible. - -To solve this problem, we introduce `ReaderHolder` as a wrapper. It acts as an empty decorator of `ReaderBase`, which erases reader's type. With `ReaderHolder` we are able to fetch all types of readers by `var->Get<ReaderHolder>("...")` and regard the obtained object as a reader.
- -## Related Operators - -To create and invoke readers, some now ops are introduced: - -### `CreateReaderOp` - -Each reader has its creating op. File readers' creating ops have no input and yield the created file reader as its output. Decorated readers' creating ops take the underlying readers as inputs and then yield new decorated readers. - -### `ReadOp` - -A reader is only a Variable. It cannot trigger the reading process by itself. So we add the `ReadOp` to execute it. A `ReadOp` takes a reader Variable as its input. Each time it runs, it invokes the reader‘s `ReadNext()` function and gets a new batch of data(or only one instance of data, if we use file reader directly). The output data of a reader are in the form of `std::vector<LoDTensor>`, so the `ReadOp` also needs to split the vector and move LoDTensors to their respective output Variables. diff --git a/doc/design/images/duplicate_op.graffle b/doc/design/images/duplicate_op.graffle deleted file mode 100644 index 5979f792e252f028a615729215529c2be42d9165..0000000000000000000000000000000000000000 Binary files a/doc/design/images/duplicate_op.graffle and /dev/null differ diff --git a/doc/design/images/duplicate_op.png b/doc/design/images/duplicate_op.png deleted file mode 100644 index f299c5d37f260a1bb0daec886f0a4ee1c1f31c92..0000000000000000000000000000000000000000 Binary files a/doc/design/images/duplicate_op.png and /dev/null differ diff --git a/doc/design/images/duplicate_op2.graffle b/doc/design/images/duplicate_op2.graffle deleted file mode 100644 index 5cec3bc64dbd44dc99e348485969f29bd128ceb1..0000000000000000000000000000000000000000 Binary files a/doc/design/images/duplicate_op2.graffle and /dev/null differ diff --git a/doc/design/images/duplicate_op2.png b/doc/design/images/duplicate_op2.png deleted file mode 100644 index 21cdd5cabf1b5203e1435a75b57770d2f702fa92..0000000000000000000000000000000000000000 Binary files a/doc/design/images/duplicate_op2.png and /dev/null differ diff --git a/doc/design/images/replica.png b/doc/design/images/replica.png deleted file mode 100644 index ef59e56b01d792a059279e6bb9a29f3db6a59a41..0000000000000000000000000000000000000000 Binary files a/doc/design/images/replica.png and /dev/null differ diff --git a/doc/design/images/two_phase_commit.png b/doc/design/images/two_phase_commit.png deleted file mode 100644 index ef6f7317bd440cc7d9fe08fcbbf2b7a542f99049..0000000000000000000000000000000000000000 Binary files a/doc/design/images/two_phase_commit.png and /dev/null differ diff --git a/doc/fluid/CMakeLists.txt b/doc/fluid/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..cc999f5a8d70a2239ea3b130e9da172d5f681c65 --- /dev/null +++ b/doc/fluid/CMakeLists.txt @@ -0,0 +1,49 @@ +if(NOT DEFINED SPHINX_THEME) + set(SPHINX_THEME default) +endif() + +if(NOT DEFINED SPHINX_THEME_DIR) + set(SPHINX_THEME_DIR) +endif() + +# configured documentation tools and intermediate build results +set(BINARY_BUILD_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_build") + +# Sphinx cache with pickled ReST documents +set(SPHINX_CACHE_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_doctrees") + +# HTML output directory +set(SPHINX_HTML_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/html") + +configure_file( + "${CMAKE_CURRENT_SOURCE_DIR}/../templates/conf.py.en.in" + "${BINARY_BUILD_DIR_EN}/conf.py" + @ONLY) + +sphinx_add_target(paddle_fluid_docs + html + ${BINARY_BUILD_DIR_EN} + ${SPHINX_CACHE_DIR_EN} + ${CMAKE_CURRENT_SOURCE_DIR} + ${SPHINX_HTML_DIR_EN}) + +# configured documentation tools and intermediate build results
+set(BINARY_BUILD_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/_build") + +# Sphinx cache with pickled ReST documents +set(SPHINX_CACHE_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/_doctrees") + +# HTML output directory +set(SPHINX_HTML_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/html") + +configure_file( + "${CMAKE_CURRENT_SOURCE_DIR}/../templates/conf.py.cn.in" + "${BINARY_BUILD_DIR_CN}/conf.py" + @ONLY) + +sphinx_add_target(paddle_fluid_docs_cn + html + ${BINARY_BUILD_DIR_CN} + ${SPHINX_CACHE_DIR_CN} + ${CMAKE_CURRENT_SOURCE_DIR} + ${SPHINX_HTML_DIR_CN}) diff --git a/doc/fluid/build_and_install/index_cn.rst b/doc/fluid/build_and_install/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..9276236f9fd511bde3570a8c88b437119911d60a --- /dev/null +++ b/doc/fluid/build_and_install/index_cn.rst @@ -0,0 +1,2 @@ +安装与使用 +------------ diff --git a/doc/fluid/build_and_install/index_en.rst b/doc/fluid/build_and_install/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..cc1e61a58a026a0f5c3b106875a8a86dc9cba613 --- /dev/null +++ b/doc/fluid/build_and_install/index_en.rst @@ -0,0 +1,2 @@ +Build and Install +----------------- diff --git a/doc/design/images/asgd.gif b/doc/fluid/design/algorithm/images/asgd.gif similarity index 100% rename from doc/design/images/asgd.gif rename to doc/fluid/design/algorithm/images/asgd.gif diff --git a/doc/design/images/theta_star.gif b/doc/fluid/design/algorithm/images/theta_star.gif similarity index 100% rename from doc/design/images/theta_star.gif rename to doc/fluid/design/algorithm/images/theta_star.gif diff --git a/doc/design/parameter_average.md b/doc/fluid/design/algorithm/parameter_average.md similarity index 100% rename from doc/design/parameter_average.md rename to doc/fluid/design/algorithm/parameter_average.md diff --git a/doc/design/build_system/README.md b/doc/fluid/design/concepts/README.md similarity index 100% rename from doc/design/build_system/README.md rename to doc/fluid/design/concepts/README.md diff --git a/doc/design/block.md b/doc/fluid/design/concepts/block.md similarity index 100% rename from doc/design/block.md rename to doc/fluid/design/concepts/block.md diff --git a/doc/fluid/design/concepts/cpp_data_feeding.md b/doc/fluid/design/concepts/cpp_data_feeding.md new file mode 100644 index 0000000000000000000000000000000000000000..8607b40ccbbe01db77afed72c1efa780b520744c --- /dev/null +++ b/doc/fluid/design/concepts/cpp_data_feeding.md @@ -0,0 +1,171 @@ +# C++ Data Feeding + +While using Paddle V2 API for training, data feeding completely depends on the Python code. To get rid of the Python environment and achieve the goal of "wrapping the whole training by a while loop op" in Paddle Fluid, a C++ data feeding mechanism is required. + +In this document, we show the fundamental design of a C++ data feeding process, which includes data reading, shuffling and batching. + +## Overview + +![](images/readers.png) + +## Reader + +In order to handle the above-mentioned problem, a new concept called 'Reader' is introduced. `Reader` is a hierarchy of classes that can be held by our `Variable` and are used to read or process file data. + + +### ReaderBase + +`ReaderBase` is the abstract base class for all readers. It defines the interface shared by all readers. + +```cpp +class ReaderBase { + public: + // Reads the next batch of data.
(A 'batch' can be only one instance) + // If the next batch doesn't exist, it throws an exception. + virtual void ReadNext(std::vector<LoDTensor>* out) = 0; + + // Checks whether the next instance exists. + virtual bool HasNext() = 0; + + // Reinitializes the reader and reads the file from the beginning. + virtual void ReInit() = 0; + + virtual ~ReaderBase(); +}; +``` + +### FileReader + +`FileReader` is derived from `ReaderBase`. It is still an abstract class and is further derived by readers for specific file formats. + +```cpp +class FileReader : public ReaderBase { + public: + explicit FileReader(const std::vector<DDim>& dims); + + void ReadNext(std::vector<LoDTensor>* out) override; + + protected: + virtual void ReadNextImpl(std::vector<LoDTensor>* out) = 0; + + private: + std::vector<DDim> dims_; +}; +``` + +A file reader binds to a single file and reads one data instance at a time. Each type of file reader shall implement its own `ReadNextImpl()`, `HasNext()` and `ReInit()`. + +`ReadNextImpl()` is invoked by `ReadNext()`. Besides invoking `ReadNextImpl()`, `ReadNext()` is also responsible for checking the output, making sure that the shape of each `LoDTensor` in `*out` is consistent with the corresponding one in `dims_`. + +### DecoratedReader + +A decorated reader takes another reader (either a file reader or a decorated reader) as its 'underlying reader'. It gets data from its underlying reader, does some processing on it (shuffling, batching or something else), then yields the processed data. The output data of a decorated reader can be a single instance or a batch. `ShuffleReader` and `BatchReader` are both decorated readers. + +```cpp +class DecoratedReader : public ReaderBase { + public: + explicit DecoratedReader(ReaderBase* reader) : ReaderBase(), reader_(reader) { + PADDLE_ENFORCE_NOT_NULL(reader_); + } + + void ReInit() override { reader_->ReInit(); } + + bool HasNext() override { return reader_->HasNext(); } + + protected: + ReaderBase* reader_; +}; +``` + +Both `FileReader` and `DecoratedReader` share exactly the same interface as defined in `ReaderBase`. So they can be decorated multiple times: we can **shuffle** a reader's outputs and then **batch** the shuffled outputs. The interface consistency also allows related ops to use readers without knowing their underlying type. + +### MultipleReader + +Every `FileReader` binds to a single file and is single-threaded. However, sometimes we need to read data from more than one file. In this case, it's not enough to only have `FileReader` and `DecoratedReader`. + +So `MultipleReader` is introduced. It is also derived from `ReaderBase`. A `MultipleReader` holds several prefetching `FileReaders`, and these readers run concurrently. Another pivotal part of a `MultipleReader` is a buffer channel. The channel collects the data yielded by all prefetching readers and lets subsequent ops or decorated readers fetch data without worrying about how the multiple readers are scheduled. + +![](images/multiple_reader.png) + +This graph shows how a `MultipleReader` works with three prefetching file readers and two GPUs. There is a queue of files which are going to be read. Whenever a prefetching file reader is free (i.e. it has finished reading one file), it fetches a new file from the queue. Each prefetching file reader runs in a separate prefetch thread and dumps its output to the same channel. + +To the subsequent two decorated readers, the `MultipleReader` is **a single reader**. They don't need to care about how the prefetch readers are scheduled; they only need to invoke `MultipleReader::ReadNext()` to get the next data from the buffer channel. A sketch of the decorator pattern described above follows.
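To make the decorator pattern concrete, here is a minimal illustrative sketch. It is not part of this patch: `CountingReader` is a hypothetical example class, and it assumes the `ReaderBase`, `DecoratedReader` and `LoDTensor` types sketched in this document.

```cpp
#include <cstddef>
#include <vector>

// Hypothetical example, not Paddle source: a decorated reader that counts
// how many non-empty batches have been read, while delegating the actual
// reading to its underlying reader.
class CountingReader : public DecoratedReader {
 public:
  explicit CountingReader(ReaderBase* reader)
      : DecoratedReader(reader), count_(0) {}

  void ReadNext(std::vector<LoDTensor>* out) override {
    reader_->ReadNext(out);       // delegate to the underlying reader
    if (!out->empty()) ++count_;  // count only non-empty batches
  }

  size_t count() const { return count_; }

 private:
  size_t count_;
};
```

Because `CountingReader` still satisfies the `ReaderBase` interface, it can itself be wrapped by `ShuffleReader`, `BatchReader`, or any other decorated reader.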
+ +### ReaderHolder + +Different readers belong to different class types. This leads to a problem: how can we drop them into `Variable`s and fetch them out with a unified method? For example, if a Variable holds a `BatchReader`, we can not get it by the following code: + +```cpp +var->Get<ReaderBase>("batch_reader"); +``` + +We would have to write: + +```cpp +var->Get<BatchReader>("batch_reader"); +``` + +This requires us to know the reader's exact type every time we fetch one from a variable, which is nearly impossible. + +To solve this problem, we introduce `ReaderHolder` as a wrapper. It acts as an empty decorator of `ReaderBase`, which hides the reader's type. With `ReaderHolder` we are able to fetch all types of readers by `var->Get<ReaderHolder>("...")` and regard the obtained object as a reader. + +## Related Operators + +To create and invoke readers, some new ops are introduced: + +### CreateReaderOp + +Each reader has its creation op. File readers' creation ops have no input and yield the created file reader as their output. Decorated readers' creation ops take the underlying readers as inputs and then yield new decorated readers. + +However, direct usage of file readers' creation ops is not recommended because a file reader can only read one file via a single thread. Using `OpenFilesOp` is a better choice. + +### OpenFilesOp + +The `OpenFilesOp` is the creation op of `MultipleReader`. It takes no input but requires a list of file names as one of its attributes. The newly created `MultipleReader` then creates its own prefetching readers according to the given file names. + +To make sure that the created prefetching readers match the file formats, we need a name prefix rule to append file format tags to file names, as well as a file reader registry mechanism to map file format tags to their corresponding file readers' constructors. + +### HasNextOp + +`HasNextOp` is used to check whether the next data batch exists via the reader's `HasNext()` interface. + +### ResetOp + +`ResetOp` is used to reset a reader via its `ReInit()` interface. + +### ReadOp + +A reader is only a Variable. It cannot trigger the reading process by itself. So we add the `ReadOp` to execute it. A `ReadOp` takes a reader Variable as its input. Each time it runs, it invokes the reader's `ReadNext()` function and gets a new batch of data (or only one instance of data, if we use a file reader directly). The output data of a reader are in the form of `std::vector<LoDTensor>`, so the `ReadOp` also needs to split the vector and move the LoDTensors to their respective output Variables. + +## Program with Readers + +A `Program` holds readers as its persistable variables. These variables are created by `CreateReaderOp` or `OpenFilesOp`. These ops shall run only once, so they shall be settled in the `startup_program`. `HasNextOp`, `ResetOp` and `ReadOp` are required by the training loop, so they shall be in the `main_program`. + +The ops of a `startup_program` with readers would be like this: + +``` +multiple_reader = open_files_op(...) +batch_reader = create_batch_reader_op(multiple_reader) +double_buffer_reader = create_double_buffer_op(batch_reader) +... (other initializers) +``` + +The forwarding ops of the corresponding `main_program` would be like this: + +``` +while_op { + has_next = has_next_op(double_buffer_reader) + if_else_op(has_next) { + batch_data = read_op(double_buffer_reader) + ...
(subsequent training ops) + } else { + reset_op(double_buffer_reader) + } +} +``` + +Two important considerations for these programs are as follows: + +1. The multiple\_reader is the batch\_reader's underlying reader, and the batch\_reader is the double\_buffer\_reader's underlying reader. `read_op`, `has_next_op` and other reader related ops will only invoke the top-most reader. In this case, it's the double\_buffer\_reader. + +2. All readers exist in both `startup_program` and `main_program`. And they are persistable. diff --git a/doc/design/executor.md b/doc/fluid/design/concepts/executor.md similarity index 100% rename from doc/design/executor.md rename to doc/fluid/design/concepts/executor.md diff --git a/doc/design/functions_operators_layers.md b/doc/fluid/design/concepts/functions_operators_layers.md similarity index 100% rename from doc/design/functions_operators_layers.md rename to doc/fluid/design/concepts/functions_operators_layers.md diff --git a/doc/fluid/design/concepts/images/multiple_reader.png b/doc/fluid/design/concepts/images/multiple_reader.png new file mode 100644 index 0000000000000000000000000000000000000000..b22126b31db4982c13fc3a0827805e6aaf955046 Binary files /dev/null and b/doc/fluid/design/concepts/images/multiple_reader.png differ diff --git a/doc/fluid/design/concepts/images/readers.png b/doc/fluid/design/concepts/images/readers.png new file mode 100644 index 0000000000000000000000000000000000000000..fd59168ce16c9e2a0ef45303c28c997cfd7740be Binary files /dev/null and b/doc/fluid/design/concepts/images/readers.png differ diff --git a/paddle/fluid/framework/lod_tensor.md b/doc/fluid/design/concepts/lod_tensor.md similarity index 100% rename from paddle/fluid/framework/lod_tensor.md rename to doc/fluid/design/concepts/lod_tensor.md diff --git a/doc/design/program.md b/doc/fluid/design/concepts/program.md similarity index 100% rename from doc/design/program.md rename to doc/fluid/design/concepts/program.md diff --git a/doc/design/scope.md b/doc/fluid/design/concepts/scope.md similarity index 100% rename from doc/design/scope.md rename to doc/fluid/design/concepts/scope.md diff --git a/paddle/fluid/framework/tensor.md b/doc/fluid/design/concepts/tensor.md similarity index 100% rename from paddle/fluid/framework/tensor.md rename to doc/fluid/design/concepts/tensor.md diff --git a/doc/design/tensor_array.md b/doc/fluid/design/concepts/tensor_array.md similarity index 100% rename from doc/design/tensor_array.md rename to doc/fluid/design/concepts/tensor_array.md diff --git a/doc/design/var_desc.md b/doc/fluid/design/concepts/var_desc.md similarity index 100% rename from doc/design/var_desc.md rename to doc/fluid/design/concepts/var_desc.md diff --git a/paddle/fluid/framework/variable.md b/doc/fluid/design/concepts/variable.md similarity index 100% rename from paddle/fluid/framework/variable.md rename to doc/fluid/design/concepts/variable.md diff --git a/doc/design/concurrent_programming.md b/doc/fluid/design/concurrent/concurrent_programming.md similarity index 100% rename from doc/design/concurrent_programming.md rename to doc/fluid/design/concurrent/concurrent_programming.md diff --git a/doc/design/csp.md b/doc/fluid/design/concurrent/csp.md similarity index 100% rename from doc/design/csp.md rename to doc/fluid/design/concurrent/csp.md diff --git a/doc/fluid/design/concurrent/images/select_op_workflow.png b/doc/fluid/design/concurrent/images/select_op_workflow.png new file mode 100644 index 
0000000000000000000000000000000000000000..719ed76f9d542d6c4f20c30f27656bb53325aa85 Binary files /dev/null and b/doc/fluid/design/concurrent/images/select_op_workflow.png differ diff --git a/doc/design/parallel_do.md b/doc/fluid/design/concurrent/parallel_do.md similarity index 100% rename from doc/design/parallel_do.md rename to doc/fluid/design/concurrent/parallel_do.md diff --git a/doc/fluid/design/concurrent/select_op.md b/doc/fluid/design/concurrent/select_op.md new file mode 100644 index 0000000000000000000000000000000000000000..52c226bc94a4e8bfc5588705d7f65328840e91cc --- /dev/null +++ b/doc/fluid/design/concurrent/select_op.md @@ -0,0 +1,265 @@ +# select_op Design + +## Introduction + +In golang, the [**select**](https://golang.org/ref/spec#Select_statements) +statement lets a goroutine wait on multiple communication operations at the +same time. The **select** blocks until one of its cases can run, then +executes the case. If multiple cases are ready to run, then one case is +chosen at random to be executed. + +With the introduction of CSP for Paddle, we mimic this behavior by +creating a ***select_op***. + +## How to use it + +The **select_op** is available as a C++ operator. However, most users +will prefer to use the much simpler Python API. + +- **fluid.Select()**: Creates a select operator and adds it to the current +block within the main program. Also creates a sub block and adds it to the +main program. This sub block is used to hold all variables and operators +used by the case statements. + +Within the select block, users can add cases by +calling the **select.case** or **select.default** method. + +- **fluid.Select.case(channel_action, channel, result_variable)**: Represents +a fluid channel send/recv case. This method creates a SelectCase block +guard and adds it to the Select block. The arguments to this method tell +the select which channel operation to listen to. + +- **fluid.Select.default()**: Represents the fluid default case. This default +case is executed if none of the channel send/recv cases are available to +execute. + +**Example:** +``` +ch1 = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR) +quit_ch = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR) + +x = fill_constant(shape=[1], dtype=core.VarDesc.VarType.INT32, value=0) +y = fill_constant(shape=[1], dtype=core.VarDesc.VarType.INT32, value=1) + +while_cond = fill_constant(shape=[1], dtype=core.VarDesc.VarType.BOOL, value=True) +while_op = While(cond=while_cond) + +with while_op.block(): + with fluid.Select() as select: + with select.case(fluid.channel_send, ch1, x): + # Send x, then perform Fibonacci calculation on x and y + x_tmp = fill_constant(shape=[1], dtype=core.VarDesc.VarType.INT32, value=0) + assign(input=x, output=x_tmp) + assign(input=y, output=x) + assign(elementwise_add(x=x_tmp, y=y), output=y) + with select.case(fluid.channel_recv, quit_ch, result2): + # Exit out of While loop + while_false = fill_constant(shape=[1], dtype=core.VarDesc.VarType.BOOL, value=False) + helper = layer_helper.LayerHelper('assign') + helper.append_op( + type='assign', + inputs={'X': [while_false]}, + outputs={'Out': [while_cond]}) +``` + +## How it Works + +### Program Description + +``` +blocks { + idx: 0 + ...
+ // Create "case_to_execute" variable + ops { + outputs { + parameter: "Out" + arguments: "fill_constant_110.tmp_0" + } + type: "fill_constant" + attrs { + name: "force_cpu" + type: BOOLEAN + b: false + } + attrs { + name: "value" + type: FLOAT + f: -1.0 + } + attrs { + name: "shape" + type: INTS + ints: 1 + } + attrs { + name: "dtype" + type: INT + i: 2 + } + } + // Create "select" operator. + // inputs: + // X: All input variables used by operators within the select block + // case_to_execute: Variable filled in by select_op when it determines + // which case to execute. + // + // outputs: + // Out: All output variables referenced by operators within select block. + // + // attrs: + // sub_block: The block id containing the select "cases" + // cases: Serialized list of all cases in the select op. + // Each case is serialized as: ',,,' + // where type is 0 for default, 1 for send, and 2 for receive. + // No channel and values are needed for default cases. + ops { + inputs { + parameter: "X" + arguments: "fill_constant_103.tmp_0" + arguments: "fill_constant_104.tmp_0" + } + inputs { + parameter: "case_to_execute" + arguments: "fill_constant_110.tmp_0" + } + outputs { + parameter: "Out" + arguments: "fill_constant_110.tmp_0" + } + type: "select" + attrs { + name: "sub_block" + type: BLOCK + block_idx: 1 + } + attrs { + name: "cases" + type: STRINGS + strings: "0,1,channel_101,fill_constant_109.tmp_0" + strings: "1,2,channel_102,fill_constant_108.tmp_0" + } + } + ... +} +``` + +The python select API will add the **select_op** to the current block. In addition, it will +iterate through all it's case statements and add any input variables required by case statements +into **X**. It will also create a temp variable called **case_to_execute**. This variable is +filled in by the select_op after it has completed processing the case statements. + +If there are no available cases to execute (ie: all cases are blocked on channel operations, and +there is no default statement), then the select_op will block the current thread. The thread will +unblock once there is a channel operation affecting one of the case statements, at which point, the +**select_op** will set the **case_to_execute** variable to the index of the case to execute. + +Finally the select_op will call executor.run on the **sub_block**. + +``` +blocks { + idx: 1 + parent_idx: 0 + ... + // Fill a tensor with the case index (ie: 0,1,2,3,ect.) + ops { + outputs { + parameter: "Out" + arguments: "fill_constant_111.tmp_0" + } + type: "fill_constant" + attrs { + name: "force_cpu" + type: BOOLEAN + b: false + } + attrs { + name: "value" + type: FLOAT + f: 0.0 + } + attrs { + name: "shape" + type: INTS + ints: 1 + } + attrs { + name: "dtype" + type: INT + i: 2 + } + } + // Create an "equal" operator to compare the case index with the "case_to_execute" + // tensor (which was filled in by the select op). + ops { + inputs { + parameter: "X" + arguments: "fill_constant_111.tmp_0" // case 0 + } + inputs { + parameter: "Y" + arguments: "fill_constant_110.tmp_0" // case_to_execute + } + outputs { + parameter: "Out" + arguments: "equal_0.tmp_0" + } + type: "equal" + attrs { + name: "axis" + type: INT + i: -1 + } + } + // Use the output of the "equal" operator as a condition for the "conditional_block". 
+ // If the condition evaluates to true, then execute the "sub_block" (which represents + // the select case's body) + ops { + inputs { + parameter: "Params" + } + inputs { + parameter: "X" + arguments: "equal_0.tmp_0" + } + outputs { + parameter: "Out" + } + outputs { + parameter: "Scope" + arguments: "_generated_var_0" + } + type: "conditional_block" + attrs { + name: "is_scalar_condition" + type: BOOLEAN + b: true + } + attrs { + name: "sub_block" + type: BLOCK + block_idx: 4 + } + } + ... + // Repeat the above operators for each case statement inside the select body +} + +``` + +Cases are represented by a **conditional_block operator**, whose condition is set as the output of +equal(**case_to_execute**, **case_index**). Since each case index is unique in this sub-block, +only one case will be executed. + +### select_op flow +

+<p align="center"> +<img src="./images/select_op_workflow.png"/> +</p>
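As a concrete illustration of the flow above, the following sketch shows how the case-selection step could pick one ready case uniformly at random, as the introduction describes. This is a hypothetical stand-alone example, not the actual select_op implementation:

```cpp
#include <cstdlib>
#include <vector>

// Hypothetical sketch: given readiness flags for every case, return the
// index of one ready case chosen at random, or -1 when no case is ready
// (the real select_op would then run the default case or block the thread).
int ChooseReadyCase(const std::vector<bool>& ready) {
  std::vector<int> candidates;
  for (int i = 0; i < static_cast<int>(ready.size()); ++i) {
    if (ready[i]) candidates.push_back(i);
  }
  if (candidates.empty()) return -1;
  return candidates[std::rand() % candidates.size()];
}
```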
+ +The select algorithm is inspired by golang's select routine. Please refer to +http://www.tapirgames.com/blog/golang-concurrent-select-implementation for more information. + +## Backward Pass + +TODO diff --git a/doc/design/float16.md b/doc/fluid/design/data_type/float16.md similarity index 100% rename from doc/design/float16.md rename to doc/fluid/design/data_type/float16.md diff --git a/doc/design/fluid_dist/distributed_architecture.md b/doc/fluid/design/dist_train/distributed_architecture.md similarity index 98% rename from doc/design/fluid_dist/distributed_architecture.md rename to doc/fluid/design/dist_train/distributed_architecture.md index 9368c5780dc922953f38bf0f86d9f797a4a8a6fe..a405cb6aaf80b9d2e8a1a9c774ca85cc7e62bbab 100644 --- a/doc/design/fluid_dist/distributed_architecture.md +++ b/doc/fluid/design/dist_train/distributed_architecture.md @@ -1,4 +1,4 @@ -# Design Doc: Distributed Training Architecture +# Design Doc: Fluid Distributed Training Architecture ## Abstract @@ -155,7 +155,7 @@ Cluster environment. `RemoteExecutor.run` sends the `ProgramDesc` and -[TrainingJob](https://github.com/PaddlePaddle/cloud/blob/develop/doc/autoscale/README.md#training-job-resource) +[TrainingJob](https://github.com/PaddlePaddle/cloud/blob/unreleased-tpr/doc/autoscale/README.md#training-job-resource) to a server in the cluster which executes `RemoteExecutor.listen`. This server is responsible to start the final Kubernetes Jobs to run the different role of `ProgramDesc` from `ConfigMap`. diff --git a/doc/fluid/design/dist_train/distributed_lookup_table_design.md b/doc/fluid/design/dist_train/distributed_lookup_table_design.md new file mode 100644 index 0000000000000000000000000000000000000000..e543adf0f97cc6b47415b807d7a1ed1effec9b22 --- /dev/null +++ b/doc/fluid/design/dist_train/distributed_lookup_table_design.md @@ -0,0 +1,128 @@ +## Design Doc: Distributed Lookup Table Operator + +This document designs a lookup table operator for PaddlePaddle, where the +table could be too large to fit in the memory of a single computer. + +## Background + +A lookup table operator is widely used in deep learning for learning the +representation, or the +[*embedding*](http://www.cs.toronto.edu/~fritz/absps/ieee-lre.pdf), of +symbols. + +### The Forward Algorithm + +The forward algorithm of the lookup table is a multiplication of the +input vector x and the lookup table matrix W: + +$$y = x * W$$ + +When x is a sparse vector of symbols, the above multiplication +simplifies into looking up rows in W that correspond to symbols in x, +denoted by W(x). Please be aware that W could be huge and exceed the +available memory, so we'd need a distributed storage service, which supports the +lookup of rows. + +The following figure illustrates the multiplication of x with two +non-zero elements, or say, two symbols, and a lookup table W: + +![lookup table](./src/lookup_table.png) + +### The Backward Algorithm + +The backward algorithm computes W'(x) using W(x). W'(x) has the same +size as W(x) and is much smaller than W. + +To optimize W given W', we can do a simple SGD update: + +$$W = f(W') = \lambda * W'$$ + +or some more sophisticated algorithms that rely on both W' and W: + +$$W = f(W, W')$$ + +The following figure illustrates the backward pass of the lookup +operator: ![lookup table training](./src/lookup_table_training.png) + +## Distributed Storage Service + +The forward algorithm requires a distributed storage service for W. +The backward algorithm prefers that the storage system can apply the +optimization algorithm on W.
The following two sections describe two +solutions -- the former doesn't require that the storage service can +do optimization, the latter does. + +### Storage Service Doesn't Optimize + +In this design, we use highly-optimized distributed storage, e.g., +memcached, as the storage service, and we run the optimization +algorithm on parameter servers of PaddlePaddle. The following figure +illustrates the training process. + +Each trainer runs the forward and backward passes using its local +data: + +1. In the forward pass, when a trainer runs the forward algorithm of a + lookup operator, it retrieves W(x) from the storage service. +1. The trainer computes W'(x) in the backward pass using W(x). + +During the global update process: + +1. Each trainer uploads its W'(x) to parameter servers. +1. The parameter server runs the optimization algorithm, e.g., the + Adam optimization algorithm, which requires that + 1. The parameter server retrieves W(x) from memcached, and + 1. The parameter server pushes $\Delta W(x)=f(W(x), \lambda \sum_j + W'(x))$ to memcached, where $f$ denotes the optimization + algorithm. + +### Storage Service Does Optimize + +This design is very similar to the above one, except that the +optimization algorithm $f$ runs on the storage service. + +- Pro: parameter servers do not retrieve W(x) from the storage + service, thus saving half of the network communication. +- Con: the storage service needs to be able to run the optimization + algorithm. + +## Conclusion + +Let us do the "storage service does not optimize" solution first, at +least as a baseline, because it is easier to use a well-optimized +distributed storage service like memcached. We can do the "storage +service does optimize" solution later or at the same time, which, if +implemented carefully, should have better performance than the former.
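To make the forward and backward formulas above concrete, here is a toy, single-machine sketch. It is an illustration only, with assumed names (`Lookup`, `SgdUpdateRows`) and an assumed dense row-major layout for W; the design above stores W in a distributed service instead:

```cpp
#include <cstddef>
#include <vector>

// Toy sketch of W(x): gather the rows of the N x D table W that correspond
// to the symbol ids of the non-zero elements of the sparse input x.
std::vector<std::vector<float>> Lookup(const std::vector<float>& W, size_t D,
                                       const std::vector<size_t>& ids) {
  std::vector<std::vector<float>> rows;
  for (size_t id : ids) {
    rows.emplace_back(W.begin() + id * D, W.begin() + (id + 1) * D);
  }
  return rows;  // W(x): one embedding row per symbol in x
}

// Toy sketch of the simple SGD case: only the rows that were looked up are
// touched, which is exactly what makes the sparse update cheap.
void SgdUpdateRows(std::vector<float>* W, size_t D,
                   const std::vector<size_t>& ids,
                   const std::vector<std::vector<float>>& grad_rows,
                   float learning_rate) {
  for (size_t i = 0; i < ids.size(); ++i) {
    for (size_t j = 0; j < D; ++j) {
      (*W)[ids[i] * D + j] -= learning_rate * grad_rows[i][j];
    }
  }
}
```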
diff --git a/doc/design/fluid_dist/multi_cpu.md b/doc/fluid/design/dist_train/multi_cpu.md similarity index 100% rename from doc/design/fluid_dist/multi_cpu.md rename to doc/fluid/design/dist_train/multi_cpu.md diff --git a/doc/design/fluid_dist/parameter_server.md b/doc/fluid/design/dist_train/parameter_server.md similarity index 100% rename from doc/design/fluid_dist/parameter_server.md rename to doc/fluid/design/dist_train/parameter_server.md diff --git a/doc/design/fluid_dist/src/compiler.graffle b/doc/fluid/design/dist_train/src/compiler.graffle similarity index 100% rename from doc/design/fluid_dist/src/compiler.graffle rename to doc/fluid/design/dist_train/src/compiler.graffle diff --git a/doc/design/fluid_dist/src/compiler.png b/doc/fluid/design/dist_train/src/compiler.png similarity index 100% rename from doc/design/fluid_dist/src/compiler.png rename to doc/fluid/design/dist_train/src/compiler.png diff --git a/doc/design/fluid_dist/src/dist-graph.graffle b/doc/fluid/design/dist_train/src/dist-graph.graffle similarity index 100% rename from doc/design/fluid_dist/src/dist-graph.graffle rename to doc/fluid/design/dist_train/src/dist-graph.graffle diff --git a/doc/design/fluid_dist/src/dist-graph.png b/doc/fluid/design/dist_train/src/dist-graph.png similarity index 100% rename from doc/design/fluid_dist/src/dist-graph.png rename to doc/fluid/design/dist_train/src/dist-graph.png diff --git a/doc/design/fluid_dist/src/distributed_architecture.graffle b/doc/fluid/design/dist_train/src/distributed_architecture.graffle similarity index 100% rename from doc/design/fluid_dist/src/distributed_architecture.graffle rename to doc/fluid/design/dist_train/src/distributed_architecture.graffle diff --git a/doc/design/fluid_dist/src/distributed_architecture.png b/doc/fluid/design/dist_train/src/distributed_architecture.png similarity index 100% rename from doc/design/fluid_dist/src/distributed_architecture.png rename to doc/fluid/design/dist_train/src/distributed_architecture.png diff --git a/doc/design/fluid_dist/src/local-graph.graffle b/doc/fluid/design/dist_train/src/local-graph.graffle similarity index 100% rename from doc/design/fluid_dist/src/local-graph.graffle rename to doc/fluid/design/dist_train/src/local-graph.graffle diff --git a/doc/design/fluid_dist/src/local-graph.png b/doc/fluid/design/dist_train/src/local-graph.png similarity index 100% rename from doc/design/fluid_dist/src/local-graph.png rename to doc/fluid/design/dist_train/src/local-graph.png diff --git a/doc/design/fluid_dist/src/local_architecture.graffle b/doc/fluid/design/dist_train/src/local_architecture.graffle similarity index 100% rename from doc/design/fluid_dist/src/local_architecture.graffle rename to doc/fluid/design/dist_train/src/local_architecture.graffle diff --git a/doc/design/fluid_dist/src/local_architecture.png b/doc/fluid/design/dist_train/src/local_architecture.png similarity index 100% rename from doc/design/fluid_dist/src/local_architecture.png rename to doc/fluid/design/dist_train/src/local_architecture.png diff --git a/doc/fluid/design/dist_train/src/lookup_table.png b/doc/fluid/design/dist_train/src/lookup_table.png new file mode 100644 index 0000000000000000000000000000000000000000..72dfe3547f731d0d090338afb206b0549dff472e Binary files /dev/null and b/doc/fluid/design/dist_train/src/lookup_table.png differ diff --git a/doc/fluid/design/dist_train/src/lookup_table_training.png b/doc/fluid/design/dist_train/src/lookup_table_training.png new file mode 100644 index 
0000000000000000000000000000000000000000..cc7cc4aeb3b885850fe2f70f19fb84d5873bed1e Binary files /dev/null and b/doc/fluid/design/dist_train/src/lookup_table_training.png differ diff --git a/doc/design/fluid_dist/src/multi-threads.graffle b/doc/fluid/design/dist_train/src/multi-threads.graffle similarity index 100% rename from doc/design/fluid_dist/src/multi-threads.graffle rename to doc/fluid/design/dist_train/src/multi-threads.graffle diff --git a/doc/design/fluid_dist/src/multi-threads/multi-threads@3x.png b/doc/fluid/design/dist_train/src/multi-threads/multi-threads@3x.png similarity index 100% rename from doc/design/fluid_dist/src/multi-threads/multi-threads@3x.png rename to doc/fluid/design/dist_train/src/multi-threads/multi-threads@3x.png diff --git a/doc/design/fluid_dist/src/multi-threads/single-thread@3x.png b/doc/fluid/design/dist_train/src/multi-threads/single-thread@3x.png similarity index 100% rename from doc/design/fluid_dist/src/multi-threads/single-thread@3x.png rename to doc/fluid/design/dist_train/src/multi-threads/single-thread@3x.png diff --git a/doc/design/fluid_dist/src/paddle-compile.graffle b/doc/fluid/design/dist_train/src/paddle-compile.graffle similarity index 100% rename from doc/design/fluid_dist/src/paddle-compile.graffle rename to doc/fluid/design/dist_train/src/paddle-compile.graffle diff --git a/doc/design/fluid_dist/src/paddle-compile.png b/doc/fluid/design/dist_train/src/paddle-compile.png similarity index 100% rename from doc/design/fluid_dist/src/paddle-compile.png rename to doc/fluid/design/dist_train/src/paddle-compile.png diff --git a/doc/design/fluid_dist/src/remote_executor.graffle b/doc/fluid/design/dist_train/src/remote_executor.graffle similarity index 100% rename from doc/design/fluid_dist/src/remote_executor.graffle rename to doc/fluid/design/dist_train/src/remote_executor.graffle diff --git a/doc/design/fluid_dist/src/remote_executor.png b/doc/fluid/design/dist_train/src/remote_executor.png similarity index 100% rename from doc/design/fluid_dist/src/remote_executor.png rename to doc/fluid/design/dist_train/src/remote_executor.png diff --git a/doc/design/fluid_dist/src/sparse_update.graffle b/doc/fluid/design/dist_train/src/sparse_update.graffle similarity index 100% rename from doc/design/fluid_dist/src/sparse_update.graffle rename to doc/fluid/design/dist_train/src/sparse_update.graffle diff --git a/doc/design/fluid_dist/src/sparse_update.png b/doc/fluid/design/dist_train/src/sparse_update.png similarity index 100% rename from doc/design/fluid_dist/src/sparse_update.png rename to doc/fluid/design/dist_train/src/sparse_update.png diff --git a/doc/design/ops/images/2_level_rnn.dot b/doc/fluid/design/dynamic_rnn/2_level_rnn.dot similarity index 100% rename from doc/design/ops/images/2_level_rnn.dot rename to doc/fluid/design/dynamic_rnn/2_level_rnn.dot diff --git a/doc/design/ops/images/2_level_rnn.png b/doc/fluid/design/dynamic_rnn/2_level_rnn.png similarity index 100% rename from doc/design/ops/images/2_level_rnn.png rename to doc/fluid/design/dynamic_rnn/2_level_rnn.png diff --git a/doc/design/ops/images/rnn.dot b/doc/fluid/design/dynamic_rnn/rnn.dot similarity index 100% rename from doc/design/ops/images/rnn.dot rename to doc/fluid/design/dynamic_rnn/rnn.dot diff --git a/doc/design/ops/images/rnn.jpg b/doc/fluid/design/dynamic_rnn/rnn.jpg similarity index 100% rename from doc/design/ops/images/rnn.jpg rename to doc/fluid/design/dynamic_rnn/rnn.jpg diff --git a/doc/design/ops/rnn.md b/doc/fluid/design/dynamic_rnn/rnn.md similarity index 95% 
rename from doc/design/ops/rnn.md rename to doc/fluid/design/dynamic_rnn/rnn.md index 2f4854793fa1f0b02e4dc17b51a48a972be61c06..6f414e5549b149bc88fb252085ff56dbb06730f8 100644 --- a/doc/design/ops/rnn.md +++ b/doc/fluid/design/dynamic_rnn/rnn.md @@ -5,7 +5,7 @@ This document describes the RNN (Recurrent Neural Network) operator and how it i ## RNN Algorithm Implementation

-<img src="./images/rnn.jpg"/> +<img src="./rnn.jpg"/>

The above diagram shows an RNN unrolled into a full network. @@ -22,7 +22,7 @@ There are several important concepts here: There could be local variables defined in each step-net. PaddlePaddle runtime realizes these variables in *step-scopes* which are created for each step.

-<img src="./images/rnn.png"/>
+<img src="./rnn.png"/>
Figure 2 illustrates the RNN's data flow

@@ -49,7 +49,7 @@ or copy the memory value of the previous step to the current ex-memory variable. ### Usage in Python -For more information on Block, please refer to the [design doc](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/block.md). +For more information on Block, please refer to the [design doc](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/block.md). We can define an RNN's step-net using a Block: @@ -93,7 +93,7 @@ For example, we could have a 2-level RNN, where the top level corresponds to par The following figure illustrates feeding in text into the lower level, one sentence at a step, and the feeding in step outputs to the top level. The final top level output is about the whole text.

-<img src="./images/2_level_rnn.png"/> +<img src="./2_level_rnn.png"/>

```python @@ -149,5 +149,5 @@ If the `output_all_steps` is set to False, it will only output the final time st

-<img src="./images/rnn_2level_data.png"/> +<img src="./rnn_2level_data.png"/>

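To make the step-net, memory and step-scope concepts above concrete, a minimal sketch using the `DynamicRNN` API from `paddle.fluid` may help; this example is illustrative rather than part of the renamed design doc, and the data names (`sentence`, the vocabulary size) are assumptions:

```python
# A hedged sketch of the design above: one Block per time step, with an
# ex-memory variable carried between steps. Assumes paddle.fluid's
# DynamicRNN API; `sentence` and the vocabulary size are hypothetical.
import paddle.fluid as fluid

sentence = fluid.layers.data(
    name='sentence', shape=[1], dtype='int64', lod_level=1)
embedding = fluid.layers.embedding(input=sentence, size=[65536, 32])

drnn = fluid.layers.DynamicRNN()
with drnn.block():                     # the step-net: a Block run once per step
    word = drnn.step_input(embedding)  # the current step's input slice
    prev = drnn.memory(shape=[200])    # ex-memory holding the previous state
    hidden = fluid.layers.fc(input=[word, prev], size=200, act='relu')
    drnn.update_memory(prev, hidden)   # hidden becomes the next step's ex-memory
    drnn.output(hidden)

rnn_out = drnn()                                        # all steps' outputs
last = fluid.layers.sequence_last_step(input=rnn_out)   # final time step only
```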
diff --git a/doc/design/ops/images/rnn.png b/doc/fluid/design/dynamic_rnn/rnn.png similarity index 100% rename from doc/design/ops/images/rnn.png rename to doc/fluid/design/dynamic_rnn/rnn.png diff --git a/doc/design/ops/images/rnn_2level_data.dot b/doc/fluid/design/dynamic_rnn/rnn_2level_data.dot similarity index 100% rename from doc/design/ops/images/rnn_2level_data.dot rename to doc/fluid/design/dynamic_rnn/rnn_2level_data.dot diff --git a/doc/design/ops/images/rnn_2level_data.png b/doc/fluid/design/dynamic_rnn/rnn_2level_data.png similarity index 100% rename from doc/design/ops/images/rnn_2level_data.png rename to doc/fluid/design/dynamic_rnn/rnn_2level_data.png diff --git a/paddle/fluid/operators/op_documentation/rnn_design.md b/doc/fluid/design/dynamic_rnn/rnn_design.md similarity index 100% rename from paddle/fluid/operators/op_documentation/rnn_design.md rename to doc/fluid/design/dynamic_rnn/rnn_design.md diff --git a/doc/design/if_else_op.md b/doc/fluid/design/execution/if_else_op.md similarity index 100% rename from doc/design/if_else_op.md rename to doc/fluid/design/execution/if_else_op.md diff --git a/doc/design/switch.md b/doc/fluid/design/execution/switch.md similarity index 100% rename from doc/design/switch.md rename to doc/fluid/design/execution/switch.md diff --git a/doc/fluid/design/index_cn.rst b/doc/fluid/design/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..f1887be6901653d4263d711d78b626d2abfd45c9 --- /dev/null +++ b/doc/fluid/design/index_cn.rst @@ -0,0 +1,2 @@ +设计思想 +------------ diff --git a/doc/fluid/design/index_en.rst b/doc/fluid/design/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..18a4b4122f6e3f0096676f34ffea8a80aa9b6696 --- /dev/null +++ b/doc/fluid/design/index_en.rst @@ -0,0 +1,2 @@ +Design +------------ diff --git a/doc/design/multi_language_interface/00.why_plain_c.md b/doc/fluid/design/interface/00.why_plain_c.md similarity index 100% rename from doc/design/multi_language_interface/00.why_plain_c.md rename to doc/fluid/design/interface/00.why_plain_c.md diff --git a/doc/design/multi_language_interface/01.inference_implementation.md b/doc/fluid/design/interface/01.inference_implementation.md similarity index 100% rename from doc/design/multi_language_interface/01.inference_implementation.md rename to doc/fluid/design/interface/01.inference_implementation.md diff --git a/paddle/fluid/memory/README.md b/doc/fluid/design/memory/README.md similarity index 100% rename from paddle/fluid/memory/README.md rename to doc/fluid/design/memory/README.md diff --git a/doc/design/images/control_flow_graph.png b/doc/fluid/design/memory/images/control_flow_graph.png similarity index 100% rename from doc/design/images/control_flow_graph.png rename to doc/fluid/design/memory/images/control_flow_graph.png diff --git a/doc/design/images/dataflow_equations.png b/doc/fluid/design/memory/images/dataflow_equations.png similarity index 100% rename from doc/design/images/dataflow_equations.png rename to doc/fluid/design/memory/images/dataflow_equations.png diff --git a/doc/design/images/deep_learning.png b/doc/fluid/design/memory/images/deep_learning.png similarity index 100% rename from doc/design/images/deep_learning.png rename to doc/fluid/design/memory/images/deep_learning.png diff --git a/doc/design/memory_optimization.md b/doc/fluid/design/memory/memory_optimization.md similarity index 100% rename from doc/design/memory_optimization.md rename to doc/fluid/design/memory/memory_optimization.md diff --git 
a/doc/design/backward.md b/doc/fluid/design/modules/backward.md similarity index 100% rename from doc/design/backward.md rename to doc/fluid/design/modules/backward.md diff --git a/paddle/fluid/operators/op_documentation/batch_norm_op.md b/doc/fluid/design/modules/batch_norm_op.md similarity index 100% rename from paddle/fluid/operators/op_documentation/batch_norm_op.md rename to doc/fluid/design/modules/batch_norm_op.md diff --git a/doc/design/evaluator.md b/doc/fluid/design/modules/evaluator.md similarity index 100% rename from doc/design/evaluator.md rename to doc/fluid/design/modules/evaluator.md diff --git a/paddle/fluid/operators/images/batch_norm_fork.dot b/doc/fluid/design/modules/images/batch_norm_fork.dot similarity index 100% rename from paddle/fluid/operators/images/batch_norm_fork.dot rename to doc/fluid/design/modules/images/batch_norm_fork.dot diff --git a/paddle/fluid/operators/images/batch_norm_fork.png b/doc/fluid/design/modules/images/batch_norm_fork.png similarity index 100% rename from paddle/fluid/operators/images/batch_norm_fork.png rename to doc/fluid/design/modules/images/batch_norm_fork.png diff --git a/paddle/fluid/operators/images/batch_norm_op_kernel.png b/doc/fluid/design/modules/images/batch_norm_op_kernel.png similarity index 100% rename from paddle/fluid/operators/images/batch_norm_op_kernel.png rename to doc/fluid/design/modules/images/batch_norm_op_kernel.png diff --git a/doc/design/images/feed_forward.png b/doc/fluid/design/modules/images/feed_forward.png similarity index 100% rename from doc/design/images/feed_forward.png rename to doc/fluid/design/modules/images/feed_forward.png diff --git a/doc/design/images/feed_forward_regularized.png b/doc/fluid/design/modules/images/feed_forward_regularized.png similarity index 100% rename from doc/design/images/feed_forward_regularized.png rename to doc/fluid/design/modules/images/feed_forward_regularized.png diff --git a/doc/design/images/l1_regularization.png b/doc/fluid/design/modules/images/l1_regularization.png similarity index 100% rename from doc/design/images/l1_regularization.png rename to doc/fluid/design/modules/images/l1_regularization.png diff --git a/doc/design/images/l2_regularization.png b/doc/fluid/design/modules/images/l2_regularization.png similarity index 100% rename from doc/design/images/l2_regularization.png rename to doc/fluid/design/modules/images/l2_regularization.png diff --git a/doc/design/images/loss_equation.png b/doc/fluid/design/modules/images/loss_equation.png similarity index 100% rename from doc/design/images/loss_equation.png rename to doc/fluid/design/modules/images/loss_equation.png diff --git a/doc/design/infer_var_type.md b/doc/fluid/design/modules/infer_var_type.md similarity index 100% rename from doc/design/infer_var_type.md rename to doc/fluid/design/modules/infer_var_type.md diff --git a/paddle/fluid/operators/op_documentation/net_op_design.md b/doc/fluid/design/modules/net_op_design.md similarity index 100% rename from paddle/fluid/operators/op_documentation/net_op_design.md rename to doc/fluid/design/modules/net_op_design.md diff --git a/doc/design/optimizer.md b/doc/fluid/design/modules/optimizer.md similarity index 100% rename from doc/design/optimizer.md rename to doc/fluid/design/modules/optimizer.md diff --git a/doc/design/prune.md b/doc/fluid/design/modules/prune.md similarity index 100% rename from doc/design/prune.md rename to doc/fluid/design/modules/prune.md diff --git a/doc/design/python_api.md b/doc/fluid/design/modules/python_api.md similarity index 100% 
rename from doc/design/python_api.md rename to doc/fluid/design/modules/python_api.md diff --git a/doc/design/register_grad_op.md b/doc/fluid/design/modules/register_grad_op.md similarity index 100% rename from doc/design/register_grad_op.md rename to doc/fluid/design/modules/register_grad_op.md diff --git a/doc/design/regularization.md b/doc/fluid/design/modules/regularization.md similarity index 100% rename from doc/design/regularization.md rename to doc/fluid/design/modules/regularization.md diff --git a/doc/design/selected_rows.md b/doc/fluid/design/modules/selected_rows.md similarity index 100% rename from doc/design/selected_rows.md rename to doc/fluid/design/modules/selected_rows.md diff --git a/doc/design/api.md b/doc/fluid/design/motivation/api.md similarity index 100% rename from doc/design/api.md rename to doc/fluid/design/motivation/api.md diff --git a/doc/design/fluid-compiler.graffle b/doc/fluid/design/motivation/fluid-compiler.graffle similarity index 100% rename from doc/design/fluid-compiler.graffle rename to doc/fluid/design/motivation/fluid-compiler.graffle diff --git a/doc/design/fluid-compiler.png b/doc/fluid/design/motivation/fluid-compiler.png similarity index 100% rename from doc/design/fluid-compiler.png rename to doc/fluid/design/motivation/fluid-compiler.png diff --git a/doc/design/fluid.md b/doc/fluid/design/motivation/fluid.md similarity index 98% rename from doc/design/fluid.md rename to doc/fluid/design/motivation/fluid.md index f78fa8c1914124f33b9730f918c8887ced4f8d9d..110b7d78bf12ac8328fb3a913e4386e75d63c995 100644 --- a/doc/design/fluid.md +++ b/doc/fluid/design/motivation/fluid.md @@ -103,7 +103,7 @@ In computability theory, a system of data-manipulation rules, such as a programm There are two ways to execute a Fluid program. When a program is executed, it creates a protobuf message [`ProgramDesc`](https://github.com/PaddlePaddle/Paddle/blob/a91efdde6910ce92a78e3aa7157412c4c88d9ee8/paddle/framework/framework.proto#L145) that describes the process and is conceptually like an [abstract syntax tree](https://en.wikipedia.org/wiki/Abstract_syntax_tree). -There is a C++ class [`Executor`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/executor.h), which runs a `ProgramDesc`, similar to how an interpreter runs a Python program. +There is a C++ class [`Executor`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/executor.h), which runs a `ProgramDesc`, similar to how an interpreter runs a Python program. Fluid is moving towards the direction of a compiler, which is explained in [fluid_compiler.md](fluid_compiler.md).
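The hunk above presents `Executor` as an interpreter for a `ProgramDesc`; the Python side of that workflow can be sketched as follows (a minimal sketch using the `paddle.fluid` API seen elsewhere in this patch, with hypothetical shapes):

```python
# Layer functions append ops to a default ProgramDesc; Executor then walks
# that ProgramDesc op by op, like an interpreter walking an AST.
import numpy as np
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.fc(input=x, size=1, act=None)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())  # initialize parameters once

out, = exe.run(fluid.default_main_program(),
               feed={'x': np.random.random((8, 13)).astype('float32')},
               fetch_list=[y])
print(out.shape)  # (8, 1)
```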
diff --git a/doc/design/fluid_compiler.md b/doc/fluid/design/motivation/fluid_compiler.md similarity index 100% rename from doc/design/fluid_compiler.md rename to doc/fluid/design/motivation/fluid_compiler.md diff --git a/doc/design/refactorization.md b/doc/fluid/design/motivation/refactorization.md similarity index 100% rename from doc/design/refactorization.md rename to doc/fluid/design/motivation/refactorization.md diff --git a/doc/design/kernel_hint_design.md b/doc/fluid/design/muti_devices/kernel_hint_design.md similarity index 100% rename from doc/design/kernel_hint_design.md rename to doc/fluid/design/muti_devices/kernel_hint_design.md diff --git a/doc/design/kernel_selection.md b/doc/fluid/design/muti_devices/kernel_selection.md similarity index 100% rename from doc/design/kernel_selection.md rename to doc/fluid/design/muti_devices/kernel_selection.md diff --git a/doc/design/operator_kernel_type.md b/doc/fluid/design/muti_devices/operator_kernel_type.md similarity index 100% rename from doc/design/operator_kernel_type.md rename to doc/fluid/design/muti_devices/operator_kernel_type.md diff --git a/doc/design/speech/deep_speech_2.md b/doc/fluid/design/network/deep_speech_2.md similarity index 98% rename from doc/design/speech/deep_speech_2.md rename to doc/fluid/design/network/deep_speech_2.md index cfdc4d6df04344c70d3334626bd38eca997c31ff..af0c6ef36feba9e0239e7a5f81a8dc9108b2471a 100644 --- a/doc/design/speech/deep_speech_2.md +++ b/doc/fluid/design/network/deep_speech_2.md @@ -94,7 +94,7 @@ The classical DS2 network contains 15 layers (from bottom to top): - **One** CTC-loss layer
-<img src="image/ds2_network.png"/>
+<img src="images/ds2_network.png"/>
Figure 1. Architecture of Deep Speech 2 Network.
@@ -141,7 +141,7 @@ TODO by Assignees ### Beam Search with CTC and LM
-<img src="image/beam_search.png"/>
+<img src="images/beam_search.png"/>
Figure 2. Algorithm for CTC Beam Search Decoder.
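The decoder itself is not spelled out in this hunk; for intuition, a rough sketch of CTC prefix beam search follows, with the language-model and word-count terms of Figure 2 omitted. `probs` (a T x V matrix of per-frame character probabilities), `alphabet`, and the blank index are hypothetical inputs:

```python
# Hedged sketch of CTC prefix beam search (no LM term). Each prefix keeps
# (p_blank, p_non_blank): the probability of emitting it with the last
# frame blank / non-blank, so repeated characters collapse correctly.
from collections import defaultdict

def ctc_beam_search(probs, alphabet, beam_size=10, blank=0):
    beams = {(): (1.0, 0.0)}  # start with the empty prefix, "ending in blank"
    for frame in probs:
        nxt = defaultdict(lambda: (0.0, 0.0))
        for prefix, (pb, pnb) in beams.items():
            for c, p in enumerate(frame):
                if c == blank:
                    b, nb = nxt[prefix]
                    nxt[prefix] = (b + p * (pb + pnb), nb)
                elif prefix and prefix[-1] == c:
                    # a repeat with no blank in between extends the same symbol
                    b, nb = nxt[prefix]
                    nxt[prefix] = (b, nb + p * pnb)
                    # a repeat right after a blank emits a new symbol
                    b, nb = nxt[prefix + (c,)]
                    nxt[prefix + (c,)] = (b, nb + p * pb)
                else:
                    b, nb = nxt[prefix + (c,)]
                    nxt[prefix + (c,)] = (b, nb + p * (pb + pnb))
        ranked = sorted(nxt.items(), key=lambda kv: -(kv[1][0] + kv[1][1]))
        beams = dict(ranked[:beam_size])  # prune to the beam width
    best = max(beams.items(), key=lambda kv: kv[1][0] + kv[1][1])[0]
    return ''.join(alphabet[c] for c in best)
```

A full DS2 decoder would additionally multiply in the LM score and a word-count bonus each time a prefix grows by a space-delimited word, as the figure indicates.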
diff --git a/doc/design/ops/images/LOD-and-shape-changes-during-decoding.jpg b/doc/fluid/design/network/images/LOD-and-shape-changes-during-decoding.jpg similarity index 100% rename from doc/design/ops/images/LOD-and-shape-changes-during-decoding.jpg rename to doc/fluid/design/network/images/LOD-and-shape-changes-during-decoding.jpg diff --git a/doc/design/speech/image/beam_search.png b/doc/fluid/design/network/images/beam_search.png similarity index 100% rename from doc/design/speech/image/beam_search.png rename to doc/fluid/design/network/images/beam_search.png diff --git a/doc/design/speech/image/ds2_network.png b/doc/fluid/design/network/images/ds2_network.png similarity index 100% rename from doc/design/speech/image/ds2_network.png rename to doc/fluid/design/network/images/ds2_network.png diff --git a/doc/design/ops/sequence_decoder.md b/doc/fluid/design/network/sequence_decoder.md similarity index 100% rename from doc/design/ops/sequence_decoder.md rename to doc/fluid/design/network/sequence_decoder.md diff --git a/doc/design/auto_gradient_check.md b/doc/fluid/design/others/auto_gradient_check.md similarity index 100% rename from doc/design/auto_gradient_check.md rename to doc/fluid/design/others/auto_gradient_check.md diff --git a/doc/design/dcgan.png b/doc/fluid/design/others/dcgan.png similarity index 100% rename from doc/design/dcgan.png rename to doc/fluid/design/others/dcgan.png diff --git a/doc/design/gan_api.md b/doc/fluid/design/others/gan_api.md similarity index 100% rename from doc/design/gan_api.md rename to doc/fluid/design/others/gan_api.md diff --git a/doc/design/graph.md b/doc/fluid/design/others/graph.md similarity index 100% rename from doc/design/graph.md rename to doc/fluid/design/others/graph.md diff --git a/doc/design/graph_survey.md b/doc/fluid/design/others/graph_survey.md similarity index 100% rename from doc/design/graph_survey.md rename to doc/fluid/design/others/graph_survey.md diff --git a/doc/design/images/graph_construction_example.bash b/doc/fluid/design/others/images/graph_construction_example.bash similarity index 100% rename from doc/design/images/graph_construction_example.bash rename to doc/fluid/design/others/images/graph_construction_example.bash diff --git a/doc/design/images/graph_construction_example.dot b/doc/fluid/design/others/images/graph_construction_example.dot similarity index 100% rename from doc/design/images/graph_construction_example.dot rename to doc/fluid/design/others/images/graph_construction_example.dot diff --git a/doc/design/images/graph_construction_example_all.png b/doc/fluid/design/others/images/graph_construction_example_all.png similarity index 100% rename from doc/design/images/graph_construction_example_all.png rename to doc/fluid/design/others/images/graph_construction_example_all.png diff --git a/doc/design/images/graph_construction_example_forward_backward.png b/doc/fluid/design/others/images/graph_construction_example_forward_backward.png similarity index 100% rename from doc/design/images/graph_construction_example_forward_backward.png rename to doc/fluid/design/others/images/graph_construction_example_forward_backward.png diff --git a/doc/design/images/graph_construction_example_forward_only.png b/doc/fluid/design/others/images/graph_construction_example_forward_only.png similarity index 100% rename from doc/design/images/graph_construction_example_forward_only.png rename to doc/fluid/design/others/images/graph_construction_example_forward_only.png diff --git a/doc/design/parameters_in_cpp.md 
b/doc/fluid/design/others/parameters_in_cpp.md similarity index 100% rename from doc/design/parameters_in_cpp.md rename to doc/fluid/design/others/parameters_in_cpp.md diff --git a/doc/design/simple_op_design.md b/doc/fluid/design/others/simple_op_design.md similarity index 100% rename from doc/design/simple_op_design.md rename to doc/fluid/design/others/simple_op_design.md diff --git a/doc/design/test.dot b/doc/fluid/design/others/test.dot similarity index 100% rename from doc/design/test.dot rename to doc/fluid/design/others/test.dot diff --git a/doc/design/test.dot.png b/doc/fluid/design/others/test.dot.png similarity index 100% rename from doc/design/test.dot.png rename to doc/fluid/design/others/test.dot.png diff --git a/doc/fluid/dev/api_doc_std_cn.md b/doc/fluid/dev/api_doc_std_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..5596b2653ae6ed9917f77dad08f926bcb1fb3419 --- /dev/null +++ b/doc/fluid/dev/api_doc_std_cn.md @@ -0,0 +1,220 @@ +# API注释撰写标准 + +- [API注释模块](#API注释模块) +- [格式及示例](#格式及示例) +- [完整示例](#完整示例) + + +## API注释模块 + +API文档须包含以下几个模块(排列顺序为文档撰写顺序): + +- Python API Definition + + API的代码定义。 + +- Function Description + + API的功能描述。描述该API的含义、作用或对输入所做的操作,及参考文献和对应链接(如果有),必要时给出公式,并解释公式中关键变量的含义。 + +- Args Description + + API参数介绍。按代码定义中的参数顺序逐个介绍,介绍内容包含数据类型、默认值(如果有)、含义等。 + +- Returns + + API返回值介绍。介绍返回值含义,必要时给出对应的形状。若返回值为包含多个参数的tuple,则按顺序逐个介绍各参数。 + +- Raises(如果有) + + 可能抛出的异常或错误及可能的产生原因,当可能抛出多种异常或错误时应分条列出。 + +- Note(如果有) + + 注意事项。当有多条注意事项时,应分条列出。 + +- Examples + + API的使用示例。 + + +## 格式及示例 + +API文档须使用reStructuredText格式撰写,该格式详情请参考[链接](http://sphinx-doc-zh.readthedocs.io/en/latest/rest.html)。API文档各模块的内容格式及示例如下(以下以fc为例进行说明): + +- Python API Definition + + - 格式: + + [Python API Definition] + + - 示例 + + ``` + fc(input, + size, + num_flatten_dims=1, + param_attr=None, + bias_attr=None, + act=None, + name=None, + main_program=None, + startup_program=None) + ``` + +- Function Description + + - 格式 + + 本模块应包含以下内容(排列顺序为文档撰写顺序): + + [Function Description] + + [Formula] + + [Symbols' Descriptions if necessary] + + [References if necessary] + + - 示例 + + [Function Description] + + ``` + **Fully Connected Layer** + + The fully connected layer can take multiple tensors as its inputs. It + creates a variable called weights for each input tensor, which represents + a fully connected weight matrix from each input unit to each output unit. + The fully connected layer multiplies each input tensor with its corresponding + weight to produce an output Tensor. If multiple input tensors are given, + the results of multiple multiplications will be summed up. If bias_attr is + not None, a bias variable will be created and added to the output. Finally, + if activation is not None, it will be applied to the output as well. + ``` + + [Formula] + + ``` + This process can be formulated as follows: + + .. math:: + + Out = Act({\sum_{i=0}^{N-1}X_iW_i + b}) + ``` + + [Symbols' Descriptions if necessary] + + ``` + In the above equation: + + * :math:`N`: Number of the input. + * :math:`X_i`: The input tensor. + * :math:`W`: The weights created by this layer. + * :math:`b`: The bias parameter created by this layer (if needed). + * :math:`Act`: The activation function. + * :math:`Out`: The output tensor. + ``` + + [References if necessary] + + 因fc没有必要列出的参考文献,故该内容省略。其他情况下需明确给出对应的参考文献和对应链接,以 layer_norm 为例: + + ``` + Refer to `Layer Normalization `_ for more details.
+ ``` + + +- Args Description + + - 格式 + + \[Arg's Name\][(Data Type, Default Value)][Description] + + - 示例 + + fc的部分参数注释如下: + + ``` + Args: + input (Variable|list of Variable): The input tensor(s) of this layer, and the dimension of + the input tensor(s) is at least 2. + param_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for learnable + parameters/weights of this layer. + name (str, default None): The name of this layer. + ``` + +- Returns + + - 格式 + + [Name][Shape] + + - 示例 + + ``` + Returns: + A tensor variable storing the transformation result. + ``` + + 当返回值为包含多个参数的tuple时,应按顺序逐个介绍各参数,以dynamic_lstm为例: + + ``` + Returns: + A tuple containing: + The hidden state of LSTM whose shape is (T X D). + The cell state of LSTM whose shape is (T X D). + ``` + +- Raises + + - 格式 + + [Exception Type][Condition] + + - 示例 + + ``` + Raises: + ValueError: If the rank of the input is less than 2. + ``` + +- Note + + - 格式 + + [Note] + + - 示例 + + fc没有注意事项,故该模块省略不写。如有注意事项应明确给出,当有多条注意事项,须分条列出,以scaled\_dot\_product\_attention为例: + + ``` + Note: + 1. When num_heads > 1, three linear projections are learned respectively + to map input queries, keys and values into queries', keys' and values'. + queries', keys' and values' have the same shapes with queries, keys + and values. + 2. When num_heads == 1, scaled_dot_product_attention has no learnable + parameters. + ``` + +- Examples + + - 格式 + + \[Python Code Snippet] + + - 示例 + + ``` + Examples: + .. code-block:: python + + data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") + fc = fluid.layers.fc(input=data, size=1000, act="tanh") + ``` + +## 完整示例 + +fc 的完整注释见[示例](src/fc.py)。 diff --git a/doc/design/ci_build_whl.png b/doc/fluid/dev/ci_build_whl.png similarity index 100% rename from doc/design/ci_build_whl.png rename to doc/fluid/dev/ci_build_whl.png diff --git a/doc/fluid/dev/index_cn.rst b/doc/fluid/dev/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..e1edf079fa0f85eb7f6709fd945fffae88625d01 --- /dev/null +++ b/doc/fluid/dev/index_cn.rst @@ -0,0 +1,2 @@ +开发标准 +------------ diff --git a/doc/fluid/dev/index_en.rst b/doc/fluid/dev/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..faf9dfcd315fddc4774c3717b41086fa6c6bf85a --- /dev/null +++ b/doc/fluid/dev/index_en.rst @@ -0,0 +1,4 @@ +Development +------------ + +This is the Development page diff --git a/paddle/fluid/operators/op_documentation/name_convention.md b/doc/fluid/dev/name_convention.md similarity index 100% rename from paddle/fluid/operators/op_documentation/name_convention.md rename to doc/fluid/dev/name_convention.md diff --git a/paddle/fluid/operators/op_documentation/op_markdown_format.md b/doc/fluid/dev/op_markdown_format.md similarity index 100% rename from paddle/fluid/operators/op_documentation/op_markdown_format.md rename to doc/fluid/dev/op_markdown_format.md diff --git a/doc/design/releasing_process.md b/doc/fluid/dev/releasing_process.md similarity index 100% rename from doc/design/releasing_process.md rename to doc/fluid/dev/releasing_process.md diff --git a/doc/fluid/dev/src/fc.py b/doc/fluid/dev/src/fc.py new file mode 100644 index 0000000000000000000000000000000000000000..3b074821cc2276a29b2a8639e82199fcf4d72020 --- /dev/null +++ b/doc/fluid/dev/src/fc.py @@ -0,0 +1,81 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +def fc(input, + size, + num_flatten_dims=1, + param_attr=None, + bias_attr=None, + act=None, + name=None): + """ + **Fully Connected Layer** + + The fully connected layer can take multiple tensors as its inputs. It + creates a variable called weights for each input tensor, which represents + a fully connected weight matrix from each input unit to each output unit. + The fully connected layer multiplies each input tensor with its corresponding + weight to produce an output Tensor. If multiple input tensors are given, + the results of multiple multiplications will be summed up. If bias_attr is + not None, a bias variable will be created and added to the output. Finally, + if activation is not None, it will be applied to the output as well. + + This process can be formulated as follows: + + .. math:: + + Out = Act({\sum_{i=0}^{N-1}X_iW_i + b}) + + In the above equation: + + * :math:`N`: Number of the input. + * :math:`X_i`: The input tensor. + * :math:`W`: The weights created by this layer. + * :math:`b`: The bias parameter created by this layer (if needed). + * :math:`Act`: The activation function. + * :math:`Out`: The output tensor. + + Args: + input (Variable|list of Variable): The input tensor(s) of this layer, and the dimension of + the input tensor(s) is at least 2. + size(int): The number of output units in this layer. + num_flatten_dims (int, default 1): The fc layer can accept an input tensor with more than + two dimensions. If this happens, the multidimensional tensor will first be flattened + into a 2-dimensional matrix. The parameter `num_flatten_dims` determines how the input + tensor is flattened: the first `num_flatten_dims` (inclusive, index starts from 1) + dimensions will be flattened to form the first dimension of the final matrix (height of + the matrix), and the rest `rank(X) - num_flatten_dims` dimensions are flattened to + form the second dimension of the final matrix (width of the matrix). For example, suppose + `X` is a 5-dimensional tensor with a shape [2, 3, 4, 5, 6], and `num_flatten_dims` = 3. + Then, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. + param_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for learnable + parameters/weights of this layer. + bias_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for the bias + of this layer. If it is set to None, no bias will be added to the output units. + act (str, default None): Activation to be applied to the output of this layer. + name (str, default None): The name of this layer. + + Returns: + A tensor variable storing the transformation result. + + Raises: + ValueError: If the rank of the input tensor is less than 2. + + Examples: + ..
code-block:: python + + data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") + fc = fluid.layers.fc(input=data, size=1000, act="tanh") + """ diff --git a/doc/design/support_new_device.md b/doc/fluid/dev/support_new_device.md similarity index 100% rename from doc/design/support_new_device.md rename to doc/fluid/dev/support_new_device.md diff --git a/doc/fluid/faq/index_cn.rst b/doc/fluid/faq/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..395c1109891b5a00eab6f0b44d855658def7fdd6 --- /dev/null +++ b/doc/fluid/faq/index_cn.rst @@ -0,0 +1,2 @@ +FAQ +------------ diff --git a/doc/fluid/faq/index_en.rst b/doc/fluid/faq/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..395c1109891b5a00eab6f0b44d855658def7fdd6 --- /dev/null +++ b/doc/fluid/faq/index_en.rst @@ -0,0 +1,2 @@ +FAQ +------------ diff --git a/doc/design/reader/README.md b/doc/fluid/getstarted/concepts/reader/README.md similarity index 100% rename from doc/design/reader/README.md rename to doc/fluid/getstarted/concepts/reader/README.md diff --git a/doc/design/model_format.md b/doc/fluid/getstarted/concepts/save_model/model_format.md similarity index 100% rename from doc/design/model_format.md rename to doc/fluid/getstarted/concepts/save_model/model_format.md diff --git a/doc/fluid/getstarted/index_cn.rst b/doc/fluid/getstarted/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..c4d8525f23ee18cb7f41ab2f0d148fc1dcc852b2 --- /dev/null +++ b/doc/fluid/getstarted/index_cn.rst @@ -0,0 +1,4 @@ +新手入门 +------------ + +新手入门 diff --git a/doc/fluid/getstarted/index_en.rst b/doc/fluid/getstarted/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..a4efd05e2fd94ac0e2cbbc8603e6b0261b7e787f --- /dev/null +++ b/doc/fluid/getstarted/index_en.rst @@ -0,0 +1,4 @@ +GET STARTED +------------ + +This is the get started page diff --git a/doc/fluid/howto/cluster/fluid_cluster_train_cn.md b/doc/fluid/howto/cluster/fluid_cluster_train_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..1b6f767869aaa800c122c8e7a06a1413e48e10e0 --- /dev/null +++ b/doc/fluid/howto/cluster/fluid_cluster_train_cn.md @@ -0,0 +1,145 @@ +# Fluid 分布式版本使用指南 +本篇文章将说明如何在PaddlePaddle Fluid版本下进行分布式训练的配置和执行,以及将单机训练脚本改造成支持集群训练的版本。 + +## 准备工作 +* 可用的集群 + + 包含一个或多个计算节点的集群,每一个节点都能够执行PaddlePaddle的训练任务且拥有唯一的IP地址,集群内的所有计算节点可以通过网络相互通信。 +* 安装PaddlePaddle Fluid with Distribution版本 + + 所有的计算节点上均需要安装分布式版本的PaddlePaddle, 在用于GPU等设备的机器上还需要额外安装好相应的驱动程序和CUDA的库。 + + **注意:**当前对外提供的PaddlePaddle版本并不支持分布式,需要通过源码重新编译。编译和安装方法参见[编译和安装指南](http://www.paddlepaddle.org/docs/develop/documentation/en/getstarted/build_and_install/index_en.html)。 + cmake编译命令中需要将WITH_DISTRIBUTE设置为ON,下面是一个cmake编译指令示例: +``` bash +cmake ..
-DWITH_DOC=OFF -DWITH_GPU=OFF -DWITH_DISTRIBUTE=ON -DWITH_SWIG_PY=ON -DWITH_PYTHON=ON +``` + +## 更新训练脚本 +这里,我们以[Deep Learning 101](http://www.paddlepaddle.org/docs/develop/book/01.fit_a_line/index.html)课程中的第一章 fit a line 为例,描述如何将单机训练脚本改造成支持集群训练的版本。 +### 单机训练脚本示例 +```python +import paddle.v2 as paddle +import paddle.fluid as fluid + +x = fluid.layers.data(name='x', shape=[13], dtype='float32') +y_predict = fluid.layers.fc(input=x, size=1, act=None) +y = fluid.layers.data(name='y', shape=[1], dtype='float32') + +cost = fluid.layers.square_error_cost(input=y_predict, label=y) +avg_cost = fluid.layers.mean(x=cost) + +sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) +sgd_optimizer.minimize(avg_cost) + +BATCH_SIZE = 20 + +train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.uci_housing.train(), buf_size=500), + batch_size=BATCH_SIZE) + +place = fluid.CPUPlace() +feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) +exe = fluid.Executor(place) + +exe.run(fluid.default_startup_program()) + +PASS_NUM = 100 +for pass_id in range(PASS_NUM): + fluid.io.save_persistables(exe, "./fit_a_line.model/") + fluid.io.load_persistables(exe, "./fit_a_line.model/") + for data in train_reader(): + avg_loss_value, = exe.run(fluid.default_main_program(), + feed=feeder.feed(data), + fetch_list=[avg_cost]) + + if avg_loss_value[0] < 10.0: + exit(0) # if avg cost less than 10.0, we think our code is good. +exit(1) +``` + +我们创建了一个简单的全连接神经网络程序,并且通过Fluid的Executor执行了100次迭代,现在我们需要将该单机版本的程序更新为分布式版本的程序。 +### 介绍Parameter Server +在非分布式版本的训练脚本中,只存在Trainer一种角色,它不仅处理常规的计算任务,也处理参数相关的计算、保存和优化任务。在分布式版本的训练过程中,由于存在多个Trainer节点进行同样的数据计算任务,因此需要有一个中心化的节点来统一处理参数相关的保存和分配。在PaddlePaddle中,我们称这样的节点为[Parameter Server](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/dist_train/parameter_server.md) + +**因此,在分布式的Fluid环境中,我们有两个角色需要创建,分别是Parameter Server和Trainer。** + +### 分布式训练 +Fluid专门提供了工具[Distributed Transpiler](https://github.com/PaddlePaddle/Paddle/blob/ba65d54d9d3b41cd3c5171b00f476d4e60133ddb/doc/fluid/design/dist_train/distributed_architecture.md#distributed-transpiler)用于将单机版的训练程序转换为分布式版本的训练程序。工具背后的理念是找出程序的优化算子和梯度参数,将它们分隔为两部分,通过send/recv 操作算子进行连接,优化算子和梯度参数可以在优化器的minimize函数的返回值中获取到。 +```python +optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) +``` +将Distributed Transpiler、优化算子和梯度函数放在一个代码中如下: +```python +... #define the program, cost, and create sgd optimizer + +optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) #get optimize OPs and gradient parameters + +t = fluid.DistributeTranspiler() # create the transpiler instance +# slice the program into 2 pieces with optimizer_ops and gradient parameters list, as well as pserver_endpoints, which is a comma separated list of [IP:PORT] and number of trainers +t.transpile(optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2) + +... #create executor + +# in pserver, run this +#current_endpoint here means current pserver IP:PORT you wish to run on +pserver_prog = t.get_pserver_program(current_endpoint) +pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) +exe.run(pserver_startup) +exe.run(pserver_prog) + +# in trainer, run this +...
# define data reader +exe.run(fluid.default_startup_program()) +for pass_id in range(100): + for data in train_reader(): + exe.run(t.get_trainer_program()) +``` +### 分布式训练脚本运行说明 +分布式任务的运行需要将表格中说明的多个参数进行赋值: + +| 参数名 | 值类型 | 说明 | 示例 | +|:-------------|:------|:---------------------------------------|:-------------| +| trainer_id | int | 当前训练节点的ID,训练节点ID编号为0 - n-1, n为trainers的值 | 0/1/2/3 | +| pservers | str | parameter server 列表 | 127.0.0.1:6710,127.0.0.1:6711 | +| trainers | int | 训练节点的总个数,>0的数字 | 4 | +| server_endpoint | str | 当前所起的服务节点的IP:PORT | 127.0.0.1:8789 | +| training_role | str | 节点角色, TRAINER/PSERVER | PSERVER | + +**注意:** ```training_role```是用来区分当前所起服务的角色的,用于训练程序中,用户可根据需要自行定义,其他参数为fluid.DistributeTranspiler的transpile函数所需要,需要在调用函数前进行定义,样例如下: + +```python +t = fluid.DistributeTranspiler() +t.transpile( + optimize_ops, + params_grads, + trainer_id, + pservers=pserver, + trainers=trainers) +if training_role == "PSERVER": + pserver_prog = t.get_pserver_program(server_endpoint) + pserver_startup = t.get_startup_program(server_endpoint, pserver_prog) +``` + +### Demo +完整的demo代码位于Fluid的test目录下的[book](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/book/test_fit_a_line.py)中。 + +第一步,进入demo代码所在目录: +```bash +cd /paddle/python/paddle/fluid/tests/book +``` + +第二步,启动Parameter Server: +```bash +PADDLE_INIT_PORT=6174 PADDLE_INIT_PSERVERS=192.168.1.2 TRAINERS=2 POD_IP=192.168.1.2 PADDLE_INIT_TRAINER_ID=1 TRAINING_ROLE=PSERVER python test_fit_a_line.py +``` +执行命令后请等待出现提示: ```Server listening on 192.168.1.2:6174 ```, 表示Parameter Server已经正常启动。 + +第三步,启动Trainer: +```bash +PADDLE_INIT_PORT=6174 PADDLE_INIT_PSERVERS=192.168.1.3 TRAINERS=2 POD_IP=192.168.1.3 PADDLE_INIT_TRAINER_ID=1 TRAINING_ROLE=TRAINER python test_fit_a_line.py +``` +由于我们定义的Trainer的数量是2个,因此需要在另外一个计算节点上再启动一个Trainer。 + +现在我们就启动了一个包含一个Parameter Server和两个Trainer的分布式训练任务。 diff --git a/doc/fluid/howto/index_cn.rst b/doc/fluid/howto/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a92abad0c56a4fd821f9a6b9f4f5909504c8aaf1 --- /dev/null +++ b/doc/fluid/howto/index_cn.rst @@ -0,0 +1,2 @@ +进阶使用 +------------ diff --git a/doc/fluid/howto/index_en.rst b/doc/fluid/howto/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..06036bdce554a96443ea1fa47c15f7670ea6089d --- /dev/null +++ b/doc/fluid/howto/index_en.rst @@ -0,0 +1,4 @@ +HOW TO +------------ + +This is the how-to page diff --git a/doc/design/error_clip.md b/doc/fluid/howto/performance/error_clip.md similarity index 100% rename from doc/design/error_clip.md rename to doc/fluid/howto/performance/error_clip.md diff --git a/doc/design/images/profiler.png b/doc/fluid/howto/performance/images/profiler.png similarity index 100% rename from doc/design/images/profiler.png rename to doc/fluid/howto/performance/images/profiler.png diff --git a/doc/design/profiler.md b/doc/fluid/howto/performance/profiler.md similarity index 100% rename from doc/design/profiler.md rename to doc/fluid/howto/performance/profiler.md diff --git a/doc/design/images/multigpu_allreduce.graffle b/doc/fluid/howto/third_party/images/multigpu_allreduce.graffle similarity index 100% rename from doc/design/images/multigpu_allreduce.graffle rename to doc/fluid/howto/third_party/images/multigpu_allreduce.graffle diff --git a/doc/design/images/multigpu_allreduce.png b/doc/fluid/howto/third_party/images/multigpu_allreduce.png similarity index 100% rename from doc/design/images/multigpu_allreduce.png rename to
doc/fluid/howto/third_party/images/multigpu_allreduce.png diff --git a/doc/design/images/multigpu_before_convert.graffle b/doc/fluid/howto/third_party/images/multigpu_before_convert.graffle similarity index 100% rename from doc/design/images/multigpu_before_convert.graffle rename to doc/fluid/howto/third_party/images/multigpu_before_convert.graffle diff --git a/doc/design/images/multigpu_before_convert.png b/doc/fluid/howto/third_party/images/multigpu_before_convert.png similarity index 100% rename from doc/design/images/multigpu_before_convert.png rename to doc/fluid/howto/third_party/images/multigpu_before_convert.png diff --git a/doc/design/mkl/mkldnn_fluid.md b/doc/fluid/howto/third_party/mkldnn_fluid.md similarity index 100% rename from doc/design/mkl/mkldnn_fluid.md rename to doc/fluid/howto/third_party/mkldnn_fluid.md diff --git a/doc/design/paddle_nccl.md b/doc/fluid/howto/third_party/paddle_nccl.md similarity index 100% rename from doc/design/paddle_nccl.md rename to doc/fluid/howto/third_party/paddle_nccl.md diff --git a/doc/fluid/index_cn.rst b/doc/fluid/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..be3bed4393a7346d4f2a53e2c7409ee7165fb5b6 --- /dev/null +++ b/doc/fluid/index_cn.rst @@ -0,0 +1,12 @@ + PaddlePaddle Fluid +========================== + +.. toctree:: + :maxdepth: 1 + + getstarted/index_cn.rst + design/index_cn.rst + build_and_install/index_cn.rst + howto/index_cn.rst + dev/index_cn.rst + faq/index_cn.rst diff --git a/doc/fluid/index_en.rst b/doc/fluid/index_en.rst new file mode 100644 index 0000000000000000000000000000000000000000..87c831420a57b4b9ce77ecf44f7f4d0feec833a6 --- /dev/null +++ b/doc/fluid/index_en.rst @@ -0,0 +1,12 @@ + PaddlePaddle Fluid +========================== + +.. toctree:: + :maxdepth: 1 + + getstarted/index_en.rst + design/index_en.rst + build_and_install/index_en.rst + howto/index_en.rst + dev/index_en.rst + faq/index_en.rst diff --git a/doc/v2/api/fluid/optimizer.rst b/doc/v2/api/fluid/optimizer.rst index 9b165f870459b4f9ef2efe24f5604a3fcb96f7f3..2f820595c35c2bccd6a5c8a20c60d796c04c8e97 100644 --- a/doc/v2/api/fluid/optimizer.rst +++ b/doc/v2/api/fluid/optimizer.rst @@ -47,3 +47,10 @@ DecayedAdagrad :members: :noindex: +Adadelta +-------------- + +.. 
autoclass:: paddle.fluid.optimizer.AdadeltaOptimizer + :members: + :noindex: + diff --git a/doc/design/cluster_train/README.md b/doc/v2/design/cluster_train/README.md similarity index 100% rename from doc/design/cluster_train/README.md rename to doc/v2/design/cluster_train/README.md diff --git a/doc/design/cluster_train/checkpointing.md b/doc/v2/design/cluster_train/checkpointing.md similarity index 100% rename from doc/design/cluster_train/checkpointing.md rename to doc/v2/design/cluster_train/checkpointing.md diff --git a/doc/design/cluster_train/data_dispatch.md b/doc/v2/design/cluster_train/data_dispatch.md similarity index 100% rename from doc/design/cluster_train/data_dispatch.md rename to doc/v2/design/cluster_train/data_dispatch.md diff --git a/doc/design/cluster_train/large_model_dist_train.md b/doc/v2/design/cluster_train/large_model_dist_train.md similarity index 100% rename from doc/design/cluster_train/large_model_dist_train.md rename to doc/v2/design/cluster_train/large_model_dist_train.md diff --git a/doc/design/cluster_train/master_server.md b/doc/v2/design/cluster_train/master_server.md similarity index 100% rename from doc/design/cluster_train/master_server.md rename to doc/v2/design/cluster_train/master_server.md diff --git a/doc/design/cluster_train/pserver_client.md b/doc/v2/design/cluster_train/pserver_client.md similarity index 100% rename from doc/design/cluster_train/pserver_client.md rename to doc/v2/design/cluster_train/pserver_client.md diff --git a/doc/design/cluster_train/remote_parameter_updater.md b/doc/v2/design/cluster_train/remote_parameter_updater.md similarity index 100% rename from doc/design/cluster_train/remote_parameter_updater.md rename to doc/v2/design/cluster_train/remote_parameter_updater.md diff --git a/doc/design/cluster_train/save_model.md b/doc/v2/design/cluster_train/save_model.md similarity index 100% rename from doc/design/cluster_train/save_model.md rename to doc/v2/design/cluster_train/save_model.md diff --git a/doc/design/cluster_train/src/checkpointing.png b/doc/v2/design/cluster_train/src/checkpointing.png similarity index 100% rename from doc/design/cluster_train/src/checkpointing.png rename to doc/v2/design/cluster_train/src/checkpointing.png diff --git a/doc/design/cluster_train/src/data_dispatch.png b/doc/v2/design/cluster_train/src/data_dispatch.png similarity index 100% rename from doc/design/cluster_train/src/data_dispatch.png rename to doc/v2/design/cluster_train/src/data_dispatch.png diff --git a/doc/design/cluster_train/src/dataset.graffle b/doc/v2/design/cluster_train/src/dataset.graffle similarity index 100% rename from doc/design/cluster_train/src/dataset.graffle rename to doc/v2/design/cluster_train/src/dataset.graffle diff --git a/doc/design/cluster_train/src/dataset.png b/doc/v2/design/cluster_train/src/dataset.png similarity index 100% rename from doc/design/cluster_train/src/dataset.png rename to doc/v2/design/cluster_train/src/dataset.png diff --git a/doc/design/cluster_train/src/file_storage.graffle b/doc/v2/design/cluster_train/src/file_storage.graffle similarity index 100% rename from doc/design/cluster_train/src/file_storage.graffle rename to doc/v2/design/cluster_train/src/file_storage.graffle diff --git a/doc/design/cluster_train/src/file_storage.png b/doc/v2/design/cluster_train/src/file_storage.png similarity index 100% rename from doc/design/cluster_train/src/file_storage.png rename to doc/v2/design/cluster_train/src/file_storage.png diff --git a/doc/design/cluster_train/src/init_lock.graffle 
b/doc/v2/design/cluster_train/src/init_lock.graffle similarity index 100% rename from doc/design/cluster_train/src/init_lock.graffle rename to doc/v2/design/cluster_train/src/init_lock.graffle diff --git a/doc/design/cluster_train/src/init_lock.png b/doc/v2/design/cluster_train/src/init_lock.png similarity index 100% rename from doc/design/cluster_train/src/init_lock.png rename to doc/v2/design/cluster_train/src/init_lock.png diff --git a/doc/design/cluster_train/src/paddle-cloud-in-data-center.png b/doc/v2/design/cluster_train/src/paddle-cloud-in-data-center.png similarity index 100% rename from doc/design/cluster_train/src/paddle-cloud-in-data-center.png rename to doc/v2/design/cluster_train/src/paddle-cloud-in-data-center.png diff --git a/doc/design/cluster_train/src/paddle-etcd.graffle b/doc/v2/design/cluster_train/src/paddle-etcd.graffle similarity index 100% rename from doc/design/cluster_train/src/paddle-etcd.graffle rename to doc/v2/design/cluster_train/src/paddle-etcd.graffle diff --git a/doc/design/cluster_train/src/paddle-etcd.png b/doc/v2/design/cluster_train/src/paddle-etcd.png similarity index 100% rename from doc/design/cluster_train/src/paddle-etcd.png rename to doc/v2/design/cluster_train/src/paddle-etcd.png diff --git a/doc/design/cluster_train/src/paddle-model-sharding.graffle b/doc/v2/design/cluster_train/src/paddle-model-sharding.graffle similarity index 100% rename from doc/design/cluster_train/src/paddle-model-sharding.graffle rename to doc/v2/design/cluster_train/src/paddle-model-sharding.graffle diff --git a/doc/design/cluster_train/src/paddle-model-sharding.png b/doc/v2/design/cluster_train/src/paddle-model-sharding.png similarity index 100% rename from doc/design/cluster_train/src/paddle-model-sharding.png rename to doc/v2/design/cluster_train/src/paddle-model-sharding.png diff --git a/doc/design/cluster_train/src/paddle-ps-0.png b/doc/v2/design/cluster_train/src/paddle-ps-0.png similarity index 100% rename from doc/design/cluster_train/src/paddle-ps-0.png rename to doc/v2/design/cluster_train/src/paddle-ps-0.png diff --git a/doc/design/cluster_train/src/paddle-ps-1.png b/doc/v2/design/cluster_train/src/paddle-ps-1.png similarity index 100% rename from doc/design/cluster_train/src/paddle-ps-1.png rename to doc/v2/design/cluster_train/src/paddle-ps-1.png diff --git a/doc/design/cluster_train/src/paddle-ps.graffle b/doc/v2/design/cluster_train/src/paddle-ps.graffle similarity index 100% rename from doc/design/cluster_train/src/paddle-ps.graffle rename to doc/v2/design/cluster_train/src/paddle-ps.graffle diff --git a/doc/design/cluster_train/src/paddle-task-queues.graffle b/doc/v2/design/cluster_train/src/paddle-task-queues.graffle similarity index 100% rename from doc/design/cluster_train/src/paddle-task-queues.graffle rename to doc/v2/design/cluster_train/src/paddle-task-queues.graffle diff --git a/doc/design/cluster_train/src/paddle-task-queues.png b/doc/v2/design/cluster_train/src/paddle-task-queues.png similarity index 100% rename from doc/design/cluster_train/src/paddle-task-queues.png rename to doc/v2/design/cluster_train/src/paddle-task-queues.png diff --git a/doc/design/cluster_train/src/paddle-task-states.graffle b/doc/v2/design/cluster_train/src/paddle-task-states.graffle similarity index 100% rename from doc/design/cluster_train/src/paddle-task-states.graffle rename to doc/v2/design/cluster_train/src/paddle-task-states.graffle diff --git a/doc/design/cluster_train/src/paddle-task-states.png b/doc/v2/design/cluster_train/src/paddle-task-states.png 
similarity index 100% rename from doc/design/cluster_train/src/paddle-task-states.png rename to doc/v2/design/cluster_train/src/paddle-task-states.png diff --git a/doc/design/cluster_train/src/pserver_init.graffle b/doc/v2/design/cluster_train/src/pserver_init.graffle similarity index 100% rename from doc/design/cluster_train/src/pserver_init.graffle rename to doc/v2/design/cluster_train/src/pserver_init.graffle diff --git a/doc/design/cluster_train/src/pserver_init.png b/doc/v2/design/cluster_train/src/pserver_init.png similarity index 100% rename from doc/design/cluster_train/src/pserver_init.png rename to doc/v2/design/cluster_train/src/pserver_init.png diff --git a/doc/design/cluster_train/src/submit-job.graffle b/doc/v2/design/cluster_train/src/submit-job.graffle similarity index 100% rename from doc/design/cluster_train/src/submit-job.graffle rename to doc/v2/design/cluster_train/src/submit-job.graffle diff --git a/doc/design/cluster_train/src/submit-job.png b/doc/v2/design/cluster_train/src/submit-job.png similarity index 100% rename from doc/design/cluster_train/src/submit-job.png rename to doc/v2/design/cluster_train/src/submit-job.png diff --git a/doc/design/cluster_train/src/trainer.graffle b/doc/v2/design/cluster_train/src/trainer.graffle similarity index 100% rename from doc/design/cluster_train/src/trainer.graffle rename to doc/v2/design/cluster_train/src/trainer.graffle diff --git a/doc/design/cluster_train/src/trainer.png b/doc/v2/design/cluster_train/src/trainer.png similarity index 100% rename from doc/design/cluster_train/src/trainer.png rename to doc/v2/design/cluster_train/src/trainer.png diff --git a/doc/design/cluster_train/submit-job.md b/doc/v2/design/cluster_train/submit-job.md similarity index 100% rename from doc/design/cluster_train/submit-job.md rename to doc/v2/design/cluster_train/submit-job.md diff --git a/doc/design/mkl/image/engine.png b/doc/v2/design/mkl/image/engine.png similarity index 100% rename from doc/design/mkl/image/engine.png rename to doc/v2/design/mkl/image/engine.png diff --git a/doc/design/mkl/image/gradients.png b/doc/v2/design/mkl/image/gradients.png similarity index 100% rename from doc/design/mkl/image/gradients.png rename to doc/v2/design/mkl/image/gradients.png diff --git a/doc/design/mkl/image/layers.png b/doc/v2/design/mkl/image/layers.png similarity index 100% rename from doc/design/mkl/image/layers.png rename to doc/v2/design/mkl/image/layers.png diff --git a/doc/design/mkl/image/matrix.png b/doc/v2/design/mkl/image/matrix.png similarity index 100% rename from doc/design/mkl/image/matrix.png rename to doc/v2/design/mkl/image/matrix.png diff --git a/doc/design/mkl/image/overview.png b/doc/v2/design/mkl/image/overview.png similarity index 100% rename from doc/design/mkl/image/overview.png rename to doc/v2/design/mkl/image/overview.png diff --git a/doc/design/mkl/mkl_packed.md b/doc/v2/design/mkl/mkl_packed.md similarity index 100% rename from doc/design/mkl/mkl_packed.md rename to doc/v2/design/mkl/mkl_packed.md diff --git a/doc/design/mkl/mkldnn.md b/doc/v2/design/mkl/mkldnn.md similarity index 100% rename from doc/design/mkl/mkldnn.md rename to doc/v2/design/mkl/mkldnn.md diff --git a/doc/v2/dev/new_layer_cn.rst b/doc/v2/dev/new_layer_cn.rst index 0ded1c262adad44f4df000ef2933c7b68050f2fc..3115654b2bd87995fa63bb7828fd1b3039aea8cc 100644 --- a/doc/v2/dev/new_layer_cn.rst +++ b/doc/v2/dev/new_layer_cn.rst @@ -16,7 +16,7 @@ 下图是一个全连接层的示意图。在全连接层中,每个输出节点都连接到所有的输入节点上。 -.. image:: FullyConnected.jpg +.. 
image:: src/FullyConnected.jpg :align: center :scale: 60 % diff --git a/doc/v2/dev/new_layer_en.rst b/doc/v2/dev/new_layer_en.rst index 110a9fb38f890a766bb4480e91feb22d3b0838a5..b05bb45f11eb253dfb87d6283c29ec6689394d22 100644 --- a/doc/v2/dev/new_layer_en.rst +++ b/doc/v2/dev/new_layer_en.rst @@ -16,7 +16,7 @@ First we need to derive equations of the *forward* and *backward* part of the la The illustration of a fully connected layer is shown in the following figure. In a fully connected layer, all output nodes are connected to all the input nodes. -.. image:: FullyConnected.jpg +.. image:: src/FullyConnected.jpg :align: center :scale: 60 % diff --git a/doc/v2/dev/FullyConnected.jpg b/doc/v2/dev/src/FullyConnected.jpg similarity index 100% rename from doc/v2/dev/FullyConnected.jpg rename to doc/v2/dev/src/FullyConnected.jpg diff --git a/doc/v2/dev/src/doc_en.png b/doc/v2/dev/src/doc_en.png new file mode 100644 index 0000000000000000000000000000000000000000..ed6b9178fba91a3bdf45ae797a9924f84146fbc8 Binary files /dev/null and b/doc/v2/dev/src/doc_en.png differ diff --git a/doc/v2/dev/write_docs_cn.rst b/doc/v2/dev/write_docs_cn.rst index f79769b810b91c6984016d95f40b89186bfb61b0..23615f8830e99633676c83ec5d28139a732c623c 100644 --- a/doc/v2/dev/write_docs_cn.rst +++ b/doc/v2/dev/write_docs_cn.rst @@ -2,20 +2,20 @@ 如何贡献文档 ############# -PaddlePaddle的文档包括英文文档 ``doc`` 和中文文档 ``doc_cn`` 两个部分。文档都是通过 `cmake`_ 驱动 `sphinx`_ 编译生成,生成后的文档分别存储在编译目录的 ``doc`` 和 ``doc_cn`` 两个子目录下。 -也可以利用PaddlePaddle 工具来编译文档,这个情况下所有的文件会存在整理过的的文件目录 .ppo_workspace/content 下 +PaddlePaddle的文档包括中英文两个部分。文档都是通过 ``cmake`` 驱动 ``sphinx`` 编译生成的,PaddlePaddle.org工具可以帮助我们实现这一编译过程,并提供更好的预览效果。 如何构建文档 ============ -PaddlePaddle的文档构建有三种方式。 +PaddlePaddle的文档构建有两种方式,分别为使用paddlepaddle.org工具和不使用paddlepaddle.org工具,两种方式都有各自的优点,前者方便预览,后者方便开发者进行调试。这两种方式中又分别有使用docker和不使用docker的两种构建方法。 +我们建议使用PaddlePaddle.org工具来构建文档。 使用PaddlePaddle.org工具 --------------- -这个是目前推荐的使用方法。除了可以自动编译文档,也可以直接在网页预览文档。 +------------------------ +这个是目前推荐的使用方法。除了可以自动编译文档,还可以直接在网页中预览文档,需要注意的是,采用后续说明的其它方式虽然也可以预览文档,但是文档的样式与官网文档是不一致的,使用PaddlePaddle.org工具进行编译才能产生与官网文档样式一致的预览效果。 -文件工具是使用Docker,需要在系统里先安装好Docker工具包。Docker安装请参考Docker的官网。安装好Docker之后及可用以下命令启动工具 +PaddlePaddle.org工具可以配合Docker使用,需要在系统里先安装好Docker工具包。Docker安装请参考 `Docker的官网 `_ 。安装好Docker之后即可用以下命令启动工具 .. code-block:: bash @@ -32,10 +32,10 @@ PaddlePaddle的文档构建有三种方式。 docker run -it -p 8000:8000 -v `pwd`:/var/content paddlepaddle/paddlepaddle.org:latest 注意: PaddlePaddle.org 会在 -v (volume) 指定的内容存储库运行命令 -之后再用网页连到http://localhost:8000就可以在网页上生成需要的文档 +之后再用网页连到 http://localhost:8000 就可以在网页上生成需要的文档 编译后的文件将被存储在工作目录 /.ppo_workspace/content。 -如果不想使用 Docker,你还可以通过运行Django框架直接激活工具的服务器。使用下面的命令来运行它。 +如果不想使用Docker,你还可以通过运行Django框架直接激活工具的服务器。使用下面的命令来运行它。 .. code-block:: bash @@ -57,42 +57,51 @@ PaddlePaddle的文档构建有三种方式。 python manage.py runserver 工具服务器将读取环境变量 CONTENT_DIR 搜索代码库。请指定的PaddlePaddle工作目录给环境变量 CONTENT_DIR。 -之后再用网页连到http://localhost:8000就可以在网页上生成需要的文档。 +之后再用网页连到 http://localhost:8000 就可以在网页上生成需要的文档。 编译后的文件将被存储在工作目录 /.ppo_workspace/content。 想了解更多PaddlePaddle.org工具的详细信息,可以 `点击这里 `_ 。 -使用Docker构建 --------------- +不使用PaddlePaddle.org工具 +-------------------------- 使用Docker构建PaddlePaddle的文档,需要在系统里先安装好Docker工具包。Docker安装请参考 `Docker的官网 `_ 。安装好Docker之后可以使用源码目录下的脚本构建文档,即 -.. code-block:: bash +[TBD] - cd TO_YOUR_PADDLE_CLONE_PATH - cd paddle/scripts/tools/build_docs - sh build_docs.sh +如果不想使用Docker,也可以使用以下命令直接构建PaddlePaddle文档,即 -编译完成之后,会在当前目录生成两个子目录\: doc(英文文档目录)和 doc_cn(中文文档目录)。 -打开浏览器访问对应目录下的index.html即可访问本地文档。 +.. 
code-block:: bash -直接构建 --------- + mkdir paddle + cd paddle + git clone https://github.com/PaddlePaddle/Paddle.git + mkdir -p build + cd build + cmake .. -DCMAKE_BUILD_TYPE=Release -DWITH_GPU=OFF -DWITH_MKL=OFF -DWITH_DOC=ON -如果提示正确,可以执行以下命令编译生成文档,即 + # 如果只需要构建使用文档,则执行以下命令 + make -j $processors gen_proto_py + make -j $processors paddle_docs paddle_docs_cn -.. code-block:: bash + # 如果只需要构建API,则执行以下命令 + make -j $processors gen_proto_py framework_py_proto + make -j $processors copy_paddle_pybind + make -j $processors paddle_api_docs + +其中$processors代表启动和CPU核一样多的进程来并行编译,可以根据本机的CPU核数设置相应的值。 + +编译完成后,进入 ``doc/v2`` 目录,如果选择构建文档则会在该目录下生成 ``cn/html/`` 、 ``en/html`` 两个子目录,选择构建API则会生成 ``api/en/html`` 目录,分别进入这些目录下,执行以下命令: + +.. code-block:: bash - cd TO_YOUR_PADDLE_CLONE_PATH - mkdir -p build - cd build - cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_MKL=OFF -DWITH_DOC=ON - make gen_proto_py - make paddle_docs paddle_docs_cn + python -m SimpleHTTPServer 8088 -编译完成之后,会在当前目录生成两个子目录\: doc(英文文档目录)和 doc_cn(中文文档目录)。 -打开浏览器访问对应目录下的index.html即可访问本地文档。 +在浏览器中输入 http://localhost:8088 就可以看到编译生成的中/英文的文档页面和英文的API页面,下图为生成的英文文档首页示例。注意,示例中由于使用了sphinx的原始主题,所以页面的风格与官网并不一致,但这并不影响开发者进行调试。 +.. image:: src/doc_en.png + :align: center + :scale: 60 % 如何书写文档 ============ @@ -102,7 +111,7 @@ PaddlePaddle文档使用 `sphinx`_ 自动生成,用户可以参考sphinx教程 如何更新www.paddlepaddle.org ============================ -更新的文档以PR的形式提交到github中,提交方式参见 `贡献文档 `_ 。 +更新的文档以PR的形式提交到github中,提交方式参见 `如何贡献文档 `_ 。 目前PaddlePaddle的develop分支的文档是自动触发更新的,用户可以分别查看最新的 `中文文档 `_ 和 `英文文档 `_ 。 diff --git a/doc/v2/dev/write_docs_en.rst b/doc/v2/dev/write_docs_en.rst index f3408a84269aaeef19986c220454555fbbe30e23..15ff0d34ad622f100fe98d8738b830e47c35b41b 100644 --- a/doc/v2/dev/write_docs_en.rst +++ b/doc/v2/dev/write_docs_en.rst @@ -2,21 +2,20 @@ Contribute Documentation ######################## -PaddlePaddle supports English documentation ``doc`` and Chinese documentation ``doc_cn``. -Both are compiled by `cmake`_ and `sphinx`_ , the compiled documentations will be stored under ``doc`` and ``doc_cn`` directories. -When using the PaddlePaddle.org to compile documentations, the compiled documentations will be stored under a consolidated directory: .ppo_workspace/content +PaddlePaddle's documentation includes both Chinese and English versions. The documentation is built using the ``cmake`` command to drive the ``sphinx`` compiler. The PaddlePaddle.org tool helps us to implement this compilation process and provides better preview results. -How to Build Documentations -============ +How to build Documentation +=========================== -We recommend using PaddlePaddle.org tool to build documentation +PaddlePaddle's documentation is built in two ways: using the PaddlePaddle.org tool and without using it. Both methods have their own advantages. The former facilitates previewing, while the latter facilitates debugging by the developer. We could choose to build the documentation with Docker or without it in each of the above ways. +We recommend using PaddlePaddle.org tool to build documentation. -Use PaddlePaddle.org tool --------------- -This is the recommended method to build documentation. It can compile documentation and preview the documentation in a web browser. +Using PaddlePaddle.org tool +----------------------------- +This is the recommended method to build documentation, because it can automatically compile the documentation and preview the documentation directly in a web page. 
Note that, although you can preview the documentation in other ways, its style may not be consistent with the official website. Compiling with the PaddlePaddle.org tool produces a preview that will be consistent with the official website documentation style. -The tool uses Docker, please install it on your system. Please check Docker official website on how to install Docker. You may use the following commands to activate the tool +The PaddlePaddle.org tool can be used with Docker and Docker needs to be installed first. Please refer to `Docker's official website `_ on how to install Docker. After installing Docker, you may use the following commands to activate the tool .. code-block:: bash @@ -32,8 +31,8 @@ The tool uses Docker, please install it on your system. Please check Docker offi # Please specify the working directory through -v docker run -it -p 8000:8000 -v `pwd`:/var/content paddlepaddle/paddlepaddle.org:latest -Note: PaddlePaddle.org will read the content repos specified in the -v (volume) flag of the docker run command -Use a web browser and navigate to http://localhost:8000, click the buttons to compile the documentation +Note: PaddlePaddle.org will read the content repos specified in the -v (volume) flag of the docker run commands +Use a web browser and navigate to http://localhost:8000. Click the buttons to compile the documentation. The compiled documentations will be stored in /.ppo_workspace/content @@ -58,19 +57,62 @@ If you don't wish to use Docker, you can also activate the tool through Django. pip install -r requirements.txt python manage.py runserver -Use a web browser and navigate to http://localhost:8000, click the buttons to compile the documentation +Specify the PaddlePaddle working directory in the environment variable CONTENT_DIR so that the tool can find the working directory. + +Use a web browser and navigate to http://localhost:8000. Click the buttons to compile the documentation The compiled documentations will be stored in /.ppo_workspace/content -If you want to learn more on the PaddlePaddle.org, please `click here `_ 。 +Please `click here `_ for more information about the PaddlePaddle.org tool. + + +Manually Building the Documentation +------------------------------------- + +To build PaddlePaddle's documentation with Docker, you need to install Docker first. Please refer to `Docker's official website `_ on how to install Docker. After Docker is installed, you can use the scripts in the source directory to build the documentation. + +[TBD] + +If you do not wish to use Docker, you can also use the following commands to directly build the PaddlePaddle documentation. + +.. code-block:: bash + + mkdir paddle + cd paddle + git clone https://github.com/PaddlePaddle/Paddle.git + mkdir -p build + cd build + cmake .. -DCMAKE_BUILD_TYPE=Release -DWITH_GPU=OFF -DWITH_MKL=OFF -DWITH_DOC=ON + + # If you only need to build documents, use the following commands + make -j $processors gen_proto_py + make -j $processors paddle_docs paddle_docs_cn + + # If you only need to build APIs, use the following commands + make -j $processors gen_proto_py framework_py_proto + make -j $processors copy_paddle_pybind + make -j $processors paddle_api_docs + +$processors indicates that as many processes as there are CPU cores are started to compile in parallel. It should be set according to the number of CPU cores of your machine. + +After the compilation is complete, enter the ``doc/v2`` directory.
If you chose to build the documentation, ``cn/html/`` and ``en/html`` subdirectories will be generated under this directory. If you chose to build the APIs, an ``api/en/html`` subdirectory will be generated. Please enter these directories respectively and execute the following commands: + +.. code-block:: bash - cd TO_YOUR_PADDLE_CLONE_PATH - mkdir -p build - cd build - cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_MKL=OFF -DWITH_DOC=ON - make gen_proto_py - make paddle_docs paddle_docs_cn + python -m SimpleHTTPServer 8088 +Use a web browser and navigate to http://localhost:8088, and you will see the compiled Chinese/English documentation pages and the English API pages. The following figure is an example of the built English documentation home page. Note that because the example uses sphinx's default theme, the page style is not consistent with the official website, but this does not affect the developer's debugging. -How to write Documentations -============ +.. image:: src/doc_en.png + :align: center + :scale: 60 % -PaddlePaddle uses `sphinx`_ to compile documentations,Please check sphinx official website for more detail. +How to write Documentation +=========================== +PaddlePaddle uses `sphinx`_ to compile documentation. Please check the sphinx official website for more details. How to update www.paddlepaddle.org -============================ +=================================== Please create PRs and submit them to GitHub; please check `Contribute Code `_ . PaddlePaddle develop branch will update the documentation once the PR is merged. Users may check the latest `Chinese Docs `_ and diff --git a/doc/v2/faq/index_en.rst b/doc/v2/faq/index_en.rst index 57df868f760038b25fae30df7ab20a68875ad36a..3fa220792b252617848a1c76bc2be49928e35f64 100644 --- a/doc/v2/faq/index_en.rst +++ b/doc/v2/faq/index_en.rst @@ -1,7 +1,8 @@ FAQ ==== - +This document provides answers to some of the frequently asked questions about PaddlePaddle. If you have a question that is not covered here, please go to the `PaddlePaddle Community `_ to find an answer, or submit a new `issue `_ ; we will reply promptly. + .. toctree:: :maxdepth: 1 diff --git a/doc/v2/getstarted/index_en.rst b/doc/v2/getstarted/index_en.rst index 33f299be5680e0aa4a3f36638f51135503193d94..94b306895c9ddf6140cf600131930a6675a583eb 100644 --- a/doc/v2/getstarted/index_en.rst +++ b/doc/v2/getstarted/index_en.rst @@ -1,8 +1,19 @@ GET STARTED ============ +If you want to quickly learn how to use PaddlePaddle, please refer to the following guide: + .. toctree:: :maxdepth: 1 quickstart_en.rst + + +Before using PaddlePaddle to build applications, you need to understand some basic concepts. + +Here is an example of linear regression. It introduces the workflow of PaddlePaddle, including data format, model configuration, and training. + +.. toctree:: + :maxdepth: 1 + concepts/use_concepts_en.rst diff --git a/doc/v2/howto/capi/index_en.rst b/doc/v2/howto/capi/index_en.rst index 2cbbe362fd8e06abe9866d998f60fbb3458a80b5..4ec39c9d5223442cf6872edaf7befeb5053b538e 100644 --- a/doc/v2/howto/capi/index_en.rst +++ b/doc/v2/howto/capi/index_en.rst @@ -1,6 +1,23 @@ -C-API Prediction Library +C-API Inference Library ======================== +After we train a neural network, we use it for inference. Inference is the process of preparing input data and propagating it through the model to produce the result. + +Compared with model training, inference has the following characteristics: + +#. Inference does not require backpropagation or parameter updates, as training does. +#. Labels are not needed in inference. +#. Most of the time, inference needs to be integrated with the user's system.
+ +Therefore, the inference SDK needs to be designed separately, with the following features: + +#. To reduce its size, the inference SDK does not include backpropagation or parameter updates. +#. The inference SDK provides a simple user interface for ease of use. +#. Since the input data may have a variety of structures, the input data format is packaged clearly and compactly. +#. To be compatible with the user's system, the SDK's interface conforms to the standard C interface. + +PaddlePaddle provides a C-API that addresses the above requirements. The following are the guidelines for using the C-API: + .. toctree:: :maxdepth: 1 diff --git a/doc/v2/howto/cluster/index_en.rst b/doc/v2/howto/cluster/index_en.rst index 2640a09dcc904619bc97c9bd3f3d81a9dc307663..31eda57c4fb3947d92df45ea8dbb9274c9814140 100644 --- a/doc/v2/howto/cluster/index_en.rst +++ b/doc/v2/howto/cluster/index_en.rst @@ -1,7 +1,9 @@ Distributed Training ==================== -In this section, we'll explain how to run distributed training jobs with PaddlePaddle on different types of clusters. The diagram below shows the main architecture of a distributed trainning job: +The effectiveness of a deep learning model is often directly related to the scale of the data: the same model generally achieves better results as the dataset grows. However, once the data grows beyond a certain size, it can no longer fit on a single computer, and using multiple computers for distributed training becomes a natural solution. In distributed training, the training data is divided into multiple shards, and the machines participating in the training each read their own shard and collaboratively update the parameters of the overall model. + +Distributed training generally uses the framework shown below: .. image:: src/ps_en.png :width: 500 @@ -10,13 +12,27 @@ In this section, we'll explain how to run distributed training jobs with PaddleP - Trainer: each trainer reads a data shard and trains the neural network. The trainer then uploads the calculated "gradients" to the parameter servers and waits for the parameters to be optimized on the parameter server side. When that finishes, the trainer downloads the optimized parameters and continues training. - Parameter server: each parameter server stores part of the whole model's parameters. The parameter servers run the optimization step when trainers upload gradients, and then send the updated parameters back to the trainers. -PaddlePaddle can support both synchronize stochastic gradient descent (SGD) and asynchronous SGD. +Synchronous stochastic gradient descent training of a neural network is achieved through this cooperation of trainers and parameter servers. + +PaddlePaddle supports both synchronous stochastic gradient descent (SGD) and asynchronous SGD. -When training with synchronize SGD, PaddlePaddle uses an internal "synchronize barrier" which makes gradients update and parameter download in strict order. On the other hand, asynchronous SGD won't wait for all trainers to finish upload at a single step, this will increase the parallelism of distributed training: parameter servers do not depend on each other, they'll do parameter optimization concurrently. Parameter servers will not wait for trainers, so trainers will also do their work concurrently. But asynchronous SGD will introduce more randomness and noises in the gradient.
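+To make the trainer/parameter-server division concrete, the following is a minimal, self-contained C++ sketch of a single synchronous-SGD step (an illustration only, not PaddlePaddle's actual API or network protocol): each trainer thread produces a gradient for its data shard, joining the threads plays the role of the synchronization barrier, and the "parameter server" then averages the gradients and applies one SGD update.
+
+.. code-block:: c++
+
+  #include <thread>
+  #include <vector>
+
+  int main() {
+    const int num_trainers = 4;
+    const float lr = 0.1f;
+    std::vector<float> param = {1.0f, 2.0f};              // parameter-server state
+    std::vector<std::vector<float>> grads(num_trainers);  // "uploaded" gradients
+
+    // Each trainer computes a gradient on its own data shard
+    // (a stand-in for a real forward/backward pass).
+    std::vector<std::thread> trainers;
+    for (int i = 0; i < num_trainers; ++i)
+      trainers.emplace_back([&grads, i] { grads[i] = {0.1f * (i + 1), 0.2f * (i + 1)}; });
+    for (auto &t : trainers) t.join();  // the synchronization barrier
+
+    // The parameter server averages the gradients and applies one SGD
+    // update; trainers would then download the new parameters.
+    for (size_t j = 0; j < param.size(); ++j) {
+      float sum = 0;
+      for (auto &g : grads) sum += g[j];
+      param[j] -= lr * sum / num_trainers;
+    }
+    return 0;
+  }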
+Before starting cluster training, you need to prepare the cluster configuration, install PaddlePaddle, and complete other preparations. To understand how to configure the basic environment for distributed training, check the link below: .. toctree:: :maxdepth: 1 preparations_en.md + +Cluster training has a large number of configurable parameters, such as the number of machines used, communication ports, etc. To learn how to configure the distributed training process by setting these startup parameters, check the link below: + +.. toctree:: + :maxdepth: 1 + cmd_argument_en.md + +PaddlePaddle is compatible with a variety of different clusters, each with its own advantages. To learn how to run PaddlePaddle on these different types of clusters, check the link below: + +.. toctree:: + :maxdepth: 1 + multi_cluster/index_en.rst diff --git a/doc/v2/howto/index_en.rst b/doc/v2/howto/index_en.rst index 2079be766f2d8e6d63ca11dccd98f80613309ceb..bf2320a169df149cc8d44612d975ecf64f8ea779 100644 --- a/doc/v2/howto/index_en.rst +++ b/doc/v2/howto/index_en.rst @@ -1,11 +1,37 @@ HOW TO -======= +======== + +PaddlePaddle provides users the ability to flexibly set various command line parameters to control the model training and inference process. Please refer to the following instructions on using PaddlePaddle: + +.. toctree:: + :maxdepth: 1 + + cmd_parameter/index_en.rst + +PaddlePaddle supports distributed training tasks on fabric clusters, MPI clusters, and Kubernetes clusters. For detailed configuration and usage instructions, refer to: + +.. toctree:: + :maxdepth: 1 + + cluster/index_en.rst + +PaddlePaddle provides a C-API for inference. For guidelines on using the C-API, refer to: + +.. toctree:: + :maxdepth: 1 + + capi/index_en.rst + +PaddlePaddle supports a variety of flexible and efficient recurrent neural networks. For details, please refer to: + +.. toctree:: + :maxdepth: 1 + + rnn/index_en.rst + +To learn how to use the built-in timing tool, nvprof, or nvvp for performance analysis and tuning, please refer to: ..
toctree:: :maxdepth: 1 - cmd_parameter/index_en.rst - cluster/index_en.rst - capi/index_en.rst - rnn/index_en.rst - optimization/gpu_profiling_en.rst + optimization/gpu_profiling_en.rst diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt index a7b249d43bf3ad9924749d5e66618750f19d8bf7..d2a4b1335464f553a361728e64ed5ca177ca53da 100644 --- a/paddle/CMakeLists.txt +++ b/paddle/CMakeLists.txt @@ -1,27 +1,29 @@ -add_subdirectory(cuda) -add_subdirectory(function) -add_subdirectory(utils) -add_subdirectory(math) -add_subdirectory(gserver) -add_subdirectory(parameter) -add_subdirectory(testing) - -if(MOBILE_INFERENCE) - add_subdirectory(capi) -else() - add_subdirectory(pserver) - add_subdirectory(trainer) - add_subdirectory(scripts) +if(NOT WITH_FLUID) + add_subdirectory(cuda) + add_subdirectory(function) + add_subdirectory(utils) + add_subdirectory(math) + add_subdirectory(gserver) + add_subdirectory(parameter) - if(WITH_C_API) + if(MOBILE_INFERENCE) add_subdirectory(capi) - endif() + else() + add_subdirectory(pserver) + add_subdirectory(trainer) + add_subdirectory(scripts) - if(NOT ANDROID AND NOT IOS) - add_subdirectory(fluid) - endif() + if(WITH_C_API) + add_subdirectory(capi) + endif() - if(WITH_SWIG_PY) - add_subdirectory(api) + if(WITH_SWIG_PY) + add_subdirectory(api) + endif() endif() endif() + +add_subdirectory(testing) +if(NOT MOBILE_INFERENCE AND NOT ANDROID AND NOT IOS) + add_subdirectory(fluid) +endif() diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index ebb083c5a477d5be91ef14be74dd9de349d07931..e06e9a2b363d1ffc6876b98bcb7304b0a54dbcaa 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -36,7 +36,7 @@ target_include_directories(paddle_capi PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) add_style_check_target(paddle_capi ${CAPI_SOURCES} ${CAPI_HEADER} ${CAPI_PRIVATE_HEADER}) -add_dependencies(paddle_capi paddle_proto) +add_dependencies(paddle_capi paddle_proto paddle_gserver) # TODO: paddle_capi_whole will be removed.
set(PADDLE_CAPI_LAYERS_LIBS diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt index 48713f2c2ac62a37b7b7a4602f7f6a325aecb0b8..a4ea74a6d2fbc29dc33a6b57ee453f49ed36c7fa 100644 --- a/paddle/fluid/framework/CMakeLists.txt +++ b/paddle/fluid/framework/CMakeLists.txt @@ -21,7 +21,7 @@ endif() cc_test(eigen_test SRCS eigen_test.cc DEPS tensor) nv_test(mixed_vector_test SRCS mixed_vector_test.cu DEPS place paddle_memory device_context init) -cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor framework_proto) +cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor framework_proto recordio) cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor paddle_memory) nv_test(lod_tensor_gpu_test SRCS lod_tensor_test.cu DEPS lod_tensor init) @@ -103,4 +103,5 @@ cc_test(cow_ptr_tests SRCS details/cow_ptr_test.cc) cc_test(channel_test SRCS channel_test.cc) cc_test(tuple_test SRCS tuple_test.cc ) cc_test(concurrency_test SRCS concurrency_test.cc DEPS go_op channel_close_op channel_create_op - channel_send_op channel_recv_op sum_op elementwise_add_op executor proto_desc) + channel_send_op channel_recv_op sum_op select_op elementwise_add_op compare_op + conditional_block_op while_op assign_op print_op executor proto_desc) diff --git a/paddle/fluid/framework/channel.h b/paddle/fluid/framework/channel.h index 9f8fb12098d622058a86f83c1c42a1feb1cfb2e2..adfaba26ace78f547161ad4029a741f3ca8a6764 100644 --- a/paddle/fluid/framework/channel.h +++ b/paddle/fluid/framework/channel.h @@ -15,23 +15,43 @@ limitations under the License. */ #pragma once #include // for size_t +#include #include #include "paddle/fluid/platform/enforce.h" namespace paddle { namespace framework { +enum class ChannelAction { + SEND = 0, + RECEIVE = 1, + CLOSE = 2, +}; + // Channel is the abstract class of buffered and un-buffered channels. template class Channel { public: + virtual bool CanSend() = 0; + virtual bool CanReceive() = 0; virtual bool Send(T*) = 0; virtual bool Receive(T*) = 0; virtual size_t Cap() = 0; virtual void Lock() = 0; + virtual void Unlock() = 0; + virtual bool IsClosed() = 0; virtual void Close() = 0; virtual ~Channel() {} + + virtual void AddToSendQ(const void* referrer, T* data, + std::shared_ptr cond, + std::function cb) = 0; + virtual void AddToReceiveQ(const void* referrer, T* data, + std::shared_ptr cond, + std::function cb) = 0; + virtual void RemoveFromSendQ(const void* referrer) = 0; + virtual void RemoveFromReceiveQ(const void* referrer) = 0; }; // Forward declaration of channel implementations. @@ -80,6 +100,27 @@ class ChannelHolder { return channel != nullptr ? 
channel->Receive(data) : false; } + bool IsClosed() { + if (IsInitialized()) { + return holder_->IsClosed(); + } + return false; + } + + bool CanSend() { + if (IsInitialized()) { + return holder_->CanSend(); + } + return false; + } + + bool CanReceive() { + if (IsInitialized()) { + return holder_->CanReceive(); + } + return false; + } + void close() { if (IsInitialized()) holder_->Close(); } @@ -97,6 +138,38 @@ class ChannelHolder { if (IsInitialized()) holder_->Unlock(); } + template + void AddToSendQ(const void* referrer, T* data, + std::shared_ptr cond, + std::function cb) { + if (IsInitialized()) { + Channel* channel = static_cast*>(holder_->Ptr()); + if (channel != nullptr) { + channel->AddToSendQ(referrer, data, cond, cb); + } + } + } + + template + void AddToReceiveQ(const void* referrer, T* data, + std::shared_ptr cond, + std::function cb) { + if (IsInitialized()) { + Channel* channel = static_cast*>(holder_->Ptr()); + if (channel != nullptr) { + channel->AddToReceiveQ(referrer, data, cond, cb); + } + } + } + + void RemoveFromSendQ(const void* referrer) { + if (IsInitialized()) holder_->RemoveFromSendQ(referrer); + } + + void RemoveFromReceiveQ(const void* referrer) { + if (IsInitialized()) holder_->RemoveFromReceiveQ(referrer); + } + inline bool IsInitialized() const { return holder_ != nullptr; } inline const std::type_index Type() { @@ -113,6 +186,11 @@ class ChannelHolder { virtual ~Placeholder() {} virtual const std::type_index Type() const = 0; virtual void* Ptr() const = 0; + virtual bool IsClosed() = 0; + virtual bool CanSend() = 0; + virtual bool CanReceive() = 0; + virtual void RemoveFromSendQ(const void* referrer) = 0; + virtual void RemoveFromReceiveQ(const void* referrer) = 0; virtual void Close() = 0; virtual void Lock() = 0; virtual void Unlock() = 0; @@ -129,6 +207,39 @@ class ChannelHolder { virtual void* Ptr() const { return static_cast(channel_.get()); } + virtual bool IsClosed() { + if (channel_) { + return channel_->IsClosed(); + } + return false; + } + + virtual bool CanSend() { + if (channel_) { + return channel_->CanSend(); + } + return false; + } + + virtual bool CanReceive() { + if (channel_) { + return channel_->CanReceive(); + } + return false; + } + + virtual void RemoveFromSendQ(const void* referrer) { + if (channel_) { + channel_->RemoveFromSendQ(referrer); + } + } + + virtual void RemoveFromReceiveQ(const void* referrer) { + if (channel_) { + channel_->RemoveFromReceiveQ(referrer); + } + } + virtual void Close() { if (channel_) channel_->Close(); } diff --git a/paddle/fluid/framework/channel_impl.h b/paddle/fluid/framework/channel_impl.h index a4561031fd8c49613269e7008ce558f25f9765e4..457abbf373d4549229e8fd8bd6b2087cc6b8f5c8 100644 --- a/paddle/fluid/framework/channel_impl.h +++ b/paddle/fluid/framework/channel_impl.h @@ -29,32 +29,50 @@ class ChannelImpl : public paddle::framework::Channel { friend void paddle::framework::CloseChannel(Channel *); public: + virtual bool CanSend(); + virtual bool CanReceive(); virtual bool Send(T *); virtual bool Receive(T *); virtual size_t Cap() { return cap_; } virtual void Lock(); virtual void Unlock(); + virtual bool IsClosed(); virtual void Close(); - ChannelImpl(size_t); virtual ~ChannelImpl(); + virtual void AddToSendQ(const void *referrer, T *data, + std::shared_ptr cond, + std::function cb); + virtual void AddToReceiveQ(const void *referrer, T *data, + std::shared_ptr cond, + std::function cb); + + virtual void RemoveFromSendQ(const void *referrer); + virtual void RemoveFromReceiveQ(const void *referrer); + 
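+ // AddToSendQ/AddToReceiveQ enqueue a QueueMessage built around the caller-supplied condition variable and callback, so a single select operation can block on several channels at once; RemoveFromSendQ/RemoveFromReceiveQ withdraw the queued entries whose referrer matches once another select case has fired. +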
private: struct QueueMessage { T *data; - std::condition_variable_any cond; + std::shared_ptr cond; bool chan_closed = false; bool completed = false; + const void *referrer; // TODO(thuan): figure out better way to do this + std::function callback; - QueueMessage(T *item) : data(item) {} + QueueMessage(T *item) + : data(item), cond(std::make_shared()) {} + + QueueMessage(T *item, std::shared_ptr cond) + : data(item), cond(cond) {} void Wait(std::unique_lock &lock) { - cond.wait(lock, [this]() { return completed; }); + cond->wait(lock, [this]() { return completed; }); } void Notify() { completed = true; - cond.notify_all(); + cond->notify_all(); } }; @@ -87,6 +105,18 @@ ChannelImpl::ChannelImpl(size_t capacity) PADDLE_ENFORCE_GE(capacity, 0); } +template +bool ChannelImpl::CanSend() { + std::lock_guard lock{mu_}; + return !closed_ && (!recvq.empty() || buf_.size() < cap_); +} + +template +bool ChannelImpl::CanReceive() { + std::lock_guard lock{mu_}; + return !(closed_ && buf_.empty()) && (!sendq.empty() || buf_.size() > 0); +} + template bool ChannelImpl::Send(T *item) { send_ctr++; @@ -105,7 +135,24 @@ bool ChannelImpl::Send(T *item) { std::shared_ptr m = recvq.front(); recvq.pop_front(); // Do the data transfer - *(m->data) = std::move(*item); + // We will do this data transfer if either of the following + // cases are true + // 1. callback == nullptr // This means it was a regular channel send + // 2. callback returns true + bool do_send = true; + if (m->callback != nullptr) do_send = m->callback(ChannelAction::SEND); + if (do_send) + *(m->data) = std::move(*item); + else + // We cannot do the data transfer because + // this QueueMessage was added by Select + // and some other case was executed. + // So call the Send function again. + // We do not care about notifying other + // because they would have been notified + // by the executed select case. + return send_return(Send(item)); + // Wake up the blocked process and unlock m->Notify(); lock.unlock(); @@ -150,7 +197,25 @@ bool ChannelImpl::Receive(T *item) { std::shared_ptr m = sendq.front(); sendq.pop_front(); // Do the data transfer - *item = std::move(*(m->data)); + // We will do this data transfer if either of the following + // cases are true + // 1. callback == nullptr // This means it was a regular channel send + // 2. callback returns true + bool do_receive = true; + if (m->callback != nullptr) + do_receive = m->callback(ChannelAction::RECEIVE); + if (do_receive) + *item = std::move(*(m->data)); + else + // We cannot do the data transfer because + // this QueueMessage was added by Select + // and some other case was executed. + // So call the Receive function again. + // We do not care about notifying other + // because they would have been notified + // by the executed select case. 
+ return recv_return(Receive(item)); + // Wake up the blocked process and unlock m->Notify(); lock.unlock(); @@ -186,6 +251,12 @@ void ChannelImpl<T>::Unlock() { mu_.unlock(); } +template <typename T> +bool ChannelImpl<T>::IsClosed() { + std::lock_guard<std::recursive_mutex> lock{mu_}; + return closed_; +} + template <typename T> void ChannelImpl<T>::Close() { std::unique_lock<std::recursive_mutex> lock{mu_}; @@ -203,6 +274,12 @@ void ChannelImpl<T>::Close() { std::shared_ptr<QueueMessage> m = recvq.front(); recvq.pop_front(); m->chan_closed = true; + + // Execute callback function (if any) + if (m->callback != nullptr) { + m->callback(ChannelAction::CLOSE); + } + m->Notify(); } @@ -211,10 +288,70 @@ void ChannelImpl<T>::Close() { std::shared_ptr<QueueMessage> m = sendq.front(); sendq.pop_front(); m->chan_closed = true; + + // Execute callback function (if any) + if (m->callback != nullptr) { + m->callback(ChannelAction::CLOSE); + } + m->Notify(); } } +template <typename T> +void ChannelImpl<T>::AddToSendQ( + const void *referrer, T *data, + std::shared_ptr<std::condition_variable_any> cond, + std::function<bool(ChannelAction)> cb) { + std::lock_guard<std::recursive_mutex> lock{mu_}; + auto m = std::make_shared<QueueMessage>(data, cond); + m->referrer = referrer; + m->callback = cb; + sendq.push_back(m); +} + +template <typename T> +void ChannelImpl<T>::AddToReceiveQ( + const void *referrer, T *data, + std::shared_ptr<std::condition_variable_any> cond, + std::function<bool(ChannelAction)> cb) { + std::lock_guard<std::recursive_mutex> lock{mu_}; + auto m = std::make_shared<QueueMessage>(data, cond); + m->referrer = referrer; + m->callback = cb; + recvq.push_back(m); +} + +template <typename T> +void ChannelImpl<T>::RemoveFromSendQ(const void *referrer) { + std::lock_guard<std::recursive_mutex> lock{mu_}; + + for (auto it = sendq.begin(); it != sendq.end();) { + std::shared_ptr<QueueMessage> sendMsg = *it; + + if (sendMsg->referrer == referrer) { + it = sendq.erase(it); + } else { + ++it; + } + } +} + +template <typename T> +void ChannelImpl<T>::RemoveFromReceiveQ(const void *referrer) { + std::lock_guard<std::recursive_mutex> lock{mu_}; + + for (auto it = recvq.begin(); it != recvq.end();) { + std::shared_ptr<QueueMessage> recvMsg = *it; + + if (recvMsg->referrer == referrer) { + it = recvq.erase(it); + } else { + ++it; + } + } +} + template <typename T> ChannelImpl<T>::~ChannelImpl() { Close(); diff --git a/paddle/fluid/framework/channel_test.cc b/paddle/fluid/framework/channel_test.cc index edfb41c72489113d9803c2957baed1ce44f8296d..73be5cdbe2a1f5994ecee4c415e83962f50532fe 100644 --- a/paddle/fluid/framework/channel_test.cc +++ b/paddle/fluid/framework/channel_test.cc @@ -871,3 +871,67 @@ TEST(ChannelHolder, ChannelHolderDestroyUnblocksSendersTest) { ch->Reset<int>(0); ChannelHolderDestroyUnblockSenders(ch, false); } + +// This tests closing a ChannelHolder multiple times. +void ChannelHolderManyTimesClose(ChannelHolder *ch) { + const int num_threads = 15; + std::thread t[num_threads]; + bool thread_ended[num_threads]; + + // Launches threads that try to send data to the channel. + for (size_t i = 0; i < num_threads / 3; i++) { + thread_ended[i] = false; + t[i] = std::thread( + [&](bool *ended) { + int data = 10; + ch->Send(&data); + *ended = true; + }, + &thread_ended[i]); + } + + // Launches threads that try to receive data from the channel. + for (size_t i = num_threads / 3; i < 2 * num_threads / 3; i++) { + thread_ended[i] = false; + t[i] = std::thread( + [&](bool *p) { + int data; + if (ch->Receive(&data)) { + EXPECT_EQ(data, 10); + } + *p = true; + }, + &thread_ended[i]); + } + + // Launches threads that try to close the channel.
+ for (size_t i = 2 * num_threads / 3; i < num_threads; i++) { + thread_ended[i] = false; + t[i] = std::thread( + [&](bool *p) { + if (!ch->IsClosed()) { + ch->close(); + } + *p = true; + }, + &thread_ended[i]); + } + + std::this_thread::sleep_for(std::chrono::milliseconds(100)); // wait + + // Verify that all threads are unblocked + for (size_t i = 0; i < num_threads; i++) { + EXPECT_EQ(thread_ended[i], true); + } + EXPECT_TRUE(ch->IsClosed()); + // delete the channel + delete ch; + for (size_t i = 0; i < num_threads; i++) t[i].join(); +} + +TEST(ChannelHolder, ChannelHolderManyTimesCloseTest) { + // Check for Buffered Channel + ChannelHolder *ch = new ChannelHolder(); + ch->Reset(10); + ChannelHolderManyTimesClose(ch); +} diff --git a/paddle/fluid/framework/concurrency_test.cc b/paddle/fluid/framework/concurrency_test.cc index 5770b0a5a18659e615e80a7c48113d8b543b69ec..e98e9d94bf71fe9ac226ab3ad7f587b37a5c6e33 100644 --- a/paddle/fluid/framework/concurrency_test.cc +++ b/paddle/fluid/framework/concurrency_test.cc @@ -19,7 +19,6 @@ limitations under the License. */ #include "paddle/fluid/framework/channel.h" #include "paddle/fluid/framework/executor.h" #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/framework/program_desc.h" USE_NO_KERNEL_OP(go); USE_NO_KERNEL_OP(channel_close); @@ -27,6 +26,12 @@ USE_NO_KERNEL_OP(channel_create); USE_NO_KERNEL_OP(channel_recv); USE_NO_KERNEL_OP(channel_send); USE_NO_KERNEL_OP(elementwise_add); +USE_NO_KERNEL_OP(select); +USE_NO_KERNEL_OP(conditional_block); +USE_NO_KERNEL_OP(equal); +USE_NO_KERNEL_OP(assign); +USE_NO_KERNEL_OP(while); +USE_NO_KERNEL_OP(print); namespace f = paddle::framework; namespace p = paddle::platform; @@ -35,27 +40,15 @@ namespace paddle { namespace framework { template -void CreateIntVariable(Scope &scope, p::CPUPlace &place, std::string name, - T value) { - // Create LoDTensor of dim [1,1] +LoDTensor *CreateVariable(Scope &scope, p::CPUPlace &place, std::string name, + T value) { + // Create LoDTensor of dim [1] auto var = scope.Var(name); auto tensor = var->GetMutable(); - tensor->Resize({1, 1}); + tensor->Resize({1}); T *expect = tensor->mutable_data(place); expect[0] = value; -} - -void InitTensorsInScope(Scope &scope, p::CPUPlace &place) { - p::CPUDeviceContext ctx(place); - - // Create channel variable - scope.Var("Channel"); - - // Create Variables, x0 will be put into channel, - // result will be pulled from channel - CreateIntVariable(scope, place, "Status", false); - CreateIntVariable(scope, place, "x0", 99); - CreateIntVariable(scope, place, "result", 0); + return tensor; } void AddOp(const std::string &type, const VariableNameMap &inputs, @@ -73,12 +66,117 @@ void AddOp(const std::string &type, const VariableNameMap &inputs, op->SetAttrMap(attrs); } +void AddCase(ProgramDesc *program, Scope *scope, p::CPUPlace *place, + BlockDesc *casesBlock, int caseId, int caseType, + std::string caseChannel, std::string caseVarName, + std::function func) { + std::string caseCondName = std::string("caseCond") + std::to_string(caseId); + std::string caseCondXVarName = + std::string("caseCondX") + std::to_string(caseId); + + BlockDesc *caseBlock = program->AppendBlock(*casesBlock); + func(caseBlock, scope); + + CreateVariable(*scope, *place, caseCondName, false); + CreateVariable(*scope, *place, caseCondXVarName, caseId); + CreateVariable(*scope, *place, caseVarName, caseId); + + scope->Var("step_scope"); + + AddOp("equal", {{"X", {caseCondXVarName}}, {"Y", {"caseToExecute"}}}, + {{"Out", {caseCondName}}}, {}, 
casesBlock); + + AddOp("conditional_block", {{"X", {caseCondName}}, {"Params", {}}}, + {{"Out", {}}, {"Scope", {"step_scope"}}}, + {{"sub_block", caseBlock}, {"is_scalar_condition", true}}, casesBlock); +} + +void AddFibonacciSelect(Scope *scope, p::CPUPlace *place, ProgramDesc *program, + BlockDesc *parentBlock, std::string dataChanName, + std::string quitChanName) { + BlockDesc *whileBlock = program->AppendBlock(*parentBlock); + + CreateVariable(*scope, *place, "whileExitCond", true); + CreateVariable(*scope, *place, "caseToExecute", -1); + CreateVariable(*scope, *place, "case1var", 0); + + CreateVariable(*scope, *place, "xtemp", 0); + + // TODO(thuan): Need to create fibXToSend, since channel send moves the actual + // data, + // which causes the data to be no longer accessible to do the fib calculation + // TODO(abhinav): Change channel send to do a copy instead of a move! + CreateVariable(*scope, *place, "fibXToSend", 0); + + CreateVariable(*scope, *place, "fibX", 0); + CreateVariable(*scope, *place, "fibY", 1); + CreateVariable(*scope, *place, "quitVar", 0); + + BlockDesc *casesBlock = program->AppendBlock(*whileBlock); + std::function f = [](BlockDesc *caseBlock) {}; + + // TODO(thuan): Remove this once we change channel send to do a copy instead + // of move + AddOp("assign", {{"X", {"fibX"}}}, {{"Out", {"fibXToSend"}}}, {}, whileBlock); + + // Case 0: Send to dataChanName + std::function case0Func = [&]( + BlockDesc *caseBlock, Scope *scope) { + AddOp("assign", {{"X", {"fibX"}}}, {{"Out", {"xtemp"}}}, {}, caseBlock); + AddOp("assign", {{"X", {"fibY"}}}, {{"Out", {"fibX"}}}, {}, caseBlock); + AddOp("elementwise_add", {{"X", {"xtemp"}}, {"Y", {"fibY"}}}, + {{"Out", {"fibY"}}}, {}, caseBlock); + }; + AddCase(program, scope, place, casesBlock, 0, 1, dataChanName, "fibXToSend", + case0Func); + std::string case0Config = + std::string("0,1,") + dataChanName + std::string(",fibXToSend"); + + // Case 1: Receive from quitChanName + std::function case2Func = [&]( + BlockDesc *caseBlock, Scope *scope) { + // Exit the while loop after we receive from quit channel. 
+ // We assign a false to "whileExitCond" variable, which will + // break out of while_op loop + CreateVariable(*scope, *place, "whileFalse", false); + AddOp("assign", {{"X", {"whileFalse"}}}, {{"Out", {"whileExitCond"}}}, {}, + caseBlock); + }; + AddCase(program, scope, place, casesBlock, 1, 2, quitChanName, "quitVar", + case2Func); + std::string case1Config = + std::string("1,2,") + quitChanName + std::string(",quitVar"); + + // Select block + AddOp("select", {{"X", {dataChanName, quitChanName}}, + {"case_to_execute", {"caseToExecute"}}}, + {{"Out", {}}}, + {{"sub_block", casesBlock}, + {"cases", std::vector{case0Config, case1Config}}}, + whileBlock); + + scope->Var("stepScopes"); + AddOp("while", + {{"X", {dataChanName, quitChanName}}, {"Condition", {"whileExitCond"}}}, + {{"Out", {}}, {"StepScopes", {"stepScopes"}}}, + {{"sub_block", whileBlock}}, parentBlock); +} + TEST(Concurrency, Go_Op) { Scope scope; p::CPUPlace place; // Initialize scope variables - InitTensorsInScope(scope, place); + p::CPUDeviceContext ctx(place); + + // Create channel variable + scope.Var("Channel"); + + // Create Variables, x0 will be put into channel, + // result will be pulled from channel + CreateVariable(scope, place, "Status", false); + CreateVariable(scope, place, "x0", 99); + CreateVariable(scope, place, "result", 0); framework::Executor executor(place); ProgramDesc program; @@ -112,11 +210,83 @@ TEST(Concurrency, Go_Op) { executor.Run(program, &scope, 0, true, true); - // After we call executor.run, the Go operator should do a channel_send to set - // the - // "result" variable to 99 + // After we call executor.run, the Go operator should do a channel_send to + // set the "result" variable to 99. auto *finalData = tensor.data(); EXPECT_EQ(finalData[0], 99); } + +/** + * This test implements the fibonacci function using go_op and select_op + */ +TEST(Concurrency, Select) { + Scope scope; + p::CPUPlace place; + + // Initialize scope variables + p::CPUDeviceContext ctx(place); + + CreateVariable(scope, place, "Status", false); + CreateVariable(scope, place, "result", 0); + CreateVariable(scope, place, "currentXFib", 0); + + framework::Executor executor(place); + ProgramDesc program; + BlockDesc *block = program.MutableBlock(0); + + // Create channel OP + std::string dataChanName = "Channel"; + scope.Var(dataChanName); + AddOp("channel_create", {}, {{"Out", {dataChanName}}}, + {{"capacity", 0}, {"data_type", f::proto::VarType::LOD_TENSOR}}, block); + + std::string quitChanName = "Quit"; + scope.Var(quitChanName); + AddOp("channel_create", {}, {{"Out", {quitChanName}}}, + {{"capacity", 0}, {"data_type", f::proto::VarType::LOD_TENSOR}}, block); + + // Create Go Op routine, which loops 10 times over fibonacci sequence + CreateVariable(scope, place, "xReceiveVar", 0); + + BlockDesc *goOpBlock = program.AppendBlock(program.Block(0)); + for (int i = 0; i < 10; ++i) { + AddOp("channel_recv", {{"Channel", {dataChanName}}}, + {{"Status", {"Status"}}, {"Out", {"currentXFib"}}}, {}, goOpBlock); + AddOp("print", {{"In", {"currentXFib"}}}, {{"Out", {"currentXFib"}}}, + {{"first_n", 100}, + {"summarize", -1}, + {"print_tensor_name", false}, + {"print_tensor_type", true}, + {"print_tensor_shape", false}, + {"print_tensor_lod", false}, + {"print_phase", std::string("FORWARD")}, + {"message", std::string("X: ")}}, + goOpBlock); + } + + CreateVariable(scope, place, "quitSignal", 0); + AddOp("channel_send", {{"Channel", {quitChanName}}, {"X", {"quitSignal"}}}, + {{"Status", {"Status"}}}, {}, goOpBlock); + + // Create Go Op 
+ AddOp("go", {{"X", {dataChanName, quitChanName}}}, {}, + {{"sub_block", goOpBlock}}, block); + + AddFibonacciSelect(&scope, &place, &program, block, dataChanName, + quitChanName); + + // Create Channel Close Op + AddOp("channel_close", {{"Channel", {dataChanName}}}, {}, {}, block); + AddOp("channel_close", {{"Channel", {quitChanName}}}, {}, {}, block); + + executor.Run(program, &scope, 0, true, true); + + // After we call executor.run, "result" variable should be equal to 34 + // (which is 10 loops through fibonacci sequence) + const LoDTensor &tensor = (scope.FindVar("currentXFib"))->Get(); + auto *finalData = tensor.data(); + EXPECT_EQ(finalData[0], 34); +} + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 5cae38b2a857b2037f0e5ae4da50d1591da0c11a..0b171e1dcfa90c3ad8f5a9ace8a9342baaf76e61 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -14,17 +14,14 @@ limitations under the License. */ #include "paddle/fluid/framework/executor.h" -#include - -#include "gflags/gflags.h" #include "paddle/fluid/framework/channel.h" #include "paddle/fluid/framework/feed_fetch_method.h" -#include "paddle/fluid/framework/feed_fetch_type.h" #include "paddle/fluid/framework/lod_rank_table.h" #include "paddle/fluid/framework/lod_tensor_array.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/reader.h" #include "paddle/fluid/platform/place.h" +#include "paddle/fluid/platform/profiler.h" DECLARE_bool(benchmark); DEFINE_bool(check_nan_inf, false, @@ -33,15 +30,19 @@ DEFINE_bool(check_nan_inf, false, namespace paddle { namespace framework { - -struct ExecutorPrepareContext { - ExecutorPrepareContext(const framework::ProgramDesc& prog, size_t block_id) - : prog_(prog), block_id_(block_id) {} - - framework::ProgramDesc prog_; - size_t block_id_; - std::vector> ops_; -}; +namespace { +// block id starts from 0. This id is used to represent the codeblock +// wrapping the first block 0. +int kProgramId = -1; +} // namespace + +ExecutorPrepareContext::ExecutorPrepareContext( + const framework::ProgramDesc& prog, size_t block_id) + : prog_(prog), block_id_(block_id) {} + +ExecutorPrepareContext::~ExecutorPrepareContext() { + VLOG(5) << "destroy ExecutorPrepareContext"; +} Executor::Executor(const platform::Place& place) : place_(place) {} @@ -94,9 +95,9 @@ static void CheckTensorNANOrInf(const std::string& name, void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id, bool create_local_scope, bool create_vars) { - auto* ctx = Prepare(pdesc, block_id); - RunPreparedContext(ctx, scope, create_local_scope, create_vars); - delete ctx; + platform::RecordBlock b(block_id); + auto ctx = Prepare(pdesc, block_id); + RunPreparedContext(ctx.get(), scope, create_local_scope, create_vars); } // Check whether the block already has feed operators and feed_holder. @@ -106,10 +107,11 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id, // and feed_holder_name. Raise exception when any mismatch is found. // Return true if the block has feed operators and holder of matching info. 
static bool has_feed_operators( - BlockDesc* block, std::map<std::string, const LoDTensor*>& feed_targets, + const BlockDesc& block, + std::map<std::string, const LoDTensor*>& feed_targets, const std::string& feed_holder_name) { size_t feed_count = 0; - for (auto* op : block->AllOps()) { + for (auto* op : block.AllOps()) { if (op->Type() == kFeedOpType) { feed_count++; PADDLE_ENFORCE_EQ(op->Input("X")[0], feed_holder_name, @@ -128,7 +130,7 @@ static bool has_feed_operators( "The number of feed operators should match 'feed_targets'"); // When feed operators are present, so should be feed_holder - auto var = block->FindVar(feed_holder_name); + auto var = block.FindVar(feed_holder_name); PADDLE_ENFORCE_NOT_NULL(var, "Block should already have a '%s' variable", feed_holder_name); PADDLE_ENFORCE_EQ(var->GetType(), proto::VarType::FEED_MINIBATCH, @@ -146,10 +148,10 @@ static bool has_feed_operators( // and fetch_holder_name. Raise exception when any mismatch is found. // Return true if the block has fetch operators and holder of matching info. static bool has_fetch_operators( - BlockDesc* block, std::map<std::string, LoDTensor*>& fetch_targets, + const BlockDesc& block, std::map<std::string, LoDTensor*>& fetch_targets, const std::string& fetch_holder_name) { size_t fetch_count = 0; - for (auto* op : block->AllOps()) { + for (auto* op : block.AllOps()) { if (op->Type() == kFetchOpType) { fetch_count++; PADDLE_ENFORCE_EQ(op->Output("Out")[0], fetch_holder_name, @@ -168,7 +170,7 @@ static bool has_fetch_operators( "The number of fetch operators should match 'fetch_targets'"); // When fetch operators are present, so should be fetch_holder - auto var = block->FindVar(fetch_holder_name); + auto var = block.FindVar(fetch_holder_name); PADDLE_ENFORCE_NOT_NULL(var, "Block should already have a '%s' variable", fetch_holder_name); PADDLE_ENFORCE_EQ(var->GetType(), proto::VarType::FETCH_LIST, @@ -183,11 +185,21 @@ void Executor::Run(const ProgramDesc& program, Scope* scope, std::map<std::string, const LoDTensor*>& feed_targets, std::map<std::string, LoDTensor*>& fetch_targets, const std::string& feed_holder_name, - const std::string& fetch_holder_name) { - auto* copy_program = new ProgramDesc(program); + const std::string& fetch_holder_name, bool create_vars) { + platform::RecordBlock b(kProgramId); + bool has_feed_ops = + has_feed_operators(program.Block(0), feed_targets, feed_holder_name); + bool has_fetch_ops = + has_fetch_operators(program.Block(0), fetch_targets, fetch_holder_name); + + ProgramDesc* copy_program = const_cast<ProgramDesc*>(&program); + // Keep any copied program alive for the rest of this function; taking + // .get() from a temporary unique_ptr would leave copy_program dangling. + std::unique_ptr<ProgramDesc> copy_program_guard; + if (!has_feed_ops || !has_fetch_ops) { + copy_program_guard.reset(new ProgramDesc(program)); + copy_program = copy_program_guard.get(); + } + auto* global_block = copy_program->MutableBlock(0); - if (!has_feed_operators(global_block, feed_targets, feed_holder_name)) { + if (!has_feed_ops) { // create feed_holder variable auto* feed_holder = global_block->Var(feed_holder_name); feed_holder->SetType(proto::VarType::FEED_MINIBATCH); @@ -220,7 +232,7 @@ void Executor::Run(const ProgramDesc& program, Scope* scope, } } - if (!has_fetch_operators(global_block, fetch_targets, fetch_holder_name)) { + if (!has_fetch_ops) { // create fetch_holder variable auto* fetch_holder = global_block->Var(fetch_holder_name); fetch_holder->SetType(proto::VarType::FETCH_LIST); @@ -243,7 +255,7 @@ void Executor::Run(const ProgramDesc& program, Scope* scope, } } - Run(*copy_program, scope, 0, true, true); + Run(*copy_program, scope, 0, create_vars, create_vars); // obtain the data of fetch_targets from fetch_holder for (auto* op : global_block->AllOps()) { @@ -254,19 +266,17 @@ void Executor::Run(const ProgramDesc& program, Scope* scope, GetFetchVariable(*scope, fetch_holder_name, idx); } } -
- delete copy_program; } -ExecutorPrepareContext* Executor::Prepare(const ProgramDesc& program, - int block_id) { +std::unique_ptr Executor::Prepare( + const ProgramDesc& program, int block_id) { auto* ctx = new ExecutorPrepareContext(program, block_id); PADDLE_ENFORCE_LT(static_cast(block_id), program.Size()); auto& block = program.Block(block_id); for (auto& op_desc : block.AllOps()) { ctx->ops_.push_back(OpRegistry::CreateOp(*op_desc)); } - return ctx; + return std::unique_ptr(ctx); } void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, @@ -305,9 +315,8 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, } // if (create_vars) for (auto& op : ctx->ops_) { - VLOG(4) << place_ << " " << op->DebugStringEx(local_scope); - op->Run(*local_scope, place_); VLOG(3) << place_ << " " << op->DebugStringEx(local_scope); + op->Run(*local_scope, place_); if (FLAGS_benchmark) { VLOG(2) << "Memory used after operator " + op->Type() + " running: " diff --git a/paddle/fluid/framework/executor.h b/paddle/fluid/framework/executor.h index 28ce3315154cea45412984df4daf7385ce2cf572..d8dd82469af06a4c5c6a37d2249ee23413884a91 100644 --- a/paddle/fluid/framework/executor.h +++ b/paddle/fluid/framework/executor.h @@ -22,7 +22,16 @@ limitations under the License. */ namespace paddle { namespace framework { -struct ExecutorPrepareContext; + +struct ExecutorPrepareContext { + ExecutorPrepareContext(const framework::ProgramDesc& prog, size_t block_id); + ~ExecutorPrepareContext(); + + const framework::ProgramDesc& prog_; + size_t block_id_; + std::vector> ops_; +}; + class Executor { public: // TODO(dzhwinter) : Do not rely on this function, it will be removed @@ -45,10 +54,11 @@ class Executor { std::map& feed_targets, std::map& fetch_targets, const std::string& feed_holder_name = "feed", - const std::string& fetch_holder_name = "fetch"); + const std::string& fetch_holder_name = "fetch", + bool create_vars = true); - static ExecutorPrepareContext* Prepare(const ProgramDesc& program, - int block_id); + static std::unique_ptr Prepare( + const ProgramDesc& program, int block_id); void RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, bool create_local_scope = true, diff --git a/paddle/fluid/framework/init.cc b/paddle/fluid/framework/init.cc index 2e0a224ff5df749fd8c809dc88a85a1643542abf..3c0d93642ac41e8d90f9a248e81cea7a4fe12293 100644 --- a/paddle/fluid/framework/init.cc +++ b/paddle/fluid/framework/init.cc @@ -26,6 +26,7 @@ namespace paddle { namespace framework { std::once_flag gflags_init_flag; +std::once_flag p2p_init_flag; void InitGflags(std::vector &argv) { std::call_once(gflags_init_flag, [&]() { @@ -42,6 +43,27 @@ void InitGflags(std::vector &argv) { }); } +void InitP2P(int count) { +#ifdef PADDLE_WITH_CUDA + std::call_once(p2p_init_flag, [&]() { + for (int i = 0; i < count; ++i) { + for (int j = 0; j < count; ++j) { + if (i == j) continue; + int can_acess = -1; + PADDLE_ENFORCE(cudaDeviceCanAccessPeer(&can_acess, i, j), + "Failed to test P2P access."); + if (can_acess != 1) { + LOG(WARNING) << "Cannot enable P2P access from " << i << " to " << j; + } else { + cudaSetDevice(i); + cudaDeviceEnablePeerAccess(j, 0); + } + } + } + }); +#endif +} + void InitDevices() { /*Init all avaiable devices by default */ @@ -63,7 +85,7 @@ void InitDevices() { for (int i = 0; i < count; ++i) { places.emplace_back(platform::CUDAPlace(i)); } - + InitP2P(count); platform::DeviceContextPool::Init(places); } diff --git a/paddle/fluid/framework/lod_tensor.cc 
b/paddle/fluid/framework/lod_tensor.cc index e2f4e9cad1996578b7c51257785e1273d126f80f..8155cb55a468a09320b1196b49fc3e34cea261b1 100644 --- a/paddle/fluid/framework/lod_tensor.cc +++ b/paddle/fluid/framework/lod_tensor.cc @@ -19,6 +19,9 @@ limitations under the License. */ #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/memory/memory.h" +#include "paddle/fluid/recordio/scanner.h" +#include "paddle/fluid/recordio/writer.h" + #include #include #include @@ -291,6 +294,31 @@ void DeserializeFromStream(std::istream &is, LoDTensor *tensor, TensorFromStream(is, static_cast(tensor), dev_ctx); } +void WriteToRecordIO(recordio::Writer &writer, + const std::vector &tensor, + const platform::DeviceContext &dev_ctx) { + std::stringstream buffer; + size_t sz = tensor.size(); + buffer.write(reinterpret_cast(&sz), sizeof(uint32_t)); + for (auto &each : tensor) { + SerializeToStream(buffer, each, dev_ctx); + } + writer.Write(buffer.str()); +} + +std::vector ReadFromRecordIO( + recordio::Scanner &scanner, const platform::DeviceContext &dev_ctx) { + std::istringstream sin(scanner.Next()); + uint32_t sz; + sin.read(reinterpret_cast(&sz), sizeof(uint32_t)); + std::vector result; + result.resize(sz); + for (uint32_t i = 0; i < sz; ++i) { + DeserializeFromStream(sin, &result[i], dev_ctx); + } + return result; +} + std::vector LoDTensor::SplitLoDTensor( const std::vector places) const { check_memory_size(); diff --git a/paddle/fluid/framework/lod_tensor.h b/paddle/fluid/framework/lod_tensor.h index 94d5a6e9fd9b68d3d8230a8c258316efadda5a05..dee505fee0dccd8d60bb290a8bec4df243e504a2 100644 --- a/paddle/fluid/framework/lod_tensor.h +++ b/paddle/fluid/framework/lod_tensor.h @@ -29,6 +29,12 @@ limitations under the License. */ #include "paddle/fluid/platform/place.h" namespace paddle { + +namespace recordio { +class Writer; +class Scanner; +} + namespace framework { /* @@ -209,5 +215,12 @@ void SerializeToStream(std::ostream& os, const LoDTensor& tensor, void DeserializeFromStream(std::istream& is, LoDTensor* tensor, const platform::DeviceContext& dev_ctx); +extern void WriteToRecordIO(recordio::Writer& writer, + const std::vector& tensor, + const platform::DeviceContext& dev_ctx); + +extern std::vector ReadFromRecordIO( + recordio::Scanner& scanner, const platform::DeviceContext& dev_ctx); + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/lod_tensor_test.cc b/paddle/fluid/framework/lod_tensor_test.cc index 5e135192ce774ab5c351b89164be9d7600ae3640..e691e29383d4842b80769021e0e494967d38e9bb 100644 --- a/paddle/fluid/framework/lod_tensor_test.cc +++ b/paddle/fluid/framework/lod_tensor_test.cc @@ -14,6 +14,9 @@ #include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/recordio/scanner.h" +#include "paddle/fluid/recordio/writer.h" + #include #include #include @@ -224,5 +227,43 @@ TEST(LoD, CheckAbsLoD) { abs_lod0.push_back(std::vector({0})); ASSERT_FALSE(CheckAbsLoD(abs_lod0)); } + +TEST(LoDTensor, RecordIO) { + LoDTensor tensor; + int* tmp = tensor.mutable_data(make_ddim({4, 5}), platform::CPUPlace()); + for (int i = 0; i < 20; ++i) { + tmp[i] = i; + } + + std::stringstream* stream = new std::stringstream(); + auto& ctx = + *platform::DeviceContextPool::Instance().Get(platform::CPUPlace()); + { + recordio::Writer writer(stream, recordio::Compressor::kSnappy); + WriteToRecordIO(writer, {tensor, tensor}, ctx); + WriteToRecordIO(writer, {tensor, tensor}, ctx); + writer.Flush(); + } + + auto assert_tensor_ok = [](const LoDTensor& tensor) { + for (int i = 0; i < 20; 
++i) { + ASSERT_EQ(tensor.data()[i], i); + } + }; + + { + std::unique_ptr stream_ptr(stream); + recordio::Scanner scanner(std::move(stream_ptr)); + auto tensors = ReadFromRecordIO(scanner, ctx); + ASSERT_EQ(tensors.size(), 2); + assert_tensor_ok(tensors[0]); + assert_tensor_ok(tensors[1]); + tensors = ReadFromRecordIO(scanner, ctx); + ASSERT_EQ(tensors.size(), 2); + assert_tensor_ok(tensors[0]); + assert_tensor_ok(tensors[1]); + } +} + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/mixed_vector.h b/paddle/fluid/framework/mixed_vector.h index 6a6fa538718837a958b7d82c37f583f62f4bf96e..d99a15547b77a0e0d71b14bd1c798cd1485720b0 100644 --- a/paddle/fluid/framework/mixed_vector.h +++ b/paddle/fluid/framework/mixed_vector.h @@ -176,7 +176,7 @@ class Vector { // resize the vector void resize(size_t size) { - if (size + 1 < capacity()) { + if (size + 1 <= capacity()) { size_ = size; } else { MutableCPU(); diff --git a/paddle/fluid/framework/mixed_vector_test.cu b/paddle/fluid/framework/mixed_vector_test.cu index 4bf78499f2fda2d2631e05ddcbbd0bc49498af1a..d57f82510833d6a0cea7009cf1f0b49543812f8d 100644 --- a/paddle/fluid/framework/mixed_vector_test.cu +++ b/paddle/fluid/framework/mixed_vector_test.cu @@ -104,3 +104,11 @@ TEST(mixed_vector, ForEach) { for (auto& v : tmp) { } } + +TEST(mixed_vector, Reserve) { + paddle::framework::Vector vec; + vec.reserve(1); + vec.push_back(0); + vec.push_back(0); + vec.push_back(0); +} diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index 371c2fad97b1efd06eea9ac631122f194e65d656..b39a1164dbd9877d9f45cc6415d74f930921a42f 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -74,9 +74,6 @@ void OperatorBase::Run(const Scope& scope, const platform::Place& place) { platform::SetDeviceId(dev_id); #endif } - // profile - auto* dev_ctx = platform::DeviceContextPool::Instance().Get(place); - platform::RecordEvent record_event(Type(), dev_ctx); RunImpl(scope, place); } @@ -445,15 +442,7 @@ class RuntimeInferShapeContext : public InferShapeContext { } std::vector GetRepeatedDims(const std::string& name) const override { - Variable* var = scope_.FindVar(name); - if (var->IsType()) { - return var->Get().shapes(); - } else { - PADDLE_THROW( - "Only ReaderHolder support 'GetRepeatedDims', but Variable %s's " - "type_id is %s.", - name, var->Type().name()); - } + PADDLE_THROW("Only compile time support this method"); } void SetDim(const std::string& name, const DDim& dim) override { @@ -470,15 +459,7 @@ class RuntimeInferShapeContext : public InferShapeContext { void SetRepeatedDims(const std::string& name, const std::vector& dims) override { - Variable* var = scope_.FindVar(name); - if (var->IsType()) { - var->GetMutable()->set_shapes(dims); - } else { - PADDLE_THROW( - "Only ReaderHolder support 'SetRepeatedDims', but Variable %s's " - "type_id is %s.", - name, var->Type().name()); - } + PADDLE_THROW("Only compile time support this method"); } proto::VarType::Type GetVarType(const std::string& name) const override { @@ -501,6 +482,10 @@ void OperatorWithKernel::RunImpl(const Scope& scope, this->InferShape(&infer_shape_ctx); platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto* dev_ctx = pool.Get(place); + + // For profiling, don't move out of this function because that will result + // in the failure of multi-GPU profiling. + platform::RecordEvent record_event(Type(), dev_ctx); // check if op[type] has kernel registered. 
auto& all_op_kernels = AllOpKernels(); auto kernels_iter = all_op_kernels.find(type_); diff --git a/paddle/fluid/framework/reader.cc b/paddle/fluid/framework/reader.cc index 91879d6d45868bb37ca44baafb8b0e8677cd6d1a..fa00c08e0d5791ee1187aed38b4d140564b7c97d 100644 --- a/paddle/fluid/framework/reader.cc +++ b/paddle/fluid/framework/reader.cc @@ -16,14 +16,22 @@ namespace paddle { namespace framework { +ReaderBase::~ReaderBase() {} -DDim ReaderBase::shape(size_t idx) const { - PADDLE_ENFORCE_LT( - idx, shapes_.size(), - "Cannot get the %d'th shape, 'shapes_' only has %d elements.", idx, - shapes_.size()); - return shapes_[idx]; -} +FileReader::FileReader(const std::vector &dims) : dims_(dims) {} + +void FileReader::ReadNext(std::vector *out) { + ReadNextImpl(out); + PADDLE_ENFORCE_EQ(out->size(), dims_.size()); + for (size_t i = 0; i < dims_.size(); ++i) { + auto &actual = out->at(i).dims(); + auto &expect = dims_[i]; + PADDLE_ENFORCE_EQ(actual.size(), expect.size()); + for (int j = 0; j < actual.size(); ++j) { + PADDLE_ENFORCE(actual[i] == expect[i] || expect[i] == -1); + } + } +} } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/reader.h b/paddle/fluid/framework/reader.h index e820c3d07e85fd1dea9080786b48ad031330ee00..3573b99becf6d657c680c5fec0bda4bdde5dd7a2 100644 --- a/paddle/fluid/framework/reader.h +++ b/paddle/fluid/framework/reader.h @@ -16,47 +16,53 @@ #include "paddle/fluid/framework/ddim.h" #include "paddle/fluid/framework/lod_tensor_array.h" +#include "paddle/fluid/platform/place.h" + +#include +#include +#include namespace paddle { namespace framework { class ReaderBase { public: - explicit ReaderBase(const std::vector& shapes) : shapes_(shapes) { - PADDLE_ENFORCE(!shapes_.empty()); - } virtual void ReadNext(std::vector* out) = 0; virtual void ReInit() = 0; - DDim shape(size_t idx) const; - std::vector shapes() const { return shapes_; } - void set_shapes(const std::vector& shapes) { shapes_ = shapes; } - - virtual ~ReaderBase() {} - - protected: - std::vector shapes_; -}; + virtual bool HasNext() const = 0; -class FileReader : public ReaderBase { - public: - explicit FileReader(const std::vector& shapes) : ReaderBase(shapes) {} + virtual ~ReaderBase(); }; class DecoratedReader : public ReaderBase { public: - explicit DecoratedReader(ReaderBase* reader) - : ReaderBase(reader->shapes()), reader_(reader) { + explicit DecoratedReader(ReaderBase* reader) : ReaderBase(), reader_(reader) { PADDLE_ENFORCE_NOT_NULL(reader_); } void ReInit() override { reader_->ReInit(); } + bool HasNext() const override { return reader_->HasNext(); } + protected: ReaderBase* reader_; }; +class FileReader : public ReaderBase { + public: + explicit FileReader(const std::vector& dims); + + void ReadNext(std::vector* out) override; + + protected: + virtual void ReadNextImpl(std::vector* out) = 0; + + private: + std::vector dims_; +}; + // The ReaderHolder is used as reader' unified wrapper, // making it easier to access different type reader in Variables. 
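// A ReaderHolder owns its underlying ReaderBase through a unique_ptr and // simply forwards ReadNext/ReInit/HasNext to it, so a Variable can hold a // plain FileReader or any chain of DecoratedReaders behind a single type.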
class ReaderHolder { @@ -74,18 +80,7 @@ class ReaderHolder { reader_->ReInit(); } - DDim shape(size_t idx) const { - PADDLE_ENFORCE_NOT_NULL(reader_); - return reader_->shape(idx); - } - std::vector shapes() const { - PADDLE_ENFORCE_NOT_NULL(reader_); - return reader_->shapes(); - } - void set_shapes(const std::vector& shapes) { - PADDLE_ENFORCE_NOT_NULL(reader_); - reader_->set_shapes(shapes); - } + bool HasNext() const { return reader_->HasNext(); } private: std::unique_ptr reader_; diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt index 17ccca8cdcbcaabaddbbc0ca1d3ca4fdf054b0fb..aff427310f15be72f5c8d0fa1537ffa6bbe2881d 100644 --- a/paddle/fluid/inference/CMakeLists.txt +++ b/paddle/fluid/inference/CMakeLists.txt @@ -13,6 +13,11 @@ cc_library(paddle_fluid_shared SHARED SRCS io.cc DEPS ARCHIVE_START ${GLOB_OP_LIB} ${FLUID_CORE_MODULES} ARCHIVE_END) set_target_properties(paddle_fluid_shared PROPERTIES OUTPUT_NAME paddle_fluid) +if(NOT APPLE) + # TODO(liuyiqun): Temporarily disable the link flag because it is not support on Mac. + set(LINK_FLAGS "-Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/paddle_fluid.map") + set_target_properties(paddle_fluid_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}") +endif() if(WITH_TESTING) add_subdirectory(tests/book) diff --git a/paddle/fluid/inference/paddle_fluid.map b/paddle/fluid/inference/paddle_fluid.map new file mode 100644 index 0000000000000000000000000000000000000000..5203784dc1fcb672eb6a26d9dfd3ffbe02e08038 --- /dev/null +++ b/paddle/fluid/inference/paddle_fluid.map @@ -0,0 +1,6 @@ +{ + global: + *paddle*; + local: + *; +}; diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index 625e0f7561899d30b40f9daa56f743a37bdaa27f..c0245379ac481d922ee936c75bfc6b63a81be5fd 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -12,6 +12,8 @@ function(op_library TARGET) set(OP_LIBRARY ${TARGET} ${OP_LIBRARY} PARENT_SCOPE) set(cc_srcs) set(cu_srcs) + set(hip_cu_srcs) + set(miopen_hip_cc_srcs) set(cu_cc_srcs) set(cudnn_cu_cc_srcs) set(CUDNN_FILE) @@ -36,10 +38,19 @@ function(op_library TARGET) if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.cu) list(APPEND cu_srcs ${TARGET}.cu) endif() + if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.hip.cu) + list(APPEND hip_cu_srcs ${TARGET}.hip.cu) + endif() string(REPLACE "_op" "_cudnn_op" CUDNN_FILE "${TARGET}") if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${CUDNN_FILE}.cu.cc) list(APPEND cudnn_cu_cc_srcs ${CUDNN_FILE}.cu.cc) endif() + if(WITH_AMD_GPU) + string(REPLACE "_op" "_miopen_op" MIOPEN_FILE "${TARGET}") + if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${MIOPEN_FILE}.hip.cc) + list(APPEND miopen_hip_cc_srcs ${MIOPEN_FILE}.hip.cc) + endif() + endif() if(WITH_MKLDNN) string(REPLACE "_op" "_mkldnn_op" MKLDNN_FILE "${TARGET}") if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${MKLDNN_FILE}.cc) @@ -48,10 +59,14 @@ function(op_library TARGET) endif() else() foreach(src ${op_library_SRCS}) - if (${src} MATCHES ".*\\.cu$") + if (${src} MATCHES ".*\\.hip.cu$") + list(APPEND hip_cu_srcs ${src}) + elseif (${src} MATCHES ".*\\.cu$") list(APPEND cu_srcs ${src}) elseif(${src} MATCHES ".*_cudnn_op.cu.cc$") list(APPEND cudnn_cu_cc_srcs ${src}) + elseif(WITH_AMD_GPU AND ${src} MATCHES ".*_miopen_op.hip.cc$") + list(APPEND miopen_hip_cc_srcs ${src}) elseif(WITH_MKLDNN AND ${src} MATCHES ".*_mkldnn_op.cc$") list(APPEND mkldnn_cc_srcs ${src}) elseif(${src} MATCHES ".*\\.cu.cc$") @@ -76,6 +91,9 @@ function(op_library TARGET) if 
(WITH_GPU) nv_library(${TARGET} SRCS ${cc_srcs} ${cu_cc_srcs} ${cudnn_cu_cc_srcs} ${mkldnn_cc_srcs} ${cu_srcs} DEPS ${op_library_DEPS} ${op_common_deps}) + elseif (WITH_AMD_GPU) + hip_library(${TARGET} SRCS ${cc_srcs} ${hip_cu_srcs} ${miopen_hip_cc_srcs} ${mkldnn_cc_srcs} DEPS ${op_library_DEPS} + ${op_common_deps}) else() cc_library(${TARGET} SRCS ${cc_srcs} ${mkldnn_cc_srcs} DEPS ${op_library_DEPS} ${op_common_deps}) @@ -88,7 +106,7 @@ function(op_library TARGET) endif() endforeach() - # The registration of USE_OP, please refer to paddle/framework/op_registry.h. + # The registration of USE_OP, please refer to paddle/fluid/framework/op_registry.h. # Note that it's enough to just adding one operator to pybind in a *_op.cc file. # And for detail pybind information, please see generated paddle/pybind/pybind.h. file(READ ${TARGET}.cc TARGET_CONTENT) @@ -114,7 +132,10 @@ function(op_library TARGET) list(LENGTH cu_srcs cu_srcs_len) list(LENGTH cu_cc_srcs cu_cc_srcs_len) list(LENGTH mkldnn_cc_srcs mkldnn_cc_srcs_len) - if (${pybind_flag} EQUAL 0 AND ${mkldnn_cc_srcs_len} EQUAL 0 AND ${cu_srcs_len} EQUAL 0 AND ${cu_cc_srcs_len} EQUAL 0) + list(LENGTH hip_cu_srcs hip_cu_srcs_len) + list(LENGTH miopen_hip_cc_srcs miopen_hip_cc_srcs_len) + if (${pybind_flag} EQUAL 0 AND ${mkldnn_cc_srcs_len} EQUAL 0 AND ${cu_srcs_len} EQUAL 0 AND ${cu_cc_srcs_len} EQUAL 0 AND + ${hip_cu_srcs_len} EQUAL 0 AND ${miopen_hip_cc_srcs_len} EQUAL 0) file(APPEND ${pybind_file} "USE_CPU_ONLY_OP(${TARGET});\n") set(pybind_flag 1) endif() @@ -125,6 +146,11 @@ function(op_library TARGET) file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(${TARGET}, CUDNN);\n") endif() + # pybind USE_OP_DEVICE_KERNEL for MIOPEN + if (WITH_AMD_GPU AND ${miopen_hip_cc_srcs_len} GREATER 0) + file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(${TARGET}, MIOPEN);\n") + endif() + # pybind USE_OP_DEVICE_KERNEL for MKLDNN if (WITH_MKLDNN AND ${mkldnn_cc_srcs_len} GREATER 0) file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(${TARGET}, MKLDNN);\n") @@ -165,7 +191,6 @@ op_library(cond_op DEPS framework_proto tensor net_op) op_library(cross_entropy_op DEPS cross_entropy) op_library(softmax_with_cross_entropy_op DEPS cross_entropy softmax) op_library(softmax_op DEPS softmax) -op_library(detection_output_op DEPS softmax) op_library(sequence_softmax_op DEPS softmax) op_library(sum_op DEPS selected_rows_functor) op_library(sgd_op DEPS selected_rows_functor) @@ -203,6 +228,11 @@ op_library(save_combine_op DEPS lod_tensor) op_library(load_combine_op DEPS lod_tensor) op_library(concat_op DEPS concat) +# FIXME(thuan): Move CSP operators to paddle/fluid/framework/operators/concurrency +add_subdirectory(concurrency) +op_library(channel_send_op DEPS concurrency) +op_library(channel_recv_op DEPS concurrency) + list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS}) foreach(src ${GENERAL_OPS}) op_library(${src}) diff --git a/paddle/fluid/operators/activation_op.cc b/paddle/fluid/operators/activation_op.cc index d74c47b981e51f12d99098818c71f3f6ec455d98..ec637658c03ad94624ee9a4f5def6a84387d293e 100644 --- a/paddle/fluid/operators/activation_op.cc +++ b/paddle/fluid/operators/activation_op.cc @@ -613,3 +613,14 @@ REGISTER_OP(swish, ops::ActivationOp, ops::SwishOpMaker, swish_grad, ops::grad_functor>); FOR_EACH_KERNEL_FUNCTOR(REGISTER_ACTIVATION_CPU_KERNEL); + +REGISTER_OP_CPU_KERNEL(relu, + ops::ActivationKernel>, + ops::ActivationKernel>); +REGISTER_OP_CPU_KERNEL( + relu_grad, ops::ActivationGradKernel>, + ops::ActivationGradKernel>); diff --git a/paddle/fluid/operators/activation_op.cu 
b/paddle/fluid/operators/activation_op.cu index b2633d017623c3a6a3bab2b416009d6d7c8fc1d4..7709a551dc155e1f3cd2a19a689999608f497beb 100644 --- a/paddle/fluid/operators/activation_op.cu +++ b/paddle/fluid/operators/activation_op.cu @@ -14,6 +14,7 @@ limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/fluid/operators/activation_op.h" +#include "paddle/fluid/platform/float16.h" namespace ops = paddle::operators; @@ -31,3 +32,16 @@ namespace ops = paddle::operators; ops::grad_functor>); FOR_EACH_KERNEL_FUNCTOR(REGISTER_ACTIVATION_CUDA_KERNEL); + +REGISTER_OP_CUDA_KERNEL( + relu, ops::ActivationKernel>, + ops::ActivationKernel>, + ops::ActivationKernel>); +REGISTER_OP_CUDA_KERNEL( + relu_grad, ops::ActivationGradKernel>, + ops::ActivationGradKernel>); diff --git a/paddle/fluid/operators/activation_op.h b/paddle/fluid/operators/activation_op.h index 8f791a6ca81c13a92fd8adf0d1620203bd4cf7d6..b95e793586219b7c413d0c7adb835081874d9363 100644 --- a/paddle/fluid/operators/activation_op.h +++ b/paddle/fluid/operators/activation_op.h @@ -772,7 +772,6 @@ struct SwishGradFunctor : public BaseActivationFunctor { __macro(sigmoid, SigmoidFunctor, SigmoidGradFunctor); \ __macro(logsigmoid, LogSigmoidFunctor, LogSigmoidGradFunctor); \ __macro(exp, ExpFunctor, ExpGradFunctor); \ - __macro(relu, ReluFunctor, ReluGradFunctor); \ __macro(tanh, TanhFunctor, TanhGradFunctor); \ __macro(softshrink, SoftShrinkFunctor, SoftShrinkGradFunctor); \ __macro(sqrt, SqrtFunctor, SqrtGradFunctor); \ diff --git a/paddle/fluid/operators/assign_op.cc b/paddle/fluid/operators/assign_op.cc index 39ae3c0040d04a6d901f1d6c992d547a6778c28e..d372213e1b6008b0c4227103dd40730f86a84301 100644 --- a/paddle/fluid/operators/assign_op.cc +++ b/paddle/fluid/operators/assign_op.cc @@ -56,6 +56,7 @@ class AssignFunctor { private: void copy_tensor(const framework::LoDTensor &lod_tensor, framework::LoDTensor *out) const { + if (lod_tensor.numel() == 0) return; auto &out_tensor = *out; TensorCopy(lod_tensor, lod_tensor.place(), dev_ctx_, &out_tensor); out_tensor.set_lod(lod_tensor.lod()); diff --git a/paddle/fluid/operators/average_accumulates_op.cc b/paddle/fluid/operators/average_accumulates_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..c95077fcbdb6b6c0da31f30b795dbe4d7d4fe6fe --- /dev/null +++ b/paddle/fluid/operators/average_accumulates_op.cc @@ -0,0 +1,216 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
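The activation_op hunks above drop relu from the FOR_EACH_KERNEL_FUNCTOR list and register its kernels explicitly, so the CUDA build can add a float16 variant without forcing float16 onto every other activation. The flattening of this patch stripped the text inside angle brackets; under that assumption, the CUDA registration presumably reads:

    namespace ops = paddle::operators;
    namespace plat = paddle::platform;
    // Hedged reconstruction: relu forward gets float, double and float16
    // kernels on CUDA; relu_grad (not shown) gets only float and double.
    REGISTER_OP_CUDA_KERNEL(
        relu, ops::ActivationKernel<plat::CUDADeviceContext,
                                    ops::ReluFunctor<float>>,
        ops::ActivationKernel<plat::CUDADeviceContext,
                              ops::ReluFunctor<double>>,
        ops::ActivationKernel<plat::CUDADeviceContext,
                              ops::ReluFunctor<plat::float16>>);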
*/ + +#include "paddle/fluid/operators/average_accumulates_op.h" + +namespace paddle { +namespace operators { + +template <> +void GetAccumulators( + const framework::ExecutionContext& ctx, int64_t& num_updates_, + int64_t& num_accumulates_, int64_t& old_num_accumulates_) { + auto* in_old_num_accumulates = ctx.Input("in_old_num_accumulates"); + auto* in_num_accumulates = ctx.Input("in_num_accumulates"); + auto* in_num_updates = ctx.Input("in_num_updates"); + + old_num_accumulates_ = in_old_num_accumulates->data()[0]; + num_accumulates_ = in_num_accumulates->data()[0]; + num_updates_ = in_num_updates->data()[0]; +} + +template <> +void SetAccumulators( + const framework::ExecutionContext& ctx, int64_t num_updates_, + int64_t num_accumulates_, int64_t old_num_accumulates_) { + auto* out_old_num_accumulates = ctx.Output("out_old_num_accumulates"); + auto* out_num_accumulates = ctx.Output("out_num_accumulates"); + auto* out_num_updates = ctx.Output("out_num_updates"); + + out_old_num_accumulates->data()[0] = old_num_accumulates_; + out_num_accumulates->data()[0] = num_accumulates_; + out_num_updates->data()[0] = num_updates_; +} + +class AverageAccumulatesOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE( + ctx->HasInput("param"), + "Input (param) of average_accumulates op should not be null."); + PADDLE_ENFORCE( + ctx->HasInput("in_sum_1"), + "Input (sum_1) of average_accumulates op should not be null."); + PADDLE_ENFORCE( + ctx->HasInput("in_sum_2"), + "Input (sum_2) of average_accumulates op should not be null."); + PADDLE_ENFORCE( + ctx->HasInput("in_sum_3"), + "Input (sum_3) of average_accumulates op should not be null."); + PADDLE_ENFORCE( + ctx->HasInput("in_num_accumulates"), + "Input (in_num_accumulates) of average_accumulates op should " + "not be null."); + PADDLE_ENFORCE(ctx->HasInput("in_old_num_accumulates"), + "Input (old_num_accumulates) of average_accumulates op " + "should not be null."); + PADDLE_ENFORCE( + ctx->HasInput("in_num_updates"), + "Input (num_updates) of average_accumulates op should not be null."); + + PADDLE_ENFORCE( + ctx->HasOutput("out_sum_1"), + "Output (sum_1) of average_accumulates op should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput("out_sum_2"), + "Output (sum_2) of average_accumulates op should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput("out_sum_3"), + "Output (sum_3) of average_accumulates op should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("out_num_accumulates"), + "Output (num_accumulates) of average_accumulates op should " + "not be null."); + PADDLE_ENFORCE(ctx->HasOutput("out_old_num_accumulates"), + "Output (old_num_accumulates) of average_accumulates op " + "should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput("out_num_updates"), + "Output (num_updates) of average_accumulates op should not be null."); + + auto in_dim = ctx->GetInputDim("param"); + + ctx->SetOutputDim("out_sum_1", in_dim); + ctx->SetOutputDim("out_sum_2", in_dim); + ctx->SetOutputDim("out_sum_3", in_dim); + ctx->SetOutputDim("out_num_accumulates", {1}); + ctx->SetOutputDim("out_old_num_accumulates", {1}); + ctx->SetOutputDim("out_num_updates", {1}); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input("param")->type()), + ctx.GetPlace()); + } +}; + +class 
AverageAccumulatesOpMaker : public framework::OpProtoAndCheckerMaker { + public: + AverageAccumulatesOpMaker(OpProto* proto, OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("param", "(Tensor), The parameter to be accumulated."); + AddInput("in_sum_1", + "(Tensor), A tensor used to store the parameter " + "sums with the same shape as input(param)."); + AddInput("in_sum_2", + "(Tensor), An auxiliary tensor to help " + "accumulating sums of parameter values with the same shape as " + "input(param). It is used to avoid loss of precision due to too " + "many sums."); + AddInput("in_sum_3", + "(Tensor), An auxiliary tensor to help " + "accumulating sums of parameter values with the same shape as " + "input(param)."); + AddInput("in_num_accumulates", + "(Tensor), The accumulating times of current window with " + "shape [1]."); + AddInput( + "in_old_num_accumulates", + "(Tensor), The accumulating times of previous window with " + "shape [1]."); + AddInput("in_num_updates", + "(Tensor), The total number of batches used by training " + "before this batch with shape [1]."); + + AddOutput("out_sum_1", + "(Tensor), A tensor used to store the " + "parameter sums with the same shape as input(param)."); + AddOutput("out_sum_2", + "(Tensor), An auxiliary tensor to help " + "accumulating sums of parameter values with the same shape as " + "input(param). It is used to avoid loss of precision due to too " + "many sums."); + AddOutput("out_sum_3", + "(Tensor), An auxiliary tensor to help " + "accumulating sums of parameter values with the same shape as " + "input(param)."); + AddOutput( + "out_num_accumulates", + "(Tensor), The accumulating times of current window with " + "shape [1]."); + AddOutput( + "out_old_num_accumulates", + "(Tensor), The accumulating times of previous window with " + "shape [1]."); + AddOutput( + "out_num_updates", + "(Tensor), The total number of batches used by training " + "before this batch with shape [1]."); + + AddAttr<float>("average_window", + "(float, default 0) " + "The rate of average window size relative to num_updates.") + .SetDefault(0); + AddAttr<int64_t>("max_average_window", + "(int64_t) " + "Maximum size of average window. It suggests that the " + "number of mini-batches " + "in one pass is an appropriate value to set."); + AddAttr<int64_t>("min_average_window", + "(int64_t, default 10000L) " + "Minimum size of average window.") + .SetDefault(10000L); + + AddComment(R"DOC( +AverageAccumulates Operator. +Accumulate the sum of parameters within a sliding window. The size of the sliding window is +determined by 'average_window', 'max_average_window' and 'min_average_window'. +Memory is shared by Input(in_sum_1) and Output(out_sum_1), which acts as an accumulator 'sum_1'. +'sum_2', 'sum_3', 'num_accumulates', 'old_num_accumulates' and 'num_updates' behave in the same way as 'sum_1'. + +All the accumulators are initialized to zero before training.
+ +For each mini-batch in training, the accumulators are updated as follows: + num_updates += 1 + num_accumulates += 1 + sum_1 += param + if num_updates % kMaxNumAccumulates == 0: + sum_2 += sum_1 + sum_1 = 0 + if num_accumulates >= min_average_window && num_accumulates >= min(max_average_window, num_updates * average_window): + sum_3 = sum_1 + sum_2 + sum_1 = 0 + sum_2 = 0 + old_num_accumulates = num_accumulates + num_accumulates = 0 + +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(average_accumulates, ops::AverageAccumulatesOp, + ops::AverageAccumulatesOpMaker, + paddle::framework::EmptyGradOpMaker); +REGISTER_OP_CPU_KERNEL( + average_accumulates, + ops::AverageAccumulatesKernel<paddle::platform::CPUDeviceContext, float>, + ops::AverageAccumulatesKernel<paddle::platform::CPUDeviceContext, double>); diff --git a/paddle/fluid/operators/average_accumulates_op.cu b/paddle/fluid/operators/average_accumulates_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..270c46984465e5ca62eaa8da3955ce7a3eaa0c57 --- /dev/null +++ b/paddle/fluid/operators/average_accumulates_op.cu @@ -0,0 +1,63 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/average_accumulates_op.h" +#include "paddle/fluid/platform/gpu_info.h" + +namespace paddle { +namespace operators { +template <> +void GetAccumulators<paddle::platform::CUDADeviceContext>( + const framework::ExecutionContext& ctx, int64_t& num_updates_, + int64_t& num_accumulates_, int64_t& old_num_accumulates_) { + auto* in_old_num_accumulates = ctx.Input<Tensor>("in_old_num_accumulates"); + auto* in_num_accumulates = ctx.Input<Tensor>("in_num_accumulates"); + auto* in_num_updates = ctx.Input<Tensor>("in_num_updates"); + auto stream = ctx.cuda_device_context().stream(); + memory::Copy(platform::CPUPlace(), &old_num_accumulates_, + platform::CUDAPlace(), in_old_num_accumulates->data<int64_t>(), + sizeof(int64_t), stream); + memory::Copy(platform::CPUPlace(), &num_accumulates_, platform::CUDAPlace(), + in_num_accumulates->data<int64_t>(), sizeof(int64_t), stream); + memory::Copy(platform::CPUPlace(), &num_updates_, platform::CUDAPlace(), + in_num_updates->data<int64_t>(), sizeof(int64_t), stream); +} + +template <> +void SetAccumulators<paddle::platform::CUDADeviceContext>( + const framework::ExecutionContext& ctx, int64_t num_updates_, + int64_t num_accumulates_, int64_t old_num_accumulates_) { + auto stream = ctx.cuda_device_context().stream(); + auto* out_old_num_accumulates = ctx.Output<Tensor>("out_old_num_accumulates"); + auto* out_num_accumulates = ctx.Output<Tensor>("out_num_accumulates"); + auto* out_num_updates = ctx.Output<Tensor>("out_num_updates"); + + memory::Copy(platform::CUDAPlace(), out_old_num_accumulates->data<int64_t>(), + platform::CPUPlace(), &old_num_accumulates_, sizeof(int64_t), + stream); + memory::Copy(platform::CUDAPlace(), out_num_accumulates->data<int64_t>(), + platform::CPUPlace(), &num_accumulates_, sizeof(int64_t), + stream); + memory::Copy(platform::CUDAPlace(), out_num_updates->data<int64_t>(), + platform::CPUPlace(), &num_updates_, sizeof(int64_t), stream); +} + +} // namespace operators +} // namespace
paddle + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + average_accumulates, + ops::AverageAccumulatesKernel, + ops::AverageAccumulatesKernel); diff --git a/paddle/fluid/operators/average_accumulates_op.h b/paddle/fluid/operators/average_accumulates_op.h new file mode 100644 index 0000000000000000000000000000000000000000..f858109d1428dc67d94c253e5a39818eb2d4560d --- /dev/null +++ b/paddle/fluid/operators/average_accumulates_op.h @@ -0,0 +1,113 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +template +using EigenVector = framework::EigenVector; + +template +void GetAccumulators(const framework::ExecutionContext& ctx, + int64_t& num_updates, int64_t& num_accumulates, + int64_t& old_num_accumulates); + +template +void SetAccumulators(const framework::ExecutionContext& ctx, + int64_t num_updates, int64_t num_accumulates, + int64_t old_num_accumulates); + +template +class AverageAccumulatesKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + // It is used to avoid loss of precision + static const int64_t kMaxNumAccumulates = 16384; + // Get accumulators from input + int64_t num_updates = 0; + int64_t num_accumulates = 0; + int64_t old_num_accumulates = 0; + GetAccumulators(ctx, num_updates, num_accumulates, + old_num_accumulates); + + // Get attrs + float average_window = ctx.Attr("average_window"); + int64_t max_average_window = ctx.Attr("max_average_window"); + int64_t min_average_window = ctx.Attr("min_average_window"); + min_average_window = + std::min(min_average_window, max_average_window); + + // Get inputs + auto* param = ctx.Input("param"); + auto* in_sum_1 = ctx.Input("in_sum_1"); + auto* in_sum_2 = ctx.Input("in_sum_2"); + auto* in_sum_3 = ctx.Input("in_sum_3"); + auto param_tensor = EigenVector::Flatten(*param); + auto in_sum_1_tensor = EigenVector::Flatten(*in_sum_1); + auto in_sum_2_tensor = EigenVector::Flatten(*in_sum_2); + auto in_sum_3_tensor = EigenVector::Flatten(*in_sum_3); + + // Get outputs + auto* out_sum_1 = ctx.Output("out_sum_1"); + auto* out_sum_2 = ctx.Output("out_sum_2"); + auto* out_sum_3 = ctx.Output("out_sum_3"); + auto out_sum_1_tensor = EigenVector::Flatten(*out_sum_1); + auto out_sum_2_tensor = EigenVector::Flatten(*out_sum_2); + auto out_sum_3_tensor = EigenVector::Flatten(*out_sum_3); + + // Compute + auto& place = *ctx.template device_context().eigen_device(); + math::SetConstant constant_functor; + ++num_updates; + ++num_accumulates; + out_sum_1_tensor.device(place) = in_sum_1_tensor + param_tensor; + out_sum_2_tensor.device(place) = in_sum_2_tensor; + out_sum_3_tensor.device(place) = in_sum_3_tensor; + if (num_updates % kMaxNumAccumulates == 0) { + // Move the sum to a 
different buffer to avoid loss of precision due to + // too many sums. + out_sum_2_tensor.device(place) = in_sum_2_tensor + in_sum_1_tensor; + constant_functor(ctx.template device_context(), out_sum_1, + 0.0); + } + if (num_accumulates >= min_average_window && + num_accumulates >= std::min(max_average_window, + num_updates * average_window)) { + // Now the average window is too long, discard the old sum. + out_sum_3_tensor.device(place) = in_sum_1_tensor + in_sum_2_tensor; + constant_functor(ctx.template device_context(), out_sum_1, + 0.0); + constant_functor(ctx.template device_context(), out_sum_2, + 0.0); + old_num_accumulates = num_accumulates; + num_accumulates = 0; + } + + // Set accumulators to output + SetAccumulators(ctx, num_updates, num_accumulates, + old_num_accumulates); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc index 215ae229aff96d76fc948e19bdb42db319af65dc..36049ee6a4a0d2a251b6d10cf1ff05a9d9845089 100644 --- a/paddle/fluid/operators/batch_norm_op.cc +++ b/paddle/fluid/operators/batch_norm_op.cc @@ -80,6 +80,29 @@ class BatchNormOp : public framework::OperatorWithKernel { ctx->SetOutputDim("SavedVariance", {C}); ctx->ShareLoD("X", "Y"); } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + auto input_data_type = + framework::ToDataType(ctx.Input("X")->type()); + // For float or float16 input tensor, the type of the scale, bias, mean, + // and var tensors should both be float. + auto bn_param_type = framework::proto::VarType::FP32; + PADDLE_ENFORCE_EQ(bn_param_type, + framework::ToDataType(ctx.Input("Scale")->type()), + "Scale input should be of float type"); + PADDLE_ENFORCE_EQ(bn_param_type, + framework::ToDataType(ctx.Input("Bias")->type()), + "Bias input should be of float type"); + PADDLE_ENFORCE_EQ(bn_param_type, + framework::ToDataType(ctx.Input("Mean")->type()), + "Mean input should be of float type"); + PADDLE_ENFORCE_EQ(bn_param_type, framework::ToDataType( + ctx.Input("Variance")->type()), + "Variance input should be of float type"); + return framework::OpKernelType(input_data_type, ctx.GetPlace()); + } }; class BatchNormOpMaker : public framework::OpProtoAndCheckerMaker { @@ -434,12 +457,39 @@ class BatchNormGradKernel } }; +class BatchNormGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto *op = new framework::OpDesc(); + op->SetType("batch_norm_grad"); + op->SetInput("X", Input("X")); + op->SetInput(framework::GradVarName("Y"), OutputGrad("Y")); + + op->SetInput("Scale", Input("Scale")); + op->SetInput("SavedMean", Output("SavedMean")); + op->SetInput("SavedVariance", Output("SavedVariance")); + + op->SetAttrMap(Attrs()); + + op->SetOutput(framework::GradVarName("X"), InputGrad("X")); + op->SetOutput(framework::GradVarName("Scale"), InputGrad("Scale")); + op->SetOutput(framework::GradVarName("Bias"), InputGrad("Bias")); + + return std::unique_ptr(op); + } +}; + } // namespace operators } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(batch_norm, ops::BatchNormOp, ops::BatchNormOpMaker, - batch_norm_grad, ops::BatchNormGradOp); +REGISTER_OPERATOR(batch_norm, ops::BatchNormOp, ops::BatchNormOpMaker, + ops::BatchNormGradMaker); +REGISTER_OPERATOR(batch_norm_grad, ops::BatchNormGradOp); + REGISTER_OP_CPU_KERNEL( batch_norm, 
ops::BatchNormKernel); diff --git a/paddle/fluid/operators/batch_norm_op.cu.cc b/paddle/fluid/operators/batch_norm_op.cu.cc index 2d1556efc66826ea9847de8311ccecdee0ea7871..6ceacc39924a7558e380aaf563aaf234f1bf30a5 100644 --- a/paddle/fluid/operators/batch_norm_op.cu.cc +++ b/paddle/fluid/operators/batch_norm_op.cu.cc @@ -18,6 +18,7 @@ limitations under the License. */ #include #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/cudnn_helper.h" +#include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { @@ -26,6 +27,8 @@ using Tensor = framework::Tensor; using DataLayout = framework::DataLayout; template using CudnnDataType = platform::CudnnDataType; +template +using BatchNormParamType = typename CudnnDataType::BatchNormParamType; void ExtractNCWHD(const framework::DDim &dims, const DataLayout &data_layout, int *N, int *C, int *H, int *W, int *D) { @@ -104,8 +107,9 @@ class BatchNormKernel CUDNN_ENFORCE(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType::type, x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data())); + // Note: PERSISTENT not implemented for inference CUDNN_ENFORCE(platform::dynload::cudnnDeriveBNTensorDescriptor( - bn_param_desc_, data_desc_, mode_)); + bn_param_desc_, data_desc_, is_test ? CUDNN_BATCHNORM_SPATIAL : mode_)); const auto *scale = ctx.Input("Scale"); const auto *bias = ctx.Input("Bias"); @@ -118,15 +122,16 @@ class BatchNormKernel // alloc memory y->mutable_data(ctx.GetPlace()); - mean_out->mutable_data(ctx.GetPlace()); - variance_out->mutable_data(ctx.GetPlace()); - saved_mean->mutable_data(ctx.GetPlace()); - saved_variance->mutable_data(ctx.GetPlace()); + mean_out->mutable_data>(ctx.GetPlace()); + variance_out->mutable_data>(ctx.GetPlace()); + saved_mean->mutable_data>(ctx.GetPlace()); + saved_variance->mutable_data>(ctx.GetPlace()); auto &dev_ctx = ctx.template device_context(); - math::SetConstant functor; - functor(dev_ctx, saved_mean, 0); - functor(dev_ctx, saved_variance, 0); + math::SetConstant> + functor; + functor(dev_ctx, saved_mean, static_cast>(0)); + functor(dev_ctx, saved_variance, static_cast>(0)); auto handle = dev_ctx.cudnn_handle(); @@ -147,8 +152,10 @@ class BatchNormKernel CUDNN_BATCHNORM_SPATIAL, CudnnDataType::kOne(), CudnnDataType::kZero(), data_desc_, x->template data(), data_desc_, y->template mutable_data(ctx.GetPlace()), - bn_param_desc_, scale->template data(), bias->template data(), - est_mean->template data(), est_var->template data(), epsilon)); + bn_param_desc_, scale->template data>(), + bias->template data>(), + est_mean->template data>(), + est_var->template data>(), epsilon)); } else { // Run training mode. 
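// Note: the is_test branch above uses cudnnBatchNormalizationForwardInference
// with the estimated mean/variance inputs; the else branch below uses
// cudnnBatchNormalizationForwardTraining, which additionally updates the
// running statistics and records the saved mean and inverse variance that
// batch_norm_grad later consumes.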
// obtain running mean and running inv var, and see if we need to @@ -159,11 +166,16 @@ class BatchNormKernel handle, mode_, CudnnDataType::kOne(), CudnnDataType::kZero(), data_desc_, x->template data(), data_desc_, y->template mutable_data(ctx.GetPlace()), bn_param_desc_, - scale->template data(), bias->template data(), this_factor, - mean_out->template mutable_data(ctx.GetPlace()), - variance_out->template mutable_data(ctx.GetPlace()), epsilon, - saved_mean->template mutable_data(ctx.GetPlace()), - saved_variance->template mutable_data(ctx.GetPlace()))); + scale->template data>(), + bias->template data>(), this_factor, + mean_out->template mutable_data>( + ctx.GetPlace()), + variance_out->template mutable_data>( + ctx.GetPlace()), + epsilon, saved_mean->template mutable_data>( + ctx.GetPlace()), + saved_variance->template mutable_data>( + ctx.GetPlace()))); } // clean when exit. @@ -270,9 +282,9 @@ class BatchNormGradKernel } // namespace paddle namespace ops = paddle::operators; +namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( - batch_norm, - ops::BatchNormKernel); + batch_norm, ops::BatchNormKernel, + ops::BatchNormKernel); REGISTER_OP_CUDA_KERNEL( - batch_norm_grad, - ops::BatchNormGradKernel); + batch_norm_grad, ops::BatchNormGradKernel); diff --git a/paddle/fluid/operators/box_coder_op.cc b/paddle/fluid/operators/box_coder_op.cc index eccdd408a17a07a541480705242b137f8207c139..ec416f725e75fae57484751ee8a066c0b9da8a70 100644 --- a/paddle/fluid/operators/box_coder_op.cc +++ b/paddle/fluid/operators/box_coder_op.cc @@ -126,6 +126,7 @@ width and height. } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_WITHOUT_GRADIENT(box_coder, ops::BoxCoderOp, ops::BoxCoderOpMaker); +REGISTER_OPERATOR(box_coder, ops::BoxCoderOp, ops::BoxCoderOpMaker, + paddle::framework::EmptyGradOpMaker); REGISTER_OP_CPU_KERNEL(box_coder, ops::BoxCoderKernel, ops::BoxCoderKernel); diff --git a/paddle/fluid/operators/cast_op.cc b/paddle/fluid/operators/cast_op.cc index 72f8cb04f2de3af4ee526c3d9b86ff96e34f0b0a..dd0068d571f72c9c22334e523cd091fe4c8da5a6 100644 --- a/paddle/fluid/operators/cast_op.cc +++ b/paddle/fluid/operators/cast_op.cc @@ -14,6 +14,7 @@ limitations under the License. */ #include "paddle/fluid/operators/cast_op.h" #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { @@ -88,4 +89,5 @@ REGISTER_OP_CPU_KERNEL(cast, ops::CastOpKernel, ops::CastOpKernel, ops::CastOpKernel, ops::CastOpKernel, - ops::CastOpKernel); + ops::CastOpKernel, + ops::CastOpKernel); diff --git a/paddle/fluid/operators/cast_op.cu b/paddle/fluid/operators/cast_op.cu index 507e9a531aae70e60bc6748bfab800310d6e0c21..c486c5850e25fcf4370f02cb145c244743a4cc4b 100644 --- a/paddle/fluid/operators/cast_op.cu +++ b/paddle/fluid/operators/cast_op.cu @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
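A pattern worth calling out in the batch_norm change above: float16 tensors deliberately keep float-typed parameters, via the BatchNormParamType trait, so scale, bias, mean and variance never lose precision. A minimal standalone sketch of that trait idea (the float16 struct here is an illustrative stand-in, not Paddle's type):

    #include <type_traits>

    struct float16 { unsigned short bits; };  // illustrative stand-in

    // By default a tensor's parameters share its element type...
    template <typename T>
    struct BatchNormParamType { using type = T; };
    // ...but half-precision tensors promote their parameters to float.
    template <>
    struct BatchNormParamType<float16> { using type = float; };

    static_assert(std::is_same<BatchNormParamType<double>::type, double>::value, "");
    static_assert(std::is_same<BatchNormParamType<float16>::type, float>::value, "");

    int main() { return 0; }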
*/ #include "paddle/fluid/operators/cast_op.h" +#include "paddle/fluid/platform/float16.h" template using CastOpKernel = @@ -20,4 +21,5 @@ using CastOpKernel = REGISTER_OP_CUDA_KERNEL(cast, CastOpKernel, CastOpKernel, CastOpKernel, CastOpKernel, - CastOpKernel); + CastOpKernel, + CastOpKernel); diff --git a/paddle/fluid/operators/channel_recv_op.cc b/paddle/fluid/operators/channel_recv_op.cc index c12b88e7a91c4ea7044223464a2f902db494d1a8..844b3ae3b7bf87c9b253128165b3c938801d5d60 100644 --- a/paddle/fluid/operators/channel_recv_op.cc +++ b/paddle/fluid/operators/channel_recv_op.cc @@ -18,6 +18,7 @@ limitations under the License. */ #include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/var_type.h" +#include "paddle/fluid/operators/concurrency/channel_util.h" #include "paddle/fluid/operators/math/math_function.h" static constexpr char Channel[] = "Channel"; @@ -36,25 +37,6 @@ void SetReceiveStatus(const platform::Place &dev_place, status_tensor[0] = status; } -bool ChannelReceive(framework::ChannelHolder *ch, framework::Variable *var) { - // Get type of channel and use that to call mutable data for Variable - auto type = framework::ToVarType(ch->Type()); - if (type == framework::proto::VarType_Type_LOD_TENSOR) - return ch->Receive(var->GetMutable()); - else if (type == framework::proto::VarType_Type_LOD_RANK_TABLE) - return ch->Receive(var->GetMutable()); - else if (type == framework::proto::VarType_Type_LOD_TENSOR_ARRAY) - return ch->Receive(var->GetMutable()); - else if (type == framework::proto::VarType_Type_SELECTED_ROWS) - return ch->Receive(var->GetMutable()); - else if (type == framework::proto::VarType_Type_READER) - return ch->Receive(var->GetMutable()); - else if (type == framework::proto::VarType_Type_CHANNEL) - return ch->Receive(var->GetMutable()); - else - PADDLE_THROW("ChannelReceive:Unsupported type"); -} - class ChannelRecvOp : public framework::OperatorBase { public: ChannelRecvOp(const std::string &type, @@ -81,7 +63,7 @@ class ChannelRecvOp : public framework::OperatorBase { scope.FindVar(Input(Channel))->GetMutable(); auto output_var = scope.FindVar(Output(Out)); // Receive the data from the channel. - bool ok = ChannelReceive(ch, output_var); + bool ok = concurrency::ChannelReceive(ch, output_var); // Set the status output of the `ChannelReceive` call. SetReceiveStatus(dev_place, *scope.FindVar(Output(Status)), ok); diff --git a/paddle/fluid/operators/channel_send_op.cc b/paddle/fluid/operators/channel_send_op.cc index 6d7715ad229e821f02437246e3326063cb1ee757..47cf7d7efc9996e8a8db11b79c0310f77c2435a4 100644 --- a/paddle/fluid/operators/channel_send_op.cc +++ b/paddle/fluid/operators/channel_send_op.cc @@ -18,6 +18,7 @@ limitations under the License. 
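The cast_op hunks above append a float16 kernel to both registration lists; the angle-bracket text was lost in flattening, so as a reconstruction (element types inferred from the matching CPU list) the CUDA registration presumably becomes:

    // CastOpKernel here is the local alias cast_op.cu defines over
    // paddle::operators::CastOpKernel with the CUDA device context.
    REGISTER_OP_CUDA_KERNEL(cast, CastOpKernel<float>, CastOpKernel<double>,
                            CastOpKernel<int>, CastOpKernel<int64_t>,
                            CastOpKernel<bool>,
                            CastOpKernel<paddle::platform::float16>);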
*/ #include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/var_type.h" +#include "paddle/fluid/operators/concurrency/channel_util.h" #include "paddle/fluid/operators/math/math_function.h" static constexpr char Channel[] = "Channel"; @@ -37,24 +38,6 @@ void SetSendStatus(const platform::Place &dev_place, status_tensor[0] = status; } -bool ChannelSend(framework::ChannelHolder *ch, framework::Variable *var) { - auto type = framework::ToVarType(var->Type()); - if (type == framework::proto::VarType_Type_LOD_TENSOR) - return ch->Send(var->GetMutable()); - else if (type == framework::proto::VarType_Type_LOD_RANK_TABLE) - return ch->Send(var->GetMutable()); - else if (type == framework::proto::VarType_Type_LOD_TENSOR_ARRAY) - return ch->Send(var->GetMutable()); - else if (type == framework::proto::VarType_Type_SELECTED_ROWS) - return ch->Send(var->GetMutable()); - else if (type == framework::proto::VarType_Type_READER) - return ch->Send(var->GetMutable()); - else if (type == framework::proto::VarType_Type_CHANNEL) - return ch->Send(var->GetMutable()); - else - PADDLE_THROW("ChannelSend:Unsupported type"); -} - class ChannelSendOp : public framework::OperatorBase { public: ChannelSendOp(const std::string &type, @@ -82,7 +65,7 @@ class ChannelSendOp : public framework::OperatorBase { auto input_var = scope.FindVar(Input(X)); // Send the input data through the channel. - bool ok = ChannelSend(ch, input_var); + bool ok = concurrency::ChannelSend(ch, input_var); // Set the status output of the `ChannelSend` call. SetSendStatus(dev_place, *scope.FindVar(Output(Status)), ok); diff --git a/paddle/fluid/operators/concurrency/CMakeLists.txt b/paddle/fluid/operators/concurrency/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..e4617440d152b4c15d09e81cd19c76739b95b979 --- /dev/null +++ b/paddle/fluid/operators/concurrency/CMakeLists.txt @@ -0,0 +1 @@ +cc_library(concurrency SRCS channel_util.cc DEPS device_context framework_proto boost eigen3) diff --git a/paddle/fluid/operators/concurrency/channel_util.cc b/paddle/fluid/operators/concurrency/channel_util.cc new file mode 100644 index 0000000000000000000000000000000000000000..a483af7affd824da7d18676d934dc959167ef71f --- /dev/null +++ b/paddle/fluid/operators/concurrency/channel_util.cc @@ -0,0 +1,111 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "channel_util.h" +#include "paddle/fluid/framework/var_type.h" + +namespace poc = paddle::operators::concurrency; + +bool poc::ChannelSend(framework::ChannelHolder *ch, framework::Variable *var) { + auto type = framework::ToVarType(var->Type()); + if (type == framework::proto::VarType_Type_LOD_TENSOR) + return ch->Send(var->GetMutable()); + else if (type == framework::proto::VarType_Type_LOD_RANK_TABLE) + return ch->Send(var->GetMutable()); + else if (type == framework::proto::VarType_Type_LOD_TENSOR_ARRAY) + return ch->Send(var->GetMutable()); + else if (type == framework::proto::VarType_Type_SELECTED_ROWS) + return ch->Send(var->GetMutable()); + else if (type == framework::proto::VarType_Type_READER) + return ch->Send(var->GetMutable()); + else if (type == framework::proto::VarType_Type_CHANNEL) + return ch->Send(var->GetMutable()); + else + PADDLE_THROW("ChannelSend:Unsupported type"); +} + +bool poc::ChannelReceive(framework::ChannelHolder *ch, + framework::Variable *var) { + // Get type of channel and use that to call mutable data for Variable + auto type = framework::ToVarType(ch->Type()); + if (type == framework::proto::VarType_Type_LOD_TENSOR) + return ch->Receive(var->GetMutable()); + else if (type == framework::proto::VarType_Type_LOD_RANK_TABLE) + return ch->Receive(var->GetMutable()); + else if (type == framework::proto::VarType_Type_LOD_TENSOR_ARRAY) + return ch->Receive(var->GetMutable()); + else if (type == framework::proto::VarType_Type_SELECTED_ROWS) + return ch->Receive(var->GetMutable()); + else if (type == framework::proto::VarType_Type_READER) + return ch->Receive(var->GetMutable()); + else if (type == framework::proto::VarType_Type_CHANNEL) + return ch->Receive(var->GetMutable()); + else + PADDLE_THROW("ChannelReceive:Unsupported type"); +} + +void poc::ChannelAddToSendQ(framework::ChannelHolder *ch, const void *referrer, + framework::Variable *var, + std::shared_ptr cond, + std::function cb) { + auto type = framework::ToVarType(var->Type()); + if (type == framework::proto::VarType_Type_LOD_TENSOR) { + ch->AddToSendQ(referrer, var->GetMutable(), cond, cb); + } else if (type == framework::proto::VarType_Type_LOD_RANK_TABLE) { + ch->AddToSendQ(referrer, var->GetMutable(), cond, + cb); + } else if (type == framework::proto::VarType_Type_LOD_TENSOR_ARRAY) { + ch->AddToSendQ(referrer, var->GetMutable(), cond, + cb); + } else if (type == framework::proto::VarType_Type_SELECTED_ROWS) { + ch->AddToSendQ(referrer, var->GetMutable(), cond, + cb); + } else if (type == framework::proto::VarType_Type_READER) { + ch->AddToSendQ(referrer, var->GetMutable(), cond, + cb); + } else if (type == framework::proto::VarType_Type_CHANNEL) { + ch->AddToSendQ(referrer, var->GetMutable(), cond, + cb); + } else { + PADDLE_THROW("ChannelAddToSendQ:Unsupported type"); + } +} + +void poc::ChannelAddToReceiveQ( + framework::ChannelHolder *ch, const void *referrer, + framework::Variable *var, std::shared_ptr cond, + std::function cb) { + auto type = framework::ToVarType(var->Type()); + if (type == framework::proto::VarType_Type_LOD_TENSOR) { + ch->AddToReceiveQ(referrer, var->GetMutable(), cond, + cb); + } else if (type == framework::proto::VarType_Type_LOD_RANK_TABLE) { + ch->AddToReceiveQ(referrer, var->GetMutable(), + cond, cb); + } else if (type == framework::proto::VarType_Type_LOD_TENSOR_ARRAY) { + ch->AddToReceiveQ(referrer, var->GetMutable(), + cond, cb); + } else if (type == framework::proto::VarType_Type_SELECTED_ROWS) { + ch->AddToReceiveQ(referrer, var->GetMutable(), + 
cond, cb); + } else if (type == framework::proto::VarType_Type_READER) { + ch->AddToReceiveQ(referrer, var->GetMutable(), + cond, cb); + } else if (type == framework::proto::VarType_Type_CHANNEL) { + ch->AddToReceiveQ(referrer, var->GetMutable(), + cond, cb); + } else { + PADDLE_THROW("ChannelAddToReceiveQ:Unsupported type"); + } +} diff --git a/paddle/fluid/operators/concurrency/channel_util.h b/paddle/fluid/operators/concurrency/channel_util.h new file mode 100644 index 0000000000000000000000000000000000000000..c3674bd9815df451751707bfa84d18dbb5fa0f6b --- /dev/null +++ b/paddle/fluid/operators/concurrency/channel_util.h @@ -0,0 +1,38 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/fluid/framework/channel.h" +#include "paddle/fluid/framework/variable.h" + +namespace paddle { +namespace operators { +namespace concurrency { + +bool ChannelSend(framework::ChannelHolder *ch, framework::Variable *var); +bool ChannelReceive(framework::ChannelHolder *ch, framework::Variable *var); + +void ChannelAddToSendQ(framework::ChannelHolder *ch, const void *referrer, + framework::Variable *var, + std::shared_ptr cond, + std::function cb); +void ChannelAddToReceiveQ(framework::ChannelHolder *ch, const void *referrer, + framework::Variable *var, + std::shared_ptr cond, + std::function cb); + +} // namespace concurrency +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/conv_cudnn_op.cu.cc b/paddle/fluid/operators/conv_cudnn_op.cu.cc index ff0fbf21f86269885df5491afab7443df813f13f..a32aba4c1ff2f5e775aeb41f25b02322dbc6a64a 100644 --- a/paddle/fluid/operators/conv_cudnn_op.cu.cc +++ b/paddle/fluid/operators/conv_cudnn_op.cu.cc @@ -18,6 +18,7 @@ limitations under the License. 
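channel_util.{h,cc} above centralize the per-VarType dispatch that channel_send_op.cc and channel_recv_op.cc previously each carried a private copy of. A hedged usage sketch, assuming only the two helpers declared in the header (the surrounding Paddle setup is not part of this patch):

    #include "paddle/fluid/operators/concurrency/channel_util.h"

    namespace conc = paddle::operators::concurrency;

    // Send one variable and receive another through the same channel; both
    // helpers dispatch on the runtime variable/channel type and return
    // whether the operation succeeded.
    bool RoundTrip(paddle::framework::ChannelHolder* ch,
                   paddle::framework::Variable* in,
                   paddle::framework::Variable* out) {
      if (!conc::ChannelSend(ch, in)) return false;
      return conc::ChannelReceive(ch, out);
    }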
*/ #include "paddle/fluid/operators/conv_op.h" #include "paddle/fluid/platform/assert.h" #include "paddle/fluid/platform/cudnn_helper.h" +#include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { @@ -27,6 +28,8 @@ using ScopedTensorDescriptor = platform::ScopedTensorDescriptor; using ScopedFilterDescriptor = platform::ScopedFilterDescriptor; using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor; using DataLayout = platform::DataLayout; +template +using ScalingParamType = typename platform::CudnnDataType::ScalingParamType; static constexpr size_t kCONV_CUDNN_WORKSPACE_LIMIT_BYTES = static_cast(1024) * 1024 * 1024; @@ -133,7 +136,7 @@ class CUDNNConvOpKernel : public framework::OpKernel { platform::CUDAPlace gpu = boost::get(ctx.GetPlace()); cudnn_workspace = paddle::memory::Alloc(gpu, workspace_size_in_bytes); // ------------------- cudnn conv forward --------------------- - T alpha = 1.0f, beta = 0.0f; + ScalingParamType alpha = 1.0f, beta = 0.0f; for (int i = 0; i < groups; i++) { PADDLE_ENFORCE(platform::dynload::cudnnConvolutionForward( handle, &alpha, cudnn_input_desc, input_data + i * group_offset_in, @@ -280,7 +283,7 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { platform::CUDAPlace gpu = boost::get(ctx.GetPlace()); cudnn_workspace = paddle::memory::Alloc(gpu, workspace_size_in_bytes); // ------------------- cudnn conv backward data --------------------- - T alpha = 1.0f, beta = 0.0f; + ScalingParamType alpha = 1.0f, beta = 0.0f; if (input_grad) { T* input_grad_data = input_grad->mutable_data(ctx.GetPlace()); // Because beta is zero, it is unnecessary to reset input_grad. @@ -315,16 +318,18 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { } // namespace operators } // namespace paddle -REGISTER_OP_KERNEL(conv2d, CUDNN, ::paddle::platform::CUDAPlace, +namespace plat = paddle::platform; +REGISTER_OP_KERNEL(conv2d, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvOpKernel, - paddle::operators::CUDNNConvOpKernel); -REGISTER_OP_KERNEL(conv2d_grad, CUDNN, ::paddle::platform::CUDAPlace, + paddle::operators::CUDNNConvOpKernel, + paddle::operators::CUDNNConvOpKernel); +REGISTER_OP_KERNEL(conv2d_grad, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvGradOpKernel, paddle::operators::CUDNNConvGradOpKernel); -REGISTER_OP_KERNEL(conv3d, CUDNN, ::paddle::platform::CUDAPlace, +REGISTER_OP_KERNEL(conv3d, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvOpKernel, paddle::operators::CUDNNConvOpKernel); -REGISTER_OP_KERNEL(conv3d_grad, CUDNN, ::paddle::platform::CUDAPlace, +REGISTER_OP_KERNEL(conv3d_grad, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvGradOpKernel, paddle::operators::CUDNNConvGradOpKernel); diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc index 4b02b80d7772fa15d2333692551da5e59d93765f..650bc92be22af9ea8afcacf590a11190109e8811 100644 --- a/paddle/fluid/operators/conv_op.cc +++ b/paddle/fluid/operators/conv_op.cc @@ -70,25 +70,36 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const { framework::OpKernelType ConvOp::GetExpectedKernelType( const framework::ExecutionContext& ctx) const { - framework::LibraryType library_{framework::LibraryType::kPlain}; + framework::LibraryType library{framework::LibraryType::kPlain}; #ifdef PADDLE_WITH_CUDA if (platform::CanCUDNNBeUsed(ctx)) { - library_ = framework::LibraryType::kCUDNN; + library = framework::LibraryType::kCUDNN; } #endif #ifdef PADDLE_WITH_MKLDNN - if (library_ == framework::LibraryType::kPlain && + if 
(library == framework::LibraryType::kPlain && platform::CanMKLDNNBeUsed(ctx)) { - library_ = framework::LibraryType::kMKLDNN; + library = framework::LibraryType::kMKLDNN; } #endif + auto input_data_type = + framework::ToDataType(ctx.Input("Input")->type()); + auto filter_data_type = + framework::ToDataType(ctx.Input("Filter")->type()); + PADDLE_ENFORCE_EQ(input_data_type, filter_data_type, + "input and filter data type should be consistent"); + + if (input_data_type == framework::proto::VarType::FP16) { + PADDLE_ENFORCE_EQ(library, framework::LibraryType::kCUDNN, + "float16 can only be used when CUDNN is used"); + } + std::string data_format = ctx.Attr("data_format"); // TODO(pzelazko-intel): enable MKLDNN layout when it's ready - framework::DataLayout layout_ = framework::StringToDataLayout(data_format); - return framework::OpKernelType( - framework::ToDataType(ctx.Input("Input")->type()), ctx.GetPlace(), - layout_, library_); + framework::DataLayout layout = framework::StringToDataLayout(data_format); + return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout, + library); } Conv2DOpMaker::Conv2DOpMaker(OpProto* proto, OpAttrChecker* op_checker) diff --git a/paddle/fluid/operators/cross_entropy_op.h b/paddle/fluid/operators/cross_entropy_op.h index ec315695a68befc2e3de798fdb3fa146a903aaff..6da3a24dc89a85fe432b6350d3af7b0e84337c9d 100644 --- a/paddle/fluid/operators/cross_entropy_op.h +++ b/paddle/fluid/operators/cross_entropy_op.h @@ -78,7 +78,7 @@ class CrossEntropyGradientOpKernel : public framework::OpKernel { for (int64_t i = 0; i < batch_size; ++i) { PADDLE_ASSERT(label_data[i] >= 0 || label_data[i] < class_num); int64_t index = i * class_num + label_data[i]; - dx_data[index] = -dy_data[i] / x_data[index]; + dx_data[index] = math::TolerableValue()(-dy_data[i] / x_data[index]); } } } diff --git a/paddle/fluid/operators/detail/CMakeLists.txt b/paddle/fluid/operators/detail/CMakeLists.txt index 94395ccfbcbd74ee40552a5c70dc8b8063a5f851..2b19f0448955d2d7582f23ac133c14ffdf5c9e49 100644 --- a/paddle/fluid/operators/detail/CMakeLists.txt +++ b/paddle/fluid/operators/detail/CMakeLists.txt @@ -1,6 +1,8 @@ if(WITH_DISTRIBUTE) - grpc_library(sendrecvop_grpc SRCS bytebuffer_stream.cc sendrecvop_utils.cc grpc_client.cc grpc_server.cc PROTO send_recv.proto DEPS lod_tensor selected_rows) + grpc_library(sendrecvop_grpc SRCS bytebuffer_stream.cc sendrecvop_utils.cc grpc_client.cc + grpc_server.cc variable_response.cc PROTO send_recv.proto DEPS lod_tensor selected_rows) set(DISTRIBUTE_COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor") set_source_files_properties(test_serde.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) - cc_test(serde_test SRCS test_serde.cc DEPS grpc++_unsecure grpc_unsecure gpr cares zlib protobuf sendrecvop_grpc) + cc_test(serde_test SRCS test_serde.cc variable_response.cc DEPS grpc++_unsecure grpc_unsecure gpr + cares zlib protobuf sendrecvop_grpc) endif() diff --git a/paddle/fluid/operators/detail/bytebuffer_stream.h b/paddle/fluid/operators/detail/bytebuffer_stream.h index 099deb12d0e436427c147ab9b1eb553b712e14fb..1791a48aab1b66147f645c90757b35ef5f6e001b 100644 --- a/paddle/fluid/operators/detail/bytebuffer_stream.h +++ b/paddle/fluid/operators/detail/bytebuffer_stream.h @@ -23,9 +23,107 @@ limitations under the License. 
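The conv_op.cc change above adds two type checks before picking a kernel: input and filter dtypes must match, and float16 convolutions are only allowed through cuDNN. A standalone model of that decision (the enum names are simplified stand-ins for the framework types):

    #include <stdexcept>

    enum class DataType { FP16, FP32, FP64 };
    enum class Library { kPlain, kCUDNN };

    Library PickConvLibrary(DataType input, DataType filter, bool can_use_cudnn) {
      if (input != filter)
        throw std::invalid_argument(
            "input and filter data type should be consistent");
      Library lib = can_use_cudnn ? Library::kCUDNN : Library::kPlain;
      if (input == DataType::FP16 && lib != Library::kCUDNN)
        throw std::invalid_argument(
            "float16 can only be used when CUDNN is used");
      return lib;
    }

    int main() {
      PickConvLibrary(DataType::FP16, DataType::FP16, /*can_use_cudnn=*/true);
      return 0;
    }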
*/ #include "google/protobuf/io/coded_stream.h" #include "google/protobuf/io/zero_copy_stream.h" +namespace grpc { +// A ZeroCopyInputStream that reads from grpc_byte_buffer +class GrpcBufferReader final + : public ::google::protobuf::io::ZeroCopyInputStream { + typedef void (CoreCodegenInterface::*OldReaderInitAPI)( + grpc_byte_buffer_reader* reader, grpc_byte_buffer* buffer); + typedef int (CoreCodegenInterface::*NewReaderInitAPI)( + grpc_byte_buffer_reader* reader, grpc_byte_buffer* buffer); + void ReaderInit(OldReaderInitAPI ptr, grpc_byte_buffer_reader* reader, + grpc_byte_buffer* buffer) { + (g_core_codegen_interface->*ptr)(reader, buffer); + } + void ReaderInit(NewReaderInitAPI ptr, grpc_byte_buffer_reader* reader, + grpc_byte_buffer* buffer) { + int result = (g_core_codegen_interface->*ptr)(reader, buffer); + (void)result; + } + + public: + explicit GrpcBufferReader(grpc_byte_buffer* buffer) + : byte_count_(0), backup_count_(0) { + ReaderInit(&CoreCodegenInterface::grpc_byte_buffer_reader_init, &reader_, + buffer); + } + ~GrpcBufferReader() override { + g_core_codegen_interface->grpc_byte_buffer_reader_destroy(&reader_); + } + + bool Next(const void** data, int* size) override { + if (backup_count_ > 0) { + *data = GRPC_SLICE_START_PTR(slice_) + GRPC_SLICE_LENGTH(slice_) - + backup_count_; + GPR_CODEGEN_ASSERT(backup_count_ <= INT_MAX); + *size = (int)backup_count_; + backup_count_ = 0; + return true; + } + if (!g_core_codegen_interface->grpc_byte_buffer_reader_next(&reader_, + &slice_)) { + return false; + } + g_core_codegen_interface->grpc_slice_unref(slice_); + *data = GRPC_SLICE_START_PTR(slice_); + // On win x64, int is only 32bit + GPR_CODEGEN_ASSERT(GRPC_SLICE_LENGTH(slice_) <= INT_MAX); + byte_count_ += * size = (int)GRPC_SLICE_LENGTH(slice_); + return true; + } + + void BackUp(int count) override { backup_count_ = count; } + + bool Skip(int count) override { + const void* data; + int size; + while (Next(&data, &size)) { + if (size >= count) { + BackUp(size - count); + return true; + } + // size < count; + count -= size; + } + // error or we have too large count; + return false; + } + + ::google::protobuf::int64 ByteCount() const override { + return byte_count_ - backup_count_; + } + + private: + int64_t byte_count_; + int64_t backup_count_; + grpc_byte_buffer_reader reader_; + grpc_slice slice_; +}; + +}; // namespace grpc + namespace paddle { namespace operators { namespace detail { +// Source provides a way for a particular RPC implementation to provide +// received data to ParseFrom. +class Source { + public: + virtual ~Source() {} + + // Return the stream that contains the data to be parsed. + // Note that this method might be invoked more than once if + // ParseFrom needs to fall back to a more expensive parsing method. + // Every call must return a stream pointing at the beginning of + // the serialized RecvTensorResponse. + // + // Note that a subsequent call to contents() invalidates previous + // results of contents(). + // + // Ownership of the returned stream is retained by the Source and + // should not be deleted by the caller. + virtual ::google::protobuf::io::ZeroCopyInputStream* contents() = 0; +}; // A ZeroCopyInputStream that reads from a grpc::ByteBuffer. 
class GrpcByteBufferSource @@ -46,6 +144,43 @@ class GrpcByteBufferSource ::google::protobuf::int64 byte_count_; }; +class GrpcByteBufferSourceWrapper : public Source { + public: + explicit GrpcByteBufferSourceWrapper(GrpcByteBufferSource* source) + : source_(source) {} + ::google::protobuf::io::ZeroCopyInputStream* contents() override { + return source_; + } + + private: + GrpcByteBufferSource* source_; +}; + +class GrpcByteSource : public Source { + public: + explicit GrpcByteSource(grpc_byte_buffer* buffer) : buffer_(buffer) {} + ~GrpcByteSource() override { DeleteStream(); } + + typedef ::grpc::GrpcBufferReader Reader; + + ::google::protobuf::io::ZeroCopyInputStream* contents() override { + DeleteStream(); + stream_ = new (&space_) Reader(buffer_); + return stream_; + } + + private: + void DeleteStream() { + if (stream_) { + stream_->~Reader(); + } + } + + grpc_byte_buffer* buffer_; // Not owned + Reader* stream_ = nullptr; // Points into space_ if non-nullptr + char space_[sizeof(Reader)]; +}; + } // namespace detail } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/detail/grpc_client.cc b/paddle/fluid/operators/detail/grpc_client.cc index 7266f3276477891d3c7b6827316a428ef7a31c6e..eb19685aa6cb73862b9e31afbf9c5138659b1b13 100644 --- a/paddle/fluid/operators/detail/grpc_client.cc +++ b/paddle/fluid/operators/detail/grpc_client.cc @@ -13,7 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "grpc_client.h" +#include #include "paddle/fluid/framework/threadpool.h" + namespace paddle { namespace operators { namespace detail { @@ -31,8 +33,9 @@ bool RPCClient::AsyncSendVariable(const std::string& ep, framework::Async([var_name_val, p_ctx, ep_val, p_scope, time_out, ch, this] { auto* var = p_scope->FindVar(var_name_val); - sendrecv::VariableMessage req; - SerializeToMessage(var_name_val, var, *p_ctx, &req); + + ::grpc::ByteBuffer req; + SerializeToByteBuffer(var_name_val, var, *p_ctx, &req); // varhandle VarHandle var_h; @@ -46,8 +49,11 @@ bool RPCClient::AsyncSendVariable(const std::string& ep, s->Prepare(var_h, time_out); s->response_call_back_ = NULL; - auto rpc = s->stub_->AsyncSendVariable(s->context_.get(), req, &cq_); - rpc->Finish(&s->reply_, &s->status_, (void*)s); + auto call = std::move(s->stub_g_.PrepareUnaryCall( + s->context_.get(), "/sendrecv.SendRecvService/SendVariable", req, + &cq_)); + call->StartCall(); + call->Finish(&s->reply_, &s->status_, (void*)s); }); req_count_++; @@ -56,9 +62,19 @@ bool RPCClient::AsyncSendVariable(const std::string& ep, } void ProcGetResponse(const VarHandle& var_h, - const sendrecv::VariableMessage& ret_msg) { - auto* outvar = var_h.scope->FindVar(var_h.name); - DeserializeFromMessage(ret_msg, *var_h.ctx, outvar); + // const sendrecv::VariableMessage& ret_msg) { + const ::grpc::ByteBuffer& ret_msg) { + framework::Variable* outvar = NULL; + DeserializeFromByteBuffer(ret_msg, *var_h.ctx, var_h.scope, outvar); +} + +template +void RequestToByteBuffer(const T& proto, ::grpc::ByteBuffer* result) { + ::grpc::Slice slice(proto.ByteSizeLong()); + proto.SerializeWithCachedSizesToArray( + const_cast(reinterpret_cast(slice.begin()))); + ::grpc::ByteBuffer tmp(&slice, 1); + result->Swap(&tmp); } bool RPCClient::AsyncGetVariable(const std::string& ep, @@ -88,8 +104,13 @@ bool RPCClient::AsyncGetVariable(const std::string& ep, s->Prepare(var_h, time_out); s->response_call_back_ = ProcGetResponse; - auto rpc = s->stub_->AsyncGetVariable(s->context_.get(), req, 
&cq_); - rpc->Finish(&s->reply_, &s->status_, (void*)s); + ::grpc::ByteBuffer buf; + RequestToByteBuffer(req, &buf); + + auto call = std::move(s->stub_g_.PrepareUnaryCall( + s->context_.get(), "/sendrecv.SendRecvService/GetVariable", buf, &cq_)); + call->StartCall(); + call->Finish(&s->reply_, &s->status_, (void*)s); }); req_count_++; @@ -97,7 +118,7 @@ bool RPCClient::AsyncGetVariable(const std::string& ep, return true; } -bool RPCClient::AsyncSendBatchBarrier(const std::string& ep, int64_t time_out) { +void RPCClient::AsyncSendBatchBarrier(const std::string& ep, int64_t time_out) { const auto ch = GetChannel(ep); BatchBarrierProcessor* s = new BatchBarrierProcessor(ch); @@ -108,8 +129,18 @@ bool RPCClient::AsyncSendBatchBarrier(const std::string& ep, int64_t time_out) { auto rpc = s->stub_->AsyncSendVariable(s->context_.get(), req, &cq_); rpc->Finish(&s->reply_, &s->status_, (void*)s); req_count_++; +} - return true; +void RPCClient::AsyncSendFetchBarrier(const std::string& ep, int64_t time_out) { + const auto ch = GetChannel(ep); + FetchBarrierProcessor* s = new FetchBarrierProcessor(ch); + s->Prepare(time_out); + + sendrecv::VariableMessage req; + req.set_varname(FETCH_BARRIER_MESSAGE); + auto rpc = s->stub_->AsyncGetVariable(s->context_.get(), req, &cq_); + rpc->Finish(&s->reply_, &s->status_, (void*)s); + req_count_++; } bool RPCClient::Wait() { @@ -154,7 +185,7 @@ bool RPCClient::Proceed() { PADDLE_ENFORCE(tag); // TODO(gongwb): add more retries. - ClientBase* c = static_cast(tag); + BaseProcessor* c = static_cast(tag); if (!c->status_.ok()) { LOG(ERROR) << "proc param error:" << c->var_h_.String() << " grpc error:" << c->status_.error_message(); @@ -174,6 +205,8 @@ std::shared_ptr RPCClient::GetChannel(const std::string& ep) { } grpc::ChannelArguments args; + args.SetInt("grpc.testing.fixed_reconnect_backoff_ms", 5000); + args.SetCompressionAlgorithm(GRPC_COMPRESS_NONE); args.SetMaxSendMessageSize(std::numeric_limits::max()); args.SetMaxReceiveMessageSize(std::numeric_limits::max()); diff --git a/paddle/fluid/operators/detail/grpc_client.h b/paddle/fluid/operators/detail/grpc_client.h index 669838810de240358857300c1014aa622c17f808..8216ac52fbbb3dcd2f30957cde58a850a77b08d6 100644 --- a/paddle/fluid/operators/detail/grpc_client.h +++ b/paddle/fluid/operators/detail/grpc_client.h @@ -25,6 +25,11 @@ limitations under the License. 
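The grpc_client.cc changes above stop round-tripping through sendrecv::VariableMessage and instead serialize straight into a ::grpc::ByteBuffer sent over a GenericStub. RequestToByteBuffer lost its bracketed tokens in flattening; restored, it presumably reads:

    // Serialize a protobuf message into one grpc Slice, then hand the slice
    // to a ByteBuffer without a further copy (reconstruction; cast types are
    // inferred from the grpc::Slice API).
    template <typename T>
    void RequestToByteBuffer(const T& proto, ::grpc::ByteBuffer* result) {
      ::grpc::Slice slice(proto.ByteSizeLong());
      proto.SerializeWithCachedSizesToArray(
          const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(slice.begin())));
      ::grpc::ByteBuffer tmp(&slice, 1);
      result->Swap(&tmp);
    }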
*/ #include #include +#include +#include +#include +#include + #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/scope.h" @@ -49,17 +54,13 @@ struct VarHandle { } }; -void ProcGetResponse(const VarHandle& var_h, - const sendrecv::VariableMessage& msg); +void ProcGetResponse(const VarHandle& var_h, const grpc::ByteBuffer& msg); -class ClientBase { +class BaseProcessor { public: - explicit ClientBase(std::shared_ptr ch) { - stub_ = sendrecv::SendRecvService::NewStub(ch); - context_ = NULL; - } + explicit BaseProcessor(std::shared_ptr ch) { context_ = NULL; } - virtual ~ClientBase() {} + virtual ~BaseProcessor() {} virtual void Prepare(const VarHandle& var_info, int64_t time_out) { context_.reset(new grpc::ClientContext()); @@ -82,18 +83,18 @@ class ClientBase { virtual void Process() = 0; - std::unique_ptr stub_; std::unique_ptr context_; grpc::Status status_; VarHandle var_h_; }; -typedef std::function +typedef std::function RequestSendCallBack; -class SendProcessor : public ClientBase { +class SendProcessor : public BaseProcessor { public: - explicit SendProcessor(std::shared_ptr ch) : ClientBase(ch) {} + explicit SendProcessor(std::shared_ptr ch) + : BaseProcessor(ch), stub_g_(ch) {} virtual ~SendProcessor() {} @@ -103,16 +104,18 @@ class SendProcessor : public ClientBase { } } - sendrecv::VoidMessage reply_; + ::grpc::GenericStub stub_g_; + ::grpc::ByteBuffer reply_; RequestSendCallBack response_call_back_ = NULL; }; -typedef std::function +typedef std::function RequestGetCallBack; -class GetProcessor : public ClientBase { +class GetProcessor : public BaseProcessor { public: - explicit GetProcessor(std::shared_ptr ch) : ClientBase(ch) {} + explicit GetProcessor(std::shared_ptr ch) + : BaseProcessor(ch), stub_g_(ch) {} virtual ~GetProcessor() {} @@ -122,19 +125,37 @@ class GetProcessor : public ClientBase { } } - sendrecv::VariableMessage reply_; + ::grpc::ByteBuffer reply_; + ::grpc::GenericStub stub_g_; RequestGetCallBack response_call_back_ = ProcGetResponse; }; -class BatchBarrierProcessor : public ClientBase { +class BatchBarrierProcessor : public BaseProcessor { public: explicit BatchBarrierProcessor(std::shared_ptr ch) - : ClientBase(ch) {} + : BaseProcessor(ch) { + stub_ = sendrecv::SendRecvService::NewStub(ch); + } virtual ~BatchBarrierProcessor() {} virtual void Process() {} sendrecv::VoidMessage reply_; + std::unique_ptr stub_; +}; + +class FetchBarrierProcessor : public BaseProcessor { + public: + explicit FetchBarrierProcessor(std::shared_ptr ch) + : BaseProcessor(ch) { + stub_ = sendrecv::SendRecvService::NewStub(ch); + } + + virtual ~FetchBarrierProcessor() {} + + virtual void Process() {} + sendrecv::VariableMessage reply_; + std::unique_ptr stub_; }; class RPCClient { @@ -151,7 +172,10 @@ class RPCClient { const std::string& var_name, int64_t time_out = 600 * 1000); - bool AsyncSendBatchBarrier(const std::string& ep, + void AsyncSendBatchBarrier(const std::string& ep, + int64_t time_out = 600 * 1000); + + void AsyncSendFetchBarrier(const std::string& ep, int64_t time_out = 600 * 1000); bool Wait(); diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc index 2a567516614aac39da9d069824f598ed26c5fb0c..9691d1e86b111def5b82e022dd01795aaf5c7b0d 100644 --- a/paddle/fluid/operators/detail/grpc_server.cc +++ b/paddle/fluid/operators/detail/grpc_server.cc @@ -14,7 +14,7 @@ limitations under the License. 
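grpc_client.h now separates two barrier RPCs: AsyncSendBatchBarrier marks the end of a batch's gradient sends, and the new AsyncSendFetchBarrier (backed by FetchBarrierProcessor) marks the end of parameter fetches. A hedged sketch of the trainer-side call order this enables; the client, endpoint, context and scope objects are assumed and not part of this patch:

    // Push gradients, then barrier and wait.
    client.AsyncSendVariable(ep, ctx, scope, "w@GRAD");
    client.AsyncSendBatchBarrier(ep);
    client.Wait();
    // Fetch updated parameters, then tell the server all gets were issued.
    client.AsyncGetVariable(ep, ctx, scope, "w");
    client.AsyncSendFetchBarrier(ep);
    client.Wait();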
*/ #include "paddle/fluid/operators/detail/grpc_server.h" -using grpc::ServerAsyncResponseWriter; +using ::grpc::ServerAsyncResponseWriter; namespace paddle { namespace operators {
@@ -26,9 +26,10 @@ enum CallStatus { PROCESS = 0, FINISH }; // https://stackoverflow.com/questions/41732884/grpc-multiple-services-in-cpp-async-server class RequestBase { public: - explicit RequestBase(sendrecv::SendRecvService::AsyncService* service, - grpc::ServerCompletionQueue* cq) - : service_(service), cq_(cq), status_(PROCESS) { + explicit RequestBase(GrpcService::AsyncService* service, + ::grpc::ServerCompletionQueue* cq, + const platform::DeviceContext* dev_ctx) + : service_(service), cq_(cq), status_(PROCESS), dev_ctx_(dev_ctx) { PADDLE_ENFORCE(cq_); } virtual ~RequestBase() {}
@@ -42,55 +43,58 @@ class RequestBase { } protected: - grpc::ServerContext ctx_; - sendrecv::SendRecvService::AsyncService* service_; - grpc::ServerCompletionQueue* cq_; + ::grpc::ServerContext ctx_; + GrpcService::AsyncService* service_; + ::grpc::ServerCompletionQueue* cq_; CallStatus status_; + const platform::DeviceContext* dev_ctx_; }; -typedef std::pair<std::string, sendrecv::VariableMessage> MessageWithName; - class RequestSend final : public RequestBase { public: - explicit RequestSend(sendrecv::SendRecvService::AsyncService* service, - grpc::ServerCompletionQueue* cq, - SimpleBlockQueue<MessageWithName>* queue) - : RequestBase(service, cq), queue_(queue), responder_(&ctx_) { - service_->RequestSendVariable(&ctx_, &request_, &responder_, cq_, cq_, - this); + explicit RequestSend(GrpcService::AsyncService* service, + ::grpc::ServerCompletionQueue* cq, + framework::Scope* scope, ReceivedQueue* queue, + const platform::DeviceContext* dev_ctx) + : RequestBase(service, cq, dev_ctx), queue_(queue), responder_(&ctx_) { + request_.reset(new VariableResponse(scope, dev_ctx_)); + int method_id = static_cast<int>(detail::GrpcMethod::kSendVariable); + service_->RequestAsyncUnary(method_id, &ctx_, request_.get(), &responder_, + cq_, cq_, this); } virtual ~RequestSend() {} - virtual std::string GetReqName() { return request_.varname(); } + virtual std::string GetReqName() { return request_->Varname(); } virtual void Process() { - MessageWithName msg_with_name = - std::make_pair(request_.varname(), std::move(request_)); - queue_->Push(std::move(msg_with_name)); - responder_.Finish(reply_, grpc::Status::OK, this); + queue_->Push(std::make_pair(request_->Varname(), request_)); + + sendrecv::VoidMessage reply; + responder_.Finish(reply, ::grpc::Status::OK, this); status_ = FINISH; } protected: - sendrecv::VariableMessage request_; - sendrecv::VoidMessage reply_; - SimpleBlockQueue<MessageWithName>* queue_; + std::shared_ptr<VariableResponse> request_; + ReceivedQueue* queue_; ServerAsyncResponseWriter<sendrecv::VoidMessage> responder_; }; class RequestGet final : public RequestBase { public: - explicit RequestGet(sendrecv::SendRecvService::AsyncService* service, - grpc::ServerCompletionQueue* cq, framework::Scope* scope, + explicit RequestGet(GrpcService::AsyncService* service, + ::grpc::ServerCompletionQueue* cq, + framework::Scope* scope, const platform::DeviceContext* dev_ctx, - SimpleBlockQueue<char>* queue) - : RequestBase(service, cq), + SimpleBlockQueue<MessageWithName>* queue) + : RequestBase(service, cq, dev_ctx), responder_(&ctx_), scope_(scope), - dev_ctx_(dev_ctx), queue_(queue) { - service_->RequestGetVariable(&ctx_, &request_, &responder_, cq_, cq_, this); + int method_id = static_cast<int>(detail::GrpcMethod::kGetVariable); + service_->RequestAsyncUnary(method_id, &ctx_, &request_, &responder_, cq_, + cq_, this); } virtual ~RequestGet() {}
@@ -101,31 +105,42 @@ class
RequestGet final : public RequestBase { // proc request. std::string var_name = request_.varname(); auto* var = scope_->FindVar(var_name); - SerializeToMessage(var_name, var, *dev_ctx_, &reply_); - // TODO(gongwb): check var's info. - responder_.Finish(reply_, grpc::Status::OK, this); + + ::grpc::ByteBuffer reply; + if (var_name != FETCH_BARRIER_MESSAGE) { + SerializeToByteBuffer(var_name, var, *dev_ctx_, &reply); + } + + responder_.Finish(reply, ::grpc::Status::OK, this); status_ = FINISH; - queue_->Push('c'); + + if (var_name == FETCH_BARRIER_MESSAGE) { + sendrecv::VariableMessage msg; + MessageWithName msg_with_name = std::make_pair(var_name, msg); + queue_->Push(msg_with_name); + } } protected: sendrecv::VariableMessage request_; - sendrecv::VariableMessage reply_; - ServerAsyncResponseWriter<sendrecv::VariableMessage> responder_; + ServerAsyncResponseWriter<::grpc::ByteBuffer> responder_; framework::Scope* scope_; - const platform::DeviceContext* dev_ctx_; - SimpleBlockQueue<char>* queue_; + SimpleBlockQueue<MessageWithName>* queue_; };
void AsyncGRPCServer::WaitClientGet(int count) { - for (int i = 0; i < count; ++i) { - var_get_queue_.Pop(); + int fetch_barriers = 0; + while (fetch_barriers < count) { + auto msg = var_get_queue_.Pop(); + if (msg.first == FETCH_BARRIER_MESSAGE) { + fetch_barriers++; + } } }
void AsyncGRPCServer::RunSyncUpdate() { - grpc::ServerBuilder builder; - builder.AddListeningPort(address_, grpc::InsecureServerCredentials()); + ::grpc::ServerBuilder builder; + builder.AddListeningPort(address_, ::grpc::InsecureServerCredentials()); builder.SetMaxSendMessageSize(std::numeric_limits<int>::max()); builder.SetMaxReceiveMessageSize(std::numeric_limits<int>::max()); builder.RegisterService(&service_);
@@ -173,8 +188,8 @@ void AsyncGRPCServer::TryToRegisterNewSendOne() { if (is_shut_down_) { return; } - RequestSend* send = - new RequestSend(&service_, cq_send_.get(), &var_recv_queue_); + RequestSend* send = new RequestSend(&service_, cq_send_.get(), scope_, + &var_recv_queue_, dev_ctx_); VLOG(4) << "Create RequestSend status:" << send->Status(); }
@@ -189,7 +204,7 @@ void AsyncGRPCServer::TryToRegisterNewGetOne() { } // FIXME(typhoonzero): change cq_name to enum. -void AsyncGRPCServer::HandleRequest(grpc::ServerCompletionQueue* cq, +void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq, std::string cq_name, std::function<void()> TryToRegisterNewOne) { TryToRegisterNewOne();
diff --git a/paddle/fluid/operators/detail/grpc_server.h b/paddle/fluid/operators/detail/grpc_server.h index e9402ff6aafe73684b57442c03ee4c573a902a46..f891c75dbc81a5cdb5274bbae84e9e85f42464fb 100644 --- a/paddle/fluid/operators/detail/grpc_server.h +++ b/paddle/fluid/operators/detail/grpc_server.h @@ -14,28 +14,30 @@ limitations under the License.
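A note on the barrier rework above: WaitClientGet previously popped one token per Get RPC, but with fetch barriers a trainer may issue several Get calls per step, so only FETCH_BARRIER_MESSAGE entries now advance the barrier. A self-contained sketch of that counting pattern, with std::queue plus a condition_variable standing in for SimpleBlockQueue (all names here are illustrative, not paddle's):

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <queue>
#include <string>
#include <thread>

std::queue<std::string> q;  // toy stand-in for var_get_queue_
std::mutex m;
std::condition_variable cv;

void Push(const std::string& name) {
  { std::lock_guard<std::mutex> l(m); q.push(name); }
  cv.notify_one();
}

std::string Pop() {
  std::unique_lock<std::mutex> l(m);
  cv.wait(l, [] { return !q.empty(); });
  std::string v = q.front();
  q.pop();
  return v;
}

void WaitClientGet(int count) {  // mirrors the logic in the hunk above
  int fetch_barriers = 0;
  while (fetch_barriers < count) {
    if (Pop() == "FETCH_BARRIER@RECV") ++fetch_barriers;
  }
}

int main() {
  std::thread t([] {
    Push("w@grad");                // ordinary Get: ignored by the barrier
    Push("FETCH_BARRIER@RECV");    // counted
    Push("FETCH_BARRIER@RECV");
  });
  WaitClientGet(2);                // returns only after both barriers arrive
  std::cout << "all trainers fetched\n";
  t.join();
}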
*/ #pragma once +#include +#include + #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/framework/selected_rows.h" #include "paddle/fluid/framework/var_type.h" -#include "paddle/fluid/operators/detail/simple_block_queue.h" - +#include "paddle/fluid/operators/detail/grpc_service.h" #include "paddle/fluid/operators/detail/send_recv.grpc.pb.h" #include "paddle/fluid/operators/detail/send_recv.pb.h" -#include -#include -#include -#include "paddle/fluid/operators/detail/sendrecvop_utils.h" - namespace paddle { namespace operators { namespace detail { +typedef std::pair<std::string, std::shared_ptr<VariableResponse>> + ReceivedMessage; +typedef SimpleBlockQueue<ReceivedMessage> ReceivedQueue; + typedef std::pair<std::string, sendrecv::VariableMessage> MessageWithName; class RequestBase; -class AsyncGRPCServer final : public sendrecv::SendRecvService::Service { +class AsyncGRPCServer final { public: explicit AsyncGRPCServer(const std::string &address) : address_(address) {}
@@ -50,14 +52,16 @@ class AsyncGRPCServer final : public sendrecv::SendRecvService::Service { void SetDevCtx(const platform::DeviceContext *dev_ctx) { dev_ctx_ = dev_ctx; } - const MessageWithName Get() { return this->var_recv_queue_.Pop(); } + const ReceivedMessage Get() { return this->var_recv_queue_.Pop(); } - void Push(const MessageWithName &msg) { this->var_recv_queue_.Push(msg); } + void Push(const std::string &msg_name) { + this->var_recv_queue_.Push(std::make_pair(msg_name, nullptr)); + } void ShutDown(); protected: - void HandleRequest(grpc::ServerCompletionQueue *cq, std::string cq_name, + void HandleRequest(::grpc::ServerCompletionQueue *cq, std::string cq_name, std::function<void()> TryToRegisterNewOne); void TryToRegisterNewSendOne(); void TryToRegisterNewGetOne();
@@ -66,18 +70,19 @@ class AsyncGRPCServer final : public sendrecv::SendRecvService::Service { private: std::mutex cq_mutex_; volatile bool is_shut_down_ = false; - std::unique_ptr<grpc::ServerCompletionQueue> cq_send_; - std::unique_ptr<grpc::ServerCompletionQueue> cq_get_; + std::unique_ptr<::grpc::ServerCompletionQueue> cq_send_; + std::unique_ptr<::grpc::ServerCompletionQueue> cq_get_; - sendrecv::SendRecvService::AsyncService service_; - std::unique_ptr<grpc::Server> server_; + GrpcService::AsyncService service_; + std::unique_ptr<::grpc::Server> server_; std::string address_; framework::Scope *scope_; const platform::DeviceContext *dev_ctx_; + // received variable from RPC, operators fetch variable from this queue. - SimpleBlockQueue<MessageWithName> var_recv_queue_; - SimpleBlockQueue<char> var_get_queue_; + SimpleBlockQueue<MessageWithName> var_get_queue_; + ReceivedQueue var_recv_queue_; // condition of the sub program std::mutex barrier_mutex_;
diff --git a/paddle/fluid/operators/detail/grpc_service.h b/paddle/fluid/operators/detail/grpc_service.h new file mode 100644 index 0000000000000000000000000000000000000000..ae6f9db3bd31a4b4839b34e8e53dd87f1ecf4b1d --- /dev/null +++ b/paddle/fluid/operators/detail/grpc_service.h @@ -0,0 +1,118 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "paddle/fluid/operators/detail/variable_response.h" + +// NOTE: This file was originally created by tensorflow +// (https://github.com/tensorflow/tensorflow/); we borrowed this +// method and made some modifications so that we can parse gRPC +// requests without too much copying of the tensor data. + +namespace grpc { +class CompletionQueue; +class Channel; +class RpcService; +class ServerCompletionQueue; +class ServerContext; + +// Support parsing/unparsing of paddle::operators::detail::VariableResponse. +// Wire-format is identical to sendrecv::VariableMessage. +template <> +class SerializationTraits<paddle::operators::detail::VariableResponse> { + public: + static Status Serialize( + const paddle::operators::detail::VariableResponse& msg, + grpc_byte_buffer** bp, bool* own_buffer) { + PADDLE_ENFORCE(false, "SerializationTraits::Serialize not implemented!"); + return Status(); + } + static Status Deserialize(grpc_byte_buffer* buffer, + paddle::operators::detail::VariableResponse* msg, + int max_message_size = INT_MAX) { + if (buffer == nullptr) { + return Status(StatusCode::INTERNAL, "No payload"); + } + + Status result = g_core_codegen_interface->ok(); + if (result.ok()) { + paddle::operators::detail::GrpcByteSource source(buffer); + int ret = msg->Parse(&source); + if (ret != 0) { + result = Status(StatusCode::INTERNAL, "VariableResponse parse error"); + } + } + g_core_codegen_interface->grpc_byte_buffer_destroy(buffer); + return result; + } +}; +} // namespace grpc + +namespace paddle { +namespace operators { +namespace detail { + +enum class GrpcMethod { + kSendVariable, + kGetVariable, +}; + +static const int kGrpcNumMethods = + static_cast<int>(GrpcMethod::kGetVariable) + 1; + +inline const char* GrpcMethodName(GrpcMethod id) { + switch (id) { + case GrpcMethod::kSendVariable: + return "/sendrecv.SendRecvService/SendVariable"; + case GrpcMethod::kGetVariable: + return "/sendrecv.SendRecvService/GetVariable"; + } + + // Shouldn't be reached. + PADDLE_ENFORCE(false, "Invalid id: not found valid method name"); + return nullptr; +} + +class GrpcService final { + public: + class AsyncService : public ::grpc::Service { + public: + AsyncService() { + for (int i = 0; i < kGrpcNumMethods; ++i) { + AddMethod(new ::grpc::internal::RpcServiceMethod( + GrpcMethodName(static_cast<GrpcMethod>(i)), + ::grpc::internal::RpcMethod::NORMAL_RPC, nullptr)); + ::grpc::Service::MarkMethodAsync(i); + } + } + virtual ~AsyncService() {} + + // Make RequestAsyncUnary public for grpc_call.h + using ::grpc::Service::RequestAsyncUnary; + }; +}; + +} // namespace detail +} // namespace operators +} // namespace paddle
diff --git a/paddle/fluid/operators/detail/safe_ref.h b/paddle/fluid/operators/detail/safe_ref.h index 9cb5851deba6b16261d4499afcfb867d9d706498..48bdce740878ea486eda6821dc29885a3e480114 100644 --- a/paddle/fluid/operators/detail/safe_ref.h +++ b/paddle/fluid/operators/detail/safe_ref.h @@ -14,6 +14,8 @@ limitations under the License.
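A note on the new grpc_service.h above: gRPC resolves request (de)serialization through the SerializationTraits<T> template, so specializing it for VariableResponse is the single hook needed to stream tensor bytes straight into paddle Variables instead of materializing a full protobuf message first. Very roughly, and heavily simplified from gRPC's generated code (DecodeRequestPayload is an illustrative name, not a real gRPC symbol):

template <class T>
::grpc::Status DecodeRequestPayload(grpc_byte_buffer* payload, T* request) {
  // For T = VariableResponse this resolves to the specialization above,
  // which forwards the buffer to VariableResponse::Parse via GrpcByteSource.
  return ::grpc::SerializationTraits<T>::Deserialize(payload, request);
}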
*/ #pragma once +#include "paddle/fluid/platform/enforce.h" + namespace paddle { namespace operators { namespace detail {
diff --git a/paddle/fluid/operators/detail/send_recv.proto b/paddle/fluid/operators/detail/send_recv.proto index b0215d4a80c9440f09c35434903fd6166b03e8b0..598aaa4c51a6c5cd32eeffe08bbae849aee1a1df 100644 --- a/paddle/fluid/operators/detail/send_recv.proto +++ b/paddle/fluid/operators/detail/send_recv.proto @@ -32,6 +32,9 @@ enum VarType { SELECTED_ROWS = 1; } +// NOTICE(gongwb): don't modify this proto if you are not +// familiar with how we serialize it in sendrecvop_utils.h +// and deserialize it in variable_response.h. message VariableMessage { enum Type { // Pod Types @@ -45,7 +48,6 @@ } message LodData { repeated int64 lod_data = 1; } - string varname = 1; // TODO(Yancey1989): reference framework::proto::VarDesc::VarType VarType type = 2; @@ -64,3 +66,5 @@ } message VoidMessage {} + +message TestMessage { int64 test_1 = 1; }
diff --git a/paddle/fluid/operators/detail/sendrecvop_utils.cc b/paddle/fluid/operators/detail/sendrecvop_utils.cc index 39117eeeb611b025c426938c60ddf82c6af232ca..d7bbf79c50651943d91c38bbaab775f5ee8dc395 100644 --- a/paddle/fluid/operators/detail/sendrecvop_utils.cc +++ b/paddle/fluid/operators/detail/sendrecvop_utils.cc @@ -13,61 +13,19 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/detail/sendrecvop_utils.h" +#include +#include #include "google/protobuf/io/coded_stream.h" #include "google/protobuf/io/zero_copy_stream.h" #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/operators/detail/bytebuffer_stream.h" #include "paddle/fluid/operators/detail/proto_encoder_helper.h" +#include "paddle/fluid/operators/detail/variable_response.h" namespace paddle { namespace operators { namespace detail { -void SerializeToMessage(const std::string& name, const framework::Variable* var, - const platform::DeviceContext& ctx, - sendrecv::VariableMessage* msg) { - msg->set_varname(name); - std::ostringstream oss; - switch (framework::ToVarType(var->Type())) { - case framework::proto::VarType_Type_LOD_TENSOR: - msg->set_type(sendrecv::VarType::LOD_TENSOR); - framework::SerializeToStream(oss, var->Get<framework::LoDTensor>(), ctx); - break; - case framework::proto::VarType_Type_SELECTED_ROWS: - msg->set_type(sendrecv::VarType::SELECTED_ROWS); - framework::SerializeToStream(oss, var->Get<framework::SelectedRows>(), - ctx); - break; - default: { - PADDLE_THROW("Serialize does not support type: %s", - typeid(var->Type()).name()); - break; - } - } - msg->set_serialized(oss.str()); -} - -void DeserializeFromMessage(const sendrecv::VariableMessage& msg, - const platform::DeviceContext& ctx, - framework::Variable* var) { - std::istringstream iss(msg.serialized()); - switch (msg.type()) { - case sendrecv::VarType::LOD_TENSOR: - DeserializeFromStream(iss, var->GetMutable<framework::LoDTensor>(), ctx); - break; - case sendrecv::VarType::SELECTED_ROWS: { - DeserializeFromStream(iss, var->GetMutable<framework::SelectedRows>(), - ctx); - break; - } - default: { - PADDLE_THROW("Deserialize does not support type: %s", - typeid(var->Type()).name()); - break; - } - } -} - void SerializeToByteBuffer(const std::string& name, framework::Variable* var, const platform::DeviceContext& ctx, ::grpc::ByteBuffer* msg) { @@ -123,6 +81,7 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, static_cast<const platform::CUDADeviceContext&>(ctx); auto copy_size = tensor.memory_size(); payload = memory::Alloc(cpu, copy_size); +
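+ // NOTE: on CUDA places the tensor bytes live on the device, so the
+ // memory::Alloc above stages a host buffer and the memory::Copy below
+ // fills it on the device stream; the destroy callback registered later
+ // frees that staging buffer once gRPC has consumed the slice.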
memory::Copy(cpu, payload, boost::get(tensor.place()), reinterpret_cast(tensor.data()), @@ -132,6 +91,7 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, platform::CPUPlace cpu; memory::Free(cpu, backing); }; + #endif } else { payload = tensor.data(); @@ -219,80 +179,11 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, void DeserializeFromByteBuffer(const ::grpc::ByteBuffer& msg, const platform::DeviceContext& ctx, - framework::Variable* var) { - sendrecv::VariableMessage meta; - GrpcByteBufferSource source; - source.Init(msg); - ::google::protobuf::io::CodedInputStream input(&source); - // do zerocopy parsing - PADDLE_ENFORCE(meta.ParseFromCodedStream(&input)); - PADDLE_ENFORCE(input.ConsumedEntireMessage()); - // dims is needed by both tensor and selectedrows - std::vector vecdims; - for (auto& d : meta.dims()) { - vecdims.push_back(d); - } - framework::DDim dims = framework::make_ddim(vecdims); - - if (meta.type() == sendrecv::LOD_TENSOR) { - auto* tensor = var->GetMutable(); - tensor->Resize(dims); - void* tensor_data = tensor->mutable_data( - ctx.GetPlace(), - paddle::operators::detail::ToTypeIndex(meta.data_type())); - framework::LoD lod; - for (int i = 0; i < meta.lod_level(); ++i) { - framework::Vector v; - for (int j = 0; j < meta.lod(i).lod_data_size(); ++j) { - v.push_back(meta.lod(i).lod_data(j)); - } - lod.push_back(v); - } - tensor->set_lod(lod); - // How to avoid copying and use the message buffer directly? - // Maybe need to find a way to release all memory except tensor content. - if (platform::is_gpu_place(ctx.GetPlace())) { -#ifdef PADDLE_WITH_CUDA - platform::CPUPlace cpu; - auto& gpu_dev_ctx = static_cast(ctx); - memory::Copy(boost::get(tensor->place()), - tensor_data, cpu, - reinterpret_cast(meta.serialized().data()), - meta.serialized().size(), gpu_dev_ctx.stream()); - ctx.Wait(); -#endif - } else { - memcpy(tensor_data, - reinterpret_cast(meta.serialized().data()), - meta.serialized().size()); - } - } else if (meta.type() == sendrecv::SELECTED_ROWS) { - auto* slr = var->GetMutable(); - auto* tensor = slr->mutable_value(); - int64_t* rows_data = slr->mutable_rows()->data(); - tensor->Resize(dims); - void* tensor_data = tensor->mutable_data( - ctx.GetPlace(), - paddle::operators::detail::ToTypeIndex(meta.data_type())); - if (platform::is_gpu_place(ctx.GetPlace())) { -#ifdef PADDLE_WITH_CUDA - platform::CPUPlace cpu; - auto& gpu_dev_ctx = static_cast(ctx); - memory::Copy(boost::get(tensor->place()), - tensor_data, cpu, - reinterpret_cast(meta.serialized().data()), - meta.serialized().size(), gpu_dev_ctx.stream()); - ctx.Wait(); -#endif - } else { - memcpy(tensor_data, - reinterpret_cast(meta.serialized().data()), - meta.serialized().size()); - } - // copy rows CPU data, GPU data will be copied lazly - memcpy(rows_data, reinterpret_cast(meta.rows().data()), - meta.rows().size()); - } + const framework::Scope* scope, + framework::Variable*& var) { + operators::detail::VariableResponse resp(scope, &ctx); + PADDLE_ENFORCE(resp.Parse(msg) == 0, "parse bytebuffer to tensor error!"); + var = resp.GetVar(); } } // namespace detail diff --git a/paddle/fluid/operators/detail/sendrecvop_utils.h b/paddle/fluid/operators/detail/sendrecvop_utils.h index 5208091e54b4da2bb0265f84827ce23b57e954dc..3b875627032a6b08cc70280b3cc825c2a703923f 100644 --- a/paddle/fluid/operators/detail/sendrecvop_utils.h +++ b/paddle/fluid/operators/detail/sendrecvop_utils.h @@ -21,6 +21,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/framework/var_type.h" #include "paddle/fluid/operators/detail/send_recv.grpc.pb.h" @@ -32,24 +33,18 @@ namespace detail { #define LISTEN_TERMINATE_MESSAGE "TERMINATE@RECV" #define BATCH_BARRIER_MESSAGE "BATCH_BARRIER@RECV" +#define FETCH_BARRIER_MESSAGE "FETCH_BARRIER@RECV" typedef void (*DestroyCallback)(void*); -void SerializeToMessage(const std::string& name, const framework::Variable* var, - const platform::DeviceContext& ctx, - sendrecv::VariableMessage* msg); - -void DeserializeFromMessage(const sendrecv::VariableMessage& msg, - const platform::DeviceContext& ctx, - framework::Variable* var); - void SerializeToByteBuffer(const std::string& name, framework::Variable* var, const platform::DeviceContext& ctx, ::grpc::ByteBuffer* msg); void DeserializeFromByteBuffer(const ::grpc::ByteBuffer& msg, const platform::DeviceContext& ctx, - framework::Variable* var); + const framework::Scope* scope, + framework::Variable*& var); inline std::type_index ToTypeIndex(sendrecv::VariableMessage::Type type) { switch (type) { diff --git a/paddle/fluid/operators/detail/test_serde.cc b/paddle/fluid/operators/detail/test_serde.cc index 2f06e5a686b996858d21930a1afa2861efca4a9b..99c1577223c4fe8001b4ce651b10e9e1f0024296 100644 --- a/paddle/fluid/operators/detail/test_serde.cc +++ b/paddle/fluid/operators/detail/test_serde.cc @@ -16,11 +16,13 @@ limitations under the License. */ #include #include +#include #include "gtest/gtest.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/framework/variable.h" #include "paddle/fluid/operators/detail/sendrecvop_utils.h" +#include "paddle/fluid/operators/detail/variable_response.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/place.h" #include "paddle/fluid/string/printf.h" @@ -31,19 +33,21 @@ namespace operators = paddle::operators; namespace math = paddle::operators::math; namespace memory = paddle::memory; -void RunSerdeTestTensor(platform::Place place) { - // serialize var to ByteBuffer - framework::Variable var; - auto* tensor = var.GetMutable(); - tensor->Resize(framework::make_ddim({4, 8, 4, 2})); - framework::LoD lod; - lod.push_back(framework::Vector({1, 3, 8})); - tensor->set_lod(lod); - int tensor_numel = 4 * 8 * 4 * 2; +void RunSerdeTestSelectedRows(platform::Place place) { platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto& ctx = *pool.Get(place); + + // serialize var to ByteBuffer + framework::Variable var; + auto* slr = var.GetMutable(); + auto* tensor = slr->mutable_value(); + auto* rows = slr->mutable_rows(); + tensor->Resize(framework::make_ddim({2, 10})); tensor->mutable_data(place); - math::set_constant(ctx, tensor, 31.9); + int tensor_numel = 2 * 10; + math::set_constant(ctx, tensor, 32.7); + rows->push_back(3); + rows->push_back(10); ::grpc::ByteBuffer msg; operators::detail::SerializeToByteBuffer("myvar", &var, ctx, &msg); @@ -56,62 +60,67 @@ void RunSerdeTestTensor(platform::Place place) { for (const auto& s : slices) { tmp.append(reinterpret_cast(s.begin()), s.size()); } + sendrecv::VariableMessage varmsg; EXPECT_TRUE(varmsg.ParseFromString(tmp)); + EXPECT_EQ(varmsg.varname(), "myvar"); - EXPECT_EQ(varmsg.type(), 0); - EXPECT_EQ(varmsg.dims()[0], 4); - EXPECT_EQ(varmsg.dims()[1], 8); - 
EXPECT_EQ(varmsg.dims()[2], 4); - EXPECT_EQ(varmsg.dims()[3], 2); - EXPECT_EQ(varmsg.lod_level(), 1); - EXPECT_EQ(varmsg.lod(0).lod_data(0), 1); - EXPECT_EQ(varmsg.lod(0).lod_data(1), 3); - EXPECT_EQ(varmsg.lod(0).lod_data(2), 8); + EXPECT_EQ(varmsg.type(), 1); const float* tensor_data = reinterpret_cast(varmsg.serialized().data()); + const int64_t* rows_data = + reinterpret_cast(varmsg.rows().data()); for (int i = 0; i < tensor_numel; ++i) { - EXPECT_FLOAT_EQ(tensor_data[i], 31.9); + EXPECT_FLOAT_EQ(tensor_data[i], 32.7); } - + EXPECT_EQ(rows_data[0], 3); + EXPECT_EQ(rows_data[1], 10); // deserialize zero-copy - framework::Variable var2; - operators::detail::DeserializeFromByteBuffer(msg, ctx, &var2); - auto tensor2 = var2.Get(); + // framework::Variable var2; + // operators::detail::DeserializeFromByteBuffer(msg, ctx, &var2); + framework::Scope scope; + scope.Var("myvar"); + operators::detail::VariableResponse resp(&scope, &ctx); + EXPECT_EQ(resp.Parse(msg), 0); + + framework::Variable* var2 = resp.GetVar(); + + auto* slr2 = var2->GetMutable(); + auto* tensor2 = slr2->mutable_value(); + auto* rows2 = slr2->mutable_rows(); float* tensor_data2 = nullptr; framework::Tensor tmp_tensor; if (platform::is_gpu_place(ctx.GetPlace())) { platform::CPUPlace cpu; - framework::TensorCopy(tensor2, cpu, &tmp_tensor); + framework::TensorCopy(*tensor2, cpu, &tmp_tensor); tensor_data2 = tmp_tensor.data(); } else { - tensor_data2 = const_cast(tensor2.data()); + tensor_data2 = const_cast(tensor2->data()); } + const int64_t* rows_data2 = rows2->data(); - EXPECT_EQ(varmsg.lod_level(), 1); - EXPECT_EQ(varmsg.lod(0).lod_data(0), 1); - EXPECT_EQ(varmsg.lod(0).lod_data(1), 3); - EXPECT_EQ(varmsg.lod(0).lod_data(2), 8); - for (int i = 0; i < tensor_numel; ++i) EXPECT_FLOAT_EQ(tensor_data2[i], 31.9); + for (int i = 0; i < tensor_numel; ++i) { + EXPECT_FLOAT_EQ(tensor_data2[i], 32.7); + } + EXPECT_EQ(rows_data2[0], 3); + EXPECT_EQ(rows_data2[1], 10); } -void RunSerdeTestSelectedRows(platform::Place place) { - platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); - auto& ctx = *pool.Get(place); - +void RunTestLodTensor(platform::Place place, int from_type = 0) { // serialize var to ByteBuffer framework::Variable var; - auto* slr = var.GetMutable(); - auto* tensor = slr->mutable_value(); - auto* rows = slr->mutable_rows(); - tensor->Resize(framework::make_ddim({2, 10})); + auto* tensor = var.GetMutable(); + tensor->Resize(framework::make_ddim({4, 8, 4, 2})); + framework::LoD lod; + lod.push_back(framework::Vector({1, 3, 8})); + tensor->set_lod(lod); + int tensor_numel = 4 * 8 * 4 * 2; + platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); + auto& ctx = *pool.Get(place); tensor->mutable_data(place); - int tensor_numel = 2 * 10; - math::set_constant(ctx, tensor, 32.7); - rows->push_back(3); - rows->push_back(10); + math::set_constant(ctx, tensor, 31.9); ::grpc::ByteBuffer msg; operators::detail::SerializeToByteBuffer("myvar", &var, ctx, &msg); @@ -126,61 +135,82 @@ void RunSerdeTestSelectedRows(platform::Place place) { } sendrecv::VariableMessage varmsg; EXPECT_TRUE(varmsg.ParseFromString(tmp)); - EXPECT_EQ(varmsg.varname(), "myvar"); - EXPECT_EQ(varmsg.type(), 1); + EXPECT_EQ(varmsg.type(), 0); + EXPECT_EQ(varmsg.dims()[0], 4); + EXPECT_EQ(varmsg.dims()[1], 8); + EXPECT_EQ(varmsg.dims()[2], 4); + EXPECT_EQ(varmsg.dims()[3], 2); + EXPECT_EQ(varmsg.lod_level(), 1); + EXPECT_EQ(varmsg.lod(0).lod_data(0), 1); + EXPECT_EQ(varmsg.lod(0).lod_data(1), 3); + 
EXPECT_EQ(varmsg.lod(0).lod_data(2), 8); const float* tensor_data = reinterpret_cast(varmsg.serialized().data()); - const int64_t* rows_data = - reinterpret_cast(varmsg.rows().data()); for (int i = 0; i < tensor_numel; ++i) { - EXPECT_FLOAT_EQ(tensor_data[i], 32.7); + EXPECT_FLOAT_EQ(tensor_data[i], 31.9); } - EXPECT_EQ(rows_data[0], 3); - EXPECT_EQ(rows_data[1], 10); + + // message binary + std::string str; + varmsg.SerializeToString(&str); + + // message bytebuffer + ::grpc::Slice slices_2[1]; + int num_slices = 1; + slices_2[0] = ::grpc::Slice(str.length()); + memcpy(const_cast(slices_2[0].begin()), str.c_str(), str.length()); + ::grpc::ByteBuffer bytebuffer2(&slices_2[0], num_slices); + // deserialize zero-copy - framework::Variable var2; - operators::detail::DeserializeFromByteBuffer(msg, ctx, &var2); + framework::Scope scope; + scope.Var("myvar"); + operators::detail::VariableResponse resp(&scope, &ctx); + if (from_type == 0) { + EXPECT_EQ(resp.Parse(msg), 0); + } else { + EXPECT_EQ(resp.Parse(bytebuffer2), 0); + } - auto* slr2 = var2.GetMutable(); - auto* tensor2 = slr2->mutable_value(); - auto* rows2 = slr2->mutable_rows(); + framework::Variable* var2 = resp.GetVar(); + + auto tensor2 = var2->Get(); float* tensor_data2 = nullptr; framework::Tensor tmp_tensor; if (platform::is_gpu_place(ctx.GetPlace())) { platform::CPUPlace cpu; - framework::TensorCopy(*tensor2, cpu, &tmp_tensor); + framework::TensorCopy(tensor2, cpu, &tmp_tensor); tensor_data2 = tmp_tensor.data(); } else { - tensor_data2 = const_cast(tensor2->data()); + tensor_data2 = const_cast(tensor2.data()); } - const int64_t* rows_data2 = rows2->data(); - for (int i = 0; i < tensor_numel; ++i) { - EXPECT_FLOAT_EQ(tensor_data2[i], 32.7); - } - EXPECT_EQ(rows_data2[0], 3); - EXPECT_EQ(rows_data2[1], 10); + EXPECT_EQ(varmsg.lod_level(), 1); + EXPECT_EQ(varmsg.lod(0).lod_data(0), 1); + EXPECT_EQ(varmsg.lod(0).lod_data(1), 3); + EXPECT_EQ(varmsg.lod(0).lod_data(2), 8); + for (int i = 0; i < tensor_numel; ++i) EXPECT_FLOAT_EQ(tensor_data2[i], 31.9); } -TEST(SelectedRows, CPU) { +TEST(LodTensor, Run) { platform::CPUPlace place; - RunSerdeTestSelectedRows(place); -} - -TEST(SelectedRows, GPU) { + RunTestLodTensor(place); + RunTestLodTensor(place, 1); +#ifdef PADDLE_WITH_CUDA platform::CUDAPlace place; - RunSerdeTestSelectedRows(place); + RunTestLodTensor(place); + RunTestLodTensor(place, 1); +#endif } -TEST(Tensor, CPU) { +TEST(SelectedRows, Run) { platform::CPUPlace place; - RunSerdeTestTensor(place); -} + RunSerdeTestSelectedRows(place); -TEST(Tensor, GPU) { +#ifdef PADDLE_WITH_CUDA platform::CUDAPlace place; - RunSerdeTestTensor(place); -} \ No newline at end of file + RunSerdeTestSelectedRows(place); +#endif +} diff --git a/paddle/fluid/operators/detail/variable_response.cc b/paddle/fluid/operators/detail/variable_response.cc new file mode 100644 index 0000000000000000000000000000000000000000..12e8eb0b4da2252b104415aef4156bf100c3e565 --- /dev/null +++ b/paddle/fluid/operators/detail/variable_response.cc @@ -0,0 +1,400 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/operators/detail/variable_response.h" +#include +#include "paddle/fluid/operators/detail/send_recv.pb.h" +#include "paddle/fluid/operators/detail/sendrecvop_utils.h" + +namespace paddle { +namespace operators { +namespace detail { + +enum WireType { + WIRETYPE_VARINT = 0, + WIRETYPE_LENGTH_DELIMITED = 2, +}; + +inline int GetTagFieldNumber(uint32_t tag) { return tag >> 3; } + +inline WireType GetTagWireType(uint32_t tag) { + return static_cast(tag & 0x7); +} + +bool ReadVarintSizeAsInt(::google::protobuf::io::CodedInputStream* input, + int* result) { + uint64_t v; + if (input->ReadVarint64(&v) && v <= static_cast(INT_MAX)) { + *result = static_cast(v); + return true; + } else { + return false; + } +} + +bool ReadRaw(::google::protobuf::io::CodedInputStream* input, + const platform::DeviceContext& dev_ctx, platform::Place place, + void* dest, int size) { + const void* data = NULL; + int size_to_write = 0; + + if (platform::is_gpu_place(place)) { +#ifdef PADDLE_WITH_CUDA + auto& gpu_dev_ctx = + static_cast(dev_ctx); + platform::CPUPlace cpu; + + char* p = reinterpret_cast(dest); + while (size > 0) { + if (!input->GetDirectBufferPointer(&data, &size_to_write)) { + return false; + } + + memory::Copy(boost::get(place), + reinterpret_cast(p), cpu, data, size_to_write, + gpu_dev_ctx.stream()); + p += size_to_write; + size -= size_to_write; + + input->Skip(size_to_write); + } + gpu_dev_ctx.Wait(); +#else + PADDLE_THROW("Unexpected branch"); +#endif + return true; + } + + char* p = reinterpret_cast(dest); + while (size > 0) { + if (!input->GetDirectBufferPointer(&data, &size_to_write)) { + return false; + } + // TODO(gongwb): can we avoid copy? 
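+ // GetDirectBufferPointer exposes the stream's current backing chunk
+ // without copying; the loop drains the stream chunk by chunk into
+ // `dest`, advancing with Skip() until `size` bytes have been consumed.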
+ platform::CPUPlace cpu; + memory::Copy(cpu, reinterpret_cast(p), cpu, data, size_to_write); + + p += size_to_write; + size -= size_to_write; + + input->Skip(size_to_write); + } + + return true; +} + +bool VariableResponse::CopyLodTensorData( + ::google::protobuf::io::CodedInputStream* input, + const platform::DeviceContext& ctx, framework::DDim& dims, int length) { + auto var = scope_->FindVar(meta_.varname()); + auto* tensor = var->GetMutable(); + tensor->Resize(dims); + + framework::LoD lod; + for (int i = 0; i < meta_.lod_level(); ++i) { + framework::Vector v; + for (int j = 0; j < meta_.lod(i).lod_data_size(); ++j) { + v.push_back(meta_.lod(i).lod_data(j)); + } + lod.push_back(v); + } + tensor->set_lod(lod); + + void* tensor_data = + tensor->mutable_data(ctx.GetPlace(), ToTypeIndex(meta_.data_type())); + + if (!ReadRaw(input, ctx, tensor->place(), tensor_data, length)) { + return false; + } + + return true; +} + +inline framework::DDim GetDims( + const ::google::protobuf::RepeatedField<::google::protobuf::int64>& dims) { + std::vector vecdims; + for (auto& d : dims) { + vecdims.push_back(d); + } + return framework::make_ddim(vecdims); +} + +bool VariableResponse::CopySelectRowsTensorData( + ::google::protobuf::io::CodedInputStream* input, + const platform::DeviceContext& ctx, framework::DDim& dims, int length) { + auto var = scope_->FindVar(meta_.varname()); + auto* slr = var->GetMutable(); + auto* tensor = slr->mutable_value(); + tensor->Resize(dims); + void* tensor_data = tensor->mutable_data( + ctx.GetPlace(), + paddle::operators::detail::ToTypeIndex(meta_.data_type())); + + if (!ReadRaw(input, ctx, tensor->place(), tensor_data, length)) { + return false; + } + + return true; +} + +bool VariableResponse::CopySelectRowsData( + ::google::protobuf::io::CodedInputStream* input, + const platform::DeviceContext& ctx, int length) { + auto var = scope_->FindVar(meta_.varname()); + auto* slr = var->GetMutable(); + int64_t* rows_data = slr->mutable_rows()->data(); + + // copy rows CPU data, GPU data will be copied lazily. 
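+ // The rows pointer obtained above refers to host memory (the CPU view
+ // of framework::Vector<int64_t>), so ReadRaw can always target CPUPlace
+ // here no matter where the value tensor itself lives.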
+ platform::CPUPlace cpu; + if (!ReadRaw(input, ctx, cpu, rows_data, length)) { + return false; + } + + return true; +} + +bool ParseLodData(::google::protobuf::io::CodedInputStream* input, + std::vector<int64_t>* lod) { + while (true) { + auto p = input->ReadTagWithCutoff(127); + int tag = GetTagFieldNumber(p.first); + WireType wt = GetTagWireType(p.first); + + if (!p.second) { + return (tag == 0); + } + + switch (tag) { + case sendrecv::VariableMessage_LodData::kLodDataFieldNumber: { + uint64_t v; + if (wt == WIRETYPE_VARINT) { + if (!input->ReadVarint64(&v)) { + return false; + } + lod->push_back(v); + break; + } + + if (wt == WIRETYPE_LENGTH_DELIMITED) { + int length = 0; + if (!input->ReadVarintSizeAsInt(&length)) { + return false; + } + + for (int i = 0; i < length; i++) { + uint64_t v; + if (!input->ReadVarint64(&v)) { + return false; + } + lod->push_back(v); + } + break; + } + + return false; + } + default: { return false; } + } + } + + return true; +} + +int VariableResponse::Parse(const ::grpc::ByteBuffer& byte_buffer) { + GrpcByteBufferSource source; + source.Init(byte_buffer); + GrpcByteBufferSourceWrapper r(&source); + + return Parse(&r); +} + +int VariableResponse::Parse(Source* source) { + ::google::protobuf::io::ZeroCopyInputStream* input_stream = + source->contents(); + ::google::protobuf::io::CodedInputStream input(input_stream); + input.SetTotalBytesLimit(INT_MAX, INT_MAX); + + while (true) { + auto p = input.ReadTagWithCutoff(127); + int tag = GetTagFieldNumber(p.first); + WireType wt = GetTagWireType(p.first); + if (!p.second) { + if (tag != 0) { + return -1; + } + + return 0; + } + + switch (tag) { + case sendrecv::VariableMessage::kVarnameFieldNumber: { + uint32_t length; + if ((wt != WIRETYPE_LENGTH_DELIMITED) || !input.ReadVarint32(&length)) { + return tag; + } + + std::string temp; + if (!input.ReadString(&temp, length)) { + return tag; + } + + meta_.set_varname(temp); + break; + } + case sendrecv::VariableMessage::kTypeFieldNumber: { + uint64_t v; + if ((wt != WIRETYPE_VARINT) || !input.ReadVarint64(&v)) { + return tag; + } + + meta_.set_type(static_cast<::sendrecv::VarType>(v)); + break; + } + case sendrecv::VariableMessage::kDataTypeFieldNumber: { + uint64_t v = 0; + if ((wt != WIRETYPE_VARINT) || !input.ReadVarint64(&v)) { + return tag; + } + + meta_.set_data_type(static_cast<::sendrecv::VariableMessage_Type>(v)); + break; + } + case sendrecv::VariableMessage::kDimsFieldNumber: { + // not packed + if (wt == WIRETYPE_VARINT) { + uint64_t v; + if (!input.ReadVarint64(&v)) { + return tag; + } + meta_.add_dims(v); + break; + } + + // packed + if (wt == WIRETYPE_LENGTH_DELIMITED) { + int length = 0; + if (!input.ReadVarintSizeAsInt(&length)) { + return tag; + } + for (int i = 0; i < length; i++) { + uint64_t v; + if (!input.ReadVarint64(&v)) { + return tag; + } + meta_.add_dims(v); + } + break; + } + + return tag; + } + case sendrecv::VariableMessage::kLodLevelFieldNumber: { + uint64_t v = 0; + if ((wt != WIRETYPE_VARINT) || !input.ReadVarint64(&v)) { + return tag; + } + meta_.set_lod_level(static_cast<int>(v)); + break; + } + case sendrecv::VariableMessage::kLodFieldNumber: { + int length = 0; + if (wt != WIRETYPE_LENGTH_DELIMITED || + !ReadVarintSizeAsInt(&input, &length)) { + return tag; + } + + std::pair<::google::protobuf::io::CodedInputStream::Limit, int> p = + input.IncrementRecursionDepthAndPushLimit(length); + + std::vector<int64_t> lod_data; + if (p.second < 0 || !ParseLodData(&input, &lod_data)) { + return tag; + } + + if (!input.DecrementRecursionDepthAndPopLimit(p.first)) { +
return tag; + } + + if (lod_data.size() == 0) { + break; + } + + auto lod = meta_.add_lod(); + for (uint32_t i = 0; i < lod_data.size(); i++) { + lod->add_lod_data(lod_data[i]); + } + break; + } + case sendrecv::VariableMessage::kSerializedFieldNumber: { + PADDLE_ENFORCE((meta_.type() == sendrecv::SELECTED_ROWS || + meta_.type() == sendrecv::LOD_TENSOR) && + meta_.varname() != "", + "meta info should be got first!"); + + int length = 0; + if (wt != WIRETYPE_LENGTH_DELIMITED || + !ReadVarintSizeAsInt(&input, &length)) { + return tag; + } + + framework::DDim dims = GetDims(meta_.dims()); + if (meta_.type() == sendrecv::LOD_TENSOR) { + PADDLE_ENFORCE(meta_.lod_size() >= 0, + "lod info should be got first!"); + if (!CopyLodTensorData(&input, *dev_ctx_, dims, length)) { + return tag; + } + break; + } + + if (meta_.type() == sendrecv::SELECTED_ROWS) { + if (!CopySelectRowsTensorData(&input, *dev_ctx_, dims, length)) { + return tag; + } + break; + } + + return tag; + } + case sendrecv::VariableMessage::kRowsFieldNumber: { + PADDLE_ENFORCE((meta_.type() == sendrecv::SELECTED_ROWS || + meta_.type() == sendrecv::LOD_TENSOR) && + meta_.varname() != "", + "meta info should be got first!"); + + int length = 0; + if (wt != WIRETYPE_LENGTH_DELIMITED || + !ReadVarintSizeAsInt(&input, &length)) { + return tag; + } + + if (!CopySelectRowsData(&input, *dev_ctx_, length)) { + return tag; + } + break; + } + + default: { + // Unknown tag, return unknown error. + return -1; + } + } + } + + return 0; +} + +}; // namespace detail +}; // namespace operators +}; // namespace paddle
diff --git a/paddle/fluid/operators/detail/variable_response.h b/paddle/fluid/operators/detail/variable_response.h new file mode 100644 index 0000000000000000000000000000000000000000..e121ed7bce966d7dea94f71087f2187dcaa17cec --- /dev/null +++ b/paddle/fluid/operators/detail/variable_response.h @@ -0,0 +1,81 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/framework/var_type.h" + +#include "paddle/fluid/operators/detail/send_recv.grpc.pb.h" +#include "paddle/fluid/operators/detail/send_recv.pb.h" + +#include "google/protobuf/io/coded_stream.h" +#include "google/protobuf/io/zero_copy_stream.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/operators/detail/bytebuffer_stream.h" + +namespace paddle { +namespace operators { +namespace detail { + +class VariableResponse { + public: + VariableResponse(const framework::Scope* scope, + const platform::DeviceContext* dev_ctx) + : scope_(scope), dev_ctx_(dev_ctx) {} + + virtual ~VariableResponse() {} + + // return: + // 0: ok. + // -1: unknown error. + // other: number of error field. + int Parse(Source* source); + + // return: + // 0: ok. + // -1: unknown error.
+ // other: number of error field. + int Parse(const ::grpc::ByteBuffer& byte_buffer); + + inline std::string Varname() { return meta_.varname(); } + + // should call parse first. + framework::Variable* GetVar() { return scope_->FindVar(meta_.varname()); } + + private: + bool CopySelectRowsTensorData(::google::protobuf::io::CodedInputStream* input, + const platform::DeviceContext& ctx, + framework::DDim& dims, int length); + + bool CopySelectRowsData(::google::protobuf::io::CodedInputStream* input, + const platform::DeviceContext& ctx, int length); + + bool CopyLodTensorData(::google::protobuf::io::CodedInputStream* input, + const platform::DeviceContext& ctx, + framework::DDim& dims, int length); + + private: + const framework::Scope* scope_; + const platform::DeviceContext* dev_ctx_; + // only Skeleton + sendrecv::VariableMessage meta_; +}; + +}; // namespace detail +}; // namespace operators +}; // namespace paddle diff --git a/paddle/fluid/operators/detection_map_op.cc b/paddle/fluid/operators/detection_map_op.cc index 73c84c2fe0155d21d7059938330e44fa3668c6df..93ef15b9332168a9c62abfd4d0827207173ece45 100644 --- a/paddle/fluid/operators/detection_map_op.cc +++ b/paddle/fluid/operators/detection_map_op.cc @@ -188,8 +188,8 @@ The general steps are as follows. First, calculate the true positive and } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_WITHOUT_GRADIENT(detection_map, ops::DetectionMAPOp, - ops::DetectionMAPOpMaker); +REGISTER_OPERATOR(detection_map, ops::DetectionMAPOp, ops::DetectionMAPOpMaker, + paddle::framework::EmptyGradOpMaker); REGISTER_OP_CPU_KERNEL( detection_map, ops::DetectionMAPOpKernel, ops::DetectionMAPOpKernel); diff --git a/paddle/fluid/operators/detection_output_op.cc b/paddle/fluid/operators/detection_output_op.cc deleted file mode 100644 index f7520475917ff23535f11ccfde0ee915112bba30..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/detection_output_op.cc +++ /dev/null @@ -1,89 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -Indicesou may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/fluid/operators/detection_output_op.h" -namespace paddle { -namespace operators { - -class DetectionOutputOpMaker : public framework::OpProtoAndCheckerMaker { - public: - DetectionOutputOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("Loc", - "(Tensor) The input tensor of detection_output operator." - "The input predict locations" - "The format of input tensor is kNCHW. Where K is priorbox point " - "numbers," - "N is How many boxes are there on each point, " - "C is 4, H and W both are 1."); - AddInput("Conf", - "(Tensor) The input tensor of detection_output operator." - "The input priorbox confidence." - "The format of input tensor is kNCHW. 
Where K is priorbox point " - "numbers," - "N is How many boxes are there on each point, " - "C is the number of classes, H and W both are 1."); - AddInput("PriorBox", - "(Tensor) The input tensor of detection_output operator." - "The format of input tensor is the position and variance " - "of the boxes"); - AddOutput("Out", - "(Tensor) The output tensor of detection_output operator."); - AddAttr("background_label_id", "(int), The background class index."); - AddAttr("num_classes", "(int), The number of the classification."); - AddAttr("nms_threshold", - "(float), The Non-maximum suppression threshold."); - AddAttr("confidence_threshold", - "(float), The classification confidence threshold."); - AddAttr("top_k", "(int), The bbox number kept of the layer’s output."); - AddAttr("nms_top_k", - "(int), The bbox number kept of the NMS’s output."); - AddComment(R"DOC( - detection output for SSD(single shot multibox detector) - Apply the NMS to the output of network and compute the predict - bounding box location. The output’s shape of this layer could - be zero if there is no valid bounding box. - )DOC"); - } -}; - -class DetectionOutputOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Loc"), - "Input(X) of DetectionOutputOp" - "should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Conf"), - "Input(X) of DetectionOutputOp" - "should not be null."); - PADDLE_ENFORCE(ctx->HasInput("PriorBox"), - "Input(X) of DetectionOutputOp" - "should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of DetectionOutputOp should not be null."); - std::vector output_shape({1, 7}); - ctx->SetOutputDim("Out", framework::make_ddim(output_shape)); - } -}; -} // namespace operators -} // namespace paddle - -namespace ops = paddle::operators; -REGISTER_OP_WITHOUT_GRADIENT(detection_output, ops::DetectionOutputOp, - ops::DetectionOutputOpMaker); -REGISTER_OP_CPU_KERNEL( - detection_output, - ops::DetectionOutputKernel, - ops::DetectionOutputKernel); diff --git a/paddle/fluid/operators/detection_output_op.h b/paddle/fluid/operators/detection_output_op.h deleted file mode 100644 index af9081c93436776b6ca6ee7139e340054111e440..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/detection_output_op.h +++ /dev/null @@ -1,167 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - Indicesou may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ - -#pragma once -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/framework/tensor.h" -#include "paddle/fluid/operators/math/detection_util.h" -#include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/operators/math/softmax.h" -#include "paddle/fluid/operators/strided_memcpy.h" -namespace paddle { -namespace operators { -template -inline void transpose_fun(const framework::ExecutionContext& context, - const framework::Tensor& src, - framework::Tensor* dst) { - int input_nums = src.dims()[0]; - int offset = 0; - for (int j = 0; j < input_nums; ++j) { - framework::Tensor in_p_tensor = src.Slice(j, j + 1); - std::vector shape_vec( - {in_p_tensor.dims()[0], in_p_tensor.dims()[1], in_p_tensor.dims()[3], - in_p_tensor.dims()[4], in_p_tensor.dims()[2]}); - framework::DDim shape(framework::make_ddim(shape_vec)); - framework::Tensor in_p_tensor_transpose; - in_p_tensor_transpose.mutable_data(shape, context.GetPlace()); - std::vector shape_axis({0, 1, 3, 4, 2}); - math::Transpose trans5; - trans5(context.template device_context(), in_p_tensor, - &in_p_tensor_transpose, shape_axis); - auto dst_stride = framework::stride(dst->dims()); - auto src_stride = framework::stride(in_p_tensor_transpose.dims()); - StridedMemcpy(context.device_context(), in_p_tensor_transpose.data(), - src_stride, in_p_tensor_transpose.dims(), dst_stride, - dst->data() + offset); - offset += in_p_tensor_transpose.dims()[4] * src_stride[4]; - } -} -template -class DetectionOutputKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - const framework::Tensor* in_loc = context.Input("Loc"); - const framework::Tensor* in_conf = context.Input("Conf"); - const framework::Tensor* in_priorbox = - context.Input("PriorBox"); - auto* out = context.Output("Out"); - int num_classes = context.template Attr("num_classes"); - int top_k = context.template Attr("top_k"); - int nms_top_k = context.template Attr("nms_top_k"); - int background_label_id = context.template Attr("background_label_id"); - float nms_threshold = context.template Attr("nms_threshold"); - float confidence_threshold = - context.template Attr("confidence_threshold"); - size_t batch_size = in_conf->dims()[1]; - int conf_sum_size = in_conf->numel(); - // for softmax - std::vector conf_shape_softmax_vec( - {conf_sum_size / num_classes, num_classes}); - framework::DDim conf_shape_softmax( - framework::make_ddim(conf_shape_softmax_vec)); - // for knchw => nhwc - std::vector loc_shape_vec({1, in_loc->dims()[1], in_loc->dims()[3], - in_loc->dims()[4], - in_loc->dims()[2] * in_loc->dims()[0]}); - std::vector conf_shape_vec( - {1, in_conf->dims()[1], in_conf->dims()[3], in_conf->dims()[4], - in_conf->dims()[2] * in_conf->dims()[0]}); - framework::DDim loc_shape(framework::make_ddim(loc_shape_vec)); - framework::DDim conf_shape(framework::make_ddim(conf_shape_vec)); - framework::Tensor loc_tensor; - framework::Tensor conf_tensor; - loc_tensor.mutable_data(loc_shape, context.GetPlace()); - conf_tensor.mutable_data(conf_shape, context.GetPlace()); - // for cpu - framework::Tensor loc_cpu; - framework::Tensor conf_cpu; - framework::Tensor priorbox_cpu; - const T* priorbox_data = in_priorbox->data(); - transpose_fun(context, *in_loc, &loc_tensor); - transpose_fun(context, *in_conf, &conf_tensor); - conf_tensor.Resize(conf_shape_softmax); - math::SoftmaxFunctor()( - context.template device_context(), &conf_tensor, - &conf_tensor); - T* loc_data = loc_tensor.data(); - T* 
conf_data = conf_tensor.data(); - if (platform::is_gpu_place(context.GetPlace())) { - loc_cpu.mutable_data(loc_tensor.dims(), platform::CPUPlace()); - framework::TensorCopy(loc_tensor, platform::CPUPlace(), - context.device_context(), &loc_cpu); - loc_data = loc_cpu.data(); - conf_cpu.mutable_data(conf_tensor.dims(), platform::CPUPlace()); - framework::TensorCopy(conf_tensor, platform::CPUPlace(), - context.device_context(), &conf_cpu); - conf_data = conf_cpu.data(); - priorbox_cpu.mutable_data(in_priorbox->dims(), platform::CPUPlace()); - framework::TensorCopy(*in_priorbox, platform::CPUPlace(), - context.device_context(), &priorbox_cpu); - priorbox_data = priorbox_cpu.data(); - } - // get decode bboxes - size_t num_priors = in_priorbox->numel() / 8; - std::vector>> all_decoded_bboxes; - for (size_t n = 0; n < batch_size; ++n) { - std::vector> decoded_bboxes; - for (size_t i = 0; i < num_priors; ++i) { - size_t prior_offset = i * 8; - size_t loc_pred_offset = n * num_priors * 4 + i * 4; - std::vector> prior_bbox_vec; - math::GetBBoxFromPriorData(priorbox_data + prior_offset, 1, - prior_bbox_vec); - std::vector> prior_bbox_var; - math::GetBBoxVarFromPriorData(priorbox_data + prior_offset, 1, - prior_bbox_var); - std::vector loc_pred_data; - for (size_t j = 0; j < 4; ++j) - loc_pred_data.push_back(*(loc_data + loc_pred_offset + j)); - math::BBox bbox = math::DecodeBBoxWithVar( - prior_bbox_vec[0], prior_bbox_var[0], loc_pred_data); - decoded_bboxes.push_back(bbox); - } - all_decoded_bboxes.push_back(decoded_bboxes); - } - std::vector>> all_indices; - int num_kept = math::GetDetectionIndices( - conf_data, num_priors, num_classes, background_label_id, batch_size, - confidence_threshold, nms_top_k, nms_threshold, top_k, - all_decoded_bboxes, &all_indices); - - if (num_kept <= 0) { - std::vector out_shape_vec({0, 0}); - framework::DDim out_shape(framework::make_ddim(out_shape_vec)); - out->Resize(out_shape); - return; - } - std::vector out_shape_vec({num_kept, 7}); - framework::DDim out_shape(framework::make_ddim(out_shape_vec)); - out->mutable_data(out_shape, context.GetPlace()); - framework::Tensor out_cpu; - T* out_data = out->data(); - if (platform::is_gpu_place(context.GetPlace())) { - out_cpu.mutable_data(out->dims(), platform::CPUPlace()); - out_data = out_cpu.data(); - } - math::GetDetectionOutput(conf_data, num_kept, num_priors, num_classes, - batch_size, all_indices, all_decoded_bboxes, - out_data); - if (platform::is_gpu_place(context.GetPlace())) { - framework::TensorCopy(out_cpu, platform::CUDAPlace(), - context.device_context(), out); - } - } -}; -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/dropout_op.cc b/paddle/fluid/operators/dropout_op.cc index 1074ed6acc22a81f46c466d917ef973945a12898..e4436549f6185ba04a5f270893596a6dcb11e89b 100644 --- a/paddle/fluid/operators/dropout_op.cc +++ b/paddle/fluid/operators/dropout_op.cc @@ -35,7 +35,6 @@ class DropoutOp : public framework::OperatorWithKernel { } }; -template class DropoutOpMaker : public framework::OpProtoAndCheckerMaker { public: DropoutOpMaker(OpProto* proto, OpAttrChecker* op_checker) @@ -73,7 +72,6 @@ are set equal to their corresponding inputs. 
} }; -template class DropoutOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; @@ -103,11 +101,10 @@ class DropoutOpGrad : public framework::OperatorWithKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(dropout, ops::DropoutOp, ops::DropoutOpMaker, dropout_grad, - ops::DropoutOpGrad); +REGISTER_OP(dropout, ops::DropoutOp, ops::DropoutOpMaker, dropout_grad, + ops::DropoutOpGrad); REGISTER_OP_CPU_KERNEL( - dropout, - ops::CPUDropoutKernel); + dropout, ops::CPUDropoutKernel); REGISTER_OP_CPU_KERNEL( dropout_grad, ops::DropoutGradKernel); diff --git a/paddle/fluid/operators/dropout_op.cu b/paddle/fluid/operators/dropout_op.cu index d6f9c04359d733cb4f3f0586e9239ee67deb7078..f6c85a2a537b37feb20e6d62729dc5075af2a5d9 100644 --- a/paddle/fluid/operators/dropout_op.cu +++ b/paddle/fluid/operators/dropout_op.cu @@ -18,17 +18,18 @@ limitations under the License. */ #include #include #include "paddle/fluid/operators/dropout_op.h" +#include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { -template +template __global__ void RandomGenerator(const size_t n, const int seed, - const AttrType dropout_prob, const T* src, + const float dropout_prob, const T* src, T* mask_data, T* dst) { thrust::minstd_rand rng; rng.seed(seed); - thrust::uniform_real_distribution dist(0, 1); + thrust::uniform_real_distribution dist(0, 1); int idx = blockDim.x * blockIdx.x + threadIdx.x; for (; idx < n; idx += blockDim.x * gridDim.x) { @@ -44,14 +45,14 @@ __global__ void RandomGenerator(const size_t n, const int seed, // It seems that Eigen::Tensor::setRandom in GPU will SEGFAULT. // Use std::random and thrust::random(thrust is a std library in CUDA) to // implement uniform random. 
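// NOTE: RandomGenerator above walks a grid-stride loop: each thread starts
// at blockDim.x * blockIdx.x + threadIdx.x and advances by
// blockDim.x * gridDim.x, so one fixed-size launch covers all n elements.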
-template +template class GPUDropoutKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.Input("X"); auto* y = context.Output("Out"); y->mutable_data(context.GetPlace()); - AttrType dropout_prob = context.Attr("dropout_prob"); + float dropout_prob = context.Attr("dropout_prob"); auto X = EigenMatrix::Reshape(*x, 1); auto Y = EigenMatrix::Reshape(*y, 1); @@ -70,11 +71,11 @@ class GPUDropoutKernel : public framework::OpKernel { int threads = 512; int grid = (x->numel() + threads - 1) / threads; - RandomGenerator<<>>( + RandomGenerator< + T><<>>( size, seed, dropout_prob, x_data, mask_data, y_data); } else { - Y.device(place) = X * (1.0f - dropout_prob); + Y.device(place) = X * static_cast(1.0f - dropout_prob); } } }; @@ -83,9 +84,9 @@ class GPUDropoutKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; +namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( - dropout, - ops::GPUDropoutKernel); -REGISTER_OP_CUDA_KERNEL( - dropout_grad, - ops::DropoutGradKernel); + dropout, ops::GPUDropoutKernel, + ops::GPUDropoutKernel); +REGISTER_OP_CUDA_KERNEL(dropout_grad, + ops::DropoutGradKernel); diff --git a/paddle/fluid/operators/dropout_op.h b/paddle/fluid/operators/dropout_op.h index 209e4dec1756dc65fbf147c4dbbf0913d3c6ef7e..b5ee86ae2d11dfc835e1a3a6826ce016baf38a29 100644 --- a/paddle/fluid/operators/dropout_op.h +++ b/paddle/fluid/operators/dropout_op.h @@ -25,7 +25,7 @@ template using EigenMatrix = framework::EigenMatrix; -template +template class CPUDropoutKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { diff --git a/paddle/fluid/operators/elementwise_add_op.cu b/paddle/fluid/operators/elementwise_add_op.cu index 19dc4a52152e2a7aa71476d4f0ef692d0af97b4a..dfff518f170b56d180b6883c363effb8dbd677b6 100644 --- a/paddle/fluid/operators/elementwise_add_op.cu +++ b/paddle/fluid/operators/elementwise_add_op.cu @@ -14,19 +14,20 @@ limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/fluid/operators/elementwise_add_op.h" +#include "paddle/fluid/platform/float16.h" namespace ops = paddle::operators; +namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( - elementwise_add, - ops::ElementwiseAddKernel, - ops::ElementwiseAddKernel, - ops::ElementwiseAddKernel, - ops::ElementwiseAddKernel); + elementwise_add, ops::ElementwiseAddKernel, + ops::ElementwiseAddKernel, + ops::ElementwiseAddKernel, + ops::ElementwiseAddKernel, + ops::ElementwiseAddKernel); REGISTER_OP_CUDA_KERNEL( elementwise_add_grad, - ops::ElementwiseAddGradKernel, - ops::ElementwiseAddGradKernel, - ops::ElementwiseAddGradKernel, - ops::ElementwiseAddGradKernel); + ops::ElementwiseAddGradKernel, + ops::ElementwiseAddGradKernel, + ops::ElementwiseAddGradKernel, + ops::ElementwiseAddGradKernel); diff --git a/paddle/fluid/operators/feed_op.cc b/paddle/fluid/operators/feed_op.cc index 90c31877f6a87d1e237283d489353b4aba26c97b..debacf07c360b9aa69000a0d891f04239ed08807 100644 --- a/paddle/fluid/operators/feed_op.cc +++ b/paddle/fluid/operators/feed_op.cc @@ -15,6 +15,7 @@ limitations under the License. 
*/
 #include "paddle/fluid/framework/feed_fetch_type.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
+#include "paddle/fluid/platform/profiler.h"

 namespace paddle {
 namespace operators {
@@ -28,6 +29,10 @@ class FeedOp : public framework::OperatorBase {
  private:
   void RunImpl(const framework::Scope &scope,
                const platform::Place &place) const override {
+    // get device context from pool
+    auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place);
+    platform::RecordEvent record_event(Type(), dev_ctx);
+
     auto feed_var_name = Input("X");
     auto *feed_var = scope.FindVar(feed_var_name);
@@ -50,14 +55,10 @@ class FeedOp : public framework::OperatorBase {
     auto &feed_item = feed_list.at(static_cast<size_t>(col));
     auto *out_item = out_var->GetMutable<framework::FeedFetchType>();

-    // get device context from pool
-    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
-    auto &dev_ctx = *pool.Get(place);
-
     if (platform::is_same_place(feed_item.place(), place)) {
       out_item->ShareDataWith(feed_item);
     } else {
-      framework::TensorCopy(feed_item, place, dev_ctx, out_item);
+      framework::TensorCopy(feed_item, place, *dev_ctx, out_item);
     }
     out_item->set_lod(feed_item.lod());
   }
diff --git a/paddle/fluid/operators/fetch_op.cc b/paddle/fluid/operators/fetch_op.cc
index d66f01d1b7ce8528a7c0177b2889aff7e0c5a12b..7c7f3e9059fbb1e3f2cca4f04edfff55c9452761 100644
--- a/paddle/fluid/operators/fetch_op.cc
+++ b/paddle/fluid/operators/fetch_op.cc
@@ -15,6 +15,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/feed_fetch_type.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/platform/device_context.h"
+#include "paddle/fluid/platform/profiler.h"

 namespace paddle {
 namespace operators {
@@ -29,6 +30,9 @@ class FetchOp : public framework::OperatorBase {
  private:
   void RunImpl(const framework::Scope &scope,
                const platform::Place &place) const override {
+    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
+    platform::RecordEvent record_event(Type(), pool.Get(place));
+
     auto fetch_var_name = Input("X");
     auto *fetch_var = scope.FindVar(fetch_var_name);
     PADDLE_ENFORCE(fetch_var != nullptr,
@@ -53,7 +57,6 @@ class FetchOp : public framework::OperatorBase {

     // FIXME(yuyang18): Should we assume the fetch operator always generate
     // CPU outputs?
-    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
     auto &dev_ctx = *pool.Get(src_item.place());

     TensorCopy(src_item, platform::CPUPlace(), dev_ctx, &dst_item);
diff --git a/paddle/fluid/operators/iou_similarity_op.cc b/paddle/fluid/operators/iou_similarity_op.cc
index ffbd7c7814c3fdec9fef0580ccd1ea3661ac0012..4b78ec510d1fb73592ee8af9a641622f4d713f8d 100755
--- a/paddle/fluid/operators/iou_similarity_op.cc
+++ b/paddle/fluid/operators/iou_similarity_op.cc
@@ -87,8 +87,9 @@ $$
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP_WITHOUT_GRADIENT(iou_similarity, ops::IOUSimilarityOp,
-                             ops::IOUSimilarityOpMaker);
+REGISTER_OPERATOR(iou_similarity, ops::IOUSimilarityOp,
+                  ops::IOUSimilarityOpMaker,
+                  paddle::framework::EmptyGradOpMaker);

 REGISTER_OP_CPU_KERNEL(
     iou_similarity,
diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc
index 8e9923c87ce22ed229f78ef15430e50cab16c947..d8a3c45ac5bf64c0613fa5f3de1684954d3c056a 100644
--- a/paddle/fluid/operators/listen_and_serv_op.cc
+++ b/paddle/fluid/operators/listen_and_serv_op.cc
@@ -24,6 +24,7 @@ limitations under the License.
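// Note on the feed/fetch hunks above: constructing a platform::RecordEvent
// at the top of RunImpl is the profiler's RAII pattern; the event opens on
// construction and closes when the object leaves scope, so the whole body
// of RunImpl is attributed to the op. A condensed sketch of the pattern:
//
//   auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place);
//   platform::RecordEvent record_event(Type(), dev_ctx);
//   // ... all work until the end of RunImpl is timed under Type()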
*/ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/proto_desc.h" +#include "paddle/fluid/framework/threadpool.h" #include "paddle/fluid/operators/detail/grpc_server.h" #include "paddle/fluid/operators/detail/sendrecvop_utils.h" #include "paddle/fluid/operators/detail/simple_block_queue.h" @@ -68,9 +69,7 @@ class ListenAndServOp : public framework::OperatorBase { } void Stop() override { - detail::MessageWithName term_msg; - term_msg.first = LISTEN_TERMINATE_MESSAGE; - rpc_service_->Push(term_msg); + rpc_service_->Push(LISTEN_TERMINATE_MESSAGE); rpc_service_->ShutDown(); server_thread_->join(); } @@ -89,7 +88,17 @@ class ListenAndServOp : public framework::OperatorBase { auto *block = Attr(kOptimizeBlock); auto *program = block->Program(); + int num_blocks = program->Size(); + PADDLE_ENFORCE_GE(num_blocks, 2, + "server program should have at least 2 blocks"); + framework::Executor executor(dev_place); + std::vector blk_ctx_list; + blk_ctx_list.push_back(nullptr); // block0 is not used. + for (int blkid = 1; blkid < num_blocks; ++blkid) { + auto *exe_ctx = executor.Prepare(*program, blkid); + blk_ctx_list.push_back(exe_ctx); + } // TODO(typhoonzero): change this to a while_op for every cluster-batch. bool exit_flag = false; @@ -103,7 +112,7 @@ class ListenAndServOp : public framework::OperatorBase { size_t recv_var_cnt = 0; int batch_barrier = 0; while (batch_barrier != fan_in) { - const detail::MessageWithName &v = rpc_service_->Get(); + const detail::ReceivedMessage v = rpc_service_->Get(); auto recv_var_name = v.first; if (recv_var_name == LISTEN_TERMINATE_MESSAGE) { LOG(INFO) << "received terminate message and exit"; @@ -116,28 +125,52 @@ class ListenAndServOp : public framework::OperatorBase { } else { VLOG(3) << "received grad: " << recv_var_name; recv_var_cnt++; - auto *var = recv_scope.FindVar(recv_var_name); + auto var = v.second->GetVar(); if (var == nullptr) { LOG(ERROR) << "Can not find server side var: " << recv_var_name; PADDLE_THROW("Can not find server side var"); } - detail::DeserializeFromMessage(v.second, dev_ctx, var); if (var->IsType()) { sparse_vars.push_back(var); } } } if (exit_flag) { - rpc_service_->ShutDown(); rpc_service_->SetCond(1); + rpc_service_->ShutDown(); break; } - try { - executor.Run(*program, &recv_scope, block->ID(), /*global_block*/ - false /*create_local_scope*/, false /*create_vars*/); - } catch (std::exception &e) { - LOG(ERROR) << "run sub program error " << e.what(); + + // put optimize blocks in the thread pool to start run, the last block + // should be global ops. + // NOTE: if is_gpu_place, CUDA kernels are laugched by multiple threads + // and this will still work. + + std::vector> fs; + // block0 contains only listen_and_serv op, start run from block1. 
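// A condensed sketch of the schedule the code below implements (names as
// in this hunk): optimize blocks 1 .. num_blocks - 2 run concurrently on
// the thread pool and are joined, and only then does the final block with
// the global ops run on the calling thread:
//
//   for (int blkid = 1; blkid < num_blocks - 1; ++blkid)
//     fs.push_back(framework::Async([&, blkid] {
//       executor.RunPreparedContext(blk_ctx_list[blkid], &recv_scope,
//                                   false, false);
//     }));
//   for (auto &f : fs) f.wait();
//   executor.RunPreparedContext(blk_ctx_list[num_blocks - 1], &recv_scope,
//                               false, false);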
+ for (int blkid = 1; blkid < num_blocks - 1; ++blkid) { + fs.push_back(framework::Async( + [&executor, &program, &recv_scope, &blk_ctx_list, blkid]() { + int run_block = blkid; // thread local + try { + executor.RunPreparedContext(blk_ctx_list[run_block], + &recv_scope, false, false); + } catch (std::exception &e) { + LOG(ERROR) << "run sub program error " << e.what(); + } + })); + } + for (int i = 0; i < num_blocks - 2; ++i) fs[i].wait(); + // Run global block at final step, or block1 if there are only 2 blocks + if (num_blocks >= 2) { + try { + executor.RunPreparedContext(blk_ctx_list[num_blocks - 1], &recv_scope, + false, false); + } catch (std::exception &e) { + LOG(ERROR) << "run sub program error " << e.what(); + } } + // Reset the received sparse variables, the sum operator would not // sum the input sparse variables which rows is empty at the next // mini-batch. @@ -148,9 +181,13 @@ class ListenAndServOp : public framework::OperatorBase { } rpc_service_->SetCond(1); // FIXME(typhoonzero): use another condition to sync wait clients get. - rpc_service_->WaitClientGet(ins.size()); + rpc_service_->WaitClientGet(fan_in); sparse_vars.clear(); } // while(true) + + for (int i = 0; i < num_blocks; ++i) { + delete blk_ctx_list[i]; + } } protected: diff --git a/paddle/fluid/operators/load_op.cc b/paddle/fluid/operators/load_op.cc index 05f809ac5628420251957116bb2390b4502f11b8..6ffe0bec5e38432676ecadfa1abbbe70a1425bb1 100644 --- a/paddle/fluid/operators/load_op.cc +++ b/paddle/fluid/operators/load_op.cc @@ -15,6 +15,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/profiler.h" namespace paddle { namespace operators { @@ -29,6 +30,9 @@ class LoadOp : public framework::OperatorBase { private: void RunImpl(const framework::Scope &scope, const platform::Place &place) const override { + auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place); + platform::RecordEvent record_event(Type(), dev_ctx); + auto filename = Attr("file_path"); std::ifstream fin(filename); PADDLE_ENFORCE(static_cast(fin), "Cannot open file %s for load op", @@ -41,9 +45,7 @@ class LoadOp : public framework::OperatorBase { auto *tensor = out_var->GetMutable(); - platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); - auto &dev_ctx = *pool.Get(place); - DeserializeFromStream(fin, tensor, dev_ctx); + DeserializeFromStream(fin, tensor, *dev_ctx); if (platform::is_gpu_place(place)) { // copy CPU to GPU @@ -55,7 +57,7 @@ class LoadOp : public framework::OperatorBase { out_var->Clear(); tensor = out_var->GetMutable(); tensor->set_lod(cpu_tensor.lod()); - TensorCopy(cpu_tensor, place, dev_ctx, tensor); + TensorCopy(cpu_tensor, place, *dev_ctx, tensor); } } }; diff --git a/paddle/fluid/operators/lod_reset_op.cc b/paddle/fluid/operators/lod_reset_op.cc index 6a66297cb843ead1a507a6867c1c562224861cbf..7d5687f2d0666d393d7bb1c1a2fdde6c95e6d615 100644 --- a/paddle/fluid/operators/lod_reset_op.cc +++ b/paddle/fluid/operators/lod_reset_op.cc @@ -22,17 +22,16 @@ class LoDResetOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - // input check PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of LoDResetOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of LoDResetOp should not be null."); - // If target LoD is not set form Input(), then it must be set from 
Attr(). - if (!ctx->HasInput("TargetLoD")) { + + if (!ctx->HasInput("Y")) { auto level0 = ctx->Attrs().Get>("target_lod"); - PADDLE_ENFORCE(level0.size() > 1, - "Target LoD is not found, should be set to be a valid one " - "through Input() or Attr()."); + PADDLE_ENFORCE_GT(level0.size(), 1, + "If Input(Y) not provided, the target lod should be " + "specified by attribute `target_lod`."); } ctx->SetOutputDim("Out", ctx->GetInputDim("X")); } @@ -50,36 +49,77 @@ class LoDResetOpMaker : public framework::OpProtoAndCheckerMaker { public: LoDResetOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "(LoDTensor) The input tensor of lod_reset operator."); - AddInput("TargetLoD", - "(Tensor, optional) The target level 0 LoD from Input().") + AddInput("X", + "(Tensor, LoDTensor) Input variable of LoDResetOp which " + "could be a Tensor or LoDTensor, where the data of output " + "variable inherits from."); + AddInput("Y", + "(Tensor, LoDTensor, optional) If provided and Y is LoDTensor, " + "lod of Input(Y) would be considered as the target lod first, " + "otherwise data of Input(Y) would be considered as the " + "target lod.") .AsDispensable(); - AddOutput("Out", "(LoDTensor) The output tensor of lod_reset operator."); + AddOutput("Out", + "(LoDTensor) Output variable of LoDResetOp which should be a " + "LoDTensor."); AddAttr>("target_lod", "The target level 0 LoD from Attr().") .SetDefault(std::vector{}); AddComment(R"DOC(LoDReset operator -Reset LoD of Input(X) into a new one specified by Input(TargetLoD) or -Attr(target_lod), or set LoD for Input(X) if it doesn't have one. -Currently the lod_reset operator only supports the reset of level 0 LoD. -At least one of Input(TargetLoD) and Attr(target_lod) must be set, -and if both of them are set, Input(TargetLoD) will be chosen as the -target LoD. +Set LoD of `X` to a new one specified by `Y` or attribute `target_lod`. When `Y` +provided and `Y` is a LoDTensor, `Y.lod` would be considered as target LoD +first, otherwise `Y.data` would be considered as target LoD. If `Y` is not +provided, target LoD should be specified by attribute `target_lod`. +If target LoD is specified by `Y.data` or `target_lod`, only one level LoD +is supported. + +Example 1: + +Given a 1-level LoDTensor input(X): + X.lod = [[ 0, 2, 5 6 ]] + X.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] + X.dims = [6, 1] + +attr(target_lod): [0, 4, 6] + +then we get a 1-level LoDTensor: + Out.lod = [[ 0, 4, 6 ]] + Out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] + Out.dims = [6, 1] + +Example 2: -An example: -Given a float LoDTensor X with shape (6, 1), its transpose form represents +Given a 1-level LoDTensor input(X): + X.lod = [[ 0, 2, 5 6 ]] + X.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] + X.dims = [6, 1] - [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], +input(Y) is a Tensor: + Y.data = [[0, 2, 6]] + Y.dims = [1, 3] -with LoD = [[0, 2, 5, 6]] and the three (transposed) sequences look like +then we get a 1-level LoDTensor: + Out.lod = [[ 0, 2, 6 ]] + Out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] + Out.dims = [6, 1] - [1.0, 2.0], [3.0, 4.0, 5.0], [6.0]. +Example 3: -If target LoD = [0, 4, 6], the lod_reset operator will reset the LoD and -the sequences that the LoDTensor Output(Out) contains becomes: +Given a 1-level LoDTensor input(X): + X.lod = [[ 0, 2, 5 6 ]] + X.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] + X.dims = [6, 1] - [1.0, 2.0, 3.0, 4.0], [5.0, 6.0]. 
+input(Y) is a 2-level LoDTensor: + Y.lod = [[0, 2, 4], [0, 2, 5, 6]] + Y.data = [[1.1], [2.1], [3.1], [4.1], [5.1], [6.1]] + Y.dims = [6, 1] + +then we get a 2-level LoDTensor: + Out.lod = [[0, 2, 4], [0, 2, 5, 6]] + Out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] + Out.dims = [6, 1] )DOC"); } @@ -90,10 +130,16 @@ class LoDResetGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) shouldn't be null."); + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of LoDResetGradOp should not be null."); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), - "Input(Out@GRAD) shouldn't be null."); - ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); + "Input(Out@Grad) of LoDResetGradOp should not be null."); + + auto x_grad_name = framework::GradVarName("X"); + if (ctx->HasOutput(x_grad_name)) { + ctx->SetOutputDim(x_grad_name, ctx->GetInputDim("X")); + ctx->ShareLoD("X", /*->*/ x_grad_name); + } } protected: @@ -111,9 +157,13 @@ class LoDResetGradOp : public framework::OperatorWithKernel { namespace ops = paddle::operators; REGISTER_OP(lod_reset, ops::LoDResetOp, ops::LoDResetOpMaker, lod_reset_grad, ops::LoDResetGradOp); -REGISTER_OP_CPU_KERNEL(lod_reset, - ops::LoDResetKernel, - ops::LoDResetKernel); +REGISTER_OP_CPU_KERNEL( + lod_reset, ops::LoDResetKernel, + ops::LoDResetKernel, + ops::LoDResetKernel, + ops::LoDResetKernel); REGISTER_OP_CPU_KERNEL( lod_reset_grad, ops::LoDResetGradKernel, - ops::LoDResetGradKernel); + ops::LoDResetGradKernel, + ops::LoDResetGradKernel, + ops::LoDResetGradKernel); diff --git a/paddle/fluid/operators/lod_reset_op.cu b/paddle/fluid/operators/lod_reset_op.cu index b0e87a851a77a1cc98d419a63d4d9e5e1b9dd163..888d4c12eb4e3f4fd94d8dd4178c59acd0abb23b 100644 --- a/paddle/fluid/operators/lod_reset_op.cu +++ b/paddle/fluid/operators/lod_reset_op.cu @@ -18,8 +18,12 @@ namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( lod_reset, ops::LoDResetKernel, - ops::LoDResetKernel); + ops::LoDResetKernel, + ops::LoDResetKernel, + ops::LoDResetKernel); REGISTER_OP_CUDA_KERNEL( lod_reset_grad, ops::LoDResetGradKernel, - ops::LoDResetGradKernel); + ops::LoDResetGradKernel, + ops::LoDResetGradKernel, + ops::LoDResetGradKernel); diff --git a/paddle/fluid/operators/lod_reset_op.h b/paddle/fluid/operators/lod_reset_op.h index 8186d4f8262101edc723af390eee1aec4fa6f3a5..99f01c2a255ade81421c2bba95ff3d38ced6f87c 100644 --- a/paddle/fluid/operators/lod_reset_op.h +++ b/paddle/fluid/operators/lod_reset_op.h @@ -26,35 +26,46 @@ class LoDResetKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& ctx) const { auto* out = ctx.Output("Out"); auto* in = ctx.Input("X"); - auto* lod_t = ctx.Input("TargetLoD"); + auto* lod_t = ctx.Input("Y"); + + out->ShareDataWith(*in); std::vector level0; if (lod_t) { - auto* lod = lod_t->data(); - if (platform::is_gpu_place(ctx.GetPlace())) { - framework::Tensor lod_cpu; - framework::TensorCopy(*lod_t, platform::CPUPlace(), - ctx.device_context(), &lod_cpu); - lod = lod_cpu.data(); + if (lod_t->lod().size() > 0) { + auto y_lod = lod_t->lod(); + auto last_level = y_lod[y_lod.size() - 1]; + PADDLE_ENFORCE_EQ(last_level.back(), in->dims()[0], + "Last value of `Y`'s last level LoD should be equal " + "to the first dimension of `X`"); + out->set_lod(y_lod); + return; // early return, since lod already set + } else { + auto* lod = 
lod_t->data(); + if (platform::is_gpu_place(ctx.GetPlace())) { + framework::Tensor lod_cpu; + framework::TensorCopy(*lod_t, platform::CPUPlace(), + ctx.device_context(), &lod_cpu); + lod = lod_cpu.data(); + } + level0 = std::vector(lod, lod + lod_t->numel()); } - level0 = std::vector(lod, lod + lod_t->numel()); } else { level0 = ctx.Attr>("target_lod"); } - PADDLE_ENFORCE(level0.size() > 1UL, - "The size of target LoD should be greater than 1."); - PADDLE_ENFORCE(level0[0] == 0, - "Target LoD should be a vector starting from 0."); - PADDLE_ENFORCE(level0.back() == in->dims()[0], - "Target LoD should be a vector end with the " - "first dimension of Input(X)."); + PADDLE_ENFORCE_GT(level0.size(), 1UL, + "Size of target LoD should be greater than 1."); + PADDLE_ENFORCE_EQ(level0[0], 0, + "Target LoD should be a vector starting from 0."); + PADDLE_ENFORCE_EQ(level0.back(), in->dims()[0], + "Target LoD should be a vector end with the " + "first dimension of Input(X)."); for (size_t i = 0; i < level0.size() - 1; ++i) { PADDLE_ENFORCE(level0[i + 1] > level0[i], "Target LoD should be an ascending vector."); } - out->ShareDataWith(*in); // cast level0 to size_t std::vector ulevel0(level0.size(), 0); std::transform(level0.begin(), level0.end(), ulevel0.begin(), diff --git a/paddle/fluid/operators/lookup_table_op.cc b/paddle/fluid/operators/lookup_table_op.cc index 3acdca17afc2fea05fb81871e6e03d72691fe91e..50eeadab72e71f39325c5eda69e9a3c3e6517d7d 100644 --- a/paddle/fluid/operators/lookup_table_op.cc +++ b/paddle/fluid/operators/lookup_table_op.cc @@ -33,8 +33,16 @@ class LookupTableOp : public framework::OperatorWithKernel { auto table_dims = ctx->GetInputDim("W"); auto ids_dims = ctx->GetInputDim("Ids"); - PADDLE_ENFORCE_EQ(ids_dims.size(), 2); - PADDLE_ENFORCE_EQ(ids_dims[1], 1); + auto ids_var_type = ctx->GetInputsVarType("Ids").front(); + // The type of Ids(Input) is SelectedRows or LoDTensor, when Ids's type + // is LoDTensor, this tensor contains the ids to be looked up in W + // and it must be a column vector with rank = 2 while the 2nd dimension + // size must be 1, when Ids's type is SelectedRows, the rows of Ids + // contains the ids to be looked up in W; + if (ids_var_type == framework::proto::VarType::LOD_TENSOR) { + PADDLE_ENFORCE_EQ(ids_dims.size(), 2); + PADDLE_ENFORCE_EQ(ids_dims[1], 1); + } ctx->SetOutputDim("Out", {ids_dims[0], table_dims[1]}); ctx->ShareLoD("Ids", /*->*/ "Out"); @@ -54,17 +62,22 @@ class LookupTableOpMaker : public framework::OpProtoAndCheckerMaker { LookupTableOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("W", - "An input represents embedding tensors, " + "(Tensor) The input represents embedding tensors, " "which is a learnable parameter."); - AddInput("Ids", - "An input with type int32 or int64 " - "contains the ids to be looked up in W. " - "Ids must be a column vector with rank = 2. 
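// Sketch of the Ids dispatch this change introduces in the lookup_table
// kernels (names as in the hunks above): the kernel branches on the
// runtime type of the Ids variable instead of assuming a dense tensor.
//
//   if (ids_var->IsType<framework::LoDTensor>()) {
//     // dense ids: an int64 column vector of shape [N, 1]
//   } else if (ids_var->IsType<framework::SelectedRows>()) {
//     // sparse ids: rows() holds the indices, and Out is resized to
//     // {rows.size(), W.dims()[1]}
//   } else {
//     PADDLE_THROW("Unsupported Variable Type of Ids");
//   }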
" - "The 2nd dimension size must be 1."); - AddOutput("Out", "The lookup results, which have the same type as W."); + AddInput( + "Ids", + "(Tensor or SelectedRows) Ids's type can be Tensor or " + "SelectedRows, when Ids's type is Tensor, this tensor contains " + "the ids to be looked up in W and it must be a column vector with " + "rank = 2 while the 2nd dimension size must be 1; when Ids's type is " + "SelectedRows, the rows of Ids contains the ids to be looked up " + "in W."); + AddOutput("Out", + "(Tensor or SelectedRows) The lookup results, which have the " + "same type as W."); AddAttr("is_sparse", "(boolean, default false) " - "Sparse update") + "Sparse update.") .SetDefault(false); AddAttr("padding_idx", "(int64, default -1) " @@ -76,10 +89,15 @@ class LookupTableOpMaker : public framework::OpProtoAndCheckerMaker { Lookup Table Operator. This operator is used to perform lookups on the parameter W, -then concatenated into a dense tensor. +then concatenated into a dense or sparse tensor. + +The type of Ids(Input) is SelectedRows, Tensor or LoDTensor, when Ids's +type is SelectedRows, the rows of Ids contains the ids to be looked up in W; +when Ids's type is Tensor, this tensor contains the ids to be looked up in W +and it must be a column vector with rank = 2 while the 2nd dimension size must be 1, +at this time, Ids can carry the LoD (Level of Details) information, or not, and +the output only shares the LoD information with input Ids. -The input Ids can carry the LoD (Level of Details) information, -or not. And the output only shares the LoD information with input Ids. )DOC"); } diff --git a/paddle/fluid/operators/lookup_table_op.cu b/paddle/fluid/operators/lookup_table_op.cu index 923340f46102d888f549c79684ae0ae2f78ed038..6d81fccd2059c511f71d403229e04587e553e93d 100644 --- a/paddle/fluid/operators/lookup_table_op.cu +++ b/paddle/fluid/operators/lookup_table_op.cu @@ -74,14 +74,32 @@ class LookupTableCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* table_t = context.Input("W"); - auto* ids_t = context.Input("Ids"); - auto* output_t = context.Output("Out"); int64_t padding_idx = context.Attr("padding_idx"); + auto* ids_var = context.InputVar("Ids"); + Tensor* output_t = context.Output("Out"); + + int64_t* ids; + int64_t K; + + // The type of Ids(Input) is SelectedRows or LoDTensor, when Ids's type + // is LoDTensor, this tensor contains the ids to be looked up in W; + // when Ids's type is SelectedRows, the rows of Ids contains the + // ids to be looked up in W. + if (ids_var->IsType()) { + auto* ids_t = context.Input("Ids"); + ids = const_cast(ids_t->data()); + K = ids_t->numel(); + } else if (ids_var->IsType()) { + auto* ids_t = context.Input("Ids"); + ids = const_cast(ids_t->rows().CUDAData(context.GetPlace())); + K = ids_t->rows().size(); + output_t->Resize({K, table_t->dims()[1]}); + } else { + PADDLE_THROW("Unsupported Variable Type of Ids"); + } size_t N = table_t->dims()[0]; size_t D = table_t->dims()[1]; - size_t K = ids_t->numel(); - auto* ids = ids_t->data(); auto* table = table_t->data(); auto* output = output_t->mutable_data(context.GetPlace()); diff --git a/paddle/fluid/operators/lookup_table_op.h b/paddle/fluid/operators/lookup_table_op.h index d88b034e919f1127ac3c424e87e4a5f81a598dc8..c92ce78eeffb8f1517e61c6d6624d406e04d974d 100644 --- a/paddle/fluid/operators/lookup_table_op.h +++ b/paddle/fluid/operators/lookup_table_op.h @@ -22,6 +22,7 @@ limitations under the License. 
*/ namespace paddle { namespace operators { +using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; using SelectedRows = framework::SelectedRows; @@ -29,25 +30,45 @@ template class LookupTableKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* table_t = context.Input("W"); // float tensor - auto* ids_t = context.Input("Ids"); // int tensor - auto* output_t = context.Output("Out"); // float tensor + auto* table_t = context.Input("W"); + auto* ids_var = context.InputVar("Ids"); + Tensor* output_t = context.Output("Out"); + + int64_t* ids; + int64_t ids_numel; + + // The type of Ids(Input) is SelectedRows or LoDTensor, when Ids's type + // is LoDTensor, this tensor contains the ids to be looked up in W; + // when Ids's type is SelectedRows, the rows of Ids contains the + // ids to be looked up in W. + if (ids_var->IsType()) { + auto* ids_t = context.Input("Ids"); + ids = const_cast(ids_t->data()); + ids_numel = ids_t->numel(); + } else if (ids_var->IsType()) { + auto* ids_t = context.Input("Ids"); + ids = const_cast(ids_t->rows().data()); + ids_numel = ids_t->rows().size(); + output_t->Resize({ids_numel, table_t->dims()[1]}); + } else { + PADDLE_THROW("Unsupported Variable Type of Ids"); + } + int64_t padding_idx = context.Attr("padding_idx"); int N = table_t->dims()[0]; int D = table_t->dims()[1]; - auto* ids = ids_t->data(); auto* table = table_t->data(); auto* output = output_t->mutable_data(context.GetPlace()); if (padding_idx == -1) { - for (int64_t i = 0; i < ids_t->numel(); ++i) { + for (int64_t i = 0; i < ids_numel; ++i) { PADDLE_ENFORCE_LT(ids[i], N); PADDLE_ENFORCE_GE(ids[i], 0); memcpy(output + i * D, table + ids[i] * D, D * sizeof(T)); } } else { - for (int64_t i = 0; i < ids_t->numel(); ++i) { + for (int64_t i = 0; i < ids_numel; ++i) { if (ids[i] == padding_idx) { memset(output + i * D, 0, D * sizeof(T)); } else { diff --git a/paddle/fluid/operators/lrn_mkldnn_op.cc b/paddle/fluid/operators/lrn_mkldnn_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..3bead16ce44c26b9d7a6f2a5c6b471612494d595 --- /dev/null +++ b/paddle/fluid/operators/lrn_mkldnn_op.cc @@ -0,0 +1,209 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/operators/lrn_op.h" +#include "paddle/fluid/platform/mkldnn_helper.h" + +namespace paddle { +namespace operators { + +using paddle::framework::Tensor; +using paddle::platform::MKLDNNDeviceContext; + +namespace { +template +std::shared_ptr insert_to_context(const std::string& key, + const MKLDNNDeviceContext& dev_ctx, + Args&&... 
args) { + auto p = std::static_pointer_cast(dev_ctx.GetBlob(key)); + + if (!p) { + p = std::make_shared(args...); + dev_ctx.SetBlob(key, std::static_pointer_cast(p)); + } + + return p; +} +} // namespace + +template +class LRNMKLDNNOpKernel : public paddle::framework::OpKernel { + public: + void Compute(const paddle::framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(std::is_same::value, + "MKLDNN LRN must use float data."); + PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()), + "MKLDNN LRN must use CPUPlace."); + + auto& dev_ctx = ctx.template device_context(); + const auto& mkldnn_engine = dev_ctx.GetEngine(); + + auto x = ctx.Input("X"); + auto out = ctx.Output("Out"); + auto mid = ctx.Output("MidOut"); + + auto input_data = x->data(); + auto output_data = out->mutable_data(ctx.GetPlace()); + mid->mutable_data(ctx.GetPlace()); + + const int n = ctx.Attr("n"); + const float alpha = ctx.Attr("alpha"); + const float beta = ctx.Attr("beta"); + const float k = ctx.Attr("k"); + const bool is_test = ctx.Attr("is_test"); + + auto e_mid = framework::EigenTensor::From(*mid); + e_mid = e_mid.constant(k); + + auto dims = paddle::framework::vectorize2int(x->dims()); + + auto src_md = paddle::platform::MKLDNNMemDesc( + dims, mkldnn::memory::data_type::f32, mkldnn::memory::format::nchw); + + auto dst_md = paddle::platform::MKLDNNMemDesc( + dims, mkldnn::memory::data_type::f32, mkldnn::memory::format::nchw); + + auto forward_desc = mkldnn::lrn_forward::desc{mkldnn::prop_kind::forward, + mkldnn::lrn_across_channels, + src_md, + n, + alpha, + beta, + k}; + + auto src_memory_pd = mkldnn::memory::primitive_desc{src_md, mkldnn_engine}; + auto dst_memory = mkldnn::memory{{dst_md, mkldnn_engine}, + static_cast(output_data)}; + + std::unique_ptr forward_op = nullptr; + + if (!is_test) { + const std::string key = ctx.op().Output("Out"); + const std::string key_src_memory = key + "@lrn_src_memory"; + const std::string key_pd = key + "@lrn_pd"; + const std::string key_workspace_memory = key + "@lrn_workspace_memory"; + + auto forward_pd = insert_to_context( + key_pd, dev_ctx, forward_desc, mkldnn_engine); + + auto src_memory = insert_to_context( + key_src_memory, dev_ctx, src_memory_pd); + + src_memory->set_data_handle( + static_cast(const_cast(input_data))); + + auto workspace_memory = insert_to_context( + key_workspace_memory, dev_ctx, + forward_pd->workspace_primitive_desc()); + + forward_op.reset(new mkldnn::lrn_forward{*forward_pd, *src_memory, + *workspace_memory, dst_memory}); + + } else { + auto forward_pd = + mkldnn::lrn_forward::primitive_desc{forward_desc, mkldnn_engine}; + auto src_memory = mkldnn::memory{ + src_memory_pd, static_cast(const_cast(input_data))}; + auto workspace_memory = + mkldnn::memory{forward_pd.workspace_primitive_desc()}; + + forward_op.reset(new mkldnn::lrn_forward{forward_pd, src_memory, + workspace_memory, dst_memory}); + } + + std::vector pipeline = {*forward_op}; + mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); + } +}; + +template +class LRNMKLDNNGradOpKernel : public paddle::framework::OpKernel { + public: + void Compute(const paddle::framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(std::is_same::value, + "MKLDNN LRN must use float data."); + PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()), + "MKLDNN LRN must use CPUPlace."); + + auto x = ctx.Input("X"); + + auto out_grad = ctx.Input(framework::GradVarName("Out")); + auto x_grad = ctx.Output(framework::GradVarName("X")); + + const std::string 
key = ctx.op().Input("Out"); + const std::string key_src_memory = key + "@lrn_src_memory"; + const std::string key_pd = key + "@lrn_pd"; + const std::string key_workspace_memory = key + "@lrn_workspace_memory"; + + const int n = ctx.Attr("n"); + const float alpha = ctx.Attr("alpha"); + const float beta = ctx.Attr("beta"); + const float k = ctx.Attr("k"); + + auto& dev_ctx = ctx.template device_context(); + const auto& mkldnn_engine = dev_ctx.GetEngine(); + + auto x_grad_data = x_grad->mutable_data(ctx.GetPlace()); + auto out_grad_data = out_grad->data(); + + auto dims = paddle::framework::vectorize2int(x->dims()); + + auto src_md = paddle::platform::MKLDNNMemDesc( + dims, mkldnn::memory::data_type::f32, mkldnn::memory::format::nchw); + + auto diff_src_md = paddle::platform::MKLDNNMemDesc( + dims, mkldnn::memory::data_type::f32, mkldnn::memory::format::nchw); + + auto diff_dst_md = paddle::platform::MKLDNNMemDesc( + dims, mkldnn::memory::data_type::f32, mkldnn::memory::format::nchw); + + auto diff_dst_memory = + mkldnn::memory{{diff_dst_md, mkldnn_engine}, + static_cast(const_cast(out_grad_data))}; + + auto diff_src_memory = mkldnn::memory{{diff_src_md, mkldnn_engine}, + static_cast(x_grad_data)}; + + auto backward_desc = mkldnn::lrn_backward::desc{ + mkldnn::lrn_across_channels, src_md, diff_src_md, n, alpha, beta, k}; + + auto forward_pd = dev_ctx.GetBlob(key_pd); + + auto backward_pd = mkldnn::lrn_backward::primitive_desc{ + backward_desc, mkldnn_engine, + *static_cast(forward_pd.get())}; + + std::shared_ptr workspace_memory = + dev_ctx.GetBlob(key_workspace_memory); + + auto src_memory = dev_ctx.GetBlob(key_src_memory); + auto backward_op = mkldnn::lrn_backward{ + backward_pd, *static_cast(src_memory.get()), + diff_dst_memory, *static_cast(workspace_memory.get()), + diff_src_memory}; + + std::vector pipeline = {backward_op}; + mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OP_KERNEL(lrn, MKLDNN, paddle::platform::CPUPlace, + ops::LRNMKLDNNOpKernel); +REGISTER_OP_KERNEL(lrn_grad, MKLDNN, paddle::platform::CPUPlace, + ops::LRNMKLDNNGradOpKernel); diff --git a/paddle/fluid/operators/lrn_op.cc b/paddle/fluid/operators/lrn_op.cc index b0c213d637c244e4cbacbe75218537973efed047..2b1947a187bbd17871107553127647032ac7d7f9 100644 --- a/paddle/fluid/operators/lrn_op.cc +++ b/paddle/fluid/operators/lrn_op.cc @@ -13,6 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. 
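// Note on the MKLDNN LRN kernels above: the forward pass caches its
// primitive descriptor, source memory, and workspace in the device
// context under string keys derived from the output variable name, and
// the backward pass retrieves them by the same keys, e.g. (a sketch,
// key names as above):
//
//   auto forward_pd = dev_ctx.GetBlob(key + "@lrn_pd");
//   auto workspace  = dev_ctx.GetBlob(key + "@lrn_workspace_memory");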
*/ #include "paddle/fluid/operators/lrn_op.h" +#ifdef PADDLE_WITH_MKLDNN +#include "paddle/fluid/platform/mkldnn_helper.h" +#endif namespace paddle { namespace operators { @@ -36,7 +39,7 @@ struct LRNFunctor { auto e_x = framework::EigenTensor::From(input); for (int m = 0; m < N; m++) { for (int i = 0; i < C; i++) { - for (int c = start; c <= end; c++) { + for (int c = start; c < end; c++) { int ch = i + c; if (ch >= 0 && ch < C) { auto s = e_mid.slice(Eigen::array({{m, i, 0, 0}}), @@ -92,7 +95,7 @@ struct LRNGradFunctor { Eigen::array({{1, 1, H, W}})); i_x_g = i_mid.pow(-beta) * i_out_g; - for (int c = start; c <= end; c++) { + for (int c = start; c < end; c++) { int ch = i + c; if (ch < 0 || ch >= C) { continue; @@ -116,6 +119,26 @@ struct LRNGradFunctor { template struct LRNGradFunctor; template struct LRNGradFunctor; +namespace { +framework::OpKernelType GetExpectedLRNKernel( + const framework::ExecutionContext& ctx) { + framework::LibraryType library_{framework::LibraryType::kPlain}; +#ifdef PADDLE_WITH_MKLDNN + if (library_ == framework::LibraryType::kPlain && + platform::CanMKLDNNBeUsed(ctx)) { + library_ = framework::LibraryType::kMKLDNN; + } +#endif + + std::string data_format = ctx.Attr("data_format"); + // TODO(pzelazko-intel): enable MKLDNN layout when it's ready + framework::DataLayout layout_ = framework::StringToDataLayout(data_format); + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), ctx.GetPlace(), + layout_, library_); +} +} // namespace + class LRNOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; @@ -135,6 +158,11 @@ class LRNOp : public framework::OperatorWithKernel { ctx->SetOutputDim("MidOut", x_dim); ctx->ShareLoD("X", /*->*/ "Out"); } + + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return GetExpectedLRNKernel(ctx); + } }; template @@ -176,6 +204,17 @@ class LRNOpMaker : public framework::OpProtoAndCheckerMaker { "beta is the power number.") .SetDefault(0.75) .GreaterThan(0.0); + AddAttr("use_mkldnn", + "(bool, default false) Only used in mkldnn kernel") + .SetDefault(false); + AddAttr( + "data_format", + "(string, default NCHW) Only used in " + "An optional string from: \"NHWC\", \"NCHW\". " + "Defaults to \"NHWC\". Specify the data format of the output data, " + "the input will be transformed automatically. ") + .SetDefault("AnyLayout"); + AddAttr("is_test", "").SetDefault(false); AddComment(R"DOC( Local Response Normalization Operator. @@ -223,8 +262,12 @@ class LRNOpGrad : public framework::OperatorWithKernel { auto x_dims = ctx->GetInputDim("X"); ctx->SetOutputDim(framework::GradVarName("X"), x_dims); } -}; + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return GetExpectedLRNKernel(ctx); + } +}; } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/math/CMakeLists.txt b/paddle/fluid/operators/math/CMakeLists.txt index a181d802262d15b188060dae4330cec0e24714ab..ee0e91132bce52998e9c45b37335618e4354e1cd 100644 --- a/paddle/fluid/operators/math/CMakeLists.txt +++ b/paddle/fluid/operators/math/CMakeLists.txt @@ -6,6 +6,7 @@ function(math_library TARGET) # But it handle split GPU/CPU code and link some common library. 
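# Sketch of the dispatch this function performs after collecting sources:
# WITH_GPU builds the .cc and .cu sources via nv_library(), WITH_AMD_GPU
# builds the .cc and .hip.cu sources via hip_library(), and the CPU-only
# fallback builds just the .cc sources via cc_library().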
set(cc_srcs) set(cu_srcs) + set(hip_srcs) set(math_common_deps device_context framework_proto) set(multiValueArgs DEPS) cmake_parse_arguments(math_library "${options}" "${oneValueArgs}" @@ -17,10 +18,15 @@ function(math_library TARGET) if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.cu) list(APPEND cu_srcs ${TARGET}.cu) endif() + if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.hip.cu) + list(APPEND hip_srcs ${TARGET}.hip.cu) + endif() list(LENGTH cc_srcs cc_srcs_len) if (WITH_GPU) nv_library(${TARGET} SRCS ${cc_srcs} ${cu_srcs} DEPS ${math_library_DEPS} ${math_common_deps}) + elseif (WITH_AMD_GPU) + hip_library(${TARGET} SRCS ${cc_srcs} ${hip_srcs} DEPS ${math_library_DEPS} ${math_common_deps}) elseif(${cc_srcs_len} GREATER 0) cc_library(${TARGET} SRCS ${cc_srcs} DEPS ${math_library_DEPS} ${math_common_deps}) endif() @@ -38,12 +44,12 @@ math_library(lstm_compute DEPS activation_functions) math_library(math_function DEPS cblas) math_library(maxouting) math_library(pooling) -math_library(selected_rows_functor DEPS selected_rows) +math_library(selected_rows_functor DEPS selected_rows math_function) math_library(sequence2batch) math_library(sequence_padding) math_library(sequence_pooling DEPS math_function) math_library(sequence_scale) -math_library(softmax) +math_library(softmax DEPS math_function) math_library(unpooling) math_library(vol2col) diff --git a/paddle/fluid/operators/math/concat.cc b/paddle/fluid/operators/math/concat.cc index b542143419e05e9baf29e9a2322447f32ddd9829..b672c79afd97e36894af647fd4bc6edfb885ff13 100644 --- a/paddle/fluid/operators/math/concat.cc +++ b/paddle/fluid/operators/math/concat.cc @@ -44,7 +44,7 @@ class ConcatFunctor { out_cols += t_cols; input_cols[i] = t_cols; } - auto& cpu_place = boost::get(context.GetPlace()); + auto cpu_place = boost::get(context.GetPlace()); // computation for (int k = 0; k < out_rows; ++k) { @@ -87,7 +87,7 @@ class ConcatGradFunctor { input_cols += t_cols; output_cols[i] = t_cols; } - auto& cpu_place = boost::get(context.GetPlace()); + auto cpu_place = boost::get(context.GetPlace()); // computation for (int k = 0; k < input_rows; ++k) { diff --git a/paddle/fluid/operators/detection_output_op.cu.cc b/paddle/fluid/operators/math/concat.hip.cu similarity index 54% rename from paddle/fluid/operators/detection_output_op.cu.cc rename to paddle/fluid/operators/math/concat.hip.cu index 0f48765c9c67c1d3fa32b19d5e87b2acaa3c486a..eacef0438883891671fec6e4001f862f619723cb 100644 --- a/paddle/fluid/operators/detection_output_op.cu.cc +++ b/paddle/fluid/operators/math/concat.hip.cu @@ -1,8 +1,8 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2018 paddlepaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. -Indicesou may obtain a copy of the License at +You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 @@ -12,10 +12,4 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
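// Aside on the concat.cc hunk above: context.GetPlace() returns the place
// by value, so binding `auto&` to boost::get<platform::CPUPlace>(...) on
// that temporary risks a dangling reference; copying the small place
// object (`auto cpu_place = ...`) is the cheap, safe form. This reading
// is inferred from the hunk rather than stated in the patch.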
*/ -#include "paddle/fluid/operators/detection_output_op.h" - -namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL( - detection_output, - ops::DetectionOutputKernel, - ops::DetectionOutputKernel); +#include diff --git a/paddle/fluid/operators/math/detection_util.h b/paddle/fluid/operators/math/detection_util.h deleted file mode 100644 index c31764cfaf5bbdfea2f3ed06f31f97965a8858ed..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/math/detection_util.h +++ /dev/null @@ -1,300 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ -#pragma once -#include -#include "paddle/fluid/framework/selected_rows.h" -#include "paddle/fluid/platform/device_context.h" - -namespace paddle { -namespace operators { -namespace math { -template -struct BBox { - BBox(T x_min, T y_min, T x_max, T y_max) - : x_min(x_min), - y_min(y_min), - x_max(x_max), - y_max(y_max), - is_difficult(false) {} - - BBox() {} - - T get_width() const { return x_max - x_min; } - - T get_height() const { return y_max - y_min; } - - T get_center_x() const { return (x_min + x_max) / 2; } - - T get_center_y() const { return (y_min + y_max) / 2; } - - T get_area() const { return get_width() * get_height(); } - - // coordinate of bounding box - T x_min; - T y_min; - T x_max; - T y_max; - // whether difficult object (e.g. 
object with heavy occlusion is difficult) - bool is_difficult; -}; -// KNCHW ==> NHWC -// template -template -void GetBBoxFromPriorData(const T* prior_data, const size_t num_bboxes, - std::vector>& bbox_vec); -template -void GetBBoxVarFromPriorData(const T* prior_data, const size_t num, - std::vector>& var_vec); -template -BBox DecodeBBoxWithVar(BBox& prior_bbox, - const std::vector& prior_bbox_var, - const std::vector& loc_pred_data); -template -bool SortScorePairDescend(const std::pair& pair1, - const std::pair& pair2); -template -bool SortScorePairDescend(const std::pair>& pair1, - const std::pair>& pair2); -template -T jaccard_overlap(const BBox& bbox1, const BBox& bbox2); - -template -void ApplyNmsFast(const std::vector>& bboxes, const T* conf_score_data, - size_t class_idx, size_t top_k, T conf_threshold, - T nms_threshold, size_t num_priors, size_t num_classes, - std::vector* indices); -template -int GetDetectionIndices( - const T* conf_data, const size_t num_priors, const size_t num_classes, - const size_t background_label_id, const size_t batch_size, - const T conf_threshold, const size_t nms_top_k, const T nms_threshold, - const size_t top_k, - const std::vector>>& all_decoded_bboxes, - std::vector>>* all_detection_indices); -template -BBox ClipBBox(const BBox& bbox); -template -void GetDetectionOutput( - const T* conf_data, const size_t num_kept, const size_t num_priors, - const size_t num_classes, const size_t batch_size, - const std::vector>>& all_indices, - const std::vector>>& all_decoded_bboxes, T* out_data); -template -void GetBBoxFromPriorData(const T* prior_data, const size_t num_bboxes, - std::vector>& bbox_vec) { - size_t out_offset = bbox_vec.size(); - bbox_vec.resize(bbox_vec.size() + num_bboxes); - for (size_t i = 0; i < num_bboxes; ++i) { - BBox bbox; - bbox.x_min = *(prior_data + i * 8); - bbox.y_min = *(prior_data + i * 8 + 1); - bbox.x_max = *(prior_data + i * 8 + 2); - bbox.y_max = *(prior_data + i * 8 + 3); - bbox_vec[out_offset + i] = bbox; - } -} -template -void GetBBoxVarFromPriorData(const T* prior_data, const size_t num, - std::vector>& var_vec) { - size_t out_offset = var_vec.size(); - var_vec.resize(var_vec.size() + num); - for (size_t i = 0; i < num; ++i) { - std::vector var; - var.push_back(*(prior_data + i * 8 + 4)); - var.push_back(*(prior_data + i * 8 + 5)); - var.push_back(*(prior_data + i * 8 + 6)); - var.push_back(*(prior_data + i * 8 + 7)); - var_vec[out_offset + i] = var; - } -} -template -BBox DecodeBBoxWithVar(BBox& prior_bbox, - const std::vector& prior_bbox_var, - const std::vector& loc_pred_data) { - T prior_bbox_width = prior_bbox.get_width(); - T prior_bbox_height = prior_bbox.get_height(); - T prior_bbox_center_x = prior_bbox.get_center_x(); - T prior_bbox_center_y = prior_bbox.get_center_y(); - - T decoded_bbox_center_x = - prior_bbox_var[0] * loc_pred_data[0] * prior_bbox_width + - prior_bbox_center_x; - T decoded_bbox_center_y = - prior_bbox_var[1] * loc_pred_data[1] * prior_bbox_height + - prior_bbox_center_y; - T decoded_bbox_width = - std::exp(prior_bbox_var[2] * loc_pred_data[2]) * prior_bbox_width; - T decoded_bbox_height = - std::exp(prior_bbox_var[3] * loc_pred_data[3]) * prior_bbox_height; - - BBox decoded_bbox; - decoded_bbox.x_min = decoded_bbox_center_x - decoded_bbox_width / 2; - decoded_bbox.y_min = decoded_bbox_center_y - decoded_bbox_height / 2; - decoded_bbox.x_max = decoded_bbox_center_x + decoded_bbox_width / 2; - decoded_bbox.y_max = decoded_bbox_center_y + decoded_bbox_height / 2; - - return decoded_bbox; -} 
-template -bool SortScorePairDescend(const std::pair& pair1, - const std::pair& pair2) { - return pair1.first > pair2.first; -} -template -T jaccard_overlap(const BBox& bbox1, const BBox& bbox2) { - if (bbox2.x_min > bbox1.x_max || bbox2.x_max < bbox1.x_min || - bbox2.y_min > bbox1.y_max || bbox2.y_max < bbox1.y_min) { - return 0.0; - } else { - T inter_x_min = std::max(bbox1.x_min, bbox2.x_min); - T inter_y_min = std::max(bbox1.y_min, bbox2.y_min); - T interX_max = std::min(bbox1.x_max, bbox2.x_max); - T interY_max = std::min(bbox1.y_max, bbox2.y_max); - - T inter_width = interX_max - inter_x_min; - T inter_height = interY_max - inter_y_min; - T inter_area = inter_width * inter_height; - - T bbox_area1 = bbox1.get_area(); - T bbox_area2 = bbox2.get_area(); - - return inter_area / (bbox_area1 + bbox_area2 - inter_area); - } -} - -template -void ApplyNmsFast(const std::vector>& bboxes, const T* conf_score_data, - size_t class_idx, size_t top_k, T conf_threshold, - T nms_threshold, size_t num_priors, size_t num_classes, - std::vector* indices) { - std::vector> scores; - for (size_t i = 0; i < num_priors; ++i) { - size_t conf_offset = i * num_classes + class_idx; - if (conf_score_data[conf_offset] > conf_threshold) - scores.push_back(std::make_pair(conf_score_data[conf_offset], i)); - } - std::stable_sort(scores.begin(), scores.end(), - SortScorePairDescend); - if (top_k > 0 && top_k < scores.size()) scores.resize(top_k); - while (scores.size() > 0) { - const size_t idx = scores.front().second; - bool keep = true; - for (size_t i = 0; i < indices->size(); ++i) { - if (keep) { - const size_t saved_idx = (*indices)[i]; - T overlap = jaccard_overlap(bboxes[idx], bboxes[saved_idx]); - keep = overlap <= nms_threshold; - } else { - break; - } - } - if (keep) indices->push_back(idx); - scores.erase(scores.begin()); - } -} -template -int GetDetectionIndices( - const T* conf_data, const size_t num_priors, const size_t num_classes, - const size_t background_label_id, const size_t batch_size, - const T conf_threshold, const size_t nms_top_k, const T nms_threshold, - const size_t top_k, - const std::vector>>& all_decoded_bboxes, - std::vector>>* all_detection_indices) { - int total_keep_num = 0; - for (size_t n = 0; n < batch_size; ++n) { - const std::vector>& decoded_bboxes = all_decoded_bboxes[n]; - size_t num_detected = 0; - std::map> indices; - size_t conf_offset = n * num_priors * num_classes; - for (size_t c = 0; c < num_classes; ++c) { - if (c == background_label_id) continue; - ApplyNmsFast(decoded_bboxes, conf_data + conf_offset, c, nms_top_k, - conf_threshold, nms_threshold, num_priors, num_classes, - &(indices[c])); - num_detected += indices[c].size(); - } - if (top_k > 0 && num_detected > top_k) { - // std::vector> score_index_pairs; - std::vector>> score_index_pairs; - for (size_t c = 0; c < num_classes; ++c) { - const std::vector& label_indices = indices[c]; - for (size_t i = 0; i < label_indices.size(); ++i) { - size_t idx = label_indices[i]; - score_index_pairs.push_back( - std::make_pair((conf_data + conf_offset)[idx * num_classes + c], - std::make_pair(c, idx))); - } - } - std::sort(score_index_pairs.begin(), score_index_pairs.end(), - SortScorePairDescend>); - score_index_pairs.resize(top_k); - std::map> new_indices; - for (size_t i = 0; i < score_index_pairs.size(); ++i) { - size_t label = score_index_pairs[i].second.first; - size_t idx = score_index_pairs[i].second.second; - new_indices[label].push_back(idx); - } - all_detection_indices->push_back(new_indices); - total_keep_num += 
top_k; - } else { - all_detection_indices->push_back(indices); - total_keep_num += num_detected; - } - } - return total_keep_num; -} -template -BBox ClipBBox(const BBox& bbox) { - T one = static_cast(1.0); - T zero = static_cast(0.0); - BBox clipped_bbox; - clipped_bbox.x_min = std::max(std::min(bbox.x_min, one), zero); - clipped_bbox.y_min = std::max(std::min(bbox.y_min, one), zero); - clipped_bbox.x_max = std::max(std::min(bbox.x_max, one), zero); - clipped_bbox.y_max = std::max(std::min(bbox.y_max, one), zero); - return clipped_bbox; -} -template -void GetDetectionOutput( - const T* conf_data, const size_t num_kept, const size_t num_priors, - const size_t num_classes, const size_t batch_size, - const std::vector>>& all_indices, - const std::vector>>& all_decoded_bboxes, T* out_data) { - size_t count = 0; - for (size_t n = 0; n < batch_size; ++n) { - for (std::map>::const_iterator it = - all_indices[n].begin(); - it != all_indices[n].end(); ++it) { - size_t label = it->first; - const std::vector& indices = it->second; - const std::vector>& decoded_bboxes = all_decoded_bboxes[n]; - for (size_t i = 0; i < indices.size(); ++i) { - size_t idx = indices[i]; - size_t conf_offset = n * num_priors * num_classes + idx * num_classes; - out_data[count * 7] = n; - out_data[count * 7 + 1] = label; - out_data[count * 7 + 2] = (conf_data + conf_offset)[label]; - BBox clipped_bbox = ClipBBox(decoded_bboxes[idx]); - out_data[count * 7 + 3] = clipped_bbox.x_min; - out_data[count * 7 + 4] = clipped_bbox.y_min; - out_data[count * 7 + 5] = clipped_bbox.x_max; - out_data[count * 7 + 6] = clipped_bbox.y_max; - ++count; - } - } - } -} -} // namespace math -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/math/math_function.cc b/paddle/fluid/operators/math/math_function.cc index 35d251f71a0cb631d5900498ea3188b5ddeae334..299a0aed01dfe0448d896738d9fd33319b1b2887 100644 --- a/paddle/fluid/operators/math/math_function.cc +++ b/paddle/fluid/operators/math/math_function.cc @@ -278,6 +278,7 @@ void axpy( cblas_daxpy(n, alpha, x, 1, y, 1); } +template struct SetConstant; template struct SetConstant; template struct SetConstant; template struct SetConstant; @@ -371,6 +372,8 @@ template struct RowwiseAdd; template struct ColwiseSum; template struct ColwiseSum; +template struct ColwiseSum; +template struct ColwiseSum; template struct RowwiseSum; template struct RowwiseSum; diff --git a/paddle/fluid/operators/math/math_function.cu b/paddle/fluid/operators/math/math_function.cu index 3abbcdb71d03eaf6f8eba3d97150d27ac5a5405e..1e909db5288afccb9dd0be08a45cf3c27048ae6f 100644 --- a/paddle/fluid/operators/math/math_function.cu +++ b/paddle/fluid/operators/math/math_function.cu @@ -348,6 +348,7 @@ void axpy( &alpha, x, 1, y, 1)); } +template struct SetConstant; template struct SetConstant; template struct SetConstant; template struct SetConstant; @@ -422,6 +423,8 @@ struct RowwiseAdd { template struct RowwiseAdd; template struct RowwiseAdd; template struct ColwiseSum; +template struct ColwiseSum; +template struct ColwiseSum; // template struct ColwiseSum; // The ColwiseSum failed in debug mode, // and only failed for this case. So reimplemented it. diff --git a/paddle/fluid/operators/math/softmax.cu b/paddle/fluid/operators/math/softmax.cu index 38e93fdf15d99eb447948378a599891074c10fc5..5518ebed3f792a5acdfbb27976bc2c6dbd78069a 100644 --- a/paddle/fluid/operators/math/softmax.cu +++ b/paddle/fluid/operators/math/softmax.cu @@ -14,13 +14,87 @@ limitations under the License. 
*/ #define EIGEN_USE_GPU +#include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/softmax.h" #include "paddle/fluid/operators/math/softmax_impl.h" +#include "paddle/fluid/platform/cudnn_helper.h" namespace paddle { namespace operators { namespace math { +using Tensor = framework::Tensor; +using ScopedTensorDescriptor = platform::ScopedTensorDescriptor; +using DataLayout = platform::DataLayout; +template +using CudnnDataType = platform::CudnnDataType; + +template +void SoftmaxCUDNNFunctor::operator()( + const platform::CUDADeviceContext& context, const framework::Tensor* X, + framework::Tensor* Y) { + // ------------------- cudnn descriptors --------------------- + ScopedTensorDescriptor xDesc; + ScopedTensorDescriptor yDesc; + std::vector cudnn_tensor_dims = framework::vectorize2int(X->dims()); + DataLayout layout = DataLayout::kNCHW; + if (cudnn_tensor_dims.size() == 5) { + layout = DataLayout::kNCDHW; + } + // NOTE(*) : cudnn softmax only support >= 4D Tensor, + // fill 1 at unused dims + if (cudnn_tensor_dims.size() <= 2) { + cudnn_tensor_dims.resize(4, 1); + } + cudnnTensorDescriptor_t cudnn_x_desc = + xDesc.descriptor(layout, cudnn_tensor_dims); + cudnnTensorDescriptor_t cudnn_y_desc = + xDesc.descriptor(layout, cudnn_tensor_dims); + PADDLE_ENFORCE(platform::dynload::cudnnSoftmaxForward( + context.cudnn_handle(), CUDNN_SOFTMAX_ACCURATE, + CUDNN_SOFTMAX_MODE_INSTANCE, CudnnDataType::kOne(), cudnn_x_desc, + X->data(), CudnnDataType::kZero(), cudnn_y_desc, + Y->mutable_data(context.GetPlace()))); +} + +template +void SoftmaxGradCUDNNFunctor::operator()( + const platform::CUDADeviceContext& context, const framework::Tensor* Y, + const framework::Tensor* YGrad, framework::Tensor* XGrad) { + // ------------------- cudnn descriptors --------------------- + ScopedTensorDescriptor yDesc; + ScopedTensorDescriptor dyDesc; + ScopedTensorDescriptor dxDesc; + std::vector cudnn_tensor_dims = framework::vectorize2int(Y->dims()); + DataLayout layout = DataLayout::kNCHW; + if (cudnn_tensor_dims.size() == 5) { + layout = DataLayout::kNCDHW; + } + // NOTE(*) : cudnn softmax only support >= 4D Tensor, + // fill 1 at unused dims + if (cudnn_tensor_dims.size() <= 2) { + cudnn_tensor_dims.resize(4, 1); + } + cudnnTensorDescriptor_t cudnn_y_desc = + yDesc.descriptor(layout, cudnn_tensor_dims); + cudnnTensorDescriptor_t cudnn_xgrad_desc = + dxDesc.descriptor(layout, cudnn_tensor_dims); + cudnnTensorDescriptor_t cudnn_ygrad_desc = + dyDesc.descriptor(layout, cudnn_tensor_dims); + PADDLE_ENFORCE(platform::dynload::cudnnSoftmaxBackward( + context.cudnn_handle(), CUDNN_SOFTMAX_ACCURATE, + CUDNN_SOFTMAX_MODE_INSTANCE, CudnnDataType::kOne(), cudnn_y_desc, + Y->data(), cudnn_ygrad_desc, YGrad->data(), + CudnnDataType::kZero(), cudnn_xgrad_desc, + XGrad->mutable_data(context.GetPlace()))); +} + +template class SoftmaxCUDNNFunctor; +template class SoftmaxCUDNNFunctor; +template class SoftmaxCUDNNFunctor; +template class SoftmaxGradCUDNNFunctor; +template class SoftmaxGradCUDNNFunctor; + template class SoftmaxFunctor; template class SoftmaxFunctor; template class SoftmaxGradFunctor; diff --git a/paddle/fluid/operators/math/softmax.h b/paddle/fluid/operators/math/softmax.h index 14b2690c2a4e764058270953214a07aee8053444..da1f0b672d3a5fb5da8f4d72892be21964bdbc0d 100644 --- a/paddle/fluid/operators/math/softmax.h +++ b/paddle/fluid/operators/math/softmax.h @@ -33,6 +33,23 @@ class SoftmaxGradFunctor { const framework::Tensor* y_grad, framework::Tensor* x_grad); }; +#ifdef 
PADDLE_WITH_CUDA +template +class SoftmaxCUDNNFunctor { + public: + void operator()(const platform::CUDADeviceContext& context, + const framework::Tensor* X, framework::Tensor* Y); +}; + +template +class SoftmaxGradCUDNNFunctor { + public: + void operator()(const platform::CUDADeviceContext& context, + const framework::Tensor* Y, const framework::Tensor* y_grad, + framework::Tensor* x_grad); +}; +#endif + } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/mine_hard_examples_op.cc b/paddle/fluid/operators/mine_hard_examples_op.cc index 0e81d60878dce747b047abbe4641b71462373b2b..277901cff493445e1e85e92e22ea0ada0e1cba43 100644 --- a/paddle/fluid/operators/mine_hard_examples_op.cc +++ b/paddle/fluid/operators/mine_hard_examples_op.cc @@ -324,8 +324,9 @@ MatchIndices elements with value -1. } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_WITHOUT_GRADIENT(mine_hard_examples, ops::MineHardExamplesOp, - ops::MineHardExamplesOpMaker); +REGISTER_OPERATOR(mine_hard_examples, ops::MineHardExamplesOp, + ops::MineHardExamplesOpMaker, + paddle::framework::EmptyGradOpMaker); REGISTER_OP_CPU_KERNEL( mine_hard_examples, diff --git a/paddle/fluid/operators/mul_op.cc b/paddle/fluid/operators/mul_op.cc index e7bed2c39735b66c19e738c91f4977e46571143b..90af1e2d602ac039b4d98a69a889ff8b1b85ffc6 100644 --- a/paddle/fluid/operators/mul_op.cc +++ b/paddle/fluid/operators/mul_op.cc @@ -17,11 +17,14 @@ limitations under the License. */ namespace paddle { namespace operators { +using framework::OpKernelType; using framework::Tensor; -class MulOpShapeInference : public framework::InferShapeBase { +class MulOp : public framework::OperatorWithKernel { public: - void operator()(framework::InferShapeContext* ctx) const override { + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of MulOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) of MulOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), @@ -122,7 +125,7 @@ or not. But the output only shares the LoD information with input $X$. } }; -class MulOpGrad : public framework::OperatorWithKernel { +class MulGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; @@ -156,10 +159,7 @@ class MulOpGrad : public framework::OperatorWithKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OPERATOR(mul, paddle::framework::OperatorWithKernel, ops::MulOpMaker, - ops::MulOpShapeInference, - paddle::framework::DefaultGradOpDescMaker); -REGISTER_OPERATOR(mul_grad, ops::MulOpGrad); +REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker, mul_grad, ops::MulGradOp); REGISTER_OP_CPU_KERNEL( mul, ops::MulKernel); REGISTER_OP_CPU_KERNEL( diff --git a/paddle/fluid/operators/mul_op.cu.cc b/paddle/fluid/operators/mul_op.cu.cc index 0667530e943856576ae8c9fe4856cb6aa1448e4e..757f9c3ee2665c7ac654659416fe8dd727dca16d 100644 --- a/paddle/fluid/operators/mul_op.cu.cc +++ b/paddle/fluid/operators/mul_op.cu.cc @@ -13,9 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. 
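// Usage sketch for the cuDNN softmax functors declared in softmax.h above
// (assuming a CUDADeviceContext `dev_ctx` and float tensors x, y that are
// already sized; tensors of rank <= 2 are padded to 4-D internally):
//
//   math::SoftmaxCUDNNFunctor<float> softmax;
//   softmax(dev_ctx, &x, &y);  // y = softmax(x), per instance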
*/ #include "paddle/fluid/operators/mul_op.h" +#include "paddle/fluid/platform/float16.h" namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL( - mul, ops::MulKernel); -REGISTER_OP_CUDA_KERNEL( - mul_grad, ops::MulGradKernel); +namespace plat = paddle::platform; +REGISTER_OP_CUDA_KERNEL(mul, ops::MulKernel, + ops::MulKernel); +REGISTER_OP_CUDA_KERNEL(mul_grad, + ops::MulGradKernel); diff --git a/paddle/fluid/operators/mul_op.h b/paddle/fluid/operators/mul_op.h index 38311cf87265ad0f1f815734cbf69bd682d62e62..b1260d36ebe11f65529ac274c959479dcb38ee5f 100644 --- a/paddle/fluid/operators/mul_op.h +++ b/paddle/fluid/operators/mul_op.h @@ -48,7 +48,7 @@ class MulKernel : public framework::OpKernel { } math::matmul( context.template device_context(), x_matrix, false, - y_matrix, false, 1, z, 0); + y_matrix, false, static_cast(1), z, static_cast(0)); if (z_dim.size() != 2) { z->Resize(z_dim); } diff --git a/paddle/fluid/operators/nccl_op.cc b/paddle/fluid/operators/nccl_op.cc index 329656d26da0d32a4e30dd2aeecb9f7aa7f9a84d..5e4ed886b10bd48bf991ce84a9099611cf5d1d26 100644 --- a/paddle/fluid/operators/nccl_op.cc +++ b/paddle/fluid/operators/nccl_op.cc @@ -104,19 +104,38 @@ class NCCLAllReduceOp : public framework::OperatorWithKernel { " Input(Communicator) of AllReduce op input should not be NULL"); PADDLE_ENFORCE(ctx->HasOutput("Out"), " Output(Out) of AllReduce op output should not be NULL"); - - auto x_dims = ctx->GetInputsDim("X"); - std::string reduction = ctx->Attrs().Get("reduction"); PADDLE_ENFORCE((reduction == "ncclSum" || reduction == "ncclProd" || reduction == "ncclMin" || reduction == "ncclMax"), "invalid reduction."); + auto x_dims = ctx->GetInputsDim("X"); ctx->SetOutputsDim("Out", x_dims); ctx->ShareLoD("X", /*->*/ "Out"); } }; +// AllReduceOp +class NCCLAllReduceOpMaker : public framework::OpProtoAndCheckerMaker { + public: + NCCLAllReduceOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "The input of AllReduce op"); + AddInput("Communicator", "Communicator for communicating between gpus"); + AddOutput("Out", "The output of AllReduce op"); + AddAttr("reduction", + "(string, default 'ncclSum') " + "{'ncclMin', 'ncclMax', 'ncclProd', 'ncclSum'}.") + .SetDefault("ncclSum"); + AddComment(R"DOC( +NCCLAllReduce Operator. + +AllReduce the input tensors. 
+ +)DOC"); + } +}; + // ReduceOp class NCCLReduceOp : public framework::OperatorWithKernel { public: @@ -143,50 +162,6 @@ class NCCLReduceOp : public framework::OperatorWithKernel { } }; -// BcastOp -class NCCLBcastOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - protected: - void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), - " Input(X) of Bcast op input should not be NULL"); - PADDLE_ENFORCE(ctx->HasInput("Communicator"), - " Input(Communicator) of Bcast op input should not be NULL"); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - " Output(Out) of Bcast op output should not be NULL"); - - int root = ctx->Attrs().Get("root"); - PADDLE_ENFORCE(root != platform::kInvalidGPUId, "Bcast root must be set."); - - auto x_dims = ctx->GetInputsDim("X"); - ctx->SetOutputsDim("Out", x_dims); - ctx->ShareLoD("X", /*->*/ "Out"); - } -}; - -// AllreduceOp -class NCCLAllReduceOpMaker : public framework::OpProtoAndCheckerMaker { - public: - NCCLAllReduceOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The input of AllReduce op"); - AddInput("Communicator", "Communicator for communicating between gpus"); - AddOutput("Out", "The output of AllReduce op"); - AddAttr("reduction", - "(string, default 'ncclSum') " - "{'ncclMin', 'ncclMax', 'ncclProd', 'ncclSum'}.") - .SetDefault("ncclSum"); - AddComment(R"DOC( -NCCLAllReduce Operator. - -AllReduce the input tensors. - -)DOC"); - } -}; - // ReduceOp class NCCLReduceOpMaker : public framework::OpProtoAndCheckerMaker { public: @@ -213,6 +188,29 @@ Reduce the tensors. } }; +// BcastOp +class NCCLBcastOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + " Input(X) of Bcast op input should not be NULL"); + PADDLE_ENFORCE(ctx->HasInput("Communicator"), + " Input(Communicator) of Bcast op input should not be NULL"); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + " Output(Out) of Bcast op output should not be NULL"); + + int root = ctx->Attrs().Get("root"); + PADDLE_ENFORCE(root != platform::kInvalidGPUId, "Bcast root must be set."); + + auto x_dims = ctx->GetInputsDim("X"); + ctx->SetOutputsDim("Out", x_dims); + ctx->ShareLoD("X", /*->*/ "Out"); + } +}; + // BcastOp class NCCLBcastOpMaker : public framework::OpProtoAndCheckerMaker { public: diff --git a/paddle/fluid/operators/nccl_op.cu.cc b/paddle/fluid/operators/nccl_op.cu.cc index 683a520e99fe72875d52393a18463324a8b6c3f2..ad623e1fe0f8941615b671a0c20bd3637ae6d407 100644 --- a/paddle/fluid/operators/nccl_op.cu.cc +++ b/paddle/fluid/operators/nccl_op.cu.cc @@ -43,13 +43,12 @@ class NCCLAllReduceKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), "This kernel only runs on GPU device."); - - auto ins = ctx.MultiInput("X"); - auto outs = ctx.MultiOutput("Out"); - + auto* x = ctx.Input("X"); + auto* out = ctx.Output("Out"); + auto* comm = ctx.Input("Communicator"); std::string reduction = ctx.Attr("reduction"); - ncclRedOp_t reduction_op_ = ncclSum; + ncclRedOp_t reduction_op_ = ncclSum; if (reduction == "ncclMin") { reduction_op_ = ncclMin; } else if (reduction == "ncclMax") { @@ -61,30 +60,19 @@ class NCCLAllReduceKernel : public framework::OpKernel { } 
else { PADDLE_THROW("Invalid reduction. default ncclSum."); } - - auto* comm = ctx.Input("Communicator"); - - auto stream = ctx.cuda_device_context().stream(); - // device id int gpu_id = boost::get(ctx.GetPlace()).GetDeviceId(); int idx = comm->GetCommId(gpu_id); - - for (size_t i = 0; i < ins.size(); ++i) { - VLOG(1) << "gpu : " - << " invoke allreduce. send " << ins[i]->numel() << " recv " - << outs[i]->numel(); - - PADDLE_ENFORCE(platform::dynload::ncclAllReduce( - ins[i]->data(), outs[i]->mutable_data(ctx.GetPlace()), - outs[i]->numel(), NCCLTypeWrapper::type, reduction_op_, - comm->comms().at(idx), stream)); - PADDLE_ENFORCE(cudaStreamSynchronize(stream)); - - VLOG(1) << "gpu : " - << " finished allreduce. send " << ins[i]->numel() << " recv " - << outs[i]->numel(); - } + VLOG(3) << "gpu : " + << " invoke allreduce. send " << x->numel() << " recv " + << out->numel(); + PADDLE_ENFORCE(platform::dynload::ncclAllReduce( + x->data(), out->mutable_data(ctx.GetPlace()), out->numel(), + NCCLTypeWrapper::type, reduction_op_, comm->comms().at(idx), + ctx.cuda_device_context().stream())); + VLOG(3) << "gpu : " + << " finished allreduce. send " << x->numel() << " recv " + << out->numel(); } }; @@ -94,13 +82,13 @@ class NCCLReduceKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), "This kernel only runs on GPU device."); - - auto ins = ctx.MultiInput("X"); // x0, x1, x2 - auto outs = ctx.MultiOutput("Out"); - + auto x = ctx.Input("X"); // x0, x1, x2 + auto out = ctx.Output("Out"); + auto* comm = ctx.Input("Communicator"); + int root = ctx.Attr("root"); std::string reduction = ctx.Attr("reduction"); - ncclRedOp_t reduction_op_ = ncclSum; + ncclRedOp_t reduction_op_ = ncclSum; if (reduction == "ncclMin") { reduction_op_ = ncclMin; } else if (reduction == "ncclMax") { @@ -112,40 +100,23 @@ class NCCLReduceKernel : public framework::OpKernel { } else { PADDLE_THROW("Invalid reduction. default ncclSum."); } - - int root = ctx.Attr("root"); - auto* comm = ctx.Input("Communicator"); - - auto stream = reinterpret_cast( - ctx.device_context()) - .stream(); // device id int gpu_id = boost::get(ctx.GetPlace()).GetDeviceId(); int idx = comm->GetCommId(gpu_id); - - auto ins_names = ctx.Inputs("X"); - std::hash hasher; - for (size_t i = 0; i < ins.size(); ++i) { - if (root == platform::kInvalidGPUId) { - root = hasher(ins_names[i]) % comm->comms().size(); - } - T* recvbuffer = nullptr; - if (root == gpu_id) { - recvbuffer = outs[i]->mutable_data(ctx.GetPlace()); - } - - VLOG(1) << "gpu : " << gpu_id << " invoke reduce. send " - << ins[i]->numel() << " recv " << outs[i]->numel(); - - PADDLE_ENFORCE(platform::dynload::ncclReduce( - ins[i]->data(), recvbuffer, ins[i]->numel(), - NCCLTypeWrapper::type, reduction_op_, root, comm->comms().at(idx), - stream)); - PADDLE_ENFORCE(cudaStreamSynchronize(stream)); - - VLOG(1) << "gpu : " << gpu_id << " finished reduce. send " - << ins[i]->numel() << " recv " << outs[i]->numel(); + T* recvbuffer = nullptr; + if (root == gpu_id) { + recvbuffer = out->mutable_data(ctx.GetPlace()); + } else { + out->Resize(framework::make_ddim({0})); } + VLOG(3) << "gpu : " << gpu_id << " invoke reduce. 
send " << x->numel() + << " recv " << out->numel(); + PADDLE_ENFORCE(platform::dynload::ncclReduce( + x->data(), recvbuffer, x->numel(), NCCLTypeWrapper::type, + reduction_op_, root, comm->comms().at(idx), + ctx.cuda_device_context().stream())); + VLOG(3) << "gpu : " << gpu_id << " finished reduce. send " << x->numel() + << " recv " << out->numel(); } }; @@ -155,47 +126,27 @@ class NCCLBcastKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), "This kernel only runs on GPU device."); - int root = ctx.Attr("root"); - auto* comm = ctx.Input("Communicator"); - - auto stream = reinterpret_cast( - ctx.device_context()) - .stream(); // device id int gpu_id = boost::get(ctx.GetPlace()).GetDeviceId(); int idx = comm->GetCommId(gpu_id); - if (idx == root) { - auto ins = ctx.MultiInput("X"); - for (size_t i = 0; i < ins.size(); ++i) { - VLOG(1) << "gpu : " << gpu_id << " invoke Bcast. send " - << ins[i]->numel(); - - VLOG(1) << " before ncclBcast"; - PADDLE_ENFORCE(platform::dynload::ncclBcast( - (void*)ins[i]->data(), ins[i]->numel(), NCCLTypeWrapper::type, - root, comm->comms().at(idx), stream)); - VLOG(1) << " after ncclBcast"; - PADDLE_ENFORCE(cudaStreamSynchronize(stream)); - - VLOG(1) << "gpu : " << gpu_id << " finished Bcast."; - } + auto* x = ctx.Input("X"); + VLOG(3) << "gpu : " << gpu_id << " invoke Bcast. send " << x->numel(); + PADDLE_ENFORCE(platform::dynload::ncclBcast( + (void*)x->data(), x->numel(), NCCLTypeWrapper::type, root, + comm->comms().at(idx), ctx.cuda_device_context().stream())); + VLOG(3) << "gpu : " << gpu_id << " finished Bcast."; } else { - auto outs = ctx.MultiOutput("Out"); - for (size_t i = 0; i < outs.size(); ++i) { - VLOG(1) << "gpu : " << gpu_id << " invoke Bcast. recv buffer " - << framework::product(outs[i]->dims()); - - PADDLE_ENFORCE(platform::dynload::ncclBcast( - outs[i]->mutable_data(ctx.GetPlace()), outs[i]->numel(), - NCCLTypeWrapper::type, root, comm->comms().at(idx), stream)); - PADDLE_ENFORCE(cudaStreamSynchronize(stream)); - - VLOG(1) << "gpu : " << gpu_id << " finished Bcast. recv " - << outs[i]->numel(); - } + auto* out = ctx.Output("Out"); + VLOG(3) << "gpu : " << gpu_id << " invoke Bcast. recv buffer " + << framework::product(out->dims()); + PADDLE_ENFORCE(platform::dynload::ncclBcast( + out->mutable_data(ctx.GetPlace()), out->numel(), + NCCLTypeWrapper::type, root, comm->comms().at(idx), + ctx.cuda_device_context().stream())); + VLOG(3) << "gpu : " << gpu_id << " finished Bcast. recv " << out->numel(); } } }; diff --git a/paddle/fluid/operators/parallel_do_op.cc b/paddle/fluid/operators/parallel_do_op.cc index bf4d0476df32d7454d4064cb6ee454e6ad5d6fc5..4001b9a130348b4e3ea99f3017eae6d85e41fc6e 100644 --- a/paddle/fluid/operators/parallel_do_op.cc +++ b/paddle/fluid/operators/parallel_do_op.cc @@ -18,6 +18,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/threadpool.h" #include "paddle/fluid/operators/detail/safe_ref.h" +#include "paddle/fluid/platform/profiler.h" namespace paddle { namespace operators { @@ -158,11 +159,14 @@ class ParallelDoOp : public framework::OperatorBase { auto &place = places[place_idx]; auto *cur_scope = sub_scopes[place_idx]; - workers.emplace_back(framework::Async([program, cur_scope, place, block] { - framework::Executor executor(place); - executor.Run(*program, cur_scope, block->ID(), - false /*create_local_scope*/); - })); + workers.emplace_back( + framework::Async([program, cur_scope, place, block, place_idx] { + // Give the thread an id to distinguish parallel block with same id. + platform::RecordThread rt(static_cast(place_idx) + 1); + framework::Executor executor(place); + executor.Run(*program, cur_scope, block->ID(), + false /*create_local_scope*/); + })); } for (auto &worker : workers) { worker.wait(); @@ -234,11 +238,14 @@ class ParallelDoGradOp : public framework::OperatorBase { auto *cur_scope = sub_scopes[i]; // execute - workers.emplace_back(framework::Async([program, cur_scope, place, block] { - framework::Executor executor(place); - executor.Run(*program, cur_scope, block->ID(), - false /*create_local_scope*/); - })); + workers.emplace_back( + framework::Async([program, cur_scope, place, block, i] { + // Give the thread an id to distinguish parallel block with same id. + platform::RecordThread rt(static_cast(i) + 1); + framework::Executor executor(place); + executor.Run(*program, cur_scope, block->ID(), + false /*create_local_scope*/); + })); } for (auto &worker : workers) { worker.wait(); diff --git a/paddle/fluid/operators/pool_cudnn_op.cu.cc b/paddle/fluid/operators/pool_cudnn_op.cu.cc index 781d96981e4c033d9287ab3de9860dfd9fcd2875..39c862b03ad497dca5c38ccecff20be510ab60e5 100644 --- a/paddle/fluid/operators/pool_cudnn_op.cu.cc +++ b/paddle/fluid/operators/pool_cudnn_op.cu.cc @@ -24,6 +24,8 @@ using ScopedTensorDescriptor = platform::ScopedTensorDescriptor; using ScopedPoolingDescriptor = platform::ScopedPoolingDescriptor; using DataLayout = platform::DataLayout; using PoolingMode = platform::PoolingMode; +template +using ScalingParamType = typename platform::CudnnDataType::ScalingParamType; template class PoolCUDNNOpKernel : public framework::OpKernel { @@ -78,8 +80,7 @@ class PoolCUDNNOpKernel : public framework::OpKernel { // ------------------- cudnn pool algorithm --------------------- auto handle = ctx.cuda_device_context().cudnn_handle(); - T alpha = 1.0f, beta = 0.0f; - + ScalingParamType alpha = 1.0f, beta = 0.0f; PADDLE_ENFORCE(platform::dynload::cudnnPoolingForward( handle, cudnn_pool_desc, &alpha, cudnn_input_desc, input_data, &beta, cudnn_output_desc, output_data)); @@ -144,8 +145,7 @@ class PoolCUDNNGradOpKernel : public framework::OpKernel { // ------------------- cudnn pool algorithm --------------------- auto handle = ctx.cuda_device_context().cudnn_handle(); - T alpha = 1.0f, beta = 0.0f; - + ScalingParamType alpha = 1.0f, beta = 0.0f; if (input_grad) { T *input_grad_data = input_grad->mutable_data(ctx.GetPlace()); // Because beta is zero, it is unnecessary to reset input_grad. 
@@ -162,17 +162,19 @@ class PoolCUDNNGradOpKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; +namespace plat = paddle::platform; -REGISTER_OP_KERNEL(pool2d, CUDNN, ::paddle::platform::CUDAPlace, +REGISTER_OP_KERNEL(pool2d, CUDNN, plat::CUDAPlace, ops::PoolCUDNNOpKernel, - ops::PoolCUDNNOpKernel); -REGISTER_OP_KERNEL(pool2d_grad, CUDNN, ::paddle::platform::CUDAPlace, + ops::PoolCUDNNOpKernel, + ops::PoolCUDNNOpKernel); +REGISTER_OP_KERNEL(pool2d_grad, CUDNN, plat::CUDAPlace, ops::PoolCUDNNGradOpKernel, ops::PoolCUDNNGradOpKernel); -REGISTER_OP_KERNEL(pool3d, CUDNN, ::paddle::platform::CUDAPlace, +REGISTER_OP_KERNEL(pool3d, CUDNN, plat::CUDAPlace, ops::PoolCUDNNOpKernel, ops::PoolCUDNNOpKernel); -REGISTER_OP_KERNEL(pool3d_grad, CUDNN, ::paddle::platform::CUDAPlace, +REGISTER_OP_KERNEL(pool3d_grad, CUDNN, plat::CUDAPlace, ops::PoolCUDNNGradOpKernel, ops::PoolCUDNNGradOpKernel); diff --git a/paddle/fluid/operators/pool_op.cc b/paddle/fluid/operators/pool_op.cc index d78da10016a0e2b1d9a0ca9f3dfe4e8009bbe61d..b144ec5f7d315cb340dcd94b4a519bfcfd2a0e66 100644 --- a/paddle/fluid/operators/pool_op.cc +++ b/paddle/fluid/operators/pool_op.cc @@ -124,11 +124,15 @@ framework::OpKernelType PoolOpGrad::GetExpectedKernelType( } #endif + auto input_data_type = framework::ToDataType(ctx.Input("X")->type()); + if (input_data_type == framework::proto::VarType::FP16) { + PADDLE_ENFORCE_EQ(library_, framework::LibraryType::kCUDNN, + "float16 can only be used when CUDNN is used"); + } std::string data_format = ctx.Attr("data_format"); framework::DataLayout layout_ = framework::StringToDataLayout(data_format); - return framework::OpKernelType( - framework::ToDataType(ctx.Input("X")->type()), ctx.GetPlace(), - layout_, library_); + return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout_, + library_); } Pool2dOpMaker::Pool2dOpMaker(OpProto *proto, OpAttrChecker *op_checker) diff --git a/paddle/fluid/operators/prior_box_op.cc b/paddle/fluid/operators/prior_box_op.cc index 7ba55437cb20f802cc12ceea7777d7d78bba62a6..c22a55bce263423d5c17fffdb06b7ece02ae26da 100644 --- a/paddle/fluid/operators/prior_box_op.cc +++ b/paddle/fluid/operators/prior_box_op.cc @@ -168,7 +168,9 @@ https://arxiv.org/abs/1512.02325. 
} // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_WITHOUT_GRADIENT(prior_box, ops::PriorBoxOp, ops::PriorBoxOpMaker); +REGISTER_OPERATOR(prior_box, ops::PriorBoxOp, ops::PriorBoxOpMaker, + paddle::framework::EmptyGradOpMaker); + REGISTER_OP_CPU_KERNEL( prior_box, ops::PriorBoxOpKernel, ops::PriorBoxOpKernel); diff --git a/paddle/fluid/operators/reader/CMakeLists.txt b/paddle/fluid/operators/reader/CMakeLists.txt index 335c5b26a864381bf87a2824b78f521cdce063e4..6fa0195b9ae103418beb56cc4b0fa9ab59e93108 100644 --- a/paddle/fluid/operators/reader/CMakeLists.txt +++ b/paddle/fluid/operators/reader/CMakeLists.txt @@ -1,6 +1,26 @@ cc_library(reader_op_registry SRCS reader_op_registry.cc DEPS operator op_registry reader) -op_library(create_random_data_generator_op SRCS create_random_data_generator_op.cc DEPS reader_op_registry) -op_library(create_shuffle_reader_op SRCS create_shuffle_reader_op.cc DEPS reader_op_registry) -op_library(create_batch_reader_op SRCS create_batch_reader_op.cc DEPS reader_op_registry) -op_library(create_double_buffer_reader_op SRCS create_double_buffer_reader_op.cc DEPS reader_op_registry) -set(READER_LIBRARY create_random_data_generator_op create_shuffle_reader_op create_batch_reader_op create_double_buffer_reader_op PARENT_SCOPE) +set(LOCAL_READER_LIBS) + +function(reader_library TARGET_NAME) + set(oneValueArgs "") + set(multiValueArgs SRCS DEPS) + set(options "") + set(common_deps reader_op_registry) + cmake_parse_arguments(reader_library "${options}" "${oneValueArgs}" + "${multiValueArgs}" ${ARGN}) + op_library(${TARGET_NAME} SRCS ${reader_library_SRCS} DEPS ${common_deps} ${reader_library_DEPS}) + set(LOCAL_READER_LIBS + ${TARGET_NAME} + ${LOCAL_READER_LIBS} + PARENT_SCOPE) +endfunction() + +reader_library(open_files_op SRCS open_files_op.cc) +reader_library(create_random_data_generator_op SRCS create_random_data_generator_op.cc) +reader_library(create_shuffle_reader_op SRCS create_shuffle_reader_op.cc) +reader_library(create_batch_reader_op SRCS create_batch_reader_op.cc) +reader_library(create_recordio_file_reader_op SRCS create_recordio_file_reader_op.cc) +reader_library(create_double_buffer_reader_op SRCS create_double_buffer_reader_op.cc) +reader_library(create_multi_pass_reader_op SRCS create_multi_pass_reader_op.cc) +# Export local libraries to parent +set(READER_LIBRARY ${LOCAL_READER_LIBS} PARENT_SCOPE) diff --git a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc index b6a0609a1e23195ececee0f16a69daa1c1c46ed8..76cdb794ccdb4a015ae8630940a5c26845e7a7b3 100644 --- a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc +++ b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc @@ -24,23 +24,52 @@ static constexpr size_t kDoubleBufferSize = 2; class DoubleBufferReader : public framework::DecoratedReader { public: - explicit DoubleBufferReader(ReaderBase* reader) - : DecoratedReader(reader), - buffer_(framework::MakeChannel>( - kDoubleBufferSize)) { - std::thread prefetch(&DoubleBufferReader::PrefetchThreadFunc, this); - prefetch.detach(); + struct Item { + Item() : ctx_(nullptr) {} + + std::vector payloads_; + platform::DeviceContext* ctx_; + }; + + explicit DoubleBufferReader( + ReaderBase* reader, platform::Place target_place = platform::CPUPlace()) + : DecoratedReader(reader), place_(target_place) { + for (size_t i = 0; i < kDoubleBufferSize; ++i) { + if (platform::is_gpu_place(place_)) { +#ifdef PADDLE_WITH_CUDA + ctxs_.emplace_back(new 
platform::CUDADeviceContext( + boost::get(place_))); +#endif + } + } + + start_thread(); + } + + void start_thread() { + buffer_ = framework::MakeChannel(kDoubleBufferSize); + prefetcher_ = std::thread([this] { PrefetchThreadFunc(); }); } void ReadNext(std::vector* out) override; void ReInit() override; - ~DoubleBufferReader() { buffer_->Close(); } + ~DoubleBufferReader() { + buffer_->Close(); + prefetcher_.join(); + delete buffer_; + } + + bool HasNext() const override; private: void PrefetchThreadFunc(); - framework::Channel>* buffer_; + std::thread prefetcher_; + framework::Channel* buffer_; + platform::Place place_; + std::vector> ctxs_; + mutable Item local_buffer_; }; class CreateDoubleBufferReaderOp : public framework::OperatorBase { @@ -54,7 +83,20 @@ class CreateDoubleBufferReaderOp : public framework::OperatorBase { ->Get(); auto* out = scope.FindVar(Output("Out")) ->template GetMutable(); - out->Reset(new DoubleBufferReader(underlying_reader.Get())); + + auto place_str = Attr("place"); + platform::Place place; + if (place_str == "CPU") { + place = platform::CPUPlace(); + } else { + std::istringstream sin(place_str); + sin.seekg(std::string("CUDA:").size(), std::ios::beg); + size_t num; + sin >> num; + place = platform::CUDAPlace(static_cast(num)); + } + + out->Reset(new DoubleBufferReader(underlying_reader.Get(), place)); } }; @@ -69,41 +111,78 @@ class CreateDoubleBufferReaderOpMaker : public DecoratedReaderMakerBase { It launches another thread to execute the 'underlying reader' asynchronously, which prevents reading process from blocking subsequent training. )DOC"); + std::unordered_set enum_range; + constexpr size_t kMaxCUDADevs = 128; + for (size_t i = 0; i < kMaxCUDADevs; ++i) { + enum_range.insert(string::Sprintf("CUDA:%d", i)); + } + enum_range.insert("CPU"); + AddAttr("place", "The double buffer place, default is CPU") + .SetDefault("CPU") + .InEnum({enum_range}); } }; void DoubleBufferReader::ReadNext(std::vector* out) { - out->clear(); - buffer_->Receive(out); + if (!HasNext()) { + PADDLE_THROW("There is no next data!"); + } + + if (local_buffer_.payloads_.empty()) { + buffer_->Receive(&local_buffer_); + } + *out = local_buffer_.payloads_; + local_buffer_.payloads_.clear(); + if (local_buffer_.ctx_) { + local_buffer_.ctx_->Wait(); + } } void DoubleBufferReader::ReInit() { reader_->ReInit(); buffer_->Close(); - // The existing prefetch thread will terminate for the buffer_ is closed. - buffer_ = framework::MakeChannel>( - kDoubleBufferSize); - std::thread prefetch(&DoubleBufferReader::PrefetchThreadFunc, this); - prefetch.detach(); + prefetcher_.join(); + delete buffer_; + start_thread(); } void DoubleBufferReader::PrefetchThreadFunc() { VLOG(5) << "A new prefetch thread starts."; - while (true) { - std::vector batch; - reader_->ReadNext(&batch); - if (batch.empty()) { - // EOF - buffer_->Close(); - VLOG(5) << "Reached the end of the file. 
The prefetch thread terminates."; - break; + size_t gpu_ctx_offset = 0; + while (reader_->HasNext()) { + Item batch; + reader_->ReadNext(&batch.payloads_); + if (platform::is_gpu_place(place_)) { + std::vector gpu_batch; + auto& gpu_ctx = this->ctxs_[gpu_ctx_offset++]; + gpu_ctx_offset %= this->ctxs_.size(); + gpu_batch.resize(batch.payloads_.size()); + for (size_t i = 0; i < batch.payloads_.size(); ++i) { + framework::TensorCopy(batch.payloads_[i], place_, *gpu_ctx, + &gpu_batch[i]); + gpu_batch[i].set_lod(batch.payloads_[i].lod()); + } + batch.ctx_ = gpu_ctx.get(); + std::swap(gpu_batch, batch.payloads_); } + if (!buffer_->Send(&batch)) { VLOG(5) << "WARNING: The double buffer channel has been closed. The " - "prefetch thread terminates."; + "prefetch thread will terminate."; break; } } + buffer_->Close(); + VLOG(5) << "Prefetch thread terminates."; +} + +bool DoubleBufferReader::HasNext() const { + if (local_buffer_.payloads_.empty()) { + bool ok = buffer_->Receive(&local_buffer_); + return ok; + } else { + return true; + } } } // namespace reader diff --git a/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc b/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..4d4e9fb909eafea5328491a4097276577f28a5ba --- /dev/null +++ b/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc @@ -0,0 +1,101 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
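The DoubleBufferReader rewrite above replaces a detached prefetch thread with a joinable one plus an explicit bounded channel, and tags each prefetched Item with the device context that performed its copy so that ReadNext can wait on it. Below is a self-contained sketch of that producer/consumer shape; a mutex/condition_variable queue stands in for framework::Channel and int batches stand in for LoDTensors, so all names here are illustrative rather than Paddle APIs:

// Sketch of the double-buffering pattern used by DoubleBufferReader above.
#include <condition_variable>
#include <deque>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

class BoundedBuffer {
 public:
  explicit BoundedBuffer(size_t cap) : cap_(cap) {}

  bool Send(std::vector<int> batch) {
    std::unique_lock<std::mutex> lk(mu_);
    cv_.wait(lk, [&] { return closed_ || q_.size() < cap_; });
    if (closed_) return false;  // channel closed: producer stops
    q_.push_back(std::move(batch));
    cv_.notify_all();
    return true;
  }

  bool Receive(std::vector<int>* out) {
    std::unique_lock<std::mutex> lk(mu_);
    cv_.wait(lk, [&] { return closed_ || !q_.empty(); });
    if (q_.empty()) return false;  // closed and fully drained
    *out = std::move(q_.front());
    q_.pop_front();
    cv_.notify_all();
    return true;
  }

  void Close() {
    std::lock_guard<std::mutex> lk(mu_);
    closed_ = true;
    cv_.notify_all();
  }

 private:
  size_t cap_;
  bool closed_ = false;
  std::deque<std::vector<int>> q_;
  std::mutex mu_;
  std::condition_variable cv_;
};

int main() {
  BoundedBuffer buffer(2);  // kDoubleBufferSize = 2
  // Prefetch thread: stays at most two batches ahead of the consumer, like
  // DoubleBufferReader::PrefetchThreadFunc (minus the GPU copy).
  std::thread prefetcher([&] {
    for (int i = 0; i < 5; ++i) {
      if (!buffer.Send(std::vector<int>(4, i))) break;
    }
    buffer.Close();  // EOF: close so the consumer drains and stops
  });
  std::vector<int> batch;
  while (buffer.Receive(&batch)) {
    std::cout << "consumed batch of " << batch.size() << " elems\n";
  }
  prefetcher.join();
  return 0;
}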
+ +#include "paddle/fluid/operators/detail/safe_ref.h" +#include "paddle/fluid/operators/reader/reader_op_registry.h" + +namespace paddle { +namespace operators { +namespace reader { + +class MultiPassReader : public framework::DecoratedReader { + public: + MultiPassReader(ReaderBase* reader, int pass_num) + : DecoratedReader(reader), pass_num_(pass_num), pass_count_(0) {} + + void ReadNext(std::vector* out) override { + if (!HasNext()) { + PADDLE_THROW("There is no next data!"); + } + reader_->ReadNext(out); + } + + bool HasNext() const override { + if (reader_->HasNext()) { + return true; + } else { + ++pass_count_; + if (pass_count_ >= pass_num_) { + return false; + } else { + reader_->ReInit(); + return true; + } + } + } + + void ReInit() override { + pass_count_ = 0; + reader_->ReInit(); + } + + private: + int pass_num_; + mutable int pass_count_; +}; + +class CreateMultiPassReaderOp : public framework::OperatorBase { + public: + using framework::OperatorBase::OperatorBase; + + private: + void RunImpl(const framework::Scope& scope, + const platform::Place& dev_place) const override { + const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) + ->Get(); + auto& out = detail::Ref(scope.FindVar(Output("Out"))); + int pass_num = Attr("pass_num"); + out.GetMutable()->Reset( + new MultiPassReader(underlying_reader.Get(), pass_num)); + } +}; + +class CreateMultiPassReaderOpMaker : public DecoratedReaderMakerBase { + public: + CreateMultiPassReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) + : DecoratedReaderMakerBase(op_proto, op_checker) { + AddAttr("pass_num", "The number of pass to run.").GreaterThan(0); + AddComment(R"DOC( + CreateMultiPassReader Operator + + This operator creates a multi-pass reader. A multi-pass reader + is used to yield data for several pass training continuously. + It takes the the number of pass to run as one of its attributes + ('pass_num'), and maintains a pass counter to record how many + passes it has completed. When the underlying reader reach the EOF, + the multi-pass reader checks whether it has completed training + of the given number of pass. If not, the underlying reader will + be re-initialized and starts a new pass automatically. 
+ )DOC"); + } +}; + +} // namespace reader +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators::reader; +REGISTER_DECORATED_READER_OPERATOR(create_multi_pass_reader, + ops::CreateMultiPassReaderOp, + ops::CreateMultiPassReaderOpMaker); diff --git a/paddle/fluid/operators/reader/create_random_data_generator_op.cc b/paddle/fluid/operators/reader/create_random_data_generator_op.cc index 73c39b5da4484b27f75aeba3c8171c5ffed2398f..95d8674c08b63e872926ff8708d0c734da33684c 100644 --- a/paddle/fluid/operators/reader/create_random_data_generator_op.cc +++ b/paddle/fluid/operators/reader/create_random_data_generator_op.cc @@ -19,11 +19,11 @@ namespace operators { namespace reader { template -class RandomDataGenerator : public framework::FileReader { +class RandomDataGenerator : public framework::ReaderBase { public: RandomDataGenerator(const std::vector& shapes, float min, float max) - : FileReader(shapes), min_(min), max_(max) { + : framework::ReaderBase(), min_(min), max_(max), shapes_(shapes) { PADDLE_ENFORCE_LE( min, max, "'min' shouldn't be greater than 'max'.(%f vs %f)", min, max); unsigned int seed = std::random_device()(); @@ -52,11 +52,14 @@ class RandomDataGenerator : public framework::FileReader { void ReInit() override { return; } + bool HasNext() const override { return true; } + private: float min_; float max_; std::minstd_rand engine_; std::uniform_real_distribution dist_; + std::vector shapes_; }; template diff --git a/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc b/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..c4aa29c7206dbd3fe6a99b2a6c5ac6f083621944 --- /dev/null +++ b/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc @@ -0,0 +1,90 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/fluid/operators/reader/reader_op_registry.h" +#include "paddle/fluid/recordio/scanner.h" + +namespace paddle { +namespace operators { +namespace reader { +class RecordIOFileReader : public framework::FileReader { + public: + explicit RecordIOFileReader(const std::string& filename, + const std::vector& dims) + : FileReader(dims), + scanner_(filename), + dev_ctx_(*platform::DeviceContextPool::Instance().Get( + platform::CPUPlace())) {} + + bool HasNext() const override { return scanner_.HasNext(); } + + void ReInit() override { scanner_.Reset(); } + + protected: + void ReadNextImpl(std::vector* out) override { + *out = framework::ReadFromRecordIO(scanner_, dev_ctx_); + } + + private: + recordio::Scanner scanner_; + const platform::DeviceContext& dev_ctx_; +}; + +class CreateRecordIOReaderOp : public framework::OperatorBase { + public: + using framework::OperatorBase::OperatorBase; + + private: + void RunImpl(const framework::Scope& scope, + const platform::Place& dev_place) const override { + const auto& shape_concat = Attr>("shape_concat"); + const auto& ranks = Attr>("ranks"); + PADDLE_ENFORCE(!shape_concat.empty() && !ranks.empty()); + PADDLE_ENFORCE_EQ(std::accumulate(ranks.begin(), ranks.end(), 0), + int(shape_concat.size()), + "The accumulate of all ranks should be equal to the " + "shape concat's length."); + std::string filename = Attr("filename"); + + auto* out = scope.FindVar(Output("Out")) + ->template GetMutable(); + out->Reset( + new RecordIOFileReader(filename, RestoreShapes(shape_concat, ranks))); + } +}; + +class CreateRecordIOReaderOpMaker : public FileReaderMakerBase { + public: + CreateRecordIOReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) + : FileReaderMakerBase(op_proto, op_checker) { + AddAttr("filename", "The filename of record io reader"); + AddComment(R"DOC( + CreateRecordIOReader Operator + + Create a reader from a record io file + )DOC"); + } +}; + +} // namespace reader +} // namespace operators +} // namespace paddle + +namespace reader = paddle::operators::reader; + +REGISTER_FILE_READER_OPERATOR(create_recordio_file_reader, + reader::CreateRecordIOReaderOp, + reader::CreateRecordIOReaderOpMaker); + +REGISTER_FILE_READER(recordio, reader::RecordIOFileReader); diff --git a/paddle/fluid/operators/reader/create_shuffle_reader_op.cc b/paddle/fluid/operators/reader/create_shuffle_reader_op.cc index 4dac3831109beeed660d32f08fb27c7adf62ac2b..3a1f3805a0483c2f5eabdc7432556051d8308964 100644 --- a/paddle/fluid/operators/reader/create_shuffle_reader_op.cc +++ b/paddle/fluid/operators/reader/create_shuffle_reader_op.cc @@ -12,6 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+#include +#include "glog/logging.h" +#include "paddle/fluid/operators/detail/safe_ref.h" #include "paddle/fluid/operators/reader/reader_op_registry.h" namespace paddle { @@ -20,43 +23,55 @@ namespace reader { class ShuffleReader : public framework::DecoratedReader { public: - ShuffleReader(ReaderBase* reader, int buffer_size) - : DecoratedReader(reader), buffer_size_(buffer_size), iteration_pos_(0) { - buffer_.reserve(buffer_size); + ShuffleReader(ReaderBase* reader, size_t buffer_size, size_t seed = 0) + : DecoratedReader(reader), buffer_size_(buffer_size), seed_(seed) { + VLOG(10) << "Create shuffle reader of " << reader_; + if (seed_ == 0) { + std::random_device device; + seed_ = device(); + } + ReadIntoBuffers(); } - void ReadNext(std::vector* out) override; + void ReadNext(std::vector* out) override { + if (!HasNext()) { + PADDLE_THROW("There is no next data!"); + } + if (iteration_pos_ >= buffer_.size()) { + VLOG(10) << "Resetting shuffle buffer"; + ReadIntoBuffers(); + } + *out = buffer_[iteration_pos_++]; + } - private: - int buffer_size_; - std::vector> buffer_; - size_t iteration_pos_; -}; + bool HasNext() const override { + return iteration_pos_ < buffer_.size() || reader_->HasNext(); + } -void ShuffleReader::ReadNext(std::vector* out) { - if (iteration_pos_ >= buffer_.size()) { - // Reload buffer with new data + private: + void ReadIntoBuffers() { buffer_.clear(); buffer_.reserve(buffer_size_); - for (int i = 0; i < buffer_size_; ++i) { - buffer_.push_back(std::vector()); - reader_->ReadNext(&buffer_.back()); - if (buffer_.back().empty()) { - buffer_.pop_back(); + iteration_pos_ = 0; + for (size_t i = 0; i < buffer_size_; ++i) { + if (!reader_->HasNext()) { break; } + buffer_.emplace_back(); + reader_->ReadNext(&buffer_.back()); } - // TODO(fengjiayi): 'std::random_shuffle' can be very slow. It needs to be - // optimize. - std::random_shuffle(buffer_.begin(), buffer_.end()); - iteration_pos_ = 0; - } - out->clear(); - if (!buffer_.empty()) { - std::swap(*out, buffer_[iteration_pos_++]); + std::mt19937 g(seed_); + std::shuffle(buffer_.begin(), buffer_.end(), g); + seed_ = g(); // update seed_; + VLOG(10) << "random buffer size = " << buffer_.size(); } - // if buffer_ is empty, the 'out' will return as an empty vector. -} + + size_t buffer_size_; + std::vector> buffer_; + + size_t iteration_pos_; + size_t seed_; +}; class CreateShuffleReaderOp : public framework::OperatorBase { public: @@ -67,10 +82,10 @@ class CreateShuffleReaderOp : public framework::OperatorBase { const platform::Place& dev_place) const override { const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) ->Get(); - auto* out = scope.FindVar(Output("Out")) - ->template GetMutable(); - out->Reset( - new ShuffleReader(underlying_reader.Get(), Attr("buffer_size"))); + auto& var = detail::Ref(scope.FindVar(Output("Out"))); + var.GetMutable()->Reset( + new ShuffleReader(underlying_reader.Get(), + static_cast(Attr("buffer_size")))); } }; diff --git a/paddle/fluid/operators/reader/open_files_op.cc b/paddle/fluid/operators/reader/open_files_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..414c76fea0bb916dfeafe38c0448a7a800889e03 --- /dev/null +++ b/paddle/fluid/operators/reader/open_files_op.cc @@ -0,0 +1,212 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/channel.h" +#include "paddle/fluid/operators/reader/reader_op_registry.h" + +namespace paddle { +namespace operators { +namespace reader { + +class MultipleReader : public framework::ReaderBase { + public: + MultipleReader(const std::vector& file_names, + const std::vector& dims, size_t thread_num) + : file_names_(file_names), dims_(dims) { + prefetchers_.resize(thread_num); + StartNewScheduler(); + } + + void ReadNext(std::vector* out) override; + bool HasNext() const override; + void ReInit() override; + + ~MultipleReader() { EndScheduler(); } + + private: + void StartNewScheduler(); + void EndScheduler(); + void ScheduleThreadFunc(); + void PrefetchThreadFunc(std::string file_name, size_t thread_idx); + + std::vector file_names_; + std::vector dims_; + std::thread scheduler_; + std::vector prefetchers_; + framework::Channel* waiting_file_idx_; + framework::Channel* available_thread_idx_; + framework::Channel>* buffer_; + mutable std::vector local_buffer_; +}; + +void MultipleReader::ReadNext(std::vector* out) { + if (!HasNext()) { + PADDLE_THROW("There is no next data!"); + } + + if (local_buffer_.empty()) { + buffer_->Receive(&local_buffer_); + } + *out = local_buffer_; + local_buffer_.clear(); +} + +bool MultipleReader::HasNext() const { + return local_buffer_.empty() ? buffer_->Receive(&local_buffer_) : true; +} + +void MultipleReader::ReInit() { + EndScheduler(); + local_buffer_.clear(); + StartNewScheduler(); +} + +void MultipleReader::StartNewScheduler() { + size_t thread_num = prefetchers_.size(); + waiting_file_idx_ = framework::MakeChannel(file_names_.size()); + available_thread_idx_ = framework::MakeChannel(thread_num); + buffer_ = + framework::MakeChannel>(thread_num); + + for (size_t i = 0; i < file_names_.size(); ++i) { + waiting_file_idx_->Send(&i); + } + waiting_file_idx_->Close(); + for (size_t i = 0; i < thread_num; ++i) { + available_thread_idx_->Send(&i); + } + + scheduler_ = std::thread([this] { ScheduleThreadFunc(); }); +} + +void MultipleReader::EndScheduler() { + available_thread_idx_->Close(); + buffer_->Close(); + waiting_file_idx_->Close(); + if (scheduler_.joinable()) { + scheduler_.join(); + } + delete buffer_; + delete available_thread_idx_; + delete waiting_file_idx_; +} + +void MultipleReader::ScheduleThreadFunc() { + VLOG(5) << "MultipleReader schedule thread starts."; + size_t completed_thread_num = 0; + size_t thread_idx; + while (available_thread_idx_->Receive(&thread_idx)) { + std::thread& prefetcher = prefetchers_[thread_idx]; + if (prefetcher.joinable()) { + prefetcher.join(); + } + size_t file_idx; + if (waiting_file_idx_->Receive(&file_idx)) { + // Still have files to read. Start a new prefetch thread. + std::string file_name = file_names_[file_idx]; + prefetcher = std::thread([this, file_name, thread_idx] { + PrefetchThreadFunc(file_name, thread_idx); + }); + } else { + // No more file to read. 
+      ++completed_thread_num;
+      if (completed_thread_num == prefetchers_.size()) {
+        buffer_->Close();
+        break;
+      }
+    }
+  }
+  // If users invoke ReInit() while the scheduler is running, it will close
+  // 'available_thread_idx_', and the prefetcher threads will have no way to
+  // tell the scheduler to release their resources. So a check is needed
+  // before the scheduler ends.
+  for (auto& p : prefetchers_) {
+    if (p.joinable()) {
+      p.join();
+    }
+  }
+  VLOG(5) << "MultipleReader schedule thread terminates.";
+}
+
+void MultipleReader::PrefetchThreadFunc(std::string file_name,
+                                        size_t thread_idx) {
+  VLOG(5) << "The prefetch thread of file '" << file_name << "' starts.";
+  std::unique_ptr<framework::ReaderBase> reader =
+      CreateReaderByFileName(file_name, dims_);
+  while (reader->HasNext()) {
+    std::vector<framework::LoDTensor> ins;
+    reader->ReadNext(&ins);
+    if (!buffer_->Send(&ins)) {
+      VLOG(5) << "WARNING: The buffer channel has been closed. The prefetch "
+                 "thread of file '"
+              << file_name << "' will terminate.";
+      break;
+    }
+  }
+  if (!available_thread_idx_->Send(&thread_idx)) {
+    VLOG(5) << "WARNING: The available_thread_idx_ channel has been closed. "
+               "Fail to send thread_idx.";
+  }
+  VLOG(5) << "The prefetch thread of file '" << file_name << "' terminates.";
+}
+
+class OpenFilesOp : public framework::OperatorBase {
+ public:
+  using framework::OperatorBase::OperatorBase;
+
+ private:
+  void RunImpl(const framework::Scope& scope,
+               const platform::Place& dev_place) const override {
+    const auto& shape_concat = Attr<std::vector<int>>("shape_concat");
+    const auto& ranks = Attr<std::vector<int>>("ranks");
+    PADDLE_ENFORCE(!shape_concat.empty() && !ranks.empty());
+    PADDLE_ENFORCE_EQ(std::accumulate(ranks.begin(), ranks.end(), 0),
+                      int(shape_concat.size()),
+                      "The accumulate of all ranks should be equal to the "
+                      "shape concat's length.");
+    const auto& file_names = Attr<std::vector<std::string>>("file_names");
+    PADDLE_ENFORCE(!file_names.empty(), "No file to be read!");
+    const size_t thread_num = Attr<int>("thread_num");
+
+    auto* out = scope.FindVar(Output("Out"))
+                    ->template GetMutable<framework::ReaderHolder>();
+    out->Reset(new MultipleReader(
+        file_names, RestoreShapes(shape_concat, ranks), thread_num));
+  }
+};
+
+class OpenFilesOpMaker : public FileReaderMakerBase {
+ public:
+  OpenFilesOpMaker(OpProto* op_proto, OpAttrChecker* op_checker)
+      : FileReaderMakerBase(op_proto, op_checker) {
+    AddAttr<std::vector<std::string>>("file_names", "Files to be read.");
+    AddAttr<int>("thread_num", "The maximal concurrent prefetch thread number.")
+        .GreaterThan(0);
+
+    AddComment(R"DOC(
+      OpenFiles Operator
+
+      An OpenFilesOp creates a MultipleReader, which is able to
+      read data from multiple files using multiple prefetch threads.
+ )DOC"); + } +}; + +} // namespace reader +} // namespace operators +} // namespace paddle + +namespace reader = paddle::operators::reader; + +REGISTER_FILE_READER_OPERATOR(open_files, reader::OpenFilesOp, + reader::OpenFilesOpMaker); diff --git a/paddle/fluid/operators/reader/reader_op_registry.cc b/paddle/fluid/operators/reader/reader_op_registry.cc index f80769d7cd2d35261cd55fc1d6c8c20197f5e88c..fc8dc747ff0c2286f4516d8350f75d9887361924 100644 --- a/paddle/fluid/operators/reader/reader_op_registry.cc +++ b/paddle/fluid/operators/reader/reader_op_registry.cc @@ -31,11 +31,31 @@ std::vector RestoreShapes(const std::vector& shape_concat, return res; } +std::unordered_map& FileReaderRegistry() { + static std::unordered_map regs; + return regs; +} + +std::unique_ptr CreateReaderByFileName( + const std::string& file_name, const std::vector& dims) { + size_t separator_pos = file_name.find_last_of(kFileFormatSeparator); + PADDLE_ENFORCE_NE(separator_pos, std::string::npos, + "File name illegal! A legal file name should be like: " + "[file_name].[file_format] (e.g., 'data_file.recordio')."); + std::string filetype = file_name.substr(separator_pos + 1); + + auto itor = FileReaderRegistry().find(filetype); + PADDLE_ENFORCE(itor != FileReaderRegistry().end(), + "No file reader registered for '%s' format.", filetype); + framework::ReaderBase* reader = (itor->second)(file_name, dims); + return std::unique_ptr(reader); +} + FileReaderMakerBase::FileReaderMakerBase( framework::OpProtoAndCheckerMaker::OpProto* op_proto, framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(op_proto, op_checker) { - AddOutput("Out", "(ReaderHolder) The created random reader."); + AddOutput("Out", "(ReaderHolder) The created random reader.").AsDuplicable(); AddAttr>("shape_concat", "The concat of all data's shapes."); AddAttr>( "ranks", diff --git a/paddle/fluid/operators/reader/reader_op_registry.h b/paddle/fluid/operators/reader/reader_op_registry.h index d1f0498f4692247cda72fbcbdd5070ddfaa11553..929d32ad8b367865e33530f8517343c513ee9878 100644 --- a/paddle/fluid/operators/reader/reader_op_registry.h +++ b/paddle/fluid/operators/reader/reader_op_registry.h @@ -21,6 +21,25 @@ namespace paddle { namespace operators { namespace reader { +static constexpr char kFileFormatSeparator[] = "."; + +using FileReaderCreator = std::function&)>; + +std::unordered_map& FileReaderRegistry(); + +template +int RegisterFileReader(const std::string& filetype) { + FileReaderRegistry()[filetype] = []( + const std::string& fn, const std::vector& dims) { + return new Reader(fn, dims); + }; + return 0; +} + +std::unique_ptr CreateReaderByFileName( + const std::string& file_name, const std::vector& dims); + extern std::vector RestoreShapes( const std::vector& shape_concat, const std::vector& ranks); @@ -73,3 +92,15 @@ class DecoratedReaderMakerBase : public framework::OpProtoAndCheckerMaker { paddle::operators::reader::DecoratedReaderInferShape, \ paddle::framework::EmptyGradOpMaker, \ paddle::operators::reader::DecoratedReaderInferVarType) + +#define REGISTER_FILE_READER(_filetype, _reader) \ + STATIC_ASSERT_GLOBAL_NAMESPACE( \ + _reg_file_reader_##_filetype, \ + "Must use REGISTER_FILE_READER in global namespace"); \ + int TouchFileReader##_filetype() { return 0; } \ + int _reg_file_reader_entry_##filetype = \ + paddle::operators::reader::RegisterFileReader<_reader>(#_filetype) + +#define USE_FILE_READER(filetype) \ + extern int TouchFileReader##filetype(); \ + static int _use_##filetype = TouchFileReader##filetype() diff --git 
a/paddle/fluid/operators/reduce_op.cc b/paddle/fluid/operators/reduce_op.cc index 69e8f8081e93cb74177eac1a57e0eaf284951e3f..7879367830216cdd875f9f95f95e2a88f282ac64 100644 --- a/paddle/fluid/operators/reduce_op.cc +++ b/paddle/fluid/operators/reduce_op.cc @@ -173,6 +173,15 @@ class ReduceMinOpMaker : public ReduceOpMaker { } }; +class ReduceProdOpMaker : public ReduceOpMaker { + public: + ReduceProdOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : ReduceOpMaker(proto, op_checker) { + SetComment("ReduceProd", "production"); + AddComment(comment_); + } +}; + } // namespace operators } // namespace paddle @@ -190,6 +199,9 @@ REGISTER_OP(reduce_max, ops::ReduceOp, ops::ReduceMaxOpMaker, reduce_max_grad, REGISTER_OP(reduce_min, ops::ReduceOp, ops::ReduceMinOpMaker, reduce_min_grad, ops::ReduceGradOp); +REGISTER_OP(reduce_prod, ops::ReduceOp, ops::ReduceProdOpMaker, + reduce_prod_grad, ops::ReduceGradOp); + #define REGISTER_REDUCE_CPU_KERNEL(reduce_type, functor, grad_functor) \ REGISTER_OP_CPU_KERNEL(reduce_type, \ ops::ReduceKernel + void operator()(const DeviceContext& place, X& x, Y& y, const Dim& dim) { + y.device(place) = x.prod(dim); + } +}; + +struct ProdGradFunctor { + template + void operator()(const DeviceContext& place, X& x, Y& y, DX& dx, DY& dy, + const Dim& dim, int size) { + dx.device(place) = dy.broadcast(dim) * y.broadcast(dim) * x.inverse(); + } +}; + template class ReduceKernel : public framework::OpKernel { public: @@ -254,4 +270,5 @@ class ReduceGradKernel : public framework::OpKernel { __macro(reduce_sum, SumFunctor, SumGradFunctor); \ __macro(reduce_mean, MeanFunctor, MeanGradFunctor); \ __macro(reduce_max, MaxFunctor, MaxOrMinGradFunctor); \ - __macro(reduce_min, MinFunctor, MaxOrMinGradFunctor); + __macro(reduce_min, MinFunctor, MaxOrMinGradFunctor); \ + __macro(reduce_prod, ProdFunctor, ProdGradFunctor); diff --git a/paddle/fluid/operators/scatter_op.cc b/paddle/fluid/operators/scatter_op.cc index 3fb8b56d2676f90ff7e1cefa46c459ee37f63ca8..d6fd6214711f4ee66b1daffa4db2e84aa7201e79 100644 --- a/paddle/fluid/operators/scatter_op.cc +++ b/paddle/fluid/operators/scatter_op.cc @@ -23,24 +23,24 @@ class ScatterOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Ref"), - "Input(Ref) of ScatterOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Index"), - "Input(Index) of ScatterOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of ScatterOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Ids"), + "Input(Ids) of ScatterOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Updates"), "Input(Updates) of ScatterOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of ScatterOp should not be null."); auto updates_dims = ctx->GetInputDim("Updates"); - auto ref_dims = ctx->GetInputDim("Ref"); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Index").size(), 1, - "Update Index should be 1-D."); + auto ref_dims = ctx->GetInputDim("X"); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("Ids").size(), 1, + "Update Ids should be 1-D."); PADDLE_ENFORCE_EQ(ref_dims.size(), updates_dims.size(), - "Reference and Updates should have the same shape size"); + "Xerence and Updates should have the same shape size"); PADDLE_ENFORCE_EQ(ctx->GetInputDim("Updates")[0], - ctx->GetInputDim("Index")[0], - "Updates and Index should have same batch-size."); + ctx->GetInputDim("Ids")[0], + "Updates and Ids should have 
same batch-size."); framework::DDim data_dim(updates_dims); for (int i = 1; i < data_dim.size(); ++i) { PADDLE_ENFORCE_EQ(data_dim[i], updates_dims[i]); @@ -52,7 +52,7 @@ class ScatterOp : public framework::OperatorWithKernel { framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( - framework::ToDataType(ctx.Input("Ref")->type()), + framework::ToDataType(ctx.Input("X")->type()), ctx.device_context()); } }; @@ -64,14 +64,14 @@ class ScatterGradOp : public framework::OperatorWithKernel { void InferShape(framework::InferShapeContext* ctx) const override { ctx->SetOutputDim(framework::GradVarName("Updates"), ctx->GetInputDim("Updates")); - ctx->SetOutputDim(framework::GradVarName("Ref"), ctx->GetInputDim("Ref")); + ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); } protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( - framework::ToDataType(ctx.Input("Ref")->type()), + framework::ToDataType(ctx.Input("X")->type()), ctx.device_context()); } }; @@ -80,9 +80,8 @@ class ScatterOpMaker : public framework::OpProtoAndCheckerMaker { public: ScatterOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("Ref", "The source input of scatter op"); - AddInput("Index", - "The index input of scatter op where Ref will be updated"); + AddInput("X", "The source input of scatter op"); + AddInput("Ids", "The index input of scatter op where X will be updated"); AddInput("Updates", "The updated value of updates op"); AddOutput("Out", "The output of add op"); AddComment(R"DOC( @@ -91,8 +90,8 @@ Scatter Operator. This operator obtains output by updating the input on selected indices on the first axis: $$ -Out = Ref \\ -Out[Index] = Ref[Index] + Updates +Out = X \\ +Out[Ids] = X[Ids] + Updates $$ )DOC"); diff --git a/paddle/fluid/operators/scatter_op.cu b/paddle/fluid/operators/scatter_op.cu index bdabb29fa680f8f87873b4381acf0dbd2b6195d0..ef7d700659d8d713715a10910baf739954ba0786 100644 --- a/paddle/fluid/operators/scatter_op.cu +++ b/paddle/fluid/operators/scatter_op.cu @@ -25,14 +25,14 @@ class ScatterOpCUDAKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), "This kernel only runs on GPU device."); - auto *Ref = ctx.Input("Ref"); - auto *Index = ctx.Input("Index"); + auto *X = ctx.Input("X"); + auto *Ids = ctx.Input("Ids"); auto *Updates = ctx.Input("Updates"); auto *Out = ctx.Output("Out"); - Out->ShareDataWith(*Ref); + Out->ShareDataWith(*X); - GPUScatterAssign(ctx.device_context(), *Updates, *Index, Out); + GPUScatterAssign(ctx.device_context(), *Updates, *Ids, Out); } }; @@ -42,16 +42,16 @@ class ScatterGradOpCUDAKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), "This kernel only runs on GPU device."); - auto *dRef = ctx.Output(framework::GradVarName("Ref")); + auto *dX = ctx.Output(framework::GradVarName("X")); auto *dUpdates = ctx.Output(framework::GradVarName("Updates")); - auto *Index = ctx.Input("Index"); + auto *Ids = ctx.Input("Ids"); auto *dOut = ctx.Input(framework::GradVarName("Out")); - // In place gradient: dRef = dO - dRef->ShareDataWith(*dOut); + // In place gradient: dX = dO + dX->ShareDataWith(*dOut); 
dUpdates->mutable_data(ctx.GetPlace()); - // Gradient by Gather: dUpdates = dO[Index] - GPUGather(ctx.device_context(), *dOut, *Index, dUpdates); + // Gradient by Gather: dUpdates = dO[Ids] + GPUGather(ctx.device_context(), *dOut, *Ids, dUpdates); } }; diff --git a/paddle/fluid/operators/scatter_op.h b/paddle/fluid/operators/scatter_op.h index 3c6e7ece320229e1a311ef6d7a27387d40be3c2a..2151d8a9240fc88966533f4a07d5cf56b6c1c3bc 100644 --- a/paddle/fluid/operators/scatter_op.h +++ b/paddle/fluid/operators/scatter_op.h @@ -29,15 +29,15 @@ class ScatterOpKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()), "This kernel only runs on CPU."); - auto *Ref = ctx.Input("Ref"); - auto *Index = ctx.Input("Index"); + auto *X = ctx.Input("X"); + auto *Ids = ctx.Input("Ids"); auto *Updates = ctx.Input("Updates"); auto *Out = ctx.Output("Out"); - // In place output: Out = Ref, Out[Index] += Updates - Out->ShareDataWith(*Ref); + // In place output: Out = X, Out[Ids] += Updates + Out->ShareDataWith(*X); // Apply ScatterUpdate: Out[index] += Updates[:] - ScatterAssign(ctx.device_context(), *Updates, *Index, Out); + ScatterAssign(ctx.device_context(), *Updates, *Ids, Out); } }; @@ -47,16 +47,16 @@ class ScatterGradientOpKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()), "This kernel only runs on CPU."); - auto *dRef = ctx.Output(framework::GradVarName("Ref")); + auto *dX = ctx.Output(framework::GradVarName("X")); auto *dUpdates = ctx.Output(framework::GradVarName("Updates")); - auto *Index = ctx.Input("Index"); + auto *Ids = ctx.Input("Ids"); auto *dOut = ctx.Input(framework::GradVarName("Out")); - // In place gradient: dRef = dO - dRef->ShareDataWith(*dOut); + // In place gradient: dX = dO + dX->ShareDataWith(*dOut); dUpdates->mutable_data(ctx.GetPlace()); - // Gradient by Gather: dUpdates += dO[Index] - CPUGather(ctx.device_context(), *dOut, *Index, dUpdates); + // Gradient by Gather: dUpdates += dO[Ids] + CPUGather(ctx.device_context(), *dOut, *Ids, dUpdates); } }; diff --git a/paddle/fluid/operators/select_op.cc b/paddle/fluid/operators/select_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..c0bf0ff927481bc4da9cd6c4bb9b0c4a6841c891 --- /dev/null +++ b/paddle/fluid/operators/select_op.cc @@ -0,0 +1,419 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include +#include +#include +#include +#include "paddle/fluid/framework/channel.h" +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/concurrency/channel_util.h" + +namespace paddle { +namespace operators { + +static constexpr char kX[] = "X"; +static constexpr char kCaseToExecute[] = "case_to_execute"; +static constexpr char kOutputs[] = "Out"; + +static constexpr char kCases[] = "cases"; +static constexpr char kCasesBlock[] = "sub_block"; + +class SelectOp : public framework::OperatorBase { + public: + SelectOp(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : framework::OperatorBase(type, inputs, outputs, attrs) {} + + private: + enum class SelectOpCaseType { + DEFAULT = 0, + SEND = 1, + RECEIVE = 2, + }; + + struct SelectOpCase { + int caseIndex; + SelectOpCaseType caseType; + std::string channelName; + std::string varName; + + SelectOpCase() {} + + SelectOpCase(int caseIndex, SelectOpCaseType caseType, + std::string channelName, std::string varName) + : caseIndex(caseIndex), + caseType(caseType), + channelName(channelName), + varName(varName) {} + }; + + void RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const override { + std::vector casesConfigs = + Attr>(kCases); + + framework::BlockDesc *casesBlock = + Attr(kCasesBlock); + + framework::Scope &casesBlockScope = scope.NewScope(); + + std::string caseToExecuteVarName = Input(kCaseToExecute); + framework::Variable *caseToExecuteVar = + casesBlockScope.FindVar(caseToExecuteVarName); + + // Construct cases from "conditional_block_op"(s) in the casesBlock + std::vector> cases = + ParseAndShuffleCases(&casesConfigs); + + // Get all unique channels involved in select + std::set channelsSet; + for (auto c : cases) { + if (!c->channelName.empty()) { + auto channelVar = scope.FindVar(c->channelName); + framework::ChannelHolder *ch = + channelVar->GetMutable(); + + if (channelsSet.find(ch) == channelsSet.end()) { + channelsSet.insert(ch); + } + } + } + + // Order all channels by their pointer address + std::vector channels(channelsSet.begin(), + channelsSet.end()); + std::sort(channels.begin(), channels.end()); + + // Poll all cases + int32_t caseToExecute = pollCases(&scope, &cases, channels); + + // At this point, the case to execute has already been determined, + // so we can proceed with executing the cases block + framework::LoDTensor *caseToExecuteTensor = + caseToExecuteVar->GetMutable(); + caseToExecuteTensor->data()[0] = caseToExecute; + + // Execute the cases block, only one case will be executed since we set the + // case_to_execute value to the index of the case we want to execute + framework::Executor executor(dev_place); + framework::ProgramDesc *program = casesBlock->Program(); + executor.Run(*program, &casesBlockScope, casesBlock->ID(), + false /*create_local_scope*/); + } + + /** + * Goes through all operators in the casesConfigs and processes + * "conditional_block" operators. These operators are mapped to our + * SelectOpCase objects. 
We randomize the case orders, and set the + * default case (if any exists) as the last case) + * @param casesBlock + * @return + */ + std::vector> ParseAndShuffleCases( + std::vector *casesConfigs) const { + std::vector> cases; + std::shared_ptr defaultCase; + + if (casesConfigs != nullptr) { + boost::char_delimiters_separator sep(false, ",", ""); + for (std::vector::iterator itr = casesConfigs->begin(); + itr < casesConfigs->end(); ++itr) { + std::string caseConfig = *itr; + boost::tokenizer<> tokens(caseConfig, sep); + + boost::tokenizer<>::iterator tok_iter = tokens.begin(); + PADDLE_ENFORCE(tok_iter != tokens.end(), "Cannot get case index"); + std::string caseIndexString = *tok_iter; + int caseIndex = std::stoi(caseIndexString); + + ++tok_iter; + PADDLE_ENFORCE(tok_iter != tokens.end(), "Cannot get case type"); + std::string caseTypeString = *tok_iter; + SelectOpCaseType caseType = (SelectOpCaseType)std::stoi(caseTypeString); + + std::string caseChannel; + std::string caseChannelVar; + + ++tok_iter; + if (caseType != SelectOpCaseType::DEFAULT) { + PADDLE_ENFORCE(tok_iter != tokens.end(), "Cannot get case channel"); + caseChannel = *tok_iter; + + ++tok_iter; + PADDLE_ENFORCE(tok_iter != tokens.end(), + "Cannot get case channel variable"); + caseChannelVar = *tok_iter; + } + + auto c = std::make_shared(caseIndex, caseType, + caseChannel, caseChannelVar); + + if (caseType == SelectOpCaseType::DEFAULT) { + PADDLE_ENFORCE(defaultCase == nullptr, + "Select can only contain one default case."); + defaultCase = c; + } else { + cases.push_back(c); + } + } + } + + // Randomly sort cases, with default case being last + std::random_shuffle(cases.begin(), cases.end()); + if (defaultCase != nullptr) { + cases.push_back(defaultCase); + } + + return cases; + } + + /** + * This method will recursively poll the cases and determines if any case + * condition is true. + * If none of the cases conditions are true (and there is no default case), + * then block + * the thread. The thread may be woken up by a channel operation, at which + * point we + * execute the case. 
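+   * As an illustration (hypothetical values): given the cases
+   * {SEND on ch1, RECEIVE on ch2, DEFAULT}, where ch1 is full and ch2 is
+   * empty, neither channel case can proceed, so the DEFAULT index is
+   * returned without blocking. Without a DEFAULT case, the thread would
+   * instead enqueue itself on ch1's send queue and ch2's receive queue
+   * and sleep until woken.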
+ * @param scope + * @param cases + * @param channels + * @return + */ + int32_t pollCases(const framework::Scope *scope, + std::vector> *cases, + std::vector channels) const { + // Lock all involved channels + lockChannels(channels); + + std::atomic caseToExecute(-1); + + std::vector>::iterator it = cases->begin(); + while (it != cases->end()) { + std::shared_ptr c = *it; + + auto chVar = scope->FindVar(c->channelName); + framework::ChannelHolder *ch = + chVar->GetMutable(); + + switch (c->caseType) { + case SelectOpCaseType::SEND: + PADDLE_ENFORCE(!ch->IsClosed(), "Cannot send to a closed channel"); + if (ch->CanSend()) { + // We can send to channel directly, send the data to channel + // and execute case + auto chVar = scope->FindVar(c->varName); + concurrency::ChannelSend(ch, chVar); + caseToExecute = c->caseIndex; + } + break; + case SelectOpCaseType::RECEIVE: + if (ch->CanReceive()) { + // We can receive from channel directly, send the data to channel + // and execute case + auto chVar = scope->FindVar(c->varName); + concurrency::ChannelReceive(ch, chVar); + caseToExecute = c->caseIndex; + } + break; + case SelectOpCaseType::DEFAULT: + caseToExecute = c->caseIndex; + break; + } + + if (caseToExecute != -1) { + // We found a case to execute, stop looking at other case statements + break; + } + + ++it; + } + + if (caseToExecute == -1) { + // None of the cases are eligible to execute, enqueue current thread + // into all the sending/receiving queue of each involved channel + std::atomic completed(false); + std::recursive_mutex mutex; + std::unique_lock lock{mutex}; + // std::condition_variable_any selectCond; + auto selectCond = std::make_shared(); + + std::recursive_mutex callbackMutex; + pushThreadOnChannelQueues(scope, cases, selectCond, caseToExecute, + completed, callbackMutex); + + // TODO(thuan): Atomically unlock all channels and sleep current thread + unlockChannels(channels); + selectCond->wait(lock, [&completed]() { return completed.load(); }); + + // Select has been woken up by case operation + lockChannels(channels); + removeThreadOnChannelQueues(scope, cases); + + if (caseToExecute == -1) { + // Recursively poll cases, since we were woken up by a channel close + // TODO(thuan): Need to test if this is a valid case + unlockChannels(channels); + return pollCases(scope, cases, channels); + } + } + + // At this point, caseToExecute != -1, and we can proceed with executing + // the case block + unlockChannels(channels); + + return caseToExecute; + } + + void lockChannels(std::vector chs) const { + std::vector::iterator it = chs.begin(); + while (it != chs.end()) { + framework::ChannelHolder *ch = *it; + ch->Lock(); + ++it; + } + } + + void unlockChannels(std::vector chs) const { + std::vector::reverse_iterator it = chs.rbegin(); + while (it != chs.rend()) { + framework::ChannelHolder *ch = *it; + ch->Unlock(); + ++it; + } + } + + void pushThreadOnChannelQueues( + const framework::Scope *scope, + std::vector> *cases, + std::shared_ptr rCond, + std::atomic &caseToExecute, std::atomic &completed, + std::recursive_mutex &callbackMutex) const { + std::vector>::iterator it = cases->begin(); + while (it != cases->end()) { + std::shared_ptr c = *it; + + auto chVar = scope->FindVar(c->channelName); + framework::ChannelHolder *ch = + chVar->GetMutable(); + + std::function cb = + [&caseToExecute, &completed, &callbackMutex, + c](framework::ChannelAction channelAction) { + std::lock_guard lock{callbackMutex}; + + bool canProcess = false; + if (!completed) { + // If the channel wasn't 
closed, we set the caseToExecute index
+              // as this current case
+              if (channelAction != framework::ChannelAction::CLOSE) {
+                caseToExecute = c->caseIndex;
+              }
+              // This will allow our conditional variable to break out of wait
+              completed = true;
+              canProcess = true;
+            }
+
+            return canProcess;
+          };
+
+      switch (c->caseType) {
+        case SelectOpCaseType::SEND: {
+          auto chOutputVar = scope->FindVar(c->varName);
+          concurrency::ChannelAddToSendQ(ch, this, chOutputVar, rCond, cb);
+          break;
+        }
+        case SelectOpCaseType::RECEIVE: {
+          auto chOutputVar = scope->FindVar(c->varName);
+          concurrency::ChannelAddToReceiveQ(ch, this, chOutputVar, rCond, cb);
+          break;
+        }
+        default:
+          break;
+      }
+      ++it;
+    }
+  }
+
+  void removeThreadOnChannelQueues(
+      const framework::Scope *scope,
+      std::vector<std::shared_ptr<SelectOpCase>> *cases) const {
+    std::vector<std::shared_ptr<SelectOpCase>>::iterator it = cases->begin();
+    while (it != cases->end()) {
+      std::shared_ptr<SelectOpCase> c = *it;
+
+      auto chVar = scope->FindVar(c->channelName);
+      framework::ChannelHolder *ch =
+          chVar->GetMutable<framework::ChannelHolder>();
+      switch (c->caseType) {
+        case SelectOpCaseType::SEND: {
+          ch->RemoveFromSendQ(this);
+          break;
+        }
+        case SelectOpCaseType::RECEIVE: {
+          ch->RemoveFromReceiveQ(this);
+          break;
+        }
+        default:
+          break;
+      }
+      ++it;
+    }
+  }
+};
+
+class SelectOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  SelectOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput(kX,
+             "A set of variables, which are required by operators inside the "
+             "cases of Select Op")
+        .AsDuplicable();
+    AddInput(kCaseToExecute,
+             "(Int) The variable that sets the index of the case to execute, "
+             "after evaluating the channels being sent to and received from")
+        .AsDuplicable();
+    AddOutput(kOutputs,
+              "A set of variables, which will be assigned with values "
+              "generated by the operators inside the cases of Select Op.")
+        .AsDuplicable();
+    AddAttr<std::vector<std::string>>(
+        kCases,
+        "(String vector) Serialized list of all cases in the select op. "
+        "Each case is serialized as: '<index>,<type>,<channel>,<value>', "
+        "where type is 0 for default, 1 for send, and 2 for receive. "
+        "No channel and values are needed for default cases.");
+    AddAttr<framework::BlockDesc *>(kCasesBlock,
+                                    "The cases block inside select_op");
+    AddComment(R"DOC(
+)DOC");
+  }
+};
+
+// TODO(thuan): Implement Gradient Operator for SELECT_OP
+
+}  // namespace operators
+}  // namespace paddle
+
+REGISTER_OPERATOR(select, paddle::operators::SelectOp,
+                  paddle::framework::EmptyGradOpMaker,
+                  paddle::operators::SelectOpMaker);
diff --git a/paddle/fluid/operators/send_op.cc b/paddle/fluid/operators/send_op.cc
index 8fdd08eae6b22cd57506d6e75182c1a7e2022562..a77c38f633c776359648a7a3eb6fab0ada6de997 100644
--- a/paddle/fluid/operators/send_op.cc
+++ b/paddle/fluid/operators/send_op.cc
@@ -68,7 +68,7 @@ class SendOp : public framework::OperatorBase {
     for (size_t i = 0; i < ins.size(); i++) {
       if (NeedSend(scope, ins[i])) {
-        VLOG(3) << "sending " << ins[i] << " to " << epmap[i];
+        VLOG(2) << "sending " << ins[i] << " to " << epmap[i];
         rpc_client->AsyncSendVariable(epmap[i], ctx, scope, ins[i]);
       } else {
         VLOG(3) << "don't send uninitialized variable: " << ins[i];
@@ -77,17 +77,23 @@ class SendOp : public framework::OperatorBase {
     PADDLE_ENFORCE(rpc_client->Wait());
     for (auto& ep : endpoints) {
-      VLOG(3) << "batch barrier, ep: " << ep;
+      VLOG(2) << "batch barrier, ep: " << ep;
       rpc_client->AsyncSendBatchBarrier(ep);
     }
     PADDLE_ENFORCE(rpc_client->Wait());
     if (outs.size() > 0) {
       for (size_t i = 0; i < outs.size(); i++) {
-        VLOG(3) << "getting " << outs[i] << " from " << epmap[i];
+        VLOG(2) << "getting " << outs[i] << " from " << epmap[i];
         rpc_client->AsyncGetVariable(epmap[i], ctx, scope, outs[i]);
       }
       PADDLE_ENFORCE(rpc_client->Wait());
+      // tell pservers that the current trainer has called fetch
+      for (auto& ep : endpoints) {
+        VLOG(2) << "send fetch barrier, ep: " << ep;
+        rpc_client->AsyncSendFetchBarrier(ep);
+      }
+      PADDLE_ENFORCE(rpc_client->Wait());
     }
   }
 };
diff --git a/paddle/fluid/operators/sequence_expand_op.cc b/paddle/fluid/operators/sequence_expand_op.cc
index a5d84d629b2e50763dac9bc571ac490414a8a406..786fe63e7580ce16b946d5049a490eed2c3c6ced 100644
--- a/paddle/fluid/operators/sequence_expand_op.cc
+++ b/paddle/fluid/operators/sequence_expand_op.cc
@@ -17,7 +17,7 @@ limitations under the License.
*/ namespace paddle { namespace operators { -using framework::Tensor; +using framework::LoDTensor; class SequenceExpandOp : public framework::OperatorWithKernel { public: @@ -25,15 +25,71 @@ class SequenceExpandOp : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X")); - PADDLE_ENFORCE(ctx->HasOutput("Out")); - PADDLE_ENFORCE(ctx->HasInput("Y")); - framework::DDim out_dim; - auto y_dim = ctx->GetInputDim("Y"); - out_dim = ctx->GetInputDim("X"); - out_dim[0] = y_dim[0]; - ctx->ShareLoD("Y", "Out"); - ctx->SetOutputDim("Out", out_dim); + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of SequenceExpandOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Y"), + "Input(Y) of SequenceExpandOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of SequenceExpandOp should not be null."); + + auto x_dims = ctx->GetInputDim("X"); + auto out_dims = x_dims; + int ref_level = ctx->Attrs().Get("ref_level"); + + PADDLE_ENFORCE_GE(x_dims.size(), 2, + "Dimension number of Input(X) should be at least 2."); + + if (ctx->IsRuntime()) { + framework::Variable* x_var = + boost::get(ctx->GetInputVarPtrs("X")[0]); + framework::Variable* y_var = + boost::get(ctx->GetInputVarPtrs("Y")[0]); + + auto& x_lod = x_var->Get().lod(); + auto& y_lod = y_var->Get().lod(); + + PADDLE_ENFORCE_LE(x_lod.size(), 1, + "Level number of Input(X)'s lod should not be " + "greater than 1."); + PADDLE_ENFORCE_GT(y_lod.size(), 0, + "Level number of Input(Y)'s lod should be " + "greater than 0."); + PADDLE_ENFORCE( + ref_level == -1 || + (ref_level >= 0 && ref_level < static_cast(y_lod.size())), + "Invlid `ref_level`, which should be either equal to -1 " + "or in [0, %d)", + y_lod.size()); + + if (ref_level == -1) ref_level = y_lod.size() - 1; + + if (x_lod.size() > 0) { + PADDLE_ENFORCE(x_lod[0].size() == y_lod[ref_level].size(), + "Level number of Input(X)'s lod could be 0. Otherwise " + "size of Input(X)'s first level lod should be equal to " + "size of Input(Y)'s referred level lod."); + } + + int64_t out_first_dim = 0; + if (y_lod[ref_level].size() <= 1) { + out_first_dim = x_dims[0]; + } else { + for (size_t i = 1; i < y_lod[ref_level].size(); ++i) { + int x_seq_len = 1; + if (x_lod.size() == 1) { + x_seq_len = x_lod[0][i] - x_lod[0][i - 1]; + } + out_first_dim += + (y_lod[ref_level][i] - y_lod[ref_level][i - 1]) * x_seq_len; + } + } + out_dims[0] = out_first_dim; + ctx->SetOutputDim("Out", out_dims); + } else { + out_dims[0] = -1; + ctx->SetOutputDim("Out", out_dims); + ctx->ShareLoD("X", /*->*/ "Out"); + } } }; @@ -42,83 +98,81 @@ class SequenceExpandOpMaker : public framework::OpProtoAndCheckerMaker { SequenceExpandOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", - "(Tensor or LoDTensor) The input(X) of this operator can be a " - "LoDTensor or a base Tensor."); + "(LoDTensor, default LoDTensor) A 2-D LoDTensor whose lod " + "level is at most 1."); AddInput("Y", - "(LoDTensor)The reference input(Y) of sequence_expand op." - "It must be a LoDTensor with k-level(k>0)." - "The input(X) will be expanded according to LOD of input(Y)." - "The element numbers of last level in input(Y) " - "must be equal to dims[0] of input(X)."); + "(LoDTensor, default LoDTensor) Referred LoDTensor whose " + "lod (specified level) is referred by Input(X)."); AddOutput("Out", - "(LodTensor)The output of sequence_expand op." 
- "The lod of output will be as same as input(Y)'s lod."); + "(LodTensor, default LoDTensor) Output LoDTensor which is " + "generated from Input(X) by referring lod of Input(Y)."); + AddAttr("ref_level", "Specify lod level of Input(Y).").SetDefault(-1); AddComment(R"DOC( Sequence Expand Operator. -This operator expands input(X) according to LOD of input(Y). +This operator expands `X` according to specified level lod of `Y`. Current +implementation constaints that lod level of `X` should be at most 1. Attribute +`ref_level` is used to specify which level lod of `Y` is referred to expand `X`. +If set `ref_level` to -1, then last level lod of `Y` would be referred. +Please note, rank of `X` should be at least 2, when the rank exceeds 2, `X` +would be viewed as a 2-D tensor. + Following are cases to better explain how this works: + Case 1: -Given a 2-level LoDTensor input(X) - X.lod = [[0, 2, 3], - [0, 1, 3, 4]] - X.data = [a, b, c, d] +Given a 1-level LoDTensor input(X) + X.lod = [[0, 2, 4]] + X.data = [[a], [b], [c], [d]] X.dims = [4, 1] and input(Y) Y.lod = [[0, 2, 4], [0, 3, 6, 7, 8]] -with condition len(Y.lod[-1]) -1 == X.dims[0] -then we get 2-level LoDTensor - Out.lod = [[0, 2, 4], - [0, 3, 6, 7, 8]] - Out.data = [a, a, a, b, b, b, c, d] +ref_level: 0 +then we get 1-level LoDTensor + Out.lod = [[0, 2, 4, 6, 8]] + Out.data = [[a], [b], [a], [b], [c], [d], [c], [d]] Out.dims = [8, 1] Case 2: +Given 1-level LoDTensor input(X) + X.lod = [[0, 1, 4]] + X.data = [[a], [b], [c], [d]] + X.dims = [4, 1] +and input(Y) + Y.lod = [[0, 2, 4], + [0, 3, 6, 6, 8]] +ref_level: 0 +then we get 1-level LoDTensor + Out.lod = [[0, 1, 2, 5, 8]] + Out.data = [[a], [a], [b], [c], [d], [b], [c], [d]] + Out.dims = [8, 1] + +Case 3: + Given a common Tensor input(X) - X.data = [a, b, c] + X.data = [[a], [b], [c]] X.dims = [3, 1] and input(Y) Y.lod = [[0, 2, 3, 6]] -with condition len(Y.lod[-1]) -1 == X.dims[0] -then we get 1-level LoDTensor - Out.lod = [[0, 2, 3, 6]] - Out.data = [a, a, b, c, c, c] +ref_level: -1 +then we get a common Tensor + Out.data = [[a], [a], [b], [c], [c], [c]] Out.dims = [6, 1] -Case 3: +Case 4: Given a common Tensor input(X) X.data = [[a, b], [c, d], [e, f]] X.dims = [3, 2] and input(Y) Y.lod = [[0, 2, 3, 6]] -with condition len(Y.lod[-1]) -1 == X.dims[0] -then we get 1-level LoDTensor - Out.lod = [[0, 2, 3, 6]] - Out.data = [[a,b], [a,b] [c,d], [e, f], [e, f], [e, f]] +ref_level: 0 +then we get a common LoDTensor + Out.data = [[a, b], [a, b] [c, d], [e, f], [e, f], [e, f]] Out.dims = [6, 2] -Case 4: - -Given 2-level a LoDTensor input(X) - X.lod = [[0, 2, 3], - [0, 1, 3, 4]] - X.data = [a, b, c, d] - X.dims = [4, 1] -and input(Y) - Y.lod = [[0, 2, 4], - [0, 3, 6, 6, 8]] -with condition len(Y.lod[-1]) -1 == X.dims[0] -then we get 2-level LoDTensor - Out.lod = [[0, 2, 4], - [0, 3, 6, 6, 8]] - Out.data = [a, a, a, b, b, b, d, d] - Out.dims = [8, 1] - - )DOC"); } }; @@ -129,12 +183,14 @@ class SequenceExpandOpGrad : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X")); - PADDLE_ENFORCE(ctx->HasInput("Out")); + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Out"), "Input(Out) should not be null."); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), - "The input(Out@GRAD) should not be null"); + "Input(Out@GRAD) should not be null."); + auto x_dims = ctx->GetInputDim("X"); auto x_grad_name = framework::GradVarName("X"); + if 
(ctx->HasOutput(x_grad_name)) { ctx->SetOutputDim(x_grad_name, x_dims); } @@ -149,7 +205,13 @@ REGISTER_OP(sequence_expand, ops::SequenceExpandOp, ops::SequenceExpandOpMaker, sequence_expand_grad, ops::SequenceExpandOpGrad); REGISTER_OP_CPU_KERNEL( sequence_expand, - ops::SequenceExpandKernel); + ops::SequenceExpandKernel, + ops::SequenceExpandKernel, + ops::SequenceExpandKernel, + ops::SequenceExpandKernel); REGISTER_OP_CPU_KERNEL( sequence_expand_grad, - ops::SequenceExpandGradKernel); + ops::SequenceExpandGradKernel, + ops::SequenceExpandGradKernel, + ops::SequenceExpandGradKernel, + ops::SequenceExpandGradKernel); diff --git a/paddle/fluid/operators/sequence_expand_op.cu b/paddle/fluid/operators/sequence_expand_op.cu index 26622d23afa1c703e237628bcb11db8f1da73210..bb51bb2902eea797de3449fcb6c8b52b4f0e7fbf 100644 --- a/paddle/fluid/operators/sequence_expand_op.cu +++ b/paddle/fluid/operators/sequence_expand_op.cu @@ -18,7 +18,14 @@ limitations under the License. */ namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( sequence_expand, - ops::SequenceExpandKernel); + ops::SequenceExpandKernel, + ops::SequenceExpandKernel, + ops::SequenceExpandKernel, + ops::SequenceExpandKernel); REGISTER_OP_CUDA_KERNEL( sequence_expand_grad, - ops::SequenceExpandGradKernel); + ops::SequenceExpandGradKernel, + ops::SequenceExpandGradKernel, + ops::SequenceExpandGradKernel, + ops::SequenceExpandGradKernel); diff --git a/paddle/fluid/operators/sequence_expand_op.h b/paddle/fluid/operators/sequence_expand_op.h index 76dde976db2d19e307ae7406be8280f9b4987187..db7d8bd6821fabd9714a160970558291ec47197f 100644 --- a/paddle/fluid/operators/sequence_expand_op.h +++ b/paddle/fluid/operators/sequence_expand_op.h @@ -16,45 +16,75 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/memcpy.h" -#include "unsupported/Eigen/CXX11/Tensor" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { using LoDTensor = framework::LoDTensor; +template +using EigenMatrix = framework::EigenMatrix; template class SequenceExpandKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.Input("X"); - auto* out = context.Output("Out"); - const T* x_data = x->data(); - auto x_dims = x->dims(); auto* y = context.Input("Y"); - PADDLE_ENFORCE(!y->lod().empty(), "y should have lod"); - PADDLE_ENFORCE_EQ(static_cast(x_dims[0]), - y->lod().back().size() - 1, - "The size of last lod level in Input(Y)" - "must be equal to dims[0] of Input(X)."); - out->set_lod(y->lod()); - auto* place = - context.template device_context().eigen_device(); - size_t element_len = framework::product(x_dims) / x_dims[0]; - T* out_data = out->mutable_data(context.GetPlace()); - auto out_starts = out->lod().back(); - - for (size_t i = 0; i < out_starts.size() - 1; i++) { - int scale = out_starts[i + 1] - out_starts[i]; - Eigen::TensorMap< - Eigen::Tensor> - x_t(x_data, 1, element_len); - Eigen::TensorMap> - out_t(out_data, scale, element_len); - Eigen::array cast({{scale, 1}}); - out_t.device(*place) = x_t.broadcast(cast); - x_data += element_len; - out_data += element_len * scale; + auto* out = context.Output("Out"); + + int ref_level = context.Attr("ref_level"); + auto& x_lod = x->lod(); + auto& y_lod = y->lod(); + + if (ref_level == -1) ref_level = y_lod.size() - 1; + + out->mutable_data(context.GetPlace()); + + if (y_lod[ref_level].size() <= 1) { + framework::TensorCopy(*x, 
context.GetPlace(), out); + return; + } + + auto& out_lod = *out->mutable_lod(); + if (x_lod.size() == 1) { + out_lod.resize(1); + out_lod[0] = {0}; + } + + int out_offset = 0; + auto& eigen_place = + *context.template device_context().eigen_device(); + for (size_t i = 1; i < y_lod[ref_level].size(); ++i) { + int repeat_num = y_lod[ref_level][i] - y_lod[ref_level][i - 1]; + int x_start = i - 1; + int x_end = i; + if (x_lod.size() == 1) { + x_start = x_lod[0][i - 1]; + x_end = x_lod[0][i]; + } + int x_seq_len = x_end - x_start; + if (repeat_num > 0) { + auto x_sub_tensor = x->Slice(x_start, x_end); + x_sub_tensor.Resize({1, x_sub_tensor.numel()}); + int out_start = out_offset; + if (x_lod.size() == 1) { + out_start = out_lod[0][out_offset]; + } + auto out_sub_tensor = + out->Slice(out_start, out_start + x_seq_len * repeat_num); + out_sub_tensor.Resize({repeat_num, x_sub_tensor.dims()[1]}); + EigenMatrix::From(out_sub_tensor).device(eigen_place) = + EigenMatrix::From(x_sub_tensor) + .broadcast(Eigen::array({{repeat_num, 1}})); + } + for (int j = 0; j < repeat_num; ++j) { + if (x_lod.size() == 1) { + out_lod[0].push_back(out_lod[0].back() + x_seq_len); + } + out_offset++; + } } } }; @@ -75,27 +105,51 @@ template class SequenceExpandGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* d_out = context.Input(framework::GradVarName("Out")); + auto* g_out = context.Input(framework::GradVarName("Out")); auto* x = context.Input("X"); - auto* out = context.Input("Out"); - auto* d_x = context.Output(framework::GradVarName("X")); - auto out_last_level = out->lod().back(); - d_x->set_lod(x->lod()); - const T* d_out_data = d_out->data(); - T* d_x_data = d_x->mutable_data(context.GetPlace()); - size_t element_len = d_out->numel() / d_out->dims()[0]; - for (size_t i = 0; i < out_last_level.size() - 1; ++i) { - size_t repeat = out_last_level[i + 1] - out_last_level[i]; - Eigen::TensorMap< - Eigen::Tensor> - d_out_t(d_out_data, static_cast(repeat), element_len); - Eigen::TensorMap> - d_x_t(d_x_data, static_cast(element_len)); - auto place = - context.template device_context().eigen_device(); - d_x_t.device(*place) = d_out_t.sum(Eigen::array({{0}})); - d_out_data += (repeat * element_len); - d_x_data += element_len; + auto* y = context.Input("Y"); + auto* g_x = context.Output(framework::GradVarName("X")); + int ref_level = context.Attr("ref_level"); + + g_x->mutable_data(context.GetPlace()); + g_x->set_lod(x->lod()); + + auto& x_lod = x->lod(); + auto& y_lod = y->lod(); + + if (ref_level == -1) ref_level = y_lod.size() - 1; + + // just copy the gradient + if (y_lod[ref_level].size() <= 1) { + framework::TensorCopy(*g_out, context.GetPlace(), g_x); + return; + } + + auto& dev_ctx = context.template device_context(); + + math::SetConstant set_zero; + set_zero(dev_ctx, g_x, static_cast(0)); + + int g_out_offset = 0; + for (size_t i = 1; i < y_lod[ref_level].size(); ++i) { + int repeat_num = y_lod[ref_level][i] - y_lod[ref_level][i - 1]; + if (repeat_num > 0) { + int x_start = i - 1; + int x_end = i; + if (x_lod.size() == 1) { + x_start = x_lod[0][i - 1]; + x_end = x_lod[0][i]; + } + int x_seq_len = x_end - x_start; + auto g_x_sub = g_x->Slice(x_start, x_end); + g_x_sub.Resize(flatten_to_1d(g_x_sub.dims())); + int g_out_end = g_out_offset + repeat_num * x_seq_len; + auto g_out_sub = g_out->Slice(g_out_offset, g_out_end); + g_out_sub.Resize({repeat_num, g_x_sub.dims()[0]}); + math::ColwiseSum col_sum; + col_sum(dev_ctx, g_out_sub, 
&g_x_sub); + g_out_offset += repeat_num * x_seq_len; + } } } }; diff --git a/paddle/fluid/operators/sequence_softmax_cudnn_op.cu.cc b/paddle/fluid/operators/sequence_softmax_cudnn_op.cu.cc new file mode 100644 index 0000000000000000000000000000000000000000..5661f4b42f37fed7f589c515e25fd66cfcede2c7 --- /dev/null +++ b/paddle/fluid/operators/sequence_softmax_cudnn_op.cu.cc @@ -0,0 +1,105 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/softmax.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +using LoDTensor = framework::LoDTensor; + +template +class SequenceSoftmaxCUDNNKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* x = ctx.Input("X"); + auto* out = ctx.Output("Out"); + + auto lod = x->lod(); + auto dims = x->dims(); + + const size_t level = lod.size() - 1; + PADDLE_ENFORCE_EQ(dims[0], static_cast(lod[level].back()), + "The first dimension of Input(X) should be equal to the " + "sum of all sequences' lengths."); + PADDLE_ENFORCE_EQ(dims[0], x->numel(), + "The width of each timestep in Input(X) of " + "SequenceSoftmaxOp should be 1."); + + out->mutable_data(ctx.GetPlace()); + for (int i = 0; i < static_cast(lod[level].size()) - 1; ++i) { + int start_pos = static_cast(lod[level][i]); + int end_pos = static_cast(lod[level][i + 1]); + Tensor x_i = x->Slice(start_pos, end_pos); + Tensor out_i = out->Slice(start_pos, end_pos); + + // Reshape from (end_pos - start_pos) x 1UL to 1UL x (end_pos - start_pos) + framework::DDim dims_i = + // framework::make_ddim({1UL, end_pos - start_pos, 1UL, 1UL}); + framework::make_ddim({1UL, end_pos - start_pos}); + x_i.Resize(dims_i); + out_i.Resize(dims_i); + math::SoftmaxCUDNNFunctor()( + ctx.template device_context(), &x_i, + &out_i); + } + } +}; + +template +class SequenceSoftmaxGradCUDNNKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* out = ctx.Input("Out"); + auto* out_grad = ctx.Input(framework::GradVarName("Out")); + auto* x = ctx.Input("X"); + auto* x_grad = ctx.Output(framework::GradVarName("X")); + + auto lod = x->lod(); + const size_t level = lod.size() - 1; + + x_grad->mutable_data(ctx.GetPlace()); + for (int i = 0; i < static_cast(lod[level].size()) - 1; ++i) { + int start_pos = static_cast(lod[level][i]); + int end_pos = static_cast(lod[level][i + 1]); + + Tensor out_i = out->Slice(start_pos, end_pos); + Tensor out_grad_i = out_grad->Slice(start_pos, end_pos); + Tensor x_grad_i = x_grad->Slice(start_pos, end_pos); + + // Reshape from (end_pos - start_pos) x 1UL to 1UL x (end_pos - start_pos) + framework::DDim dims_i = framework::make_ddim({1UL, end_pos - start_pos}); + out_i.Resize(dims_i); + out_grad_i.Resize(dims_i); + x_grad_i.Resize(dims_i); + 
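+      // With out_i, out_grad_i and x_grad_i all viewed as
+      // 1 x (end_pos - start_pos), the cuDNN backward call below treats
+      // each sequence as a single softmax instance.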
math::SoftmaxGradCUDNNFunctor()( + ctx.template device_context(), &out_i, + &out_grad_i, &x_grad_i); + } + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_KERNEL(sequence_softmax, CUDNN, ::paddle::platform::CUDAPlace, + ops::SequenceSoftmaxCUDNNKernel, + ops::SequenceSoftmaxCUDNNKernel) +REGISTER_OP_KERNEL(sequence_softmax_grad, CUDNN, ::paddle::platform::CUDAPlace, + ops::SequenceSoftmaxGradCUDNNKernel, + ops::SequenceSoftmaxGradCUDNNKernel) diff --git a/paddle/fluid/operators/sequence_softmax_op.cc b/paddle/fluid/operators/sequence_softmax_op.cc index 7e685eb3dc7b12ef38f06b37d99a1212cfbc992c..e8b4df04286d327f568f4c43886f9fcf89cc4a88 100644 --- a/paddle/fluid/operators/sequence_softmax_op.cc +++ b/paddle/fluid/operators/sequence_softmax_op.cc @@ -29,6 +29,29 @@ class SequenceSoftmaxOp : public framework::OperatorWithKernel { ctx->SetOutputDim("Out", ctx->GetInputDim("X")); ctx->ShareLoD("X", /*->*/ "Out"); } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + // choose cudnn kernel if the runtime supported. + bool use_cudnn = ctx.Attr("use_cudnn"); + bool runtime_cudnn_support = false; +#ifdef PADDLE_WITH_CUDA + if (platform::is_gpu_place(ctx.GetPlace())) { + auto& dev_ctx = + ctx.template device_context(); + runtime_cudnn_support = dev_ctx.cudnn_handle() != nullptr ? true : false; + } +#endif + framework::LibraryType library_ = framework::LibraryType::kPlain; + if (use_cudnn && runtime_cudnn_support) { + library_ = framework::LibraryType::kCUDNN; + } + std::string data_format = ctx.Attr("data_format"); + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), ctx.GetPlace(), + framework::StringToDataLayout(data_format), library_); + } }; class SequenceSoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { @@ -41,6 +64,17 @@ class SequenceSoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { AddOutput("Out", "(LoDTensor) 1-D or 2-D output LoDTensor with the 2-nd dimension " "of length 1."); + AddAttr( + "use_cudnn", + "(bool, default false) Only used in cudnn kernel, need install cudnn") + .SetDefault(false); + AddAttr( + "data_format", + "(string, default NCHW) Only used in " + "An optional string from: \"NHWC\", \"NCHW\". " + "Defaults to \"NHWC\". Specify the data format of the output data, " + "the input will be transformed automatically. ") + .SetDefault("AnyLayout"); AddComment(R"DOC( Sequence Softmax Operator. @@ -91,6 +125,29 @@ class SequenceSoftmaxGradOp : public framework::OperatorWithKernel { ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + // choose cudnn kernel if the runtime supported. + bool use_cudnn = ctx.Attr("use_cudnn"); + bool runtime_cudnn_support = false; +#ifdef PADDLE_WITH_CUDA + if (platform::is_gpu_place(ctx.GetPlace())) { + auto& dev_ctx = + ctx.template device_context(); + runtime_cudnn_support = dev_ctx.cudnn_handle() != nullptr ? 
true : false; + } +#endif + framework::LibraryType library_ = framework::LibraryType::kPlain; + if (use_cudnn && runtime_cudnn_support) { + library_ = framework::LibraryType::kCUDNN; + } + std::string data_format = ctx.Attr("data_format"); + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), ctx.GetPlace(), + framework::StringToDataLayout(data_format), library_); + } }; } // namespace operators @@ -102,7 +159,9 @@ REGISTER_OP(sequence_softmax, ops::SequenceSoftmaxOp, ops::SequenceSoftmaxGradOp); REGISTER_OP_CPU_KERNEL( sequence_softmax, - ops::SequenceSoftmaxKernel); + ops::SequenceSoftmaxKernel, + ops::SequenceSoftmaxKernel); REGISTER_OP_CPU_KERNEL( sequence_softmax_grad, - ops::SequenceSoftmaxGradKernel); + ops::SequenceSoftmaxGradKernel, + ops::SequenceSoftmaxGradKernel); diff --git a/paddle/fluid/operators/sequence_softmax_op.cu.cc b/paddle/fluid/operators/sequence_softmax_op.cu.cc index 295c68c5b936d6522666a4cc4e621db6f5f5f3ed..57adea3a1b9dbcbb5787d005e4d3ec595f61d4b2 100644 --- a/paddle/fluid/operators/sequence_softmax_op.cu.cc +++ b/paddle/fluid/operators/sequence_softmax_op.cu.cc @@ -17,7 +17,10 @@ limitations under the License. */ namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( sequence_softmax, - ops::SequenceSoftmaxKernel) + ops::SequenceSoftmaxKernel, + ops::SequenceSoftmaxKernel) REGISTER_OP_CUDA_KERNEL( sequence_softmax_grad, - ops::SequenceSoftmaxGradKernel); + ops::SequenceSoftmaxGradKernel, + ops::SequenceSoftmaxGradKernel); diff --git a/paddle/fluid/operators/softmax_cudnn_op.cu.cc b/paddle/fluid/operators/softmax_cudnn_op.cu.cc new file mode 100644 index 0000000000000000000000000000000000000000..5596fa0648ccc151bc0d11de9c556599428a8d71 --- /dev/null +++ b/paddle/fluid/operators/softmax_cudnn_op.cu.cc @@ -0,0 +1,64 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/math/softmax.h" +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +template +class SoftmaxCUDNNKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* X = context.Input("X"); + auto* Out = context.Output("Out"); + + // allocate memory on device. + Out->mutable_data(context.GetPlace()); + + math::SoftmaxCUDNNFunctor()( + context.template device_context(), X, Out); + } +}; + +template +class SoftmaxGradCUDNNKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* Out = context.Input("Out"); + auto* dOut = context.Input(framework::GradVarName("Out")); + auto* dX = context.Output(framework::GradVarName("X")); + + // allocate memory on device. 
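+    // (Standard softmax backward, dX = Out * (dOut - sum(dOut * Out)),
+    // evaluated by the cuDNN functor below.)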
+ dX->mutable_data(context.GetPlace()); + + math::SoftmaxGradCUDNNFunctor()( + context.template device_context(), Out, + dOut, dX); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +namespace plat = paddle::platform; +REGISTER_OP_KERNEL(softmax, CUDNN, plat::CUDAPlace, + ops::SoftmaxCUDNNKernel, + ops::SoftmaxCUDNNKernel); +REGISTER_OP_KERNEL(softmax_grad, CUDNN, plat::CUDAPlace, + ops::SoftmaxGradCUDNNKernel); diff --git a/paddle/fluid/operators/softmax_mkldnn_op.cc b/paddle/fluid/operators/softmax_mkldnn_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..cf0244e8662e827a90d8472a097315680579ff6d --- /dev/null +++ b/paddle/fluid/operators/softmax_mkldnn_op.cc @@ -0,0 +1,84 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "mkldnn.hpp" +#include "paddle/fluid/operators/softmax_op.h" +#include "paddle/fluid/platform/mkldnn_helper.h" + +#include + +namespace paddle { +namespace operators { + +using paddle::framework::Tensor; +using paddle::platform::MKLDNNDeviceContext; +using paddle::platform::MKLDNNMemDesc; + +using mkldnn::memory; // Note: paddle has also "memory" namespace +using mkldnn::primitive; +using mkldnn::softmax_forward; +using mkldnn::prop_kind; +using mkldnn::stream; + +template +class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel { + public: + void Compute(const paddle::framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()), + "It must use CPUPlace."); + auto& dev_ctx = ctx.template device_context(); + auto mkldnn_engine = dev_ctx.GetEngine(); + const Tensor* input = ctx.Input("X"); + Tensor* output = ctx.Output("Out"); + PADDLE_ENFORCE(input->dims().size() == 2UL, + "The input of softmax op must be a 2D matrix."); + const T* input_data = input->data(); + // allocate memory for output + T* output_data = output->mutable_data(ctx.GetPlace()); + std::vector src_tz = paddle::framework::vectorize2int(input->dims()); + std::vector dst_tz = paddle::framework::vectorize2int(output->dims()); + // MKL-DNN does support softmax over selected axis. Having 2D Tensor, + // we will make normalization after final eg. axis: 1 + PADDLE_ENFORCE(((src_tz[0] == dst_tz[0]) && (src_tz[1] == dst_tz[1])), + "Softmax input and output dimensions should match"); + // Same memory descriptor to be used for input and output + memory::dims softmax_tz = {src_tz[0], src_tz[1]}; + // Currently only supports NC data format + // TODO(jczaja-intel): support more formats + auto softmax_md = + MKLDNNMemDesc({softmax_tz}, memory::f32, memory::format::nc); + // Normalization is made after innermost dimension eg. 
C out of NC + auto softmax_desc = softmax_forward::desc(prop_kind::forward_scoring, + softmax_md, 1 /*dim: C*/); + // create memory primitives + auto softmax_src_memory = + memory({softmax_md, mkldnn_engine}, (void*)input_data); + auto softmax_dst_memory = + memory({softmax_md, mkldnn_engine}, (void*)output_data); + auto softmax_prim_desc = + softmax_forward::primitive_desc(softmax_desc, mkldnn_engine); + auto softmax = softmax_forward(softmax_prim_desc, softmax_src_memory, + softmax_dst_memory); + std::vector pipeline{softmax}; + stream(stream::kind::eager).submit(pipeline).wait(); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OP_KERNEL(softmax, MKLDNN, ::paddle::platform::CPUPlace, + ops::SoftmaxMKLDNNKernel); diff --git a/paddle/fluid/operators/softmax_op.cc b/paddle/fluid/operators/softmax_op.cc index 09275ef290e8c78dc0902033e904cc4e7ccd7adb..e2c0f915d96b7746191572fa27b725d90cb6e2e5 100644 --- a/paddle/fluid/operators/softmax_op.cc +++ b/paddle/fluid/operators/softmax_op.cc @@ -13,7 +13,13 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/softmax_op.h" +#ifdef PADDLE_WITH_CUDA +#include "paddle/fluid/platform/cudnn_helper.h" +#endif +#ifdef PADDLE_WITH_MKLDNN +#include "paddle/fluid/platform/mkldnn_helper.h" +#endif namespace paddle { namespace operators { @@ -33,8 +39,37 @@ class SoftmaxOp : public framework::OperatorWithKernel { ctx->SetOutputDim("Out", x_dims); ctx->ShareLoD("X", /*->*/ "Out"); } -}; + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + // choose cudnn kernel if the runtime supported. + framework::LibraryType library_{framework::LibraryType::kPlain}; +#ifdef PADDLE_WITH_CUDA + if (platform::CanCUDNNBeUsed(ctx)) { + library_ = framework::LibraryType::kCUDNN; + } +#endif +#ifdef PADDLE_WITH_MKLDNN + if (library_ == framework::LibraryType::kPlain && + platform::CanMKLDNNBeUsed(ctx)) { + library_ = framework::LibraryType::kMKLDNN; + } +#endif + + auto input_data_type = + framework::ToDataType(ctx.Input("X")->type()); + if (input_data_type == framework::proto::VarType::FP16) { + PADDLE_ENFORCE_EQ(library_, framework::LibraryType::kCUDNN, + "float16 can only be used when CUDNN is used"); + } + + std::string data_format = ctx.Attr("data_format"); + return framework::OpKernelType(input_data_type, ctx.GetPlace(), + framework::StringToDataLayout(data_format), + library_); + } +}; class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { public: SoftmaxOpMaker(OpProto* proto, OpAttrChecker* op_checker) @@ -43,6 +78,20 @@ class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { "The input tensor of softmax. " "2-D with shape [batch_size, input_feature_dimensions]."); AddOutput("Out", "The normalized values with the same shape as X."); + AddAttr( + "use_cudnn", + "(bool, default false) Only used in cudnn kernel, need install cudnn") + .SetDefault(false); + AddAttr( + "data_format", + "(string, default NCHW) Only used in " + "An optional string from: \"NHWC\", \"NCHW\". " + "Defaults to \"NHWC\". Specify the data format of the output data, " + "the input will be transformed automatically. ") + .SetDefault("AnyLayout"); + AddAttr("use_mkldnn", + "(bool, default false) Only used in mkldnn kernel") + .SetDefault(false); AddComment(R"DOC( Softmax Operator. 
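The kernel-selection logic added to SoftmaxOp above (and to SoftmaxOpGrad below) follows a fixed priority: cuDNN when the runtime supports it, otherwise MKLDNN, otherwise the plain kernel, with float16 input accepted only on the cuDNN path. A minimal self-contained sketch of that priority rule; the Library enum and ChooseLibrary helper are illustrative stand-ins, not part of the patch:

enum class Library { kPlain, kMKLDNN, kCUDNN };  // mirrors framework::LibraryType

// Hypothetical helper mirroring the #ifdef chain in GetExpectedKernelType.
Library ChooseLibrary(bool can_use_cudnn, bool can_use_mkldnn) {
  if (can_use_cudnn) return Library::kCUDNN;    // GPU path, first choice
  if (can_use_mkldnn) return Library::kMKLDNN;  // CPU fallback
  return Library::kPlain;                       // default kernel
}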
@@ -80,6 +129,22 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel { ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + // choose cudnn kernel if the runtime supported. + framework::LibraryType library_{framework::LibraryType::kPlain}; +#ifdef PADDLE_WITH_CUDA + if (platform::CanCUDNNBeUsed(ctx)) { + library_ = framework::LibraryType::kCUDNN; + } +#endif + std::string data_format = ctx.Attr("data_format"); + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), ctx.GetPlace(), + framework::StringToDataLayout(data_format), library_); + } }; } // namespace operators diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu index 39b246a5bedb2819fc9b7fc407cfe03e59af0b68..8f7840cee1dd95a828fd4ac8815e335a5db47e3d 100644 --- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu +++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu @@ -23,21 +23,21 @@ using Tensor = framework::Tensor; namespace { template -__global__ void CrossEntropyGrad(T* logit_grad, const T* loss_grad, - const int64_t* labels, const int batch_size, - const int class_num) { - int tid = blockIdx.x * blockDim.x + threadIdx.x; - int sample_idx = tid / class_num; - - if (tid < batch_size) { - PADDLE_ASSERT(labels[sample_idx] >= 0 && labels[sample_idx] < class_num); - logit_grad[tid * class_num + labels[tid]] -= static_cast(1.); +__global__ void CrossEntropyGrad(T* logit_grad, const int64_t* labels, + const int batch_size, const int class_num) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < batch_size; + i += blockDim.x * gridDim.x) { + int idx = i * class_num + labels[i]; + logit_grad[idx] -= static_cast(1.); } +} - __syncthreads(); - - if (tid < batch_size * class_num) { - logit_grad[tid] *= loss_grad[sample_idx]; +template +__global__ void Scale(T* logit_grad, const T* loss_grad, const int num, + const int class_num) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num; + i += blockDim.x * gridDim.x) { + logit_grad[i] *= loss_grad[i / class_num]; } } @@ -94,22 +94,22 @@ class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel { const int batch_size = logit_grad->dims()[0]; const int class_num = logit_grad->dims()[1]; int block = 512; - int grid = (batch_size * class_num + block - 1) / block; + auto stream = context.cuda_device_context().stream(); if (context.Attr("soft_label")) { + int grid = (batch_size * class_num + block - 1) / block; const T* label_data = labels->data(); - SoftCrossEntropyGradientKernel< - T><<() - .stream()>>>(logit_grad_data, loss_grad_data, label_data, - batch_size, class_num); + SoftCrossEntropyGradientKernel<<>>( + logit_grad_data, loss_grad_data, label_data, batch_size, class_num); } else { + int grid = (batch_size + block - 1) / block; const int64_t* label_data = labels->data(); - CrossEntropyGrad< - T><<() - .stream()>>>(logit_grad_data, loss_grad_data, label_data, - batch_size, class_num); + CrossEntropyGrad<<>>( + logit_grad_data, label_data, batch_size, class_num); + int num = batch_size * class_num; + grid = (num + block - 1) / block; + Scale<<>>(logit_grad_data, loss_grad_data, num, + class_num); } } }; diff --git a/paddle/fluid/operators/target_assign_op.cc b/paddle/fluid/operators/target_assign_op.cc index a894b12fa35a121eff0b8f9d2d0eecc5ae5185f3..33ff967e5e8f5afbaa62ba39ce596687ae0a71cd 
100644 --- a/paddle/fluid/operators/target_assign_op.cc +++ b/paddle/fluid/operators/target_assign_op.cc @@ -153,8 +153,8 @@ template struct NegTargetAssignFunctor, diff --git a/paddle/fluid/platform/cudnn_helper.h b/paddle/fluid/platform/cudnn_helper.h index 1842ecd745e3f5cb75600ce00d89018f81682632..7c604e14eb245232ed92f53a00b9bde45c2fbaec 100644 --- a/paddle/fluid/platform/cudnn_helper.h +++ b/paddle/fluid/platform/cudnn_helper.h @@ -19,6 +19,7 @@ limitations under the License. */ #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/platform/dynload/cudnn.h" #include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/float16.h" #include "paddle/fluid/platform/macros.h" namespace paddle { @@ -80,11 +81,29 @@ enum class PoolingMode { template class CudnnDataType; +template <> +class CudnnDataType { + public: + static const cudnnDataType_t type = CUDNN_DATA_HALF; + // The scaling param type is float for HALF and FLOAT tensors + using ScalingParamType = const float; + using BatchNormParamType = float; + static ScalingParamType* kOne() { + static ScalingParamType v = 1.0; + return &v; + } + static ScalingParamType* kZero() { + static ScalingParamType v = 0.0; + return &v; + } +}; + template <> class CudnnDataType { public: static const cudnnDataType_t type = CUDNN_DATA_FLOAT; - typedef const float ScalingParamType; + using ScalingParamType = const float; + using BatchNormParamType = float; static ScalingParamType* kOne() { static ScalingParamType v = 1.0; return &v; @@ -99,7 +118,8 @@ template <> class CudnnDataType { public: static const cudnnDataType_t type = CUDNN_DATA_DOUBLE; - typedef const double ScalingParamType; + using ScalingParamType = const double; + using BatchNormParamType = double; static ScalingParamType* kOne() { static ScalingParamType v = 1.0; return &v; @@ -289,7 +309,7 @@ inline bool CanCUDNNBeUsed(const framework::ExecutionContext& ctx) { use_cudnn &= paddle::platform::is_gpu_place(ctx.GetPlace()); #ifdef PADDLE_WITH_CUDA if (use_cudnn) { - auto& dev_ctx = ctx.template device_context(); + auto& dev_ctx = ctx.device_context(); use_cudnn &= dev_ctx.cudnn_handle() != nullptr; } #endif diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc index 98b4178177b0a8bafd6fe34a92be2a07a2fbc5a7..59b76a1edb5ec5900520fbccb6a6f8f6e7a70aa4 100644 --- a/paddle/fluid/platform/device_context.cc +++ b/paddle/fluid/platform/device_context.cc @@ -10,43 +10,45 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/platform/device_context.h" +#include #include "paddle/fluid/memory/memory.h" - namespace paddle { namespace platform { DeviceContextPool* DeviceContextPool::pool = nullptr; -const platform::DeviceContext* DeviceContextPool::Get( - const platform::Place& place) { +platform::DeviceContext* DeviceContextPool::Get(const platform::Place& place) { auto it = device_contexts_.find(place); if (it == device_contexts_.end()) { PADDLE_THROW( "'Place' is not supported, Please re-compile with WITH_GPU " "option"); } - return it->second; + return it->second.get(); } DeviceContextPool::DeviceContextPool( const std::vector& places) { PADDLE_ENFORCE_GT(places.size(), 0); - for (size_t i = 0; i < places.size(); i++) { - if (platform::is_cpu_place(places[i])) { + using PtrType = std::unique_ptr; + std::unordered_set set; + for (auto& p : places) { + set.insert(p); + } + + for (auto& p : set) { + if (platform::is_cpu_place(p)) { #ifdef PADDLE_WITH_MKLDNN - device_contexts_.emplace(places[i], - new platform::MKLDNNDeviceContext( - boost::get(places[i]))); + device_contexts_.emplace( + p, PtrType(new MKLDNNDeviceContext(boost::get(p)))); #else - device_contexts_.emplace(places[i], - new platform::CPUDeviceContext( - boost::get(places[i]))); + device_contexts_.emplace( + p, PtrType(new CPUDeviceContext(boost::get(p)))); #endif - } else if (platform::is_gpu_place(places[i])) { + } else if (platform::is_gpu_place(p)) { #ifdef PADDLE_WITH_CUDA - device_contexts_.emplace(places[i], - new platform::CUDADeviceContext( - boost::get(places[i]))); + device_contexts_.emplace( + p, PtrType(new CUDADeviceContext(boost::get(p)))); #else PADDLE_THROW( "'CUDAPlace' is not supported, Please re-compile with WITH_GPU " @@ -159,6 +161,7 @@ CUDADeviceContext::~CUDADeviceContext() { Place CUDADeviceContext::GetPlace() const { return place_; } void CUDADeviceContext::Wait() const { + std::lock_guard guard(mutex_); PADDLE_ENFORCE(cudaStreamSynchronize(stream_)); PADDLE_ENFORCE(cudaGetLastError()); } diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h index 603b890af13b529c490c29112a73a09cc815d07a..202394c7be7e103a609dd0999fc883c794ef0edd 100644 --- a/paddle/fluid/platform/device_context.h +++ b/paddle/fluid/platform/device_context.h @@ -103,6 +103,7 @@ class CUDADeviceContext : public DeviceContext { std::unique_ptr eigen_device_; std::unique_ptr eigen_stream_; + mutable std::mutex mutex_; cudaStream_t stream_; cudnnHandle_t cudnn_handle_; cublasHandle_t cublas_handle_; @@ -159,7 +160,7 @@ class DeviceContextPool { } /*! \brief Return handle of single device context. 
*/ - const platform::DeviceContext* Get(const platform::Place& place); + platform::DeviceContext* Get(const platform::Place& place); template const typename DefaultDeviceContextType::TYPE* GetByPlace( @@ -172,19 +173,8 @@ class DeviceContextPool { private: static DeviceContextPool* pool; - constexpr static int LEFT_SHIFT = 8; - struct Hash { - std::hash hash_; - size_t operator()(const platform::Place& place) const { - int pre_hash = place.which() << LEFT_SHIFT; - if (platform::is_gpu_place(place)) { - pre_hash += boost::get(place).GetDeviceId(); - } - return hash_(pre_hash); - } - }; - std::unordered_map + std::unordered_map, PlaceHash> device_contexts_; DISABLE_COPY_AND_ASSIGN(DeviceContextPool); }; diff --git a/paddle/fluid/platform/device_tracer.cc b/paddle/fluid/platform/device_tracer.cc index 78e00d5420bbea40c9bea4be919ec4ce5ececdcb..3b4437f576e1c2e931a86ec6d5e823ec1f344c52 100644 --- a/paddle/fluid/platform/device_tracer.cc +++ b/paddle/fluid/platform/device_tracer.cc @@ -26,8 +26,14 @@ limitations under the License. */ namespace paddle { namespace platform { namespace { +// Current thread's id. Note, we don't distinguish nested threads +// for now. +thread_local int cur_thread_id = 0; +// Tracking the nested block stacks of each thread. +thread_local std::deque block_id_stack; +// Tracking the nested event stacks. +thread_local std::deque annotation_stack; -thread_local const char *cur_annotation = nullptr; std::once_flag tracer_once_flag; DeviceTracer *tracer = nullptr; } // namespace @@ -191,19 +197,19 @@ class DeviceTracerImpl : public DeviceTracer { correlations_[id] = anno; } - void AddCPURecords(const char *anno, uint64_t start_ns, uint64_t end_ns) { - if (!anno) { - // TODO(panyx0718): Currently, it doesn't support nested situation - // Up-level can be cleared by low-level and therefore get nullptr - // here. + void AddCPURecords(const std::string &anno, uint64_t start_ns, + uint64_t end_ns, int64_t device_id, int64_t thread_id) { + if (anno.empty()) { + VLOG(1) << "Empty timeline annotation."; return; } std::lock_guard l(trace_mu_); - cpu_records_.push_back(CPURecord{anno, start_ns, end_ns, 0}); + cpu_records_.push_back( + CPURecord{anno, start_ns, end_ns, device_id, thread_id}); } void AddMemRecords(const std::string &name, uint64_t start_ns, - uint64_t end_ns, uint32_t device_id, uint32_t stream_id, + uint64_t end_ns, int64_t device_id, int64_t stream_id, uint32_t correlation_id, uint64_t bytes) { // 0 means timestamp information could not be collected for the kernel. if (start_ns == 0 || end_ns == 0) { @@ -215,8 +221,8 @@ class DeviceTracerImpl : public DeviceTracer { stream_id, correlation_id, bytes}); } - void AddKernelRecords(uint64_t start, uint64_t end, uint32_t device_id, - uint32_t stream_id, uint32_t correlation_id) { + void AddKernelRecords(uint64_t start, uint64_t end, int64_t device_id, + int64_t stream_id, uint32_t correlation_id) { // 0 means timestamp information could not be collected for the kernel. 
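// (Such records are dropped rather than stored; the check below only logs
// the correlation id.)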
if (start == 0 || end == 0) { VLOG(3) << correlation_id << " cannot be traced"; @@ -270,27 +276,30 @@ class DeviceTracerImpl : public DeviceTracer { continue; } auto *event = profile_pb.add_events(); + event->set_type(proto::Event::GPUKernel); event->set_name(correlations_.at(r.correlation_id)); event->set_start_ns(r.start_ns); event->set_end_ns(r.end_ns); - event->set_stream_id(r.stream_id); + event->set_sub_device_id(r.stream_id); event->set_device_id(r.device_id); } for (const CPURecord &r : cpu_records_) { auto *event = profile_pb.add_events(); + event->set_type(proto::Event::CPU); event->set_name(r.name); event->set_start_ns(r.start_ns); event->set_end_ns(r.end_ns); - event->set_stream_id(r.thread_id); - event->set_device_id(-1); + event->set_sub_device_id(r.thread_id); + event->set_device_id(r.device_id); } for (const MemRecord &r : mem_records_) { auto *event = profile_pb.add_events(); + event->set_type(proto::Event::GPUKernel); event->set_name(r.name); event->set_start_ns(r.start_ns); event->set_end_ns(r.end_ns); - event->set_stream_id(r.stream_id); + event->set_sub_device_id(r.stream_id); event->set_device_id(r.device_id); event->mutable_memcopy()->set_bytes(r.bytes); } @@ -323,8 +332,9 @@ class DeviceTracerImpl : public DeviceTracer { if ((domain == CUPTI_CB_DOMAIN_DRIVER_API) && (cbid == CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel)) { if (cbInfo->callbackSite == CUPTI_API_ENTER) { - const std::string anno = - cur_annotation ? cur_annotation : cbInfo->symbolName; + const std::string anno = !annotation_stack.empty() + ? annotation_stack.back() + : cbInfo->symbolName; tracer->AddAnnotation(cbInfo->correlationId, anno); } } else { @@ -351,14 +361,15 @@ class DeviceTracerDummy : public DeviceTracer { void AddAnnotation(uint64_t id, const std::string &anno) {} - void AddCPURecords(const char *anno, uint64_t start_ns, uint64_t end_ns) {} + void AddCPURecords(const std::string &anno, uint64_t start_ns, + uint64_t end_ns, int64_t device_id, int64_t thread_id) {} void AddMemRecords(const std::string &name, uint64_t start_ns, - uint64_t end_ns, uint32_t device_id, uint32_t stream_id, + uint64_t end_ns, int64_t device_id, int64_t stream_id, uint32_t correlation_id, uint64_t bytes) {} - void AddKernelRecords(uint64_t start, uint64_t end, uint32_t device_id, - uint32_t stream_id, uint32_t correlation_id) {} + void AddKernelRecords(uint64_t start, uint64_t end, int64_t device_id, + int64_t stream_id, uint32_t correlation_id) {} bool IsEnabled() { return false; } @@ -384,11 +395,28 @@ DeviceTracer *GetDeviceTracer() { return tracer; } -void SetCurAnnotation(const char *anno) { cur_annotation = anno; } +void SetCurAnnotation(const std::string &anno) { + annotation_stack.push_back(anno); +} + +void ClearCurAnnotation() { annotation_stack.pop_back(); } + +std::string CurAnnotation() { + if (annotation_stack.empty()) return ""; + return annotation_stack.back(); +} + +void SetCurBlock(int block_id) { block_id_stack.push_back(block_id); } + +void ClearCurBlock() { block_id_stack.pop_back(); } + +int BlockDepth() { return block_id_stack.size(); } + +void SetCurThread(int thread_id) { cur_thread_id = thread_id; } -void ClearCurAnnotation() { cur_annotation = nullptr; } +void ClearCurThread() { cur_thread_id = 0; } -const char *CurAnnotation() { return cur_annotation; } +int CurThread() { return cur_thread_id; } } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/device_tracer.h b/paddle/fluid/platform/device_tracer.h index 
23f7cdbdffc9f48ac5555455bf745233c81dd0cb..deb3d23f786353b8e7a2f28d094e364158885a34 100644 --- a/paddle/fluid/platform/device_tracer.h +++ b/paddle/fluid/platform/device_tracer.h @@ -32,22 +32,23 @@ class DeviceTracer { struct KernelRecord { uint64_t start_ns; uint64_t end_ns; - uint32_t device_id; - uint32_t stream_id; + int64_t device_id; + int64_t stream_id; uint32_t correlation_id; }; struct CPURecord { std::string name; uint64_t start_ns; uint64_t end_ns; - uint64_t thread_id; + int64_t device_id; + int64_t thread_id; }; struct MemRecord { std::string name; uint64_t start_ns; uint64_t end_ns; - uint32_t device_id; - uint32_t stream_id; + int64_t device_id; + int64_t stream_id; uint32_t correlation_id; uint64_t bytes; }; @@ -64,18 +65,18 @@ class DeviceTracer { virtual void AddAnnotation(uint64_t id, const std::string& anno) = 0; virtual void AddMemRecords(const std::string& name, uint64_t start_ns, - uint64_t end_ns, uint32_t device_id, - uint32_t stream_id, uint32_t correlation_id, + uint64_t end_ns, int64_t device_id, + int64_t stream_id, uint32_t correlation_id, uint64_t bytes) = 0; - virtual void AddCPURecords(const char* anno, uint64_t start_ns, - uint64_t end_ns) = 0; + virtual void AddCPURecords(const std::string& anno, uint64_t start_ns, + uint64_t end_ns, int64_t device_id, + int64_t thread_id) = 0; // Add a cuda kernel stats. `correlation_id` will be mapped to annotation // added before for human readability. - virtual void AddKernelRecords(uint64_t start, uint64_t end, - uint32_t device_id, uint32_t stream_id, - uint32_t correlation_id) = 0; + virtual void AddKernelRecords(uint64_t start, uint64_t end, int64_t device_id, + int64_t stream_id, uint32_t correlation_id) = 0; // Generate a proto after done (Disabled). virtual proto::Profile GenProfile(const std::string& profile_path) = 0; @@ -87,10 +88,18 @@ class DeviceTracer { DeviceTracer* GetDeviceTracer(); // Set a name for the cuda kernel operation being launched by the thread. -void SetCurAnnotation(const char* anno); +void SetCurAnnotation(const std::string& anno); // Clear the name after the operation is done. void ClearCurAnnotation(); // Current name of the operation being run in the thread. 
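// (Annotations are now kept on a thread-local stack (see device_tracer.cc
// above), so nested operators no longer clobber the enclosing annotation,
// and the getter below returns std::string by value rather than a raw
// const char*.)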
-const char* CurAnnotation(); +std::string CurAnnotation(); + +void SetCurBlock(int block_id); +void ClearCurBlock(); +int BlockDepth(); + +void SetCurThread(int thread_id); +void ClearCurThread(); +int CurThread(); } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/float16.h b/paddle/fluid/platform/float16.h index 52fb8c2531357ad7a2b2f8613e5c7fbcef52c6bb..2cf311c7e56a9bbb0bdb0078d5cfefb4bb50018b 100644 --- a/paddle/fluid/platform/float16.h +++ b/paddle/fluid/platform/float16.h @@ -483,9 +483,124 @@ DEVICE inline bool operator>=(const half& a, const half& b) { #endif // PADDLE_CUDA_FP16 -// Arithmetic operators on ARMv8.2-A CPU -#if defined(PADDLE_WITH_NATIVE_FP16) -HOST inline float16 operator+(const float16& a, const float16& b) { +// Arithmetic operators for float16 on GPU +#if defined(PADDLE_CUDA_FP16) +HOSTDEVICE inline float16 operator+(const float16& a, const float16& b) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 + return float16(__hadd(half(a), half(b))); +#else + return float16(float(a) + float(b)); +#endif +} + +HOSTDEVICE inline float16 operator-(const float16& a, const float16& b) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 + return float16(__hsub(half(a), half(b))); +#else + return float16(float(a) - float(b)); +#endif +} + +HOSTDEVICE inline float16 operator*(const float16& a, const float16& b) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 + return float16(__hmul(half(a), half(b))); +#else + return float16(float(a) * float(b)); +#endif +} + +HOSTDEVICE inline float16 operator/(const float16& a, const float16& b) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 + // TODO(kexinzhao): check which cuda version starts to support __hdiv + float num = __half2float(half(a)); + float denom = __half2float(half(b)); + return float16(num / denom); +#else + return float16(float(a) / float(b)); +#endif +} + +HOSTDEVICE inline float16 operator-(const float16& a) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 + return float16(__hneg(half(a))); +#else + float16 res; + res.x = a.x ^ 0x8000; + return res; +#endif +} + +HOSTDEVICE inline float16& operator+=(float16& a, const float16& b) { + a = a + b; + return a; +} + +HOSTDEVICE inline float16& operator-=(float16& a, const float16& b) { + a = a - b; + return a; +} + +HOSTDEVICE inline float16& operator*=(float16& a, const float16& b) { + a = a * b; + return a; +} + +HOSTDEVICE inline float16& operator/=(float16& a, const float16& b) { + a = a / b; + return a; +} + +HOSTDEVICE inline bool operator==(const float16& a, const float16& b) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 + return __heq(half(a), half(b)); +#else + return float(a) == float(b); +#endif +} + +HOSTDEVICE inline bool operator!=(const float16& a, const float16& b) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 + return __hne(half(a), half(b)); +#else + return float(a) != float(b); +#endif +} + +HOSTDEVICE inline bool operator<(const float16& a, const float16& b) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 + return __hlt(half(a), half(b)); +#else + return float(a) < float(b); +#endif +} + +HOSTDEVICE inline bool operator<=(const float16& a, const float16& b) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 + return __hle(half(a), half(b)); +#else + return float(a) <= float(b); +#endif +} + +HOSTDEVICE inline bool operator>(const float16& a, const float16& b) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 + return __hgt(half(a), half(b)); +#else + return float(a) > 
float(b); +#endif +} + +HOSTDEVICE inline bool operator>=(const float16& a, const float16& b) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530 + return __hge(half(a), half(b)); +#else + return float(a) >= float(b); +#endif +} + +// Arithmetic operators for float16 on ARMv8.2-A CPU +#elif defined(PADDLE_WITH_NATIVE_FP16) +inline float16 operator+(const float16& a, const float16& b) { float16 res; asm volatile( "ld1 {v0.h}[0], [%[a_ptr]]\n" @@ -501,7 +616,7 @@ HOST inline float16 operator+(const float16& a, const float16& b) { return res; } -HOST inline float16 operator-(const float16& a, const float16& b) { +inline float16 operator-(const float16& a, const float16& b) { float16 res; asm volatile( "ld1 {v0.h}[0], [%[a_ptr]]\n" @@ -517,7 +632,7 @@ HOST inline float16 operator-(const float16& a, const float16& b) { return res; } -HOST inline float16 operator*(const float16& a, const float16& b) { +inline float16 operator*(const float16& a, const float16& b) { float16 res; asm volatile( "ld1 {v0.h}[0], [%[a_ptr]]\n" @@ -533,7 +648,7 @@ HOST inline float16 operator*(const float16& a, const float16& b) { return res; } -HOST inline float16 operator/(const float16& a, const float16& b) { +inline float16 operator/(const float16& a, const float16& b) { float16 res; asm volatile( "ld1 {v0.h}[0], [%[a_ptr]]\n" @@ -549,7 +664,7 @@ HOST inline float16 operator/(const float16& a, const float16& b) { return res; } -HOST inline float16 operator-(const float16& a) { +inline float16 operator-(const float16& a) { float16 res; asm volatile( "ld1 {v0.h}[0], [%[a_ptr]]\n" @@ -564,27 +679,27 @@ HOST inline float16 operator-(const float16& a) { return res; } -HOST inline float16& operator+=(float16& a, const float16& b) { +inline float16& operator+=(float16& a, const float16& b) { a = a + b; return a; } -HOST inline float16& operator-=(float16& a, const float16& b) { +inline float16& operator-=(float16& a, const float16& b) { a = a - b; return a; } -HOST inline float16& operator*=(float16& a, const float16& b) { +inline float16& operator*=(float16& a, const float16& b) { a = a * b; return a; } -HOST inline float16& operator/=(float16& a, const float16& b) { +inline float16& operator/=(float16& a, const float16& b) { a = a / b; return a; } -HOST inline bool operator==(const float16& a, const float16& b) { +inline bool operator==(const float16& a, const float16& b) { uint16_t res; asm volatile( "ld1 {v0.h}[0], [%[a_ptr]]\n" @@ -600,11 +715,9 @@ HOST inline bool operator==(const float16& a, const float16& b) { return (res & 0xffff) != 0; } -HOST inline bool operator!=(const float16& a, const float16& b) { - return !(a == b); -} +inline bool operator!=(const float16& a, const float16& b) { return !(a == b); } -HOST inline bool operator<(const float16& a, const float16& b) { +inline bool operator<(const float16& a, const float16& b) { uint16_t res; asm volatile( "ld1 {v1.h}[0], [%[a_ptr]]\n" @@ -620,7 +733,7 @@ HOST inline bool operator<(const float16& a, const float16& b) { return (res & 0xffff) != 0; } -HOST inline bool operator<=(const float16& a, const float16& b) { +inline bool operator<=(const float16& a, const float16& b) { uint16_t res; asm volatile( "ld1 {v1.h}[0], [%[a_ptr]]\n" @@ -636,7 +749,7 @@ HOST inline bool operator<=(const float16& a, const float16& b) { return (res & 0xffff) != 0; } -HOST inline bool operator>(const float16& a, const float16& b) { +inline bool operator>(const float16& a, const float16& b) { uint16_t res; asm volatile( "ld1 {v0.h}[0], [%[a_ptr]]\n" @@ -652,7 +765,7 @@ HOST inline 
bool operator>(const float16& a, const float16& b) { return (res & 0xffff) != 0; } -HOST inline bool operator>=(const float16& a, const float16& b) { +inline bool operator>=(const float16& a, const float16& b) { uint16_t res; asm volatile( "ld1 {v0.h}[0], [%[a_ptr]]\n" @@ -668,71 +781,71 @@ HOST inline bool operator>=(const float16& a, const float16& b) { return (res & 0xffff) != 0; } -// Arithmetic operators, software emulated on other CPU +// Arithmetic operators for float16, software emulated on other CPU #else -HOSTDEVICE inline float16 operator+(const float16& a, const float16& b) { +inline float16 operator+(const float16& a, const float16& b) { return float16(float(a) + float(b)); } -HOSTDEVICE inline float16 operator-(const float16& a, const float16& b) { +inline float16 operator-(const float16& a, const float16& b) { return float16(float(a) - float(b)); } -HOSTDEVICE inline float16 operator*(const float16& a, const float16& b) { +inline float16 operator*(const float16& a, const float16& b) { return float16(float(a) * float(b)); } -HOSTDEVICE inline float16 operator/(const float16& a, const float16& b) { +inline float16 operator/(const float16& a, const float16& b) { return float16(float(a) / float(b)); } -HOSTDEVICE inline float16 operator-(const float16& a) { +inline float16 operator-(const float16& a) { float16 res; res.x = a.x ^ 0x8000; return res; } -HOSTDEVICE inline float16& operator+=(float16& a, const float16& b) { +inline float16& operator+=(float16& a, const float16& b) { a = float16(float(a) + float(b)); return a; } -HOSTDEVICE inline float16& operator-=(float16& a, const float16& b) { +inline float16& operator-=(float16& a, const float16& b) { a = float16(float(a) - float(b)); return a; } -HOSTDEVICE inline float16& operator*=(float16& a, const float16& b) { +inline float16& operator*=(float16& a, const float16& b) { a = float16(float(a) * float(b)); return a; } -HOSTDEVICE inline float16& operator/=(float16& a, const float16& b) { +inline float16& operator/=(float16& a, const float16& b) { a = float16(float(a) / float(b)); return a; } -HOSTDEVICE inline bool operator==(const float16& a, const float16& b) { +inline bool operator==(const float16& a, const float16& b) { return float(a) == float(b); } -HOSTDEVICE inline bool operator!=(const float16& a, const float16& b) { +inline bool operator!=(const float16& a, const float16& b) { return float(a) != float(b); } -HOSTDEVICE inline bool operator<(const float16& a, const float16& b) { +inline bool operator<(const float16& a, const float16& b) { return float(a) < float(b); } -HOSTDEVICE inline bool operator<=(const float16& a, const float16& b) { +inline bool operator<=(const float16& a, const float16& b) { return float(a) <= float(b); } -HOSTDEVICE inline bool operator>(const float16& a, const float16& b) { +inline bool operator>(const float16& a, const float16& b) { return float(a) > float(b); } -HOSTDEVICE inline bool operator>=(const float16& a, const float16& b) { +inline bool operator>=(const float16& a, const float16& b) { return float(a) >= float(b); } #endif diff --git a/paddle/fluid/platform/place.h b/paddle/fluid/platform/place.h index 501bddfc6ec8b5d0bf554b0911c32e47fd51ec15..4cc8b377b8b671eb5a446ecbae21ba9628fbd2c8 100644 --- a/paddle/fluid/platform/place.h +++ b/paddle/fluid/platform/place.h @@ -65,6 +65,18 @@ bool is_cpu_place(const Place &); bool places_are_same_class(const Place &, const Place &); bool is_same_place(const Place &, const Place &); +struct PlaceHash { + std::size_t operator()(const Place &p) 
const { + constexpr size_t num_dev_bits = 4; + std::hash<int> ihash; + size_t dev_id = 0; + if (is_gpu_place(p)) { + dev_id = boost::get<CUDAPlace>(p).device; + } + return ihash(dev_id << num_dev_bits | p.which()); + } +}; + std::ostream &operator<<(std::ostream &, const Place &); template diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc index 28ef3e04b1c50e0d42eeb27608259c6449429da5..b25206ff35cc87dcdd363bc0de54530f629d73ed 100644 --- a/paddle/fluid/platform/profiler.cc +++ b/paddle/fluid/platform/profiler.cc @@ -147,19 +147,48 @@ RecordEvent::RecordEvent(const std::string& name, const DeviceContext* dev_ctx) name_ = name; PushEvent(name_, dev_ctx_); // Maybe need the same push/pop behavior. - SetCurAnnotation(name_.c_str()); + SetCurAnnotation(name_); } RecordEvent::~RecordEvent() { if (g_state == ProfilerState::kDisabled) return; DeviceTracer* tracer = GetDeviceTracer(); if (tracer) { - tracer->AddCPURecords(CurAnnotation(), start_ns_, PosixInNsec()); + tracer->AddCPURecords(CurAnnotation(), start_ns_, PosixInNsec(), + BlockDepth(), CurThread()); } ClearCurAnnotation(); PopEvent(name_, dev_ctx_); } +RecordBlock::RecordBlock(int block_id) : start_ns_(PosixInNsec()) { + if (g_state == ProfilerState::kDisabled) return; + SetCurBlock(block_id); + name_ = string::Sprintf("block_%d", block_id); +} + +RecordBlock::~RecordBlock() { + if (g_state == ProfilerState::kDisabled) return; + DeviceTracer* tracer = GetDeviceTracer(); + if (tracer) { + // We try to put all blocks at the same nested depth in the + // same timeline lane, and distinguish them by thread_id. + tracer->AddCPURecords(name_, start_ns_, PosixInNsec(), BlockDepth(), + CurThread()); + } + ClearCurBlock(); +} + +RecordThread::RecordThread(int thread_id) { + if (g_state == ProfilerState::kDisabled) return; + SetCurThread(thread_id); +} + +RecordThread::~RecordThread() { + if (g_state == ProfilerState::kDisabled) return; + ClearCurThread(); +} + void EnableProfiler(ProfilerState state) { PADDLE_ENFORCE(state != ProfilerState::kDisabled, "Can't enable profiling, since the input state is ", diff --git a/paddle/fluid/platform/profiler.h b/paddle/fluid/platform/profiler.h index 3542ce6cda87e3b013d60393e4ba93da61921940..de9a5cc20d76bf84778e0933831f218abb66c465 100644 --- a/paddle/fluid/platform/profiler.h +++ b/paddle/fluid/platform/profiler.h @@ -118,6 +118,20 @@ struct RecordEvent { std::string full_name_; }; +struct RecordBlock { + explicit RecordBlock(int block_id); + ~RecordBlock(); + + private: + std::string name_; + uint64_t start_ns_; +}; + +struct RecordThread { + explicit RecordThread(int thread_id); + ~RecordThread(); +}; + // Return the event list of all threads. Assuming the returned value is called // event_lists, event_lists[i][j] represents the j-th Event of the i-th thread. std::vector<std::vector<Event>> GetAllEvents(); diff --git a/paddle/fluid/platform/profiler.proto b/paddle/fluid/platform/profiler.proto index 71b5a9b12ef4a045ebfd3ee3d06ee25032083ff5..7b42aa785ec6ad5731e3adee1e9f189127a826a1 100644 --- a/paddle/fluid/platform/profiler.proto +++ b/paddle/fluid/platform/profiler.proto @@ -18,12 +18,17 @@ package paddle.platform.proto; message MemCopy { optional uint64 bytes = 1; } message Event { + enum EventType { + CPU = 0; + GPUKernel = 1; + } + optional EventType type = 8; optional string name = 1; optional uint64 start_ns = 2; optional uint64 end_ns = 3; // When positive, it represents gpu id. When -1, it represents CPU.
optional int64 device_id = 5; - optional uint32 stream_id = 6; + optional int64 sub_device_id = 6; optional MemCopy memcopy = 7; } diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt index d62f34030894e2fa21925bbc44e24b4e7d738d15..fe991033dfc2a6ccc66b0ca5588fe8f808d1eb43 100644 --- a/paddle/fluid/pybind/CMakeLists.txt +++ b/paddle/fluid/pybind/CMakeLists.txt @@ -1,9 +1,16 @@ if(WITH_PYTHON) - cc_library(paddle_pybind SHARED - SRCS pybind.cc exception.cc protobuf.cc const_value.cc - DEPS pybind python backward proto_desc paddle_memory executor prune init profiler feed_fetch_method - ${GLOB_OP_LIB}) - if(NOT APPLE AND NOT ANDROID) - target_link_libraries(paddle_pybind rt) - endif(NOT APPLE AND NOT ANDROID) + if(WITH_AMD_GPU) + hip_library(paddle_pybind SHARED + SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc + DEPS pybind python backward proto_desc paddle_memory executor prune init profiler feed_fetch_method + ${GLOB_OP_LIB}) + else() + cc_library(paddle_pybind SHARED + SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc + DEPS pybind python backward proto_desc paddle_memory executor prune init profiler feed_fetch_method + ${GLOB_OP_LIB}) + if(NOT APPLE AND NOT ANDROID) + target_link_libraries(paddle_pybind rt) + endif(NOT APPLE AND NOT ANDROID) + endif(WITH_AMD_GPU) endif(WITH_PYTHON) diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index ac7d1efb577505b70e10a70cdcfd3ed9c5fe1f5c..6c05442466f5f3d8e04a8f0a2206443b1007a107 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -26,16 +26,20 @@ limitations under the License. */ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/lod_tensor_array.h" #include "paddle/fluid/framework/prune.h" +#include "paddle/fluid/framework/reader.h" #include "paddle/fluid/framework/selected_rows.h" #include "paddle/fluid/operators/cond_op.h" #include "paddle/fluid/operators/net_op.h" #include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/gpu_info.h" #include "paddle/fluid/platform/place.h" #include "paddle/fluid/platform/profiler.h" #include "paddle/fluid/pybind/const_value.h" #include "paddle/fluid/pybind/exception.h" #include "paddle/fluid/pybind/pybind.h" +#include "paddle/fluid/pybind/recordio.h" #include "paddle/fluid/pybind/tensor_py.h" + #include "paddle/fluid/string/to_string.h" #ifdef PADDLE_WITH_CUDA @@ -100,12 +104,14 @@ PYBIND11_PLUGIN(core) { .def("set", PyCPUTensorSetFromArray) .def("set", PyCPUTensorSetFromArray) .def("set", PyCPUTensorSetFromArray) + .def("set", PyCPUTensorSetFromArray) #ifdef PADDLE_WITH_CUDA .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) + .def("set", PyCUDATensorSetFromArray) #endif .def("shape", [](Tensor &self) { return vectorize(self.dims()); }) .def("set_float_element", TensorSetElement) @@ -217,8 +223,18 @@ All parameter, weight, gradient are variables in Paddle. 
[](Variable &self) -> operators::NetOp * { return self.GetMutable(); }, + py::return_value_policy::reference) + .def("get_reader", + [](Variable &self) -> framework::ReaderHolder * { + PADDLE_ENFORCE(self.IsType()); + return self.GetMutable(); + }, py::return_value_policy::reference); + py::class_(m, "Reader", "") + .def("has_next", &framework::ReaderHolder::HasNext) + .def("reset", &framework::ReaderHolder::ReInit); + py::class_(m, "Scope", "") .def("var", [](Scope &self, const std::string &name) -> Variable * { @@ -302,7 +318,6 @@ All parameter, weight, gradient are variables in Paddle. #endif }); // clang-format on - #ifdef PADDLE_WITH_CUDA py::class_(m, "Communicator").def(py::init<>()); #endif @@ -410,6 +425,12 @@ All parameter, weight, gradient are variables in Paddle. m.def("init_devices", &framework::InitDevices); m.def("is_compiled_with_cuda", IsCompiledWithCUDA); +#ifdef PADDLE_WITH_CUDA + m.def("is_float16_supported", [](const platform::CUDAPlace &place) -> bool { + // Only GPUs with Compute Capability >= 53 support float16 + return platform::GetCUDAComputeCapability(place.device) >= 53; + }); +#endif m.def("set_feed_variable", framework::SetFeedVariable); m.def("get_fetch_variable", framework::GetFetchVariable); @@ -474,6 +495,8 @@ All parameter, weight, gradient are variables in Paddle. m.def("enable_profiler", platform::EnableProfiler); m.def("disable_profiler", platform::DisableProfiler); m.def("reset_profiler", platform::ResetProfiler); + + BindRecordIOWriter(m); return m.ptr(); } } // namespace pybind diff --git a/paddle/fluid/pybind/recordio.cc b/paddle/fluid/pybind/recordio.cc new file mode 100644 index 0000000000000000000000000000000000000000..16f8bfb1a2e3a840670594d3cc2970e690dce891 --- /dev/null +++ b/paddle/fluid/pybind/recordio.cc @@ -0,0 +1,70 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
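For orientation before the binding code, a minimal sketch of driving the underlying C++ API that this pybind wrapper exposes to Python. It assumes only the recordio::Writer interface added later in this patch (paddle/fluid/recordio/writer.h); the file name is illustrative and not part of the patch.

#include <fstream>
#include "paddle/fluid/recordio/writer.h"

void WriteDemoFile() {
  std::ofstream out("demo.recordio", std::ios::binary);
  paddle::recordio::Writer writer(&out, paddle::recordio::Compressor::kSnappy,
                                  /*max_num_records_in_chunk=*/1000);
  writer.Write("record one");  // buffered into the current chunk
  writer.Write("record two");  // auto-flushes when the chunk is full
  writer.Flush();              // required: ~Writer checks the chunk is empty
}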
+ +#include "paddle/fluid/pybind/recordio.h" +#include +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/recordio/writer.h" + +namespace paddle { +namespace pybind { + +class RecordIOWriter { + public: + RecordIOWriter(const std::string& filename, recordio::Compressor compressor, + size_t max_num_record) + : stream_(filename), writer_(&stream_, compressor, max_num_record) {} + + void AppendTensor(const framework::LoDTensor& tensor) { + tensors_.push_back(tensor); + } + + void CompleteAppendTensor() { + auto& ctx = + *platform::DeviceContextPool::Instance().Get(platform::CPUPlace()); + framework::WriteToRecordIO(writer_, tensors_, ctx); + tensors_.clear(); + } + + void Close() { + PADDLE_ENFORCE(tensors_.empty()); + writer_.Flush(); + stream_.close(); + } + + private: + std::vector tensors_; + std::ofstream stream_; + recordio::Writer writer_; +}; + +void BindRecordIOWriter(py::module& m) { + py::class_ writer(m, "RecordIOWriter", ""); + py::enum_(writer, "Compressor", "") + .value("Snappy", recordio::Compressor::kSnappy) + .value("NoCompress", recordio::Compressor::kNoCompress); + + writer + .def("__init__", + [](RecordIOWriter& self, const std::string& filename, + recordio::Compressor compressor, size_t max_num_record) { + new (&self) RecordIOWriter(filename, compressor, max_num_record); + }) + .def("append_tensor", &RecordIOWriter::AppendTensor) + .def("complete_append_tensor", &RecordIOWriter::CompleteAppendTensor) + .def("close", &RecordIOWriter::Close); +} + +} // namespace pybind +} // namespace paddle diff --git a/paddle/fluid/pybind/recordio.h b/paddle/fluid/pybind/recordio.h new file mode 100644 index 0000000000000000000000000000000000000000..60e6a9e8595614b38375fca8c13d520739af9aaf --- /dev/null +++ b/paddle/fluid/pybind/recordio.h @@ -0,0 +1,26 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "pybind11/pybind11.h" +#include "pybind11/stl.h" + +namespace py = pybind11; + +namespace paddle { +namespace pybind { + +extern void BindRecordIOWriter(py::module& m); +} // namespace pybind +} // namespace paddle diff --git a/paddle/fluid/pybind/tensor_py.h b/paddle/fluid/pybind/tensor_py.h index 1b0916ea0370d95a0c7dd149ee3f7b294c5e2351..6f8c597f8e610594851c318c122563523e4e7ea6 100644 --- a/paddle/fluid/pybind/tensor_py.h +++ b/paddle/fluid/pybind/tensor_py.h @@ -17,6 +17,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/float16.h" #include "pybind11/numpy.h" #include "pybind11/pybind11.h" @@ -71,27 +72,39 @@ struct CastToPyBufferImpl { paddle::platform::GpuMemcpyAsync( dst_ptr, src_ptr, sizeof(CUR_TYPE) * tensor.numel(), cudaMemcpyDeviceToHost, dev_ctx->stream()); + dev_ctx->Wait(); #else PADDLE_THROW("'CUDAPlace' is not supported in CPU only device."); #endif } else if (paddle::platform::is_cpu_place(tensor.place())) { dst_tensor = tensor; } - return py::buffer_info(dst_tensor.data(), sizeof(CUR_TYPE), - py::format_descriptor::format(), - (size_t)framework::arity(dst_tensor.dims()), - dims_outside, strides); + + if (std::type_index(typeid(CUR_TYPE)) == + std::type_index(typeid(platform::float16))) { + return py::buffer_info(dst_tensor.data(), sizeof(CUR_TYPE), + "e", /* np.dtype('e') == np.float16 */ + (size_t)framework::arity(dst_tensor.dims()), + dims_outside, strides); + } else { + return py::buffer_info(dst_tensor.data(), sizeof(CUR_TYPE), + py::format_descriptor::format(), + (size_t)framework::arity(dst_tensor.dims()), + dims_outside, strides); + } } else { constexpr bool less = I + 1 < std::tuple_size>::value; return CastToPyBufferImpl()(tensor); } } }; + } // namespace details + inline py::buffer_info CastToPyBuffer(framework::Tensor &tensor) { auto buffer_info = - details::CastToPyBufferImpl()( - tensor); + details::CastToPyBufferImpl()(tensor); return buffer_info; } @@ -136,6 +149,22 @@ void PyCPUTensorSetFromArray( std::memcpy(dst, array.data(), sizeof(T) * array.size()); } +template <> +void PyCPUTensorSetFromArray( + framework::Tensor &self, + py::array_t array, + paddle::platform::CPUPlace &place) { + std::vector dims; + dims.reserve(array.ndim()); + for (size_t i = 0; i < array.ndim(); ++i) { + dims.push_back((int)array.shape()[i]); + } + + self.Resize(framework::make_ddim(dims)); + auto *dst = self.mutable_data(place); + std::memcpy(dst, array.data(), sizeof(uint16_t) * array.size()); +} + #ifdef PADDLE_WITH_CUDA template void PyCUDATensorSetFromArray( @@ -157,6 +186,28 @@ void PyCUDATensorSetFromArray( paddle::platform::GpuMemcpyAsync(dst, array.data(), sizeof(T) * array.size(), cudaMemcpyHostToDevice, dev_ctx->stream()); } + +template <> +void PyCUDATensorSetFromArray( + framework::Tensor &self, + py::array_t array, + paddle::platform::CUDAPlace &place) { + std::vector dims; + dims.reserve(array.ndim()); + for (size_t i = 0; i < array.ndim(); ++i) { + dims.push_back((int)array.shape()[i]); + } + + self.Resize(framework::make_ddim(dims)); + auto *dst = self.mutable_data(place); + + platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); + auto dev_ctx = + static_cast(pool.Get(place)); + paddle::platform::GpuMemcpyAsync(dst, array.data(), + sizeof(uint16_t) * array.size(), + cudaMemcpyHostToDevice, dev_ctx->stream()); +} #endif } // namespace pybind diff --git a/paddle/fluid/recordio/CMakeLists.txt b/paddle/fluid/recordio/CMakeLists.txt index e1e7c2cdb3d0c960d5cd408420b5aaead73e70d7..92e97a6c85d7c8f01c8473feb9772f2285d49673 100644 --- a/paddle/fluid/recordio/CMakeLists.txt +++ b/paddle/fluid/recordio/CMakeLists.txt @@ -3,4 +3,7 @@ cc_library(header SRCS header.cc) cc_test(header_test SRCS header_test.cc DEPS header) cc_library(chunk SRCS chunk.cc DEPS snappystream snappy header zlib) cc_test(chunk_test SRCS chunk_test.cc DEPS chunk) -cc_library(recordio DEPS chunk header) +cc_library(writer SRCS 
writer.cc DEPS chunk) +cc_library(scanner SRCS scanner.cc DEPS chunk) +cc_test(writer_scanner_test SRCS writer_scanner_test.cc DEPS writer scanner) +cc_library(recordio DEPS chunk header writer scanner) diff --git a/paddle/fluid/recordio/chunk.cc b/paddle/fluid/recordio/chunk.cc index 587fd375c38ca83e1c65cb3ccc20b3509b6348c7..187a6a4ea7bd9d3a8ae48fa262e18f71b0f7d20d 100644 --- a/paddle/fluid/recordio/chunk.cc +++ b/paddle/fluid/recordio/chunk.cc @@ -24,33 +24,52 @@ namespace paddle { namespace recordio { constexpr size_t kMaxBufSize = 1024; +/** + * Read a stream through a fixed-size buffer. + * @param in input stream + * @param limit read at most `limit` bytes from the input stream. 0 means no limit + * @param callback A function object with (const char* buf, size_t size) -> void + * as its type. + */ template <typename Callback> -static void ReadStreamByBuf(std::istream& in, int limit, Callback callback) { +static void ReadStreamByBuf(std::istream& in, size_t limit, Callback callback) { char buf[kMaxBufSize]; std::streamsize actual_size; size_t counter = 0; - do { - auto actual_max = - limit > 0 ? std::min(limit - counter, kMaxBufSize) : kMaxBufSize; - actual_size = in.readsome(buf, actual_max); + size_t actual_max; + while (!in.eof() && + (limit == 0 || counter < limit)) { // Stop at end of file or at limit + actual_max = + limit != 0 ? std::min(limit - counter, kMaxBufSize) : kMaxBufSize; + in.read(buf, actual_max); + actual_size = in.gcount(); if (actual_size == 0) { break; } callback(buf, actual_size); - if (limit > 0) { + if (limit != 0) { counter += actual_size; } - } while (actual_size == kMaxBufSize); + } + in.clear(); // unset eof state } +/** + * Copy one stream into another. + */ static void PipeStream(std::istream& in, std::ostream& os) { ReadStreamByBuf( - in, -1, [&os](const char* buf, size_t len) { os.write(buf, len); }); + in, 0, [&os](const char* buf, size_t len) { os.write(buf, len); }); } -static uint32_t Crc32Stream(std::istream& in, int limit = -1) { - auto crc = crc32(0, nullptr, 0); + +/** + * Calculate CRC32 from an input stream.
+ */ +static uint32_t Crc32Stream(std::istream& in, size_t limit = 0) { + uint32_t crc = static_cast<uint32_t>(crc32(0, nullptr, 0)); ReadStreamByBuf(in, limit, [&crc](const char* buf, size_t len) { - crc = crc32(crc, reinterpret_cast<const Bytef*>(buf), len); + crc = static_cast<uint32_t>(crc32( + crc, reinterpret_cast<const Bytef*>(buf), static_cast<uInt>(len))); }); return crc; } @@ -85,28 +104,29 @@ bool Chunk::Write(std::ostream& os, Compressor ct) const { compressed_stream.reset(); } - auto end_pos = sout.tellg(); + sout.seekg(0, std::ios::end); + uint32_t len = static_cast<uint32_t>(sout.tellg()); sout.seekg(0, std::ios::beg); - uint32_t len = static_cast<uint32_t>(end_pos - sout.tellg()); uint32_t crc = Crc32Stream(sout); - sout.seekg(0, std::ios::beg); - Header hdr(static_cast<uint32_t>(records_.size()), crc, ct, len); hdr.Write(os); + sout.seekg(0, std::ios::beg); + sout.clear(); PipeStream(sout, os); return true; } -void Chunk::Parse(std::istream& sin) { +bool Chunk::Parse(std::istream& sin) { Header hdr; - hdr.Parse(sin); + bool ok = hdr.Parse(sin); + if (!ok) { + return ok; + } auto beg_pos = sin.tellg(); - auto crc = Crc32Stream(sin, hdr.CompressSize()); + uint32_t crc = Crc32Stream(sin, hdr.CompressSize()); PADDLE_ENFORCE_EQ(hdr.Checksum(), crc); - Clear(); - - sin.seekg(beg_pos, std::ios::beg); + sin.seekg(beg_pos, sin.beg); std::unique_ptr<std::istream> compressed_stream; switch (hdr.CompressType()) { case Compressor::kNoCompress: @@ -126,8 +146,10 @@ void Chunk::Parse(std::istream& sin) { std::string buf; buf.resize(rec_len); stream.read(&buf[0], rec_len); + PADDLE_ENFORCE_EQ(rec_len, stream.gcount()); Add(buf); } + return true; } } // namespace recordio diff --git a/paddle/fluid/recordio/chunk.h b/paddle/fluid/recordio/chunk.h index 0ba9c63abbe72e7a51ddb1af5f0d206aa9f6cc5b..bf20ebd455c26ddeebeeea8db04cf7103b0c085f 100644 --- a/paddle/fluid/recordio/chunk.h +++ b/paddle/fluid/recordio/chunk.h @@ -26,9 +26,9 @@ namespace recordio { class Chunk { public: Chunk() : num_bytes_(0) {} - void Add(std::string buf) { - records_.push_back(buf); + void Add(const std::string& buf) { num_bytes_ += buf.size(); + records_.emplace_back(buf); } // dump the chunk into w, and clears the chunk and makes it ready for // the next add invocation. @@ -37,10 +37,15 @@ public: records_.clear(); num_bytes_ = 0; } - void Parse(std::istream& sin); - size_t NumBytes() { return num_bytes_; } + + // returns true if ok, false if eof + bool Parse(std::istream& sin); + size_t NumBytes() const { return num_bytes_; } + size_t NumRecords() const { return records_.size(); } const std::string& Record(int i) const { return records_[i]; } + bool Empty() const { return records_.empty(); } + private: std::vector<std::string> records_; // sum of record lengths in bytes. diff --git a/paddle/fluid/recordio/chunk_test.cc b/paddle/fluid/recordio/chunk_test.cc index a67ba32ed6ab8bda230d1414975c96a0be6d682b..1f0e36a14d373ca96167199d4582bc8f17290ae8 100644 --- a/paddle/fluid/recordio/chunk_test.cc +++ b/paddle/fluid/recordio/chunk_test.cc @@ -26,7 +26,7 @@ TEST(Chunk, SaveLoad) { ch.Add(std::string("123", 4)); std::stringstream ss; ch.Write(ss, Compressor::kNoCompress); - ch.Clear(); + ss.seekg(0); ch.Parse(ss); ASSERT_EQ(ch.NumBytes(), 10U); } diff --git a/paddle/fluid/recordio/header.cc b/paddle/fluid/recordio/header.cc index 3641caaa8981020519cbc31e5362348c02d3bbce..ed09d58f6a3e2dba50bf4407c0463480575b248e 100644 --- a/paddle/fluid/recordio/header.cc +++ b/paddle/fluid/recordio/header.cc @@ -13,6 +13,7 @@ // limitations under the License.
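The Header::Write/Parse changes that follow prepend kMagicNumber (0x01020304, from header.h) to the four existing fields. A hedged sketch of the resulting on-disk chunk header layout; the struct and function names here are illustrative, and it assumes native byte order, as the real code does.

#include <cstdint>
#include <istream>

// Five uint32 fields, in the order Header::Write emits them below.
// No padding is possible since every field is uint32_t.
struct RawChunkHeader {
  uint32_t magic;          // must equal kMagicNumber == 0x01020304
  uint32_t num_records;    // records stored in this chunk
  uint32_t checksum;       // CRC32 of the (possibly compressed) payload
  uint32_t compressor;     // Compressor enum value
  uint32_t compress_size;  // payload length in bytes
};

static bool ReadRawChunkHeader(std::istream& is, RawChunkHeader* hdr) {
  is.read(reinterpret_cast<char*>(hdr), sizeof(*hdr));
  return is.gcount() == static_cast<std::streamsize>(sizeof(*hdr)) &&
         hdr->magic == 0x01020304;
}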
#include "paddle/fluid/recordio/header.h" +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace recordio { @@ -26,23 +27,33 @@ Header::Header() Header::Header(uint32_t num, uint32_t sum, Compressor c, uint32_t cs) : num_records_(num), checksum_(sum), compressor_(c), compress_size_(cs) {} -void Header::Parse(std::istream& is) { +bool Header::Parse(std::istream& is) { + uint32_t magic; + is.read(reinterpret_cast(&magic), sizeof(uint32_t)); + size_t read_size = is.gcount(); + if (read_size < sizeof(uint32_t)) { + return false; + } + PADDLE_ENFORCE_EQ(magic, kMagicNumber); + is.read(reinterpret_cast(&num_records_), sizeof(uint32_t)) .read(reinterpret_cast(&checksum_), sizeof(uint32_t)) .read(reinterpret_cast(&compressor_), sizeof(uint32_t)) .read(reinterpret_cast(&compress_size_), sizeof(uint32_t)); + return true; } void Header::Write(std::ostream& os) const { - os.write(reinterpret_cast(&num_records_), sizeof(uint32_t)) + os.write(reinterpret_cast(&kMagicNumber), sizeof(uint32_t)) + .write(reinterpret_cast(&num_records_), sizeof(uint32_t)) .write(reinterpret_cast(&checksum_), sizeof(uint32_t)) .write(reinterpret_cast(&compressor_), sizeof(uint32_t)) .write(reinterpret_cast(&compress_size_), sizeof(uint32_t)); } std::ostream& operator<<(std::ostream& os, Header h) { - os << h.NumRecords() << h.Checksum() - << static_cast(h.CompressType()) << h.CompressSize(); + os << "Header: " << h.NumRecords() << ", " << h.Checksum() << ", " + << static_cast(h.CompressType()) << ", " << h.CompressSize(); return os; } diff --git a/paddle/fluid/recordio/header.h b/paddle/fluid/recordio/header.h index cbd52642a668d1eaeeafb672e50af1a476975080..9200ac090de4514bef3704ac502039222eef2284 100644 --- a/paddle/fluid/recordio/header.h +++ b/paddle/fluid/recordio/header.h @@ -19,8 +19,6 @@ namespace paddle { namespace recordio { -// Default ChunkSize -constexpr size_t kDefaultMaxChunkSize = 32 * 1024 * 1024; // MagicNumber for memory checking constexpr uint32_t kMagicNumber = 0x01020304; @@ -44,7 +42,9 @@ public: Header(uint32_t num, uint32_t sum, Compressor ct, uint32_t cs); void Write(std::ostream& os) const; - void Parse(std::istream& is); + + // returns true if OK, false if eof + bool Parse(std::istream& is); uint32_t NumRecords() const { return num_records_; } uint32_t Checksum() const { return checksum_; } diff --git a/paddle/fluid/recordio/scanner.cc b/paddle/fluid/recordio/scanner.cc new file mode 100644 index 0000000000000000000000000000000000000000..c22281dc97e05173ad76ce76959833b92f11c4ee --- /dev/null +++ b/paddle/fluid/recordio/scanner.cc @@ -0,0 +1,52 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/fluid/recordio/scanner.h" +#include "paddle/fluid/platform/enforce.h" + +namespace paddle { +namespace recordio { +Scanner::Scanner(std::unique_ptr &&stream) + : stream_(std::move(stream)) { + Reset(); +} + +Scanner::Scanner(const std::string &filename) { + stream_.reset(new std::ifstream(filename)); + Reset(); +} + +void Scanner::Reset() { + stream_->clear(); + stream_->seekg(0, std::ios::beg); + ParseNextChunk(); +} + +std::string Scanner::Next() { + PADDLE_ENFORCE(!eof_, "StopIteration"); + auto rec = cur_chunk_.Record(offset_++); + if (offset_ == cur_chunk_.NumRecords()) { + ParseNextChunk(); + } + return rec; +} + +void Scanner::ParseNextChunk() { + eof_ = !cur_chunk_.Parse(*stream_); + offset_ = 0; +} + +bool Scanner::HasNext() const { return !eof_; } +} // namespace recordio +} // namespace paddle diff --git a/paddle/fluid/recordio/scanner.h b/paddle/fluid/recordio/scanner.h new file mode 100644 index 0000000000000000000000000000000000000000..f3f17b69f195ddd92f5a39ead9755a7b8e2dd329 --- /dev/null +++ b/paddle/fluid/recordio/scanner.h @@ -0,0 +1,44 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include "paddle/fluid/recordio/chunk.h" +namespace paddle { +namespace recordio { + +class Scanner { +public: + explicit Scanner(std::unique_ptr&& stream); + + explicit Scanner(const std::string& filename); + + void Reset(); + + std::string Next(); + + bool HasNext() const; + +private: + std::unique_ptr stream_; + Chunk cur_chunk_; + size_t offset_; + bool eof_; + + void ParseNextChunk(); +}; +} // namespace recordio +} // namespace paddle diff --git a/paddle/fluid/recordio/writer.cc b/paddle/fluid/recordio/writer.cc new file mode 100644 index 0000000000000000000000000000000000000000..196d66edff8cc6000afcd74fb945c05dcab7106a --- /dev/null +++ b/paddle/fluid/recordio/writer.cc @@ -0,0 +1,35 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#include "paddle/fluid/recordio/writer.h" +#include "paddle/fluid/platform/enforce.h" +namespace paddle { +namespace recordio { +void Writer::Write(const std::string& record) { + cur_chunk_.Add(record); + if (cur_chunk_.NumRecords() >= max_num_records_in_chunk_) { + Flush(); + } +} + +void Writer::Flush() { + cur_chunk_.Write(stream_, compressor_); + cur_chunk_.Clear(); +} + +Writer::~Writer() { + PADDLE_ENFORCE(cur_chunk_.Empty(), "Writer must be flushed when destroy."); +} + +} // namespace recordio +} // namespace paddle diff --git a/paddle/fluid/recordio/writer.h b/paddle/fluid/recordio/writer.h new file mode 100644 index 0000000000000000000000000000000000000000..0c478d507547b10b8ebaaf5e512557a5c8c13e65 --- /dev/null +++ b/paddle/fluid/recordio/writer.h @@ -0,0 +1,43 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "paddle/fluid/recordio/chunk.h" +namespace paddle { +namespace recordio { + +class Writer { +public: + Writer(std::ostream* sout, + Compressor compressor, + size_t max_num_records_in_chunk = 1000) + : stream_(*sout), + max_num_records_in_chunk_(max_num_records_in_chunk), + compressor_(compressor) {} + + void Write(const std::string& record); + + void Flush(); + + ~Writer(); + +private: + std::ostream& stream_; + size_t max_num_records_in_chunk_; + Chunk cur_chunk_; + Compressor compressor_; +}; + +} // namespace recordio +} // namespace paddle diff --git a/paddle/fluid/recordio/writer_scanner_test.cc b/paddle/fluid/recordio/writer_scanner_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..7e764f0d9439709ad101af2b8864dc0158bd359b --- /dev/null +++ b/paddle/fluid/recordio/writer_scanner_test.cc @@ -0,0 +1,69 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "gtest/gtest.h" + +#include +#include "paddle/fluid/recordio/scanner.h" +#include "paddle/fluid/recordio/writer.h" + +TEST(WriterScanner, Normal) { + std::stringstream* stream = new std::stringstream(); + + { + paddle::recordio::Writer writer(stream, + paddle::recordio::Compressor::kSnappy); + writer.Write("ABC"); + writer.Write("BCD"); + writer.Write("CDE"); + writer.Flush(); + } + + { + stream->seekg(0, std::ios::beg); + std::unique_ptr stream_ptr(stream); + paddle::recordio::Scanner scanner(std::move(stream_ptr)); + ASSERT_TRUE(scanner.HasNext()); + ASSERT_EQ(scanner.Next(), "ABC"); + ASSERT_EQ("BCD", scanner.Next()); + ASSERT_TRUE(scanner.HasNext()); + ASSERT_EQ("CDE", scanner.Next()); + ASSERT_FALSE(scanner.HasNext()); + } +} + +TEST(WriterScanner, TinyChunk) { + std::stringstream* stream = new std::stringstream(); + { + paddle::recordio::Writer writer( + stream, paddle::recordio::Compressor::kNoCompress, 2 /*max chunk num*/); + writer.Write("ABC"); + writer.Write("BCD"); + writer.Write("CDE"); + writer.Write("DEFG"); + writer.Flush(); + } + + { + stream->seekg(0, std::ios::beg); + std::unique_ptr stream_ptr(stream); + paddle::recordio::Scanner scanner(std::move(stream_ptr)); + ASSERT_TRUE(scanner.HasNext()); + ASSERT_EQ(scanner.Next(), "ABC"); + ASSERT_EQ(scanner.Next(), "BCD"); + ASSERT_EQ(scanner.Next(), "CDE"); + ASSERT_EQ(scanner.Next(), "DEFG"); + ASSERT_FALSE(scanner.HasNext()); + } +} \ No newline at end of file diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh old mode 100644 new mode 100755 index 6be2bd8fad9e33cf4e1dcafdd6b8f39111bdbe88..322f72e4a58c7e8f2c26d994477cbb55551c595a --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -35,8 +35,9 @@ function cmake_gen() { -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Release} ${PYTHON_FLAGS} -DWITH_DSO=ON - -DWITH_DOC=OFF + -DWITH_DOC=${WITH_DOC:-OFF} -DWITH_GPU=${WITH_GPU:-OFF} + -DWITH_AMD_GPU=${WITH_AMD_GPU:-OFF} -DWITH_DISTRIBUTE=${WITH_DISTRIBUTE:-OFF} -DWITH_MKL=${WITH_MKL:-ON} -DWITH_AVX=${WITH_AVX:-OFF} @@ -50,6 +51,7 @@ function cmake_gen() { -DWITH_STYLE_CHECK=${WITH_STYLE_CHECK:-ON} -DWITH_TESTING=${WITH_TESTING:-ON} -DWITH_FAST_BUNDLE_TEST=ON + -DCMAKE_MODULE_PATH=/opt/rocm/hip/cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=ON ======================================== EOF @@ -60,8 +62,9 @@ EOF -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Release} \ ${PYTHON_FLAGS} \ -DWITH_DSO=ON \ - -DWITH_DOC=OFF \ + -DWITH_DOC=${WITH_DOC:-OFF} \ -DWITH_GPU=${WITH_GPU:-OFF} \ + -DWITH_AMD_GPU=${WITH_AMD_GPU:-OFF} \ -DWITH_DISTRIBUTE=${WITH_DISTRIBUTE:-OFF} \ -DWITH_MKL=${WITH_MKL:-ON} \ -DWITH_AVX=${WITH_AVX:-OFF} \ @@ -74,6 +77,7 @@ EOF -DWITH_STYLE_CHECK=${WITH_STYLE_CHECK:-ON} \ -DWITH_TESTING=${WITH_TESTING:-ON} \ -DWITH_FAST_BUNDLE_TEST=ON \ + -DCMAKE_MODULE_PATH=/opt/rocm/hip/cmake \ -DCMAKE_EXPORT_COMPILE_COMMANDS=ON } @@ -231,7 +235,7 @@ gen_capi_package gen_fluid_inference_lib if [[ ${WITH_C_API:-OFF} == "ON" ]]; then - printf "PaddlePaddle C-API libraries was generated on build/paddle.tgz\n" + printf "PaddlePaddle C-API libraries was generated on build/paddle.tgz\n" else printf "If you need to install PaddlePaddle in develop docker image," printf "please make install or pip install build/python/dist/*.whl.\n" diff --git a/paddle/scripts/tools/build_docs/.gitignore b/paddle/scripts/tools/build_docs/.gitignore deleted file mode 100644 index 6ec14c8f5bc3774a81dbe87c44f458594b38f12c..0000000000000000000000000000000000000000 --- a/paddle/scripts/tools/build_docs/.gitignore +++ /dev/null 
@@ -1,2 +0,0 @@ -doc -doc_cn diff --git a/paddle/scripts/tools/build_docs/build_docs.sh b/paddle/scripts/tools/build_docs/build_docs.sh deleted file mode 100755 index f9bc8bf63ae9afdfca1ff660bc83e62e71f03005..0000000000000000000000000000000000000000 --- a/paddle/scripts/tools/build_docs/build_docs.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -docker run --rm \ - -v $(git rev-parse --show-toplevel):/paddle \ - -e "WITH_GPU=OFF" \ - -e "WITH_AVX=ON" \ - -e "WITH_DOC=ON" \ - -e "WOBOQ=ON" \ - ${1:-"paddlepaddle/paddle:latest-dev"} diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 6e24cbdd3f6a4f05c1691dc643d880f6f454429d..90c2dfbba78418fb7b731f5363017d70577b1ae5 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -1,27 +1,29 @@ - -file(GLOB TRAINER_PY_FILES . ./paddle/trainer/*.py) -file(GLOB HELPERS_PY_FILES . ./paddle/trainer_config_helpers/*.py) file(GLOB UTILS_PY_FILES . ./paddle/utils/*.py) -file(GLOB_RECURSE V2_PY_FILES ./paddle/v2/ *.py) file(GLOB_RECURSE FLUID_PY_FILES ./paddle/fluid/ *.py) - set(PY_FILES paddle/__init__.py - ${TRAINER_PY_FILES} - ${HELPERS_PY_FILES} ${UTILS_PY_FILES} - ${V2_PY_FILES} ${FLUID_PY_FILES}) -add_custom_target(copy_paddle_master) +if(NOT WITH_FLUID) + file(GLOB TRAINER_PY_FILES . ./paddle/trainer/*.py) + file(GLOB HELPERS_PY_FILES . ./paddle/trainer_config_helpers/*.py) + file(GLOB_RECURSE V2_PY_FILES ./paddle/v2/ *.py) + set(PY_FILES ${PY_FILES} + ${TRAINER_PY_FILES} + ${HELPERS_PY_FILES} + ${V2_PY_FILES}) -SET(COPY_PADDLE_MASTER "") -if(WITH_GOLANG) - SET(COPY_PADDLE_MASTER "copy_paddle_master") - add_custom_command(TARGET ${COPY_PADDLE_MASTER} - COMMAND cp ${paddle_master_LIB_PATH} ${PADDLE_SOURCE_DIR}/python/paddle/v2/master/ - ) - add_dependencies(copy_paddle_master paddle_master) -endif(WITH_GOLANG) + add_custom_target(copy_paddle_master) + + SET(COPY_PADDLE_MASTER "") + if(WITH_GOLANG) + SET(COPY_PADDLE_MASTER "copy_paddle_master") + add_custom_command(TARGET ${COPY_PADDLE_MASTER} + COMMAND cp ${paddle_master_LIB_PATH} ${PADDLE_SOURCE_DIR}/python/paddle/v2/master/ + ) + add_dependencies(copy_paddle_master paddle_master) + endif(WITH_GOLANG) +endif() set(MKL_SHARED_LIBS "") set(MKL_DEPENDS "") @@ -59,23 +61,28 @@ add_custom_command(OUTPUT ${PADDLE_PYTHON_BUILD_DIR}/.timestamp COMMAND ${CMAKE_COMMAND} -E copy_directory ${PADDLE_PYTHON_BUILD_DIR}/lib* ${PADDLE_PYTHON_BUILD_DIR}/lib-python DEPENDS gen_proto_py copy_paddle_pybind framework_py_proto profiler_py_proto ${PY_FILES} ${external_project_dependencies} ${COPY_PADDLE_MASTER}) -set(paddle_python_deps ${PADDLE_PYTHON_BUILD_DIR}/.timestamp paddle_pserver_main paddle_trainer paddle_merge_model ${MKL_DEPENDS}) -if(WITH_SWIG_PY) - list(APPEND paddle_python_deps python_api_wheel) +set(paddle_python_deps ${PADDLE_PYTHON_BUILD_DIR}/.timestamp ${MKL_DEPENDS}) +if(NOT WITH_FLUID) + set(paddle_python_deps ${paddle_python_deps} paddle_pserver_main paddle_trainer paddle_merge_model) + if(WITH_SWIG_PY) + list(APPEND paddle_python_deps python_api_wheel) + endif() endif() add_custom_target(paddle_python ALL DEPENDS ${paddle_python_deps}) set(PADDLE_PYTHON_PACKAGE_DIR ${CMAKE_CURRENT_BINARY_DIR}/dist/) if (WITH_TESTING) - add_subdirectory(paddle/trainer_config_helpers/tests) - if (WITH_SWIG_PY) - # enable v2 API unittest only when paddle swig api is compiled - add_subdirectory(paddle/v2/tests) - add_subdirectory(paddle/v2/reader/tests) - add_subdirectory(paddle/v2/plot/tests) - add_subdirectory(paddle/fluid/tests) + if(NOT WITH_FLUID) + 
add_subdirectory(paddle/trainer_config_helpers/tests) + if (WITH_SWIG_PY) + # enable v2 API unittest only when paddle swig api is compiled + add_subdirectory(paddle/v2/tests) + add_subdirectory(paddle/v2/reader/tests) + add_subdirectory(paddle/v2/plot/tests) + endif() endif() + add_subdirectory(paddle/fluid/tests) endif() install(DIRECTORY ${PADDLE_PYTHON_PACKAGE_DIR} DESTINATION opt/paddle/share/wheels diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py index 84a57aff516ad2d7ba1efaf1d530e77747d3b254..fcea28220485039c9daf3c5fa2688c31f9f34c42 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/fluid/__init__.py @@ -35,11 +35,12 @@ from core import LoDTensor, CPUPlace, CUDAPlace from distribute_transpiler import DistributeTranspiler from distribute_transpiler_simple import SimpleDistributeTranspiler from concurrency import (Go, make_channel, channel_send, channel_recv, - channel_close) + channel_close, Select) import clip from memory_optimization_transpiler import memory_optimize, release_memory import profiler import unique_name +import recordio_writer Tensor = LoDTensor @@ -66,6 +67,7 @@ __all__ = framework.__all__ + executor.__all__ + concurrency.__all__ + [ 'release_memory', 'profiler', 'unique_name', + 'recordio_writer', ] diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py index b6f20daee3a585777a23255355f0a0e31328d23f..7af6ed1463ab737e871da487f2a687301652ef2d 100644 --- a/python/paddle/fluid/backward.py +++ b/python/paddle/fluid/backward.py @@ -248,12 +248,15 @@ def _callback_lookup_(op): if o_argu in self.param_grad_names: allreduce_out_name = o_argu + "__nccl_all_reduce__" op_desc = _create_op_desc_( - "ncclAllReduce", { + "ncclReduce", + { "X": [o_argu], "Communicator": ['nccl_com__do_not_change_'] - }, {"Out": [allreduce_out_name]}, - {"reduction": "ncclSum"}) + }, + {"Out": [allreduce_out_name]}, + {"reduction": "ncclSum", + "root": 0}, ) block.desc.append_op().copy_from(op_desc) op_desc = _create_op_desc_( diff --git a/python/paddle/fluid/concurrency.py b/python/paddle/fluid/concurrency.py index dec224fc886cd0739add0ebb6488625ef5063b8d..d65e1a6858373d8e172cb8112a10a77d2e9bd5bc 100644 --- a/python/paddle/fluid/concurrency.py +++ b/python/paddle/fluid/concurrency.py @@ -12,17 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -from layers.control_flow import BlockGuard +from layers.control_flow import BlockGuard, equal +from .framework import Operator from layer_helper import LayerHelper, unique_name from layers import fill_constant import core __all__ = [ - 'Go', - 'make_channel', - 'channel_send', - 'channel_recv', - 'channel_close', + 'Go', 'make_channel', 'channel_send', 'channel_recv', 'channel_close', + 'Select' ] @@ -78,6 +76,185 @@ class Go(BlockGuard): attrs={'sub_block': go_block}) +class SelectCase(object): + DEFAULT = 0 + SEND = 1 + RECEIVE = 2 + + def __init__(self, + case_idx, + case_to_execute, + channel_action_fn=None, + channel=None, + value=None): + self.helper = LayerHelper('conditional_block') + self.main_program = self.helper.main_program + self.is_scalar_condition = True + + self.case_to_execute = case_to_execute + self.idx = case_idx + + # Since we aren't going to use the `channel_send` or `channel_recv` + # functions directly, we just need to capture the name. 
+ self.action = (self.SEND + if channel_action_fn.__name__ == ('channel_send') else + self.RECEIVE) if channel_action_fn else self.DEFAULT + self.value = value + self.channel = channel + + def __enter__(self): + self.block = self.main_program.create_block() + + def construct_op(self): + main_program = self.helper.main_program + cases_block = main_program.current_block() + + inner_outputs = set() + input_set = set() + params = set() + + for op in self.block.ops: + # Iterate over all operators, get all the inputs + # and add as input to the SelectCase operator. + for iname in op.input_names: + for in_var_name in op.input(iname): + if in_var_name not in inner_outputs: + input_set.add(in_var_name) + + for oname in op.output_names: + for out_var_name in op.output(oname): + inner_outputs.add(out_var_name) + + param_list = [ + cases_block.var(each_name) for each_name in params + if each_name not in input_set + ] + + # Iterate over all operators, get all the outputs + # add to the output list of SelectCase operator only if + # they exist in the parent block. + out_vars = [] + for inner_out_name in inner_outputs: + if inner_out_name in cases_block.vars: + out_vars.append(cases_block.var(inner_out_name)) + + # First, create an op that will determine whether or not this is the + # conditional variable to execute. + should_execute_block = equal( + fill_constant( + shape=[1], dtype=core.VarDesc.VarType.INT32, value=self.idx), + self.case_to_execute) + + step_scope = cases_block.create_var( + type=core.VarDesc.VarType.STEP_SCOPES) + + cases_block.append_op( + type='conditional_block', + inputs={'X': [should_execute_block], + 'Params': param_list}, + outputs={'Out': out_vars, + 'Scope': [step_scope]}, + attrs={ + 'sub_block': self.block, + 'is_scalar_condition': self.is_scalar_condition + }) + + return '%s,%s,%s,%s' % (self.idx, self.action, self.channel.name + if self.channel else '', self.value.name + if self.value else '') + + def __exit__(self, exc_type, exc_val, exc_tb): + self.main_program.rollback() + if exc_type is not None: + return False # re-raise exception + return True + + +class Select(BlockGuard): + def __init__(self, name=None): + self.helper = LayerHelper('select', name=name) + self.cases = [] + + super(Select, self).__init__(self.helper.main_program) + self.case_to_execute = fill_constant( + shape=[1], dtype=core.VarDesc.VarType.INT32, value=-1) + + def __enter__(self): + super(Select, self).__enter__() + return self + + def case(self, channel_action_fn, channel, value): + """Create a new block for this condition. + """ + select_case = SelectCase( + len(self.cases), self.case_to_execute, channel_action_fn, channel, + value) + + self.cases.append(select_case) + + return select_case + + def default(self): + """Create a default case block for this condition. + """ + default_case = SelectCase(len(self.cases), self.case_to_execute) + + self.cases.append(default_case) + + return default_case + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type is not None: + return False + + # Create a select op and another block to wrap its + # case blocks. + select_block = self.helper.main_program.current_block() + parent_block = self.helper.main_program.block(select_block.parent_idx) + + # Construct each case op, inside the newly created select block. 
+ serialized_cases = [] + for case in self.cases: + serialized_cases.append(case.construct_op()) + + intermediate = set() + params = set() + + for case_block in select_block.ops: + if case_block.attrs and 'sub_block' in case_block.attrs: + for each_op in case_block.attrs['sub_block'].ops: + assert isinstance(each_op, Operator) + for iname in each_op.input_names: + for in_var_name in each_op.input(iname): + if in_var_name not in intermediate: + params.add(in_var_name) + + for oname in each_op.output_names: + for out_var_name in each_op.output(oname): + intermediate.add(out_var_name) + + out_list = [ + parent_block.var(var_name) for var_name in parent_block.vars + if var_name in intermediate + ] + + X = [select_block.var_recursive(x_name) for x_name in params] + + # Needs to be used by `equal` inside the cases block. + X.append(self.case_to_execute) + + # Construct the select op. + parent_block.append_op( + type='select', + inputs={'X': X, + 'case_to_execute': self.case_to_execute}, + attrs={'sub_block': select_block, + 'cases': serialized_cases}, + outputs={'Out': out_list}) + + return super(Select, self).__exit__(exc_type, exc_val, exc_tb) + + def make_channel(dtype, capacity=0): """ Helps implementation of a concurrent program by creating a "channel" of @@ -134,7 +311,7 @@ def make_channel(dtype, capacity=0): return channel -def channel_send(channel, value): +def channel_send(channel, value, is_copy=False): """ Sends a value through a channel variable. Used by an unbuffered or buffered channel to pass data from within or to a concurrent Go block, where @@ -144,6 +321,8 @@ def channel_send(channel, value): channel (Variable|Channel): Channel variable created using `make_channel`. value (Variable): Value to send to channel + is_copy (bool): Copy data while channel send. If False, then data + is moved. The input cannot be used after move. (default False) Returns: Variable: The boolean status on whether or not the channel successfully sent the passed value. @@ -165,11 +344,26 @@ def channel_send(channel, value): type=core.VarDesc.VarType.LOD_TENSOR, dtype=core.VarDesc.VarType.BOOL) + X = value + + if is_copy is True: + copied_X = helper.create_variable( + name=unique_name.generate(value.name + '_copy'), + type=value.type, + dtype=value.dtype, + shape=value.shape, + lod_level=value.lod_level, + capacity=value.capacity) + + assign_op = channel_send_block.append_op( + type="assign_op", inputs={"X": value}, outputs={"Out": copied_X}) + X = copied_X + channel_send_op = channel_send_block.append_op( type="channel_send", inputs={ "Channel": channel, - "X": value, + "X": X, }, outputs={"Status": status}) @@ -198,7 +392,7 @@ def channel_recv(channel, return_value): ch = fluid.make_channel(dtype='int32', capacity=10) with fluid.Go(): - returned_value = fluid.channel_recv(ch, 'int32') + returned_value, return_status = fluid.channel_recv(ch, 'int32') # Code to send data through the channel. 
""" diff --git a/python/paddle/fluid/debuger.py b/python/paddle/fluid/debuger.py index b7a906654a7ed60d610fc2e9801bf6108e555dcb..7b4afa9bf65e1369329cd4648c1f5c4bd8fa8357 100644 --- a/python/paddle/fluid/debuger.py +++ b/python/paddle/fluid/debuger.py @@ -52,9 +52,11 @@ reprtpl = "{ttype} {name} ({reprs})" def repr_lodtensor(proto): - if not proto.lod_tensor: return - level = proto.lod_tensor.lod_level - reprs = repr_tensor(proto.lod_tensor.tensor) + if proto.type.type != framework_pb2.VarType.LOD_TENSOR: + return + + level = proto.type.lod_tensor.lod_level + reprs = repr_tensor(proto.type.lod_tensor.tensor) return reprtpl.format( ttype="LoDTensor" if level > 0 else "Tensor", name=proto.name, @@ -62,20 +64,24 @@ def repr_lodtensor(proto): def repr_selected_rows(proto): - if not proto.selected_rows: return + if proto.type.type != framework_pb2.VarType.SELECTED_ROWS: + return + return reprtpl.format( ttype="SelectedRows", name=proto.name, - reprs=repr_tensor(proto.selected_rows)) + reprs=repr_tensor(proto.type.selected_rows)) def repr_tensor_array(proto): - if not proto.tensor_array: return + if proto.type.type != framework_pb2.VarType.LOD_TENSOR_ARRAY: + return + return reprtpl.format( ttype="TensorArray", name=proto.name, - reprs="level=%d, %s" % (proto.tensor_array.lod_level, - repr_tensor(proto.lod_tensor))) + reprs="level=%d, %s" % (proto.type.tensor_array.lod_level, + repr_tensor(proto.type.lod_tensor.tensor))) type_handlers = [ diff --git a/python/paddle/fluid/distribute_transpiler.py b/python/paddle/fluid/distribute_transpiler.py index bb2ce4d45d5da6b2fbd097a94479f3696271e5ec..62147d325b699a62bd39cfbaca44874b7fc19a0f 100644 --- a/python/paddle/fluid/distribute_transpiler.py +++ b/python/paddle/fluid/distribute_transpiler.py @@ -20,6 +20,7 @@ from layer_helper import LayerHelper from distributed_spliter import * import math from . import core +import debuger class VarBlock: @@ -250,6 +251,8 @@ class DistributeTranspiler: def get_trainer_program(self): # remove optimize ops and add a send op to main_program self.program.global_block().delete_ops(self.optimize_ops) + # FIXME(typhoonzero): serialize once will fix error occurs when clone. + self.program.__str__() return self.program def get_pserver_program(self, endpoint): @@ -287,6 +290,7 @@ class DistributeTranspiler: dtype=v.dtype, shape=v.shape) recv_inputs.append(var) + # step3 optimize_block = pserver_program.create_block(0) # step 4 @@ -305,14 +309,57 @@ class DistributeTranspiler: # Iterate through the ops, and if an op and the optimize ops # which located on current pserver are in one set, then # append it into the sub program. - for _, op in enumerate(self.optimize_ops): - for _, opt_op in enumerate(opt_op_on_pserver): - if ufind.is_connected(op, opt_op): - if self._is_opt_op(op): - self._append_pserver_ops(optimize_block, op, endpoint) - else: - self._append_pserver_non_opt_ops(optimize_block, op) - break + + # We try to put optimization program run parallelly, assume + # optimization program always looks like: + # + # prevop -> prevop -> opt op -> following op -> following op; -> + # prevop -> prevop -> opt op -> following op -> following op; -> + # global op -> global op + # + # we put operators that can run parallelly to many program blocks. + # in above example, we seperate ops by the ";". Global ops must run + # after all the optimize ops finished. + + global_ops = [] + # HACK: optimization global ops only used to scale beta1 and beta2 + # replace it with dependency engine. 
+ for op in self.optimize_ops: + if op.type == "scale": + for in_name in op.input_arg_names: + if in_name.startswith("beta1_pow_acc") or\ + in_name.startswith("beta2_pow_acc"): + global_ops.append(op) + + def __append_optimize_op__(op, block): + if self._is_opt_op(op): + self._append_pserver_ops(block, op, endpoint, + default_main_program()) + else: + self._append_pserver_non_opt_ops(block, op) + + # append op to the current block + per_opt_block = optimize_block + for _, opt_op in enumerate(opt_op_on_pserver): + for _, op in enumerate(self.optimize_ops): + # optimizer is connected to itself + if ufind.is_connected(op, opt_op) and \ + op not in global_ops: + __append_optimize_op__(op, per_opt_block) + per_opt_block = pserver_program.create_block(0) + + # append global ops + for glb_op in global_ops: + __append_optimize_op__(glb_op, per_opt_block) + + # NOT USED: single block version: + # + # for _, op in enumerate(self.optimize_ops): + # for _, opt_op in enumerate(opt_op_on_pserver): + # if ufind.is_connected(op, opt_op): + # __append_optimize_op__(glb_op, optimize_block) + # break + # step5 append the listen_and_serv op pserver_program.global_block().append_op( type="listen_and_serv", @@ -518,9 +565,12 @@ class DistributeTranspiler: orig_var_name = "" if suff_idx >= 0: orig_var_name = varname[:suff_idx] + else: + orig_var_name = varname return orig_var_name - def _append_pserver_ops(self, optimize_block, opt_op, endpoint): + def _append_pserver_ops(self, optimize_block, opt_op, endpoint, + origin_program): program = optimize_block.program pserver_block = program.global_block() new_inputs = dict() @@ -531,7 +581,8 @@ class DistributeTranspiler: grad_block = None for g in self.param_grad_ep_mapping[endpoint]["grads"]: if same_or_split_var( - self._orig_varname(g.name), opt_op.input(key)[0]): + self._orig_varname(g.name), + self._orig_varname(opt_op.input(key)[0])): grad_block = g break if not grad_block: @@ -576,7 +627,17 @@ class DistributeTranspiler: elif key == "LearningRate": # leraning rate variable has already be created by non-optimize op, # don't create it once again. - new_inputs[key] = pserver_block.vars[opt_op.input(key)[0]] + lr_varname = opt_op.input(key)[0] + if pserver_block.vars.has_key(lr_varname): + new_inputs[key] = pserver_block.vars[opt_op.input(key)[0]] + else: + origin_var = origin_program.global_block().vars[lr_varname] + tmpvar = pserver_block.create_var( + name=origin_var.name, + persistable=origin_var.persistable, + dtype=origin_var.dtype, + shape=origin_var.shape) + new_inputs[key] = tmpvar for key in opt_op.input_names: new_shape = None @@ -646,10 +707,22 @@ class DistributeTranspiler: # If one op's input is another op's output or # one op's output is another op's input, we say # the two operator is connected. - op1_input_names = op1.desc.input_arg_names() + def _append_inname_remove_beta(varname_list): + op_input_names = [] + for in_name in varname_list: + # HACK: remove beta1 and beta2 to avoid let all + # ops connected. 
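+                # (For example, under Adam every optimize op consumes the
+                # shared beta pow accumulators, so keeping these names would
+                # make the connectivity check report every pair of optimize
+                # ops as connected and defeat the block splitting above.)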
+ if in_name.startswith("beta2_pow_acc") or \ + in_name.startswith("beta1_pow_acc"): + continue + else: + op_input_names.append(in_name) + return op_input_names + + op1_input_names = _append_inname_remove_beta(op1.desc.input_arg_names()) op1_output_names = op1.desc.output_arg_names() - op2_input_names = op2.desc.input_arg_names() + op2_input_names = _append_inname_remove_beta(op2.desc.input_arg_names()) op2_output_names = op2.desc.output_arg_names() if set(op1_output_names) & set(op2_input_names) or \ @@ -680,7 +753,7 @@ class DistributeTranspiler: param_names = [ p.name for p in self.param_grad_ep_mapping[endpoint]["params"] ] - if op.input("Param") in param_names: + if op.input("Param")[0] in param_names: return True else: for n in param_names: diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py index 4490f2bf153f672464ec8bca2a44109c9fe0dd04..2612fb1ae41986ae0d5c6e942cc3accebcb00e19 100644 --- a/python/paddle/fluid/executor.py +++ b/python/paddle/fluid/executor.py @@ -235,6 +235,77 @@ class Executor(object): tensor.set_lod(lod) return tensor + def _get_program_cache(self, program_cache_key): + return self.program_caches.get(program_cache_key, None) + + def _add_program_cache(self, program_cache_key, program): + self.program_caches[program_cache_key] = program + + def _add_feed_fetch_ops(self, program, feed, fetch_list, feed_var_name, + fetch_var_name): + tmp_program = program.clone() + + global_block = tmp_program.global_block() + + if feed_var_name in global_block.vars: + feed_var = global_block.var(feed_var_name) + else: + feed_var = global_block.create_var( + name=feed_var_name, + type=core.VarDesc.VarType.FEED_MINIBATCH, + persistable=True) + + if fetch_var_name in global_block.vars: + fetch_var = global_block.var(fetch_var_name) + else: + fetch_var = global_block.create_var( + name=fetch_var_name, + type=core.VarDesc.VarType.FETCH_LIST, + persistable=True) + + # prepend feed operators + if not has_feed_operators(global_block, feed, feed_var_name): + for i, name in enumerate(feed): + out = global_block.var(name) + global_block.prepend_op( + type='feed', + inputs={'X': [feed_var]}, + outputs={'Out': [out]}, + attrs={'col': i}) + + # append fetch_operators + if not has_fetch_operators(global_block, fetch_list, fetch_var_name): + for i, var in enumerate(fetch_list): + assert isinstance(var, Variable) or isinstance(var, str), ( + "Wrong type for fetch_list[%s]: %s" % (i, type(var))) + global_block.append_op( + type='fetch', + inputs={'X': [var]}, + outputs={'Out': [fetch_var]}, + attrs={'col': i}) + + return tmp_program + + def _feed_data(self, program, feed, feed_var_name, scope): + # feed var to framework + for op in program.global_block().ops: + if op.desc.type() == 'feed': + feed_target_name = op.desc.output('Out')[0] + cur_feed = feed[feed_target_name] + if not isinstance(cur_feed, core.LoDTensor): + cur_feed = self.aslodtensor(cur_feed) + idx = op.desc.attr('col') + core.set_feed_variable(scope, cur_feed, feed_var_name, idx) + else: + break + + def _fetch_data(self, fetch_list, fetch_var_name, scope): + outs = [ + core.get_fetch_variable(scope, fetch_var_name, i) + for i in xrange(len(fetch_list)) + ] + return outs + def run(self, program=None, feed=None, @@ -268,7 +339,6 @@ class Executor(object): raise TypeError("feed should be a map") if fetch_list is None: fetch_list = [] - if program is None: program = default_main_program() @@ -278,79 +348,30 @@ class Executor(object): if scope is None: scope = global_scope() - program_cache = None - 
program_cache_key = get_program_cache_key(feed, fetch_list) - + cache_key = get_program_cache_key(feed, fetch_list) if use_program_cache: - # find program cache by cache_key - program_cache = self.program_caches.get(program_cache_key, None) - # TODO(qiao): Should check program_cache and program are exactly the same. + cached_program = self._get_program_cache(cache_key) + if cached_program is None: + cached_program = self._add_feed_fetch_ops( + program=program, + feed=feed, + fetch_list=fetch_list, + feed_var_name=feed_var_name, + fetch_var_name=fetch_var_name) + self._add_program_cache(cache_key, cached_program) + program = cached_program else: - self.program_caches.pop(program_cache_key, None) - - if program_cache is None: - program_cache = program.clone() - - if use_program_cache: - self.program_caches[program_cache_key] = program_cache - - global_block = program_cache.global_block() - - if feed_var_name in global_block.vars: - feed_var = global_block.var(feed_var_name) - else: - feed_var = global_block.create_var( - name=feed_var_name, - type=core.VarDesc.VarType.FEED_MINIBATCH, - persistable=True) - - if fetch_var_name in global_block.vars: - fetch_var = global_block.var(fetch_var_name) - else: - fetch_var = global_block.create_var( - name=fetch_var_name, - type=core.VarDesc.VarType.FETCH_LIST, - persistable=True) - - # prepend feed operators - if not has_feed_operators(global_block, feed, feed_var_name): - for i, name in enumerate(feed): - out = global_block.var(name) - global_block.prepend_op( - type='feed', - inputs={'X': [feed_var]}, - outputs={'Out': [out]}, - attrs={'col': i}) - - # append fetch_operators - if not has_fetch_operators(global_block, fetch_list, - fetch_var_name): - for i, var in enumerate(fetch_list): - assert isinstance(var, Variable) or isinstance(var, str), ( - "Wrong type for fetch_list[%s]: %s" % (i, type(var))) - global_block.append_op( - type='fetch', - inputs={'X': [var]}, - outputs={'Out': [fetch_var]}, - attrs={'col': i}) - - # feed var to framework - for op in program_cache.global_block().ops: - if op.desc.type() == 'feed': - feed_target_name = op.desc.output('Out')[0] - cur_feed = feed[feed_target_name] - if not isinstance(cur_feed, core.LoDTensor): - cur_feed = self.aslodtensor(cur_feed) - idx = op.desc.attr('col') - core.set_feed_variable(scope, cur_feed, feed_var_name, idx) - else: - break - - self.executor.run(program_cache.desc, scope, 0, True, True) - outs = [ - core.get_fetch_variable(scope, fetch_var_name, i) - for i in xrange(len(fetch_list)) - ] + self.program_caches.pop(cache_key, None) + program = self._add_feed_fetch_ops( + program=program, + feed=feed, + fetch_list=fetch_list, + feed_var_name=feed_var_name, + fetch_var_name=fetch_var_name) + + self._feed_data(program, feed, feed_var_name, scope) + self.executor.run(program.desc, scope, 0, True, True) + outs = self._fetch_data(fetch_list, fetch_var_name, scope) if return_numpy: outs = as_numpy(outs) return outs diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index d14d6349b1bcf598e25bbeb9913d2d0da71a5054..3e78788f470556d2196b5104f69a0a3285543ec4 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -487,7 +487,7 @@ class Operator(object): 'rnn_memory_helper_grad', 'conditional_block', 'while', 'send', 'recv', 'listen_and_serv', 'parallel_do', 'save_combine', 'load_combine', 'ncclInit', 'channel_create', 'channel_close', - 'channel_send', 'channel_recv' + 'channel_send', 'channel_recv', 'select' } if type not in 
no_kernel_op_set: self.desc.infer_var_type(self.block.desc) @@ -918,6 +918,24 @@ class Block(object): name=v.name) self.vars[new_p.name] = new_p + def clone_variable(self, var): + """ + Clone a variable into current block. + Args: + var: the variable to be cloned. + + Returns: + The new variable cloned from 'var' in current block. + """ + assert isinstance(var, Variable) + return self.create_var( + name=var.name, + shape=var.shape, + dtype=var.dtype, + type=var.type, + lod_level=var.lod_level, + persistable=True) + class Program(object): def __init__(self): @@ -960,14 +978,14 @@ class Program(object): """Clone the Program object Set for_test to False when we want to clone the program for training. - Set for_test to True when we want to clone the program for testing. + Set for_test to True when we want to clone the program for testing. Args: for_test(bool): Some operators, such as batch_norm and drop_out ops, behave differently in training and testing. If for_test is True, the is_test attributes in these operators will be set to True for - testing purposes, otherwise, they remain unchanged. - + testing purposes, otherwise, they remain unchanged. + Returns(Program): The cloned Program object. """ diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py index 35aa80a2ae9a6289665b581275fb86c3931fd7a8..1c0f1f6eb415b1c05c1052c1f52743a19c49f017 100644 --- a/python/paddle/fluid/io.py +++ b/python/paddle/fluid/io.py @@ -47,7 +47,7 @@ def is_parameter(var): def is_persistable(var): if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \ - var.desc.type() == core.VarDesc.VarType.FETCH_LIST: + var.desc.type() == core.VarDesc.VarType.FETCH_LIST: return False return var.persistable diff --git a/python/paddle/fluid/layer_helper.py b/python/paddle/fluid/layer_helper.py index da7e74c901e1f5be709c5f9d73f048bfda0c5549..58b668227168c5c5e080f3928035ad98303bbae9 100644 --- a/python/paddle/fluid/layer_helper.py +++ b/python/paddle/fluid/layer_helper.py @@ -399,6 +399,9 @@ class LayerHelper(object): if isinstance(act, basestring): act = {'type': act} tmp = self.create_tmp_variable(dtype=input_var.dtype) + + if 'use_mkldnn' in self.kwargs: + act['use_mkldnn'] = self.kwargs.get('use_mkldnn') act_type = act.pop('type') self.append_op( type=act_type, diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py index ea189749bc6cc1e37c1efc6fea424143b887cecd..cd519e1ee082d27ccadc6247c149701fac31e812 100644 --- a/python/paddle/fluid/layers/detection.py +++ b/python/paddle/fluid/layers/detection.py @@ -129,13 +129,11 @@ def detection_output(loc, prior_box_var=prior_box_var, target_box=loc, code_type='decode_center_size') - old_shape = scores.shape scores = ops.reshape(x=scores, shape=(-1, old_shape[-1])) - scores = ops.softmax(x=scores) + scores = nn.softmax(input=scores) scores = ops.reshape(x=scores, shape=old_shape) scores = nn.transpose(scores, perm=[0, 2, 1]) - nmsed_outs = helper.create_tmp_variable(dtype=decoded_box.dtype) helper.append_op( type="multiclass_nms", @@ -475,6 +473,7 @@ def ssd_loss(location, # 2. Compute confidence for mining hard examples # 2.1. Get the target label based on matched indices gt_label = ops.reshape(x=gt_label, shape=gt_label.shape + (1, )) + gt_label.stop_gradient = True target_label, _ = target_assign( gt_label, matched_indices, mismatch_value=background_label) # 2.2. Compute confidence loss. 
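# (Note on the stop_gradient flags added in this hunk and the next:
# gt_label, target_label and conf_loss act as labels and mining scores
# rather than differentiable model outputs, so the recurring pattern is
#
#     var = ...  # some label-like tensor
#     var.stop_gradient = True  # keep backward out of target assembly
#
# which the patch also applies to box/var in multi_box_head below.)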
@@ -482,10 +481,12 @@ def ssd_loss(location, confidence = __reshape_to_2d(confidence) target_label = tensor.cast(x=target_label, dtype='int64') target_label = __reshape_to_2d(target_label) + target_label.stop_gradient = True conf_loss = nn.softmax_with_cross_entropy(confidence, target_label) # 3. Mining hard examples conf_loss = ops.reshape(x=conf_loss, shape=(num, num_prior)) + conf_loss.stop_gradient = True neg_indices = helper.create_tmp_variable(dtype='int32') dtype = matched_indices.dtype updated_matched_indices = helper.create_tmp_variable(dtype=dtype) @@ -695,6 +696,8 @@ def multi_box_head(inputs, outputs={"Boxes": box, "Variances": var}, attrs=attrs, ) + box.stop_gradient = True + var.stop_gradient = True return box, var def _reshape_with_axis_(input, axis=1): diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index af3ae54248a744e7e2fed8190aeeb0eb481cb315..bc5e291ad811315ddc9d101853d69c7f5ab5082d 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -13,11 +13,17 @@ # limitations under the License. from .. import core -from ..layer_helper import LayerHelper +from ..framework import convert_np_dtype_to_dtype_, default_main_program, default_startup_program +from ..unique_name import generate as unique_name from control_flow import BlockGuard from ..layer_helper import LayerHelper +from ..executor import global_scope -__all__ = ['data', 'BlockGuardServ', 'ListenAndServ', 'Send'] +__all__ = [ + 'data', 'BlockGuardServ', 'ListenAndServ', 'Send', 'open_recordio_file', + 'open_files', 'read_file', 'create_shuffle_reader', + 'create_double_buffer_reader', 'create_multi_pass_reader' +] def data(name, @@ -224,3 +230,136 @@ def Recv(endpoints, get_vars): outputs={"Out": get_vars}, attrs={"endpoints": endpoints, "epmap": epmap}) + + +def monkey_patch_reader_methods(reader): + def __get_reader__(): + scope = global_scope() + var = scope.find_var(reader.name) + return var.get_reader() + + def eof(): + return not __get_reader__().has_next() + + def reset(): + return __get_reader__().reset() + + reader.eof = eof + reader.reset = reset + reader.stop_gradient = True + reader.persistable = True + return reader + + +def _copy_reader_var_(block, var): + new_var = block.create_var(name=var.name, type=core.VarDesc.VarType.READER) + new_var.desc.set_shapes(var.desc.shapes()) + new_var.desc.set_dtypes(var.desc.dtypes()) + new_var.persistable = True + return monkey_patch_reader_methods(new_var) + + +def open_recordio_file(filename, shapes, lod_levels, dtypes): + dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes] + shape_concat = [] + ranks = [] + + for shape in shapes: + shape_concat.extend(shape) + ranks.append(len(shape)) + + var_name = unique_name('open_recordio_file') + + startup_blk = default_startup_program().current_block() + startup_var = startup_blk.create_var(name=var_name) + startup_blk.append_op( + type='create_recordio_file_reader', + outputs={'Out': [startup_var]}, + attrs={ + 'shape_concat': shape_concat, + 'lod_levels': lod_levels, + 'filename': filename, + 'ranks': ranks + }) + + startup_var.desc.set_dtypes(dtypes) + startup_var.persistable = True + return _copy_reader_var_(default_main_program().current_block(), + startup_var) + + +def open_files(filenames, thread_num, shapes, lod_levels, dtypes): + dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes] + shape_concat = [] + ranks = [] + + for shape in shapes: + shape_concat.extend(shape) + ranks.append(len(shape)) + + var_name = unique_name('multiple_reader') + + 
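+    # (A usage sketch for the reader API in this file; the file name and
+    # shapes are illustrative only, and read_file is defined at the end of
+    # this file:
+    #
+    #     reader = fluid.layers.open_recordio_file(
+    #         filename='./data.recordio',
+    #         shapes=[[-1, 784], [-1, 1]],
+    #         lod_levels=[0, 0],
+    #         dtypes=['float32', 'int64'])
+    #     img, label = fluid.layers.read_file(reader)
+    # )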
startup_blk = default_startup_program().current_block() + startup_var = startup_blk.create_var(name=var_name) + startup_blk.append_op( + type='open_files', + outputs={'Out': [startup_var]}, + attrs={ + 'shape_concat': shape_concat, + 'lod_levels': lod_levels, + 'ranks': ranks, + 'file_names': filenames, + 'thread_num': thread_num + }) + + startup_var.desc.set_dtypes(dtypes) + startup_var.persistable = True + return _copy_reader_var_(default_main_program().current_block(), + startup_var) + + +def __create_decorated_reader__(op_type, reader, attrs): + var_name = unique_name(op_type) + startup_blk = default_startup_program().current_block() + startup_var = startup_blk.create_var(name=var_name) + startup_blk.append_op( + type=op_type, + inputs={'UnderlyingReader': reader}, + outputs={'Out': [startup_var]}, + attrs=attrs) + startup_var.persistable = True + return _copy_reader_var_(default_main_program().current_block(), + startup_var) + + +def create_shuffle_reader(reader, buffer_size): + return __create_decorated_reader__('create_shuffle_reader', reader, + {'buffer_size': int(buffer_size)}) + + +def create_double_buffer_reader(reader, place=None): + attrs = dict() + if place is not None: + attrs['place'] = str(place).upper() + return __create_decorated_reader__('create_double_buffer_reader', reader, + attrs) + + +def create_multi_pass_reader(reader, pass_num): + return __create_decorated_reader__('create_multi_pass_reader', reader, + {'pass_num': int(pass_num)}) + + +def read_file(file_obj): + helper = LayerHelper('read_file') + out = [ + helper.create_tmp_variable( + stop_gradient=True, dtype='float32') + for _ in range(len(file_obj.desc.shapes())) + ] + helper.append_op( + type='read', inputs={'Reader': [file_obj]}, outputs={'Out': out}) + if len(out) == 1: + return out[0] + else: + return out diff --git a/python/paddle/fluid/layers/layer_function_generator.py b/python/paddle/fluid/layers/layer_function_generator.py index bd79022a0c39cf18bd05d49ac62986d342a4ae06..35b01a79914b3427836d4abd51aa2e2eb471d517 100644 --- a/python/paddle/fluid/layers/layer_function_generator.py +++ b/python/paddle/fluid/layers/layer_function_generator.py @@ -16,10 +16,7 @@ import cStringIO import functools import warnings -from .. import proto - -framework_pb2 = proto.framework_pb2 - +from ..proto import framework_pb2 from ..framework import OpProtoHolder, Variable from ..layer_helper import LayerHelper diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 10b0405f47097fa3d83690e519ea878e082f68b9..679de6ce2aa67abe1322702fcb371eded0130698 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -39,6 +39,8 @@ __all__ = [ 'sequence_conv', 'conv2d', 'sequence_pool', + 'sequence_softmax', + 'softmax', 'pool2d', 'batch_norm', 'beam_search_decode', @@ -49,6 +51,7 @@ __all__ = [ 'reduce_mean', 'reduce_max', 'reduce_min', + 'reduce_prod', 'sequence_first_step', 'sequence_last_step', 'dropout', @@ -70,6 +73,7 @@ __all__ = [ 'smooth_l1', 'one_hot', 'autoincreased_step_counter', + 'lod_reset', ] @@ -78,19 +82,19 @@ def fc(input, num_flatten_dims=1, param_attr=None, bias_attr=None, + use_mkldnn=False, act=None, name=None): """ **Fully Connected Layer** The fully connected layer can take multiple tensors as its inputs. It - creates a variable (one for each input tensor) called weights for each - input tensor, which represents a fully connected weight matrix from - each input unit to each output unit. 
The fully connected layer
-    multiplies each input tensor with its coresponding weight to produce
-    an output Tensor. If multiple input tensors are given, the results of
-    multiple multiplications will be sumed up. If bias_attr is not None,
-    a biases variable will be created and added to the output. Finally,
+    creates a variable called weights for each input tensor, which represents
+    a fully connected weight matrix from each input unit to each output unit.
+    The fully connected layer multiplies each input tensor with its corresponding
+    weight to produce an output Tensor. If multiple input tensors are given,
+    the results of multiple multiplications will be summed up. If bias_attr is
+    not None, a bias variable will be created and added to the output. Finally,
     if activation is not None, it will be applied to the output as well.
 
     This process can be formulated as follows:
@@ -109,44 +113,27 @@ def fc(input,
 
         * :math:`Out`: The output tensor.
 
     Args:
-        input(Variable|list): The input tensor(s) to the fully connected layer.
-        size(int): The number of output units in the fully connected layer.
-        num_flatten_dims(int): The fc layer can accept an input tensor with more
-                               than two dimensions. If this happens, the
-                               multidimensional tensor will first be flattened
-                               into a 2-dimensional matrix. The parameter
-                               `num_flatten_dims` determines how the input tensor
-                               is flattened: the first `num_flatten_dims`
-                               (inclusive, index starts from 1) dimensions will
-                               be flatten to form the first dimension of the
-                               final matrix (height of the matrix), and the rest
-                               `rank(X) - num_flatten_dims` dimensions are
-                               flattened to form the second dimension of the
-                               final matrix (width of the matrix). For example,
-                               suppose `X` is a 6-dimensional tensor with a shape
-                               [2, 3, 4, 5, 6], and `num_flatten_dims` = 3. Then,
-                               the flattened matrix will have a shape
-                               [2 x 3 x 4, 5 x 6] = [24, 30]. By default,
-                               `num_flatten_dims` is set to 1.
-        param_attr(ParamAttr|list): The parameter attribute for learnable
-                                    parameters/weights of the fully connected
-                                    layer.
-        param_initializer(ParamAttr|list): The initializer used for the
-                                           weight/parameter. If set None,
-                                           XavierInitializer() will be used.
-        bias_attr(ParamAttr|list): The parameter attribute for the bias parameter
-                                   for this layer. If set None, no bias will be
-                                   added to the output units.
-        bias_initializer(ParamAttr|list): The initializer used for the bias.
-                                          If set None, then ConstantInitializer()
-                                          will be used.
-        act(str): Activation to be applied to the output of the fully connected
-                  layer.
-        name(str): Name/alias of the fully connected layer.
-
+        input (Variable|list of Variable): The input tensor(s) of this layer, and the dimension of
+            the input tensor(s) is at least 2.
+        size(int): The number of output units in this layer.
+        num_flatten_dims (int, default 1): The fc layer can accept an input tensor with more than
+            two dimensions. If this happens, the multidimensional tensor will first be flattened
+            into a 2-dimensional matrix. The parameter `num_flatten_dims` determines how the input
+            tensor is flattened: the first `num_flatten_dims` (inclusive, index starts from 1)
+            dimensions will be flattened to form the first dimension of the final matrix (height of
+            the matrix), and the rest `rank(X) - num_flatten_dims` dimensions are flattened to
+            form the second dimension of the final matrix (width of the matrix). For example, suppose
+            `X` is a 6-dimensional tensor with a shape [2, 3, 4, 5, 6], and `num_flatten_dims` = 3.
+ Then, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. + param_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for learnable + parameters/weights of this layer. + bias_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for the bias + of this layer. If it is set to None, no bias will be added to the output units. + act (str, default None): Activation to be applied to the output of this layer. + name (str, default None): The name of this layer. Returns: - Variable: The output tensor variable. + A tensor variable storing the transformation result. Raises: ValueError: If rank of the input tensor is less than 2. @@ -177,8 +164,11 @@ def fc(input, inputs={"X": input_var, "Y": w}, outputs={"Out": tmp}, - attrs={"x_num_col_dims": num_flatten_dims, - "y_num_col_dims": 1}) + attrs={ + "x_num_col_dims": num_flatten_dims, + "y_num_col_dims": 1, + 'use_mkldnn': use_mkldnn + }) mul_results.append(tmp) # sum @@ -1102,17 +1092,43 @@ def sequence_conv(input, return helper.append_activation(pre_act) +def sequence_softmax(input, param_attr=None, bias_attr=None, use_cudnn=True): + helper = LayerHelper('sequence_softmax', **locals()) + dtype = helper.input_dtype() + softmax_out = helper.create_tmp_variable(dtype) + helper.append_op( + type="sequence_softmax", + inputs={"X": input}, + outputs={"Out": softmax_out}, + attrs={"use_cudnn": use_cudnn}) + return softmax_out + + +def softmax(input, param_attr=None, bias_attr=None, use_cudnn=True): + helper = LayerHelper('softmax', **locals()) + dtype = helper.input_dtype() + softmax_out = helper.create_tmp_variable(dtype) + helper.append_op( + type="softmax", + inputs={"X": input}, + outputs={"Out": softmax_out}, + attrs={"use_cudnn": use_cudnn}) + return softmax_out + + def conv2d(input, num_filters, filter_size, stride=1, padding=0, + dilation=1, groups=None, param_attr=None, bias_attr=None, use_cudnn=True, use_mkldnn=False, - act=None): + act=None, + name=None): """ **Convlution2D Layer** @@ -1173,6 +1189,9 @@ def conv2d(input, padding(int|tuple): The padding size. If padding is a tuple, it must contain two integers, (padding_H, padding_W). Otherwise, the padding_H = padding_W = padding. Default: padding = 0. + dilation(int|tuple): The dilation size. If dilation is a tuple, it must + contain two integers, (dilation_H, dilation_W). Otherwise, the + dilation_H = dilation_W = dilation. Default: dilation = 1. groups(int): The groups number of the Conv2d Layer. According to grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2, the first half of the filters is only connected to the first half @@ -1183,6 +1202,8 @@ def conv2d(input, use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn library is installed. Default: True act(str): Activation type. Default: None + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. 
Returns: Variable: The tensor variable storing the convolution and \ @@ -1223,6 +1244,7 @@ def conv2d(input, filter_size = utils.convert_to_list(filter_size, 2, 'filter_size') stride = utils.convert_to_list(stride, 2, 'stride') padding = utils.convert_to_list(padding, 2, 'padding') + dilation = utils.convert_to_list(dilation, 2, 'dilation') if not isinstance(use_cudnn, bool): raise ValueError("use_cudnn should be True or False") @@ -1252,6 +1274,7 @@ def conv2d(input, attrs={ 'strides': stride, 'paddings': padding, + 'dilations': dilation, 'groups': groups, 'use_cudnn': use_cudnn, 'use_mkldnn': use_mkldnn @@ -1660,7 +1683,9 @@ def conv2d_transpose(input, stride=1, dilation=1, param_attr=None, + bias_attr=None, use_cudnn=True, + act=None, name=None): """ **Convlution2D transpose layer** @@ -1729,8 +1754,10 @@ def conv2d_transpose(input, dilation_H = dilation_W = dilation. Default: dilation = 1. param_attr(ParamAttr): The parameters to the Conv2d_transpose Layer. Default: None + bias_attr(ParamAttr): Bias parameter for the Conv2d layer. Default: None use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn library is installed. Default: True + act(str): Activation type. Default: None name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. @@ -1783,12 +1810,12 @@ def conv2d_transpose(input, img_filter = helper.create_parameter( dtype=input.dtype, shape=filter_shape, attr=helper.param_attr) - out = helper.create_tmp_variable(dtype=input.dtype) + pre_bias = helper.create_tmp_variable(dtype=input.dtype) helper.append_op( type='conv2d_transpose', inputs={'Input': [input], 'Filter': [img_filter]}, - outputs={'Output': out}, + outputs={'Output': pre_bias}, attrs={ 'strides': stride, 'paddings': padding, @@ -1796,55 +1823,57 @@ def conv2d_transpose(input, 'use_cudnn': use_cudnn }) + pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2) + out = helper.append_activation(pre_act) return out -def sequence_expand(x, y, name=None): +def sequence_expand(x, y, ref_level=-1, name=None): """Sequence Expand Layer. This layer will expand the input variable **x** - according to LoD information of **y**. And the following examples will - explain how sequence_expand works: + according to specified level lod of **y**. Please note that lod level of + **x** is at most 1 and rank of **x** is at least 2. When rank of **x** + is greater than 2, then it would be viewed as a 2-D tensor. + Following examples will explain how sequence_expand works: .. 
code-block:: text * Case 1 x is a LoDTensor: - x.lod = [[0, 2, 3], - [0, 1, 3, 4]] - x.data = [a, b, c, d] + x.lod = [[0, 2, 4]] + x.data = [[a], [b], [c], [d]] x.dims = [4, 1] y is a LoDTensor: y.lod = [[0, 2, 4], [0, 3, 6, 7, 8]] - with condition len(y.lod[-1]) - 1 == x.dims[0] + ref_level: 0 - then output is a 2-level LoDTensor: - out.lod = [[0, 2, 4], - [0, 3, 6, 7, 8]] - out.data = [a, a, a, b, b, b, c, d] + then output is a 1-level LoDTensor: + out.lod = [[0, 2, 4, 6, 8]] + out.data = [[a], [b], [a], [b], [c], [d], [c], [d]] out.dims = [8, 1] * Case 2 x is a Tensor: - x.data = [a, b, c] + x.data = [[a], [b], [c]] x.dims = [3, 1] y is a LoDTensor: - y.lod = [[0, 2, 3, 6]] - - with condition len(y.lod[-1]) - 1 == x.dims[0] + y.lod = [[0, 2, 2, 5]] - then output is a 1-level LoDTensor: - out.lod = [[0, 2, 3, 6]] - out.data = [a, a, b, c, c, c] - out.dims = [6, 1] + ref_level: -1 + then output is a Tensor: + out.data = [[a], [a], [c], [c], [c]] + out.dims = [5, 1] Args: x (Variable): The input variable which is a Tensor or LoDTensor. y (Variable): The input variable which is a LoDTensor. + ref_level (int): Lod level of `y` to be referred by `x`. If set to -1, + refer the last level of lod. name(str|None): A name for this layer(optional). If set None, the layer - will be named automatically. + will be named automatically. Returns: Variable: The expanded variable which is a LoDTensor. @@ -1855,14 +1884,17 @@ def sequence_expand(x, y, name=None): x = fluid.layers.data(name='x', shape=[10], dtype='float32') y = fluid.layers.data(name='y', shape=[10, 20], dtype='float32', lod_level=1) - out = layers.sequence_expand(x=x, y=y) + out = layers.sequence_expand(x=x, y=y, ref_level=0) """ helper = LayerHelper('sequence_expand', input=x, **locals()) dtype = helper.input_dtype() tmp = helper.create_tmp_variable(dtype) helper.append_op( - type='sequence_expand', inputs={'X': x, - 'Y': y}, outputs={'Out': tmp}) + type='sequence_expand', + inputs={'X': x, + 'Y': y}, + outputs={'Out': tmp}, + attrs={'ref_level': ref_level}) return tmp @@ -2202,6 +2234,53 @@ def reduce_min(input, dim=None, keep_dim=False, name=None): return out +def reduce_prod(input, dim=None, keep_dim=False, name=None): + """ + Computes the product of tensor elements over the given dimension. + + Args: + input (Variable): The input variable which is a Tensor or LoDTensor. + dim (int|None): The dimension along which the product is performed. If + :attr:`None`, multipy all elements of :attr:`input` and return a + Tensor variable with a single element, otherwise must be in the + range :math:`[-rank(input), rank(input))`. If :math:`dim < 0`, + the dimension to reduce is :math:`rank + dim`. + keep_dim (bool|False): Whether to reserve the reduced dimension in the + output Tensor. The result tensor will have one fewer dimension + than the :attr:`input` unless :attr:`keep_dim` is true. + name(str|None): A name for this layer(optional). If set None, the + layer will be named automatically. + + Returns: + Variable: The reduced Tensor variable. + + Examples: + .. code-block:: python + + # x is a Tensor variable with following elements: + # [[0.2, 0.3, 0.5, 0.9] + # [0.1, 0.2, 0.6, 0.7]] + # Each example is followed by the correspending output tensor. 
+ fluid.layers.reduce_prod(x) # [0.0002268] + fluid.layers.reduce_prod(x, dim=0) # [0.02, 0.06, 0.3, 0.63] + fluid.layers.reduce_prod(x, dim=-1) # [0.027, 0.0084] + fluid.layers.reduce_prod(x, dim=1, + keep_dim=True) # [[0.027], [0.0084]] + """ + helper = LayerHelper('reduce_prod', **locals()) + out = helper.create_tmp_variable(dtype=helper.input_dtype()) + helper.append_op( + type='reduce_prod', + inputs={'X': input}, + outputs={'Out': out}, + attrs={ + 'dim': dim if dim != None else 0, + 'keep_dim': keep_dim, + 'reduce_all': True if dim == None else False + }) + return out + + def split(input, num_or_sections, dim=-1, name=None): """ Split the input tensor into multiple sub-tensors. @@ -3236,3 +3315,98 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1): counter.stop_gradient = True return counter + + +def lod_reset(x, y=None, target_lod=None): + """ + LoD Reset Operator. Set LoD of **x** to a new one specified by **y** or + **target_lod**. When **y** provided, **y.lod** would be considered as target + LoD first, otherwise **y.data** would be considered as target LoD. If **y** + is not provided, target LoD should be specified by **target_lod**. + If target LoD is specified by **Y.data** or **target_lod**, only one level + LoD is supported. + + .. code-block:: text + + * Example 1: + + Given a 1-level LoDTensor x: + x.lod = [[ 0, 2, 5 6 ]] + x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] + x.dims = [6, 1] + + target_lod: [0, 4, 6] + + then we get a 1-level LoDTensor: + out.lod = [[ 0, 4, 6 ]] + out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] + out.dims = [6, 1] + + * Example 2: + + Given a 1-level LoDTensor x: + x.lod = [[ 0, 2, 5 6 ]] + x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] + x.dims = [6, 1] + + y is a Tensor: + y.data = [[0, 2, 6]] + y.dims = [1, 3] + + then we get a 1-level LoDTensor: + out.lod = [[ 0, 2, 6 ]] + out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] + out.dims = [6, 1] + + * Example 3: + + Given a 1-level LoDTensor x: + x.lod = [[ 0, 2, 5 6 ]] + x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] + x.dims = [6, 1] + + y is a 2-level LoDTensor: + y.lod = [[0, 2, 4], [0, 2, 5, 6]] + y.data = [[1.1], [2.1], [3.1], [4.1], [5.1], [6.1]] + y.dims = [6, 1] + + then we get a 2-level LoDTensor: + out.lod = [[0, 2, 4], [0, 2, 5, 6]] + out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] + out.dims = [6, 1] + + Args: + x (Variable): Input variable which could be a Tensor or LodTensor. + y (Variable|None): If provided, output's LoD would be derived from y. + target_lod (list|tuple|None): One level LoD which should be considered + as target LoD when y not provided. + + Returns: + Variable: Output variable with LoD specified by this operator. + + Raises: + ValueError: If y and target_lod are both None. + + Examples: + .. 
code-block:: python + + x = layers.data(name='x', shape=[10]) + y = layers.data(name='y', shape=[10, 20], lod_level=2) + out = layers.lod_reset(x=x, y=y) + """ + helper = LayerHelper("lod_reset", **locals()) + out = helper.create_tmp_variable(dtype=x.dtype) + if y is not None: + helper.append_op( + type="lod_reset", inputs={'X': x, + 'Y': y}, outputs={'Out': out}) + elif target_lod is not None: + helper.append_op( + type="lod_reset", + inputs={'X': x}, + attrs={'target_lod': target_lod}, + outputs={'Out': out}) + else: + raise ValueError("y and target_lod should not be both None.") + + return out diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py index 0b88b639629ac73b16ec36aa5930c3d6a9665943..f5c6b47d243dcf4ba985cfb41fc23b44d3ed809f 100644 --- a/python/paddle/fluid/layers/ops.py +++ b/python/paddle/fluid/layers/ops.py @@ -59,8 +59,6 @@ __all__ = [ 'elementwise_pow', 'clip', 'clip_by_norm', - 'softmax', - 'sequence_softmax', 'logical_and', 'logical_or', 'logical_xor', @@ -70,6 +68,8 @@ __all__ = [ 'gaussian_random', 'gaussian_random_batch_size_like', 'cumsum', + 'scatter', + 'sum', ] + __activations__ for _OP in set(__all__): diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 421963a2f9120dae3a72142681f0a30232c11166..180575c35dc6e115e11cccf9fff9fb2d3cd7e9a6 100644 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -13,7 +13,7 @@ # limitations under the License. from collections import defaultdict - +from paddle.fluid.framework import Program import framework import layers from backward import append_backward @@ -23,8 +23,12 @@ from initializer import Constant from layer_helper import LayerHelper from regularizer import append_regularization_ops from clip import append_gradient_clip_ops, error_clip_callback +from contextlib import contextmanager -__all__ = ['SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad'] +__all__ = [ + 'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad', + 'Adadelta', 'ModelAverage' +] class Optimizer(object): @@ -119,7 +123,12 @@ class Optimizer(object): """ pass - def _add_accumulator(self, name, param, dtype=None, fill_value=0.0): + def _add_accumulator(self, + name, + param, + dtype=None, + fill_value=0.0, + shape=None): """Utility function to add an accumulator for a parameter Args: @@ -133,17 +142,19 @@ class Optimizer(object): param.name in self._accumulators[name]): raise Exception("Accumulator {} already exists for parameter {}". 
format(name, param.name)) - + if shape == None: + shape = param.shape assert isinstance(self.helper, LayerHelper) var = self.helper.create_global_variable( name=unique_name.generate(name), persistable=True, dtype=dtype or param.dtype, type=param.type, - shape=param.shape) + shape=shape) self.helper.set_variable_initializer( var, initializer=Constant(value=float(fill_value))) self._accumulators[name][param.name] = var + return var def _get_accumulator(self, name, param): """Utility function to fetch an accumulator for a parameter @@ -223,6 +234,8 @@ class Optimizer(object): params_grads = append_backward(loss, parameter_list, no_grad_set, [error_clip_callback]) + params_grads = sorted(params_grads, key=lambda x: x[0].name) + params_grads = append_gradient_clip_ops(params_grads) # Add regularization if any @@ -578,6 +591,205 @@ class DecayedAdagradOptimizer(Optimizer): return decayed_adagrad_op +class AdadeltaOptimizer(Optimizer): + """ + **Adadelta Optimizer** + Simple Adadelta optimizer with average squared grad state and + average squared update state. + The details of adadelta please refer to this + `ADADELTA: AN ADAPTIVE LEARNING RATE METHOD + `_. + + .. math:: + + E(g_t^2) &= \\rho * E(g_{t-1}^2) + (1-\\rho) * g^2 \\\\ + learning\\_rate &= sqrt( ( E(dx_{t-1}^2) + \\epsilon ) / ( \\ + E(g_t^2) + \\epsilon ) ) \\\\ + E(dx_t^2) &= \\rho * E(dx_{t-1}^2) + (1-\\rho) * (-g*learning\\_rate)^2 + + Args: + learning_rate(float): global leraning rate + rho(float): rho in equation + epsilon(float): epsilon in equation + + Examples: + .. code-block:: python + + optimizer = fluid.optimizer.Adadelta( + learning_rate=0.0003, epsilon=1.0e-6, rho=0.95) + _, params_grads = optimizer.minimize(cost) + """ + + _avg_squared_grad_acc_str = "_avg_squared_grad" + _avg_squared_update_acc_str = "_avg_squared_update" + + def __init__(self, learning_rate, epsilon=1.0e-6, rho=0.95, **kwargs): + if learning_rate is None: + raise ValueError("learning_rate is not set.") + if epsilon is None: + raise ValueError("epsilon is not set.") + if rho is None: + raise ValueError("rho is not set.") + super(AdadeltaOptimizer, self).__init__( + learning_rate=learning_rate, **kwargs) + self.type = "adadelta" + self._epsilon = epsilon + self._rho = rho + + def _create_accumulators(self, block, parameters): + if not isinstance(block, framework.Block): + raise TypeError("block is not instance of framework.Block.") + + for p in parameters: + self._add_accumulator(self._avg_squared_grad_acc_str, p) + self._add_accumulator(self._avg_squared_update_acc_str, p) + + def _append_optimize_op(self, block, param_and_grad): + if not isinstance(block, framework.Block): + raise TypeError("block is not instance of framework.Block.") + + avg_squared_grad_acc = self._get_accumulator( + self._avg_squared_grad_acc_str, param_and_grad[0]) + avg_squared_update_acc = self._get_accumulator( + self._avg_squared_update_acc_str, param_and_grad[0]) + + # Create the adadelta optimizer op + adadelta_op = block.append_op( + type=self.type, + inputs={ + "Param": param_and_grad[0], + "Grad": param_and_grad[1], + "AvgSquaredGrad": avg_squared_grad_acc, + "AvgSquaredUpdate": avg_squared_update_acc + }, + outputs={ + "ParamOut": param_and_grad[0], + "AvgSquaredGradOut": avg_squared_grad_acc, + "AvgSquaredUpdateOut": avg_squared_update_acc + }, + attrs={"epsilon": self._epsilon, + "rho": self._rho}) + + return adadelta_op + + +class RMSPropOptimizer(Optimizer): + """ + Root Mean Squared Propagation (RMSProp) is an unpublished, adaptive learning + rate method. 
The original slides proposed RMSProp: Slide 29 of + http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf . + + The original equation is as follows: + + .. math:: + + r(w, t) & = \\rho r(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2 \\\\ + + w & = w - \\frac{\\eta} {\\sqrt{r(w,t) + \\epsilon}} \\nabla Q_{i}(w) + + The first equation calculates moving average of the squared gradient for + each weight. Then dividing the gradient by :math: `sqrt{v(w,t)}`. + + In some cases, adding a momentum term :math: `\\beta` is beneficial. + In our implementation, Nesterov momentum is used: + + .. math:: + + r(w, t) & = \\rho r(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2 \\\\ + + v(w, t) & = \\beta v(w, t-1) + \\frac{\\eta} {\\sqrt{v(w,t) + + \\epsilon}} \\nabla Q_{i}(w) + + w & = w - v(w, t) + + where, :math: `\\rho` is a hyperparameter and typical values are 0.9, 0.95 + and so on. :math: `beta` is the momentum term. :math: `\\epsilon` is a + smoothing term to avoid division by zero, usually set somewhere in range + from 1e-4 to 1e-8. + + + Args: + learning_rate(float): global leraning rate. + rho(float): rho is :math: `\\rho` in equation, set 0.95 by default. + epsilon(float): :math: `\\epsilon` in equation is smoothing term to + avoid division by zero, set 1e-6 by default. + momentum(float): :math: `\\beta` in equation is the momentum term, + set 0.0 by default. + + Raises: + ValueError: If learning_rate, rho, epsilon, momentum are None. + + Examples: + .. code-block:: python + + optimizer = fluid.optimizer.RMSProp(0.0001) + _, params_grads = optimizer.minimize(cost) + """ + + _momentum_acc_str = "momentum" + _mean_square_acc_str = "mean_square" + + def __init__(self, + learning_rate, + rho=0.95, + epsilon=1.0e-6, + momentum=0.0, + **kwargs): + super(RMSPropOptimizer, self).__init__( + learning_rate=learning_rate, **kwargs) + if learning_rate is None: + raise ValueError("learning_rate is not set.") + if rho is None: + raise ValueError("rho is not set.") + if epsilon is None: + raise ValueError("epsilon is not set.") + if momentum is None: + raise ValueError("momentum is not set.") + + self.type = "rmsprop" + self._rho = rho + self._epsilon = epsilon + self._momentum = momentum + + def _create_accumulators(self, block, parameters): + if not isinstance(block, framework.Block): + raise TypeError("block is not instance of framework.Block.") + + for p in parameters: + self._add_accumulator(self._momentum_acc_str, p) + self._add_accumulator(self._mean_square_acc_str, p) + + def _append_optimize_op(self, block, param_and_grad): + if not isinstance(block, framework.Block): + raise TypeError("block is not instance of framework.Block.") + + momentum_acc = self._get_accumulator(self._momentum_acc_str, + param_and_grad[0]) + mean_square_acc = self._get_accumulator(self._mean_square_acc_str, + param_and_grad[0]) + rmsprop_op = block.append_op( + type=self.type, + inputs={ + "Param": param_and_grad[0], + "Grad": param_and_grad[1], + "Moment": momentum_acc, + "MeanSquare": mean_square_acc, + "LearningRate": self._create_param_lr(param_and_grad), + }, + outputs={ + "ParamOut": param_and_grad[0], + "MomentOut": momentum_acc, + "MeanSquareOut": mean_square_acc + }, + attrs={ + "epsilon": self._epsilon, + "decay": self._rho, + "momentum": self._momentum + }) + + return rmsprop_op + + # We short the class name, since users will use the optimizer with the package # name. 
The sample code: # @@ -592,3 +804,145 @@ Adagrad = AdagradOptimizer Adam = AdamOptimizer Adamax = AdamaxOptimizer DecayedAdagrad = DecayedAdagradOptimizer +Adadelta = AdadeltaOptimizer +RMSProp = RMSPropOptimizer + + +class ModelAverage(Optimizer): + """Accumulate the average of parameters whtin sliding window. The average + result will be saved in temporary variables which can be applied to + parameter variables of current model by calling 'apply()' method. And the + 'restore()' method is used to restored the parameter values of current model. + + The size of average window is determined by average_window_rate, + min_average_window, max_average_window and current update times. + + Args: + params_grads: A list of parameter-grad variable pairs. + average_window_rate: The rate of average window. + min_average_window: The minimum size of average window. + max_average_window: The maximum size of average window. + + Examples: + ... + optimizer = fluid.optimizer.Momentum() + _, params_grads = optimizer.minimize(cost) + model_average = fluid.optimizer.ModelAverage(params_grads, 0.15, + min_average_window=10000, + max_average_window=20000) + for pass_id in range(args.pass_num): + for data in train_reader(): + exe.run(fluid.default_main_program()...) + + with model_average.apply(exe): + for data in test_reader(): + exe.run(inference_program...) + """ + + def __init__(self, + params_grads, + average_window_rate, + min_average_window=10000, + max_average_window=10000, + **kwargs): + super(ModelAverage, self).__init__(0.0, **kwargs) + self.average_window = average_window_rate + self.min_average_window = min_average_window + self.max_average_window = max_average_window + self.params_grads = params_grads + for param, grad in self.params_grads: + if grad is not None: + self._append_average_accumulate_op(param) + + self.apply_program = Program() + block = self.apply_program.global_block() + with program_guard(main_program=self.apply_program): + for param_grad in self.params_grads: + if param_grad[1] is not None: + self._add_average_apply_op(block, param_grad) + + self.restore_program = Program() + block = self.restore_program.global_block() + with program_guard(main_program=self.restore_program): + for param_grad in self.params_grads: + if param_grad[1] is not None: + self._add_average_restore_op(block, param_grad) + + def _add_average_apply_op(self, block, param_grad): + param = block.clone_variable(param_grad[0]) + grad = block.clone_variable(param_grad[1]) + sum_1 = block.clone_variable(self._get_accumulator('sum_1', param)) + sum_2 = block.clone_variable(self._get_accumulator('sum_2', param)) + sum_3 = block.clone_variable(self._get_accumulator('sum_3', param)) + num_accumulates = block.clone_variable( + self._get_accumulator('num_accumulates', param)) + old_num_accumulates = block.clone_variable( + self._get_accumulator('old_num_accumulates', param)) + num_updates = block.clone_variable( + self._get_accumulator('num_updates', param)) + # backup param value to grad + layers.assign(input=param, output=grad) + # param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates) + tmp = layers.sum(x=[num_accumulates, old_num_accumulates]) + sum = layers.sum(x=[sum_1, sum_2, sum_3]) + tmp = layers.cast(x=tmp, dtype='float32') + sum = layers.cast(x=sum, dtype='float32') + layers.elementwise_div(x=sum, y=tmp, out=param) + + def _add_average_restore_op(self, block, param_grad): + param = block.clone_variable(param_grad[0]) + grad = block.clone_variable(param_grad[1]) + layers.assign(input=grad, 
output=param) + + def _append_average_accumulate_op(self, param): + self.helper = LayerHelper("average_accumulate") + sum_1 = self._add_accumulator('sum_1', param) + sum_2 = self._add_accumulator('sum_2', param) + sum_3 = self._add_accumulator('sum_3', param) + num_accumulates = self._add_accumulator( + 'num_accumulates', param, dtype='int64', shape=[1]) + old_num_accumulates = self._add_accumulator( + 'old_num_accumulates', param, dtype='int64', shape=[1]) + num_updates = self._add_accumulator( + 'num_updates', param, dtype='int64', shape=[1]) + + self.helper.append_op( + type='average_accumulates', + inputs={ + "param": param, + "in_sum_1": sum_1, + "in_sum_2": sum_2, + "in_sum_3": sum_3, + "in_num_accumulates": num_accumulates, + "in_old_num_accumulates": old_num_accumulates, + "in_num_updates": num_updates + }, + outputs={ + "out_sum_1": sum_1, + "out_sum_2": sum_2, + "out_sum_3": sum_3, + "out_num_accumulates": num_accumulates, + "out_old_num_accumulates": old_num_accumulates, + "out_num_updates": num_updates, + }, + attrs={ + "average_window": self.average_window, + "min_average_window": self.min_average_window, + "max_average_window": self.max_average_window, + }) + + @contextmanager + def apply(self, executor, need_restore=True): + """Apply average values to parameters of current model. + """ + executor.run(self.apply_program) + try: + yield + finally: + if need_restore: + self.restore(executor) + + def restore(self, executor): + """Restore parameter values of current model. + """ + executor.run(self.restore_program) diff --git a/python/paddle/fluid/recordio_writer.py b/python/paddle/fluid/recordio_writer.py new file mode 100644 index 0000000000000000000000000000000000000000..5accaacd5361165d30b92c71ae4fd62e23e44e07 --- /dev/null +++ b/python/paddle/fluid/recordio_writer.py @@ -0,0 +1,48 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
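+
+# (Usage sketch for the converter defined below, assuming the module is
+# importable as fluid.recordio_writer; `place`, `image`, `label` and the
+# reader are illustrative names only:
+#
+#     feeder = fluid.DataFeeder(feed_list=[image, label], place=place)
+#     fluid.recordio_writer.convert_reader_to_recordio_file(
+#         './data.recordio',
+#         reader_creator=paddle.batch(reader, batch_size=32),
+#         feeder=feeder)
+# )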
+ +import core +import contextlib + +__all__ = ['convert_reader_to_recordio_file'] + + +@contextlib.contextmanager +def create_recordio_writer(filename, + compressor=core.RecordIOWriter.Compressor.Snappy, + max_num_records=1000): + writer = core.RecordIOWriter(filename, compressor, max_num_records) + yield writer + writer.close() + + +def convert_reader_to_recordio_file( + filename, + reader_creator, + feeder, + compressor=core.RecordIOWriter.Compressor.Snappy, + max_num_records=1000, + feed_order=None): + if feed_order is None: + feed_order = feeder.feed_names + counter = 0 + with create_recordio_writer(filename, compressor, + max_num_records) as writer: + for batch in reader_creator(): + res = feeder.feed(batch) + for each in feed_order: + writer.append_tensor(res[each]) + writer.complete_append_tensor() + counter += 1 + return counter diff --git a/python/paddle/fluid/regularizer.py b/python/paddle/fluid/regularizer.py index a29f9a208ebefc75b531030c9f0de9487f2b136c..604c6f9ab36c2332223d1ba943d67113922615b3 100644 --- a/python/paddle/fluid/regularizer.py +++ b/python/paddle/fluid/regularizer.py @@ -13,6 +13,7 @@ # limitations under the License. import framework +from . import core __all__ = [ 'append_regularization_ops', @@ -43,16 +44,20 @@ def append_regularization_ops(parameters_and_grads, regularization=None): """ params_and_grads = [] for param, grad in parameters_and_grads: + # If no gradient then we don't need to do anything + if grad is None: + params_and_grads.append((param, grad)) + continue + regularization_term = None if param.regularizer is not None: # Add variable for regularization term in grad block - regularization_term = param.regularizer(param, grad.block) + regularization_term = param.regularizer(param, grad, grad.block) elif regularization is not None: - regularization_term = regularization(param, grad.block) + regularization_term = regularization(param, grad, grad.block) - # If no gradient or no regularization specified, - # then we don't need to do anything - if grad is None or regularization_term is None: + # If no regularization specified, then we don't need to do anything + if regularization_term is None: params_and_grads.append((param, grad)) continue @@ -82,7 +87,7 @@ class WeightDecayRegularizer(object): def __init__(self): pass - def __call__(self, param, block): + def __call__(self, param, grad, block): """Add corresponding weight decay operations to the network """ raise NotImplementedError() @@ -102,7 +107,7 @@ class L2DecayRegularizer(WeightDecayRegularizer): super(L2DecayRegularizer, self).__init__() self._regularization_coeff = regularization_coeff - def __call__(self, param, block): + def __call__(self, param, grad, block): """Add L2 weight decay ops to network Adds L2 weight decay ops. 
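# (How the regularizer is driven in practice; this mirrors the Adagrad
# change made to test_machine_translation.py later in this patch:
#
#     optimizer = fluid.optimizer.Adagrad(
#         learning_rate=1e-4,
#         regularization=fluid.regularizer.L2DecayRegularizer(
#             regularization_coeff=0.1))
#     optimizer.minimize(avg_cost)
# )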
@@ -117,8 +122,23 @@ class L2DecayRegularizer(WeightDecayRegularizer): """ assert isinstance(param, framework.Parameter) assert isinstance(block, framework.Block) + decay = block.create_var( dtype="float32", shape=param.shape, lod_level=param.lod_level) + + if grad.type == core.VarDesc.VarType.SELECTED_ROWS: + decay = block.create_var( + dtype="float32", + shape=param.shape, + type=core.VarDesc.VarType.SELECTED_ROWS) + block.append_op( + type='lookup_table', + inputs={'W': param, + 'Ids': grad}, + outputs={'Out': decay}, + attrs={'is_sparse': True}) + param = decay + # Append Op to calculate decay block.append_op( type='scale', @@ -141,7 +161,7 @@ class L1DecayRegularizer(WeightDecayRegularizer): super(L1DecayRegularizer, self).__init__() self._regularization_coeff = regularization_coeff - def __call__(self, param, block): + def __call__(self, param, grad, block): """Add L1 weight decay ops to network Adds L1 weight decay ops. @@ -158,6 +178,19 @@ class L1DecayRegularizer(WeightDecayRegularizer): assert isinstance(block, framework.Block) decay = block.create_var( dtype="float32", shape=param.shape, lod_level=param.lod_level) + + if grad.type == core.VarDesc.VarType.SELECTED_ROWS: + decay = block.create_var( + dtype="float32", + shape=param.shape, + type=core.VarDesc.VarType.SELECTED_ROWS) + block.append_op( + type='lookup_table', + inputs={'W': param, + 'Ids': grad}, + outputs={'Out': decay}, + attrs={'is_sparse': True}) + # Append sign op block.append_op( type='sign', inputs={"X": param}, outputs={"Out": decay}) diff --git a/python/paddle/fluid/tests/book/test_machine_translation.py b/python/paddle/fluid/tests/book/test_machine_translation.py index caa9596a100de4f9364467690db1e80ee227c3c1..3a1a0859ecfd4ac5337e2112f8b22e32d8474f22 100644 --- a/python/paddle/fluid/tests/book/test_machine_translation.py +++ b/python/paddle/fluid/tests/book/test_machine_translation.py @@ -118,12 +118,12 @@ def decoder_decode(context, is_sparse): is_sparse=is_sparse) # use rnn unit to update rnn - current_state = pd.fc(input=[pre_ids_emb, pre_state_expanded], + current_state = pd.fc(input=[pre_state_expanded, pre_ids_emb], size=decoder_size, act='tanh') - + current_state_with_lod = pd.lod_reset(x=current_state, y=pre_score) # use score to do beam search - current_score = pd.fc(input=current_state, + current_score = pd.fc(input=current_state_with_lod, size=target_dict_dim, act='softmax') topk_scores, topk_indices = pd.topk(current_score, k=50) @@ -181,7 +181,10 @@ def train_main(use_cuda, is_sparse, is_local=True): cost = pd.cross_entropy(input=rnn_out, label=label) avg_cost = pd.mean(cost) - optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4) + optimizer = fluid.optimizer.Adagrad( + learning_rate=1e-4, + regularization=fluid.regularizer.L2DecayRegularizer( + regularization_coeff=0.1)) optimize_ops, params_grads = optimizer.minimize(avg_cost) train_data = paddle.batch( diff --git a/python/paddle/fluid/tests/test_concurrency.py b/python/paddle/fluid/tests/test_concurrency.py index 9f7bf63c5e017251e87af94690ff32c47c538c6b..924895a9afac610059bac5f617c49712441339cc 100644 --- a/python/paddle/fluid/tests/test_concurrency.py +++ b/python/paddle/fluid/tests/test_concurrency.py @@ -15,9 +15,9 @@ import unittest import paddle.fluid as fluid import paddle.fluid.core as core -from paddle.fluid import framework, unique_name +from paddle.fluid import framework, unique_name, layer_helper from paddle.fluid.executor import Executor -from paddle.fluid.layers import fill_constant +from paddle.fluid.layers import 
fill_constant, assign, While, elementwise_add, Print class TestRoutineOp(unittest.TestCase): @@ -86,8 +86,7 @@ class TestRoutineOp(unittest.TestCase): self.assertEqual(leftmost_data[0][0], n + 1) def _create_one_dim_tensor(self, value): - one_dim_tensor = fill_constant( - shape=[1], dtype=core.VarDesc.VarType.INT64, value=value) + one_dim_tensor = fill_constant(shape=[1], dtype='int', value=value) one_dim_tensor.stop_gradient = True return one_dim_tensor @@ -95,6 +94,180 @@ class TestRoutineOp(unittest.TestCase): return framework.default_main_program().current_block().create_var( name=unique_name.generate(name), type=type, dtype=dtype) + def _create_persistable_tensor(self, name, type, dtype): + return framework.default_main_program().current_block().create_var( + name=unique_name.generate(name), + type=type, + dtype=dtype, + persistable=True) + + def test_select(self): + with framework.program_guard(framework.Program()): + ch1 = fluid.make_channel( + dtype=core.VarDesc.VarType.LOD_TENSOR, capacity=1) + + result1 = self._create_tensor('return_value', + core.VarDesc.VarType.LOD_TENSOR, + core.VarDesc.VarType.FP64) + + input_value = fill_constant( + shape=[1], dtype=core.VarDesc.VarType.FP64, value=10) + + with fluid.Select() as select: + with select.case(fluid.channel_send, ch1, input_value): + # Execute something. + pass + + with select.default(): + pass + + # This should not block because we are using a buffered channel. + result1, status = fluid.channel_recv(ch1, result1) + fluid.channel_close(ch1) + + cpu = core.CPUPlace() + exe = Executor(cpu) + + result = exe.run(fetch_list=[result1]) + self.assertEqual(result[0][0], 10) + + def test_fibonacci(self): + """ + Mimics Fibonacci Go example: https://tour.golang.org/concurrency/5 + """ + with framework.program_guard(framework.Program()): + quit_ch_input_var = self._create_persistable_tensor( + 'quit_ch_input', core.VarDesc.VarType.LOD_TENSOR, + core.VarDesc.VarType.INT32) + quit_ch_input = fill_constant( + shape=[1], + dtype=core.VarDesc.VarType.INT32, + value=0, + out=quit_ch_input_var) + + result = self._create_persistable_tensor( + 'result', core.VarDesc.VarType.LOD_TENSOR, + core.VarDesc.VarType.INT32) + fill_constant( + shape=[1], + dtype=core.VarDesc.VarType.INT32, + value=0, + out=result) + + x = fill_constant( + shape=[1], dtype=core.VarDesc.VarType.INT32, value=0) + y = fill_constant( + shape=[1], dtype=core.VarDesc.VarType.INT32, value=1) + + while_cond = fill_constant( + shape=[1], dtype=core.VarDesc.VarType.BOOL, value=True) + + while_false = fill_constant( + shape=[1], dtype=core.VarDesc.VarType.BOOL, value=False) + + x_tmp = fill_constant( + shape=[1], dtype=core.VarDesc.VarType.INT32, value=0) + + def fibonacci(channel, quit_channel): + while_op = While(cond=while_cond) + with while_op.block(): + result2 = fill_constant( + shape=[1], dtype=core.VarDesc.VarType.INT32, value=0) + x_to_send_tmp = fill_constant( + shape=[1], dtype=core.VarDesc.VarType.INT32, value=0) + + # TODO(abhinav): Need to perform copy when doing a channel send. 
+ # Once this is complete, we can remove these lines + assign(input=x, output=x_to_send_tmp) + + with fluid.Select() as select: + with select.case(fluid.channel_send, channel, + x_to_send_tmp): + assign(input=x, output=x_tmp) + assign(input=y, output=x) + assign(elementwise_add(x=x_tmp, y=y), output=y) + + with select.case(fluid.channel_recv, quit_channel, + result2): + # Quit + helper = layer_helper.LayerHelper('assign') + helper.append_op( + type='assign', + inputs={'X': [while_false]}, + outputs={'Out': [while_cond]}) + + ch1 = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR) + quit_ch = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR) + + with fluid.Go(): + for i in xrange(10): + fluid.channel_recv(ch1, result) + Print(result) + + fluid.channel_send(quit_ch, quit_ch_input) + + fibonacci(ch1, quit_ch) + + fluid.channel_close(ch1) + fluid.channel_close(quit_ch) + + cpu = core.CPUPlace() + exe = Executor(cpu) + + exe_result = exe.run(fetch_list=[result]) + self.assertEqual(exe_result[0][0], 34) + + def test_ping_pong(self): + """ + Mimics Ping Pong example: https://gobyexample.com/channel-directions + """ + with framework.program_guard(framework.Program()): + result = self._create_tensor('return_value', + core.VarDesc.VarType.LOD_TENSOR, + core.VarDesc.VarType.FP64) + + ping_result = self._create_tensor('ping_return_value', + core.VarDesc.VarType.LOD_TENSOR, + core.VarDesc.VarType.FP64) + + pong_result = self._create_tensor('pong_return_value', + core.VarDesc.VarType.LOD_TENSOR, + core.VarDesc.VarType.FP64) + + def ping(ch, message): + message_to_send_tmp = fill_constant( + shape=[1], dtype=core.VarDesc.VarType.FP64, value=0) + + assign(input=message, output=message_to_send_tmp) + fluid.channel_send(ch, message_to_send_tmp) + + def pong(ch1, ch2): + fluid.channel_recv(ch1, ping_result) + assign(input=ping_result, output=pong_result) + fluid.channel_send(ch2, pong_result) + + pings = fluid.make_channel( + dtype=core.VarDesc.VarType.LOD_TENSOR, capacity=1) + pongs = fluid.make_channel( + dtype=core.VarDesc.VarType.LOD_TENSOR, capacity=1) + + msg = fill_constant( + shape=[1], dtype=core.VarDesc.VarType.FP64, value=9) + + ping(pings, msg) + pong(pings, pongs) + + fluid.channel_recv(pongs, result) + + fluid.channel_close(pings) + fluid.channel_close(pongs) + + cpu = core.CPUPlace() + exe = Executor(cpu) + + exe_result = exe.run(fetch_list=[result]) + self.assertEqual(exe_result[0][0], 9) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/.gitignore b/python/paddle/fluid/tests/unittests/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..ad02bdecf436bba925e2e3b7efb20c878df70dfd --- /dev/null +++ b/python/paddle/fluid/tests/unittests/.gitignore @@ -0,0 +1,4 @@ +mnist.recordio +mnist_0.recordio +mnist_1.recordio +mnist_2.recordio diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt index f96c2ca4f0593b6c2624d449304f23425c69ab93..0ad273c7161977e18f91f952fd3a9dc144bf73f0 100644 --- a/python/paddle/fluid/tests/unittests/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt @@ -11,7 +11,6 @@ list(REMOVE_ITEM TEST_OPS test_lstm_unit_op) # # FIXME(qijun) https://github.com list(REMOVE_ITEM TEST_OPS test_nce) # IXME(qijun) https://github.com/PaddlePaddle/Paddle/issues/7778 list(REMOVE_ITEM TEST_OPS test_recurrent_op) # FIXME(qijun) https://github.com/PaddlePaddle/Paddle/issues/6152 list(REMOVE_ITEM TEST_OPS test_cond_op) # 
FIXME(qijun): https://github.com/PaddlePaddle/Paddle/issues/5101#issuecomment-339814957 -list(REMOVE_ITEM TEST_OPS test_detection_output_op) # FIXME: detection_output_op will be rewritten. This unittest should be list(REMOVE_ITEM TEST_OPS op_test) # op_test is a helper python file, not a test list(REMOVE_ITEM TEST_OPS decorators) # decorators is a helper python file, not a test diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py index f7e02595ec3b41ae7bb32353c258736968ca78d4..8393f7827b1c7d361ebea72f2cfc6033268772f0 100644 --- a/python/paddle/fluid/tests/unittests/op_test.py +++ b/python/paddle/fluid/tests/unittests/op_test.py @@ -469,6 +469,28 @@ class OpTest(unittest.TestCase): tensor.set_lod(lod) return tensor + @staticmethod + def np_dtype_to_fluid_dtype(input): + """Change the dtype of a float16 numpy array + + numpy float16 is bound to paddle::platform::float16 + in tensor_py.h with the help of the uint16 data type, since + the internal memory representation of float16 is + uint16_t in paddle and np.uint16 in numpy, which are + themselves bound together by pybind. + + Args: + input: input numpy array + + Returns: + input: The dtype of input will be changed to np.uint16 if + it is originally np.float16, such that the internal memory + of input will be reinterpreted as dtype np.uint16. + """ + if input.dtype == np.float16: + input.dtype = np.uint16 + return input + def _get_gradient(self, input_to_check, place, output_names, no_grad_set): prog = Program() block = prog.global_block() diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py index eab41ebe711bd21bdc3b34ca83ab57388cc35ba2..1e3decfbaf0691e912b96b415b68353e626cf51e 100644 --- a/python/paddle/fluid/tests/unittests/test_activation_op.py +++ b/python/paddle/fluid/tests/unittests/test_activation_op.py @@ -14,6 +14,7 @@ import unittest import numpy as np +import paddle.fluid.core as core from op_test import OpTest from scipy.special import expit @@ -212,18 +213,39 @@ class TestRound(OpTest): class TestRelu(OpTest): def setUp(self): self.op_type = "relu" - x = np.random.uniform(-1, 1, [11, 17]).astype("float32") + self.dtype = np.float32 + self.init_dtype() + + x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) # The same reason with TestAbs x[np.abs(x) < 0.005] = 0.02 - self.inputs = {'X': x} - self.outputs = {'Out': np.maximum(self.inputs['X'], 0)} + out = np.maximum(x, 0) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} def test_check_output(self): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return self.check_grad(['X'], 'Out', max_relative_error=0.007) + def init_dtype(self): + pass + + +class TestFP16Relu(TestRelu): + def init_dtype(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) + class TestBRelu(OpTest): def setUp(self): diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py index 80e6fa6df3c21aa19feb571916f11c41ccd6bb10..10aa63e18a6eeaa44e5b12f7532998dca2bc5e9f 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py @@ -31,6 +31,37 @@ def get_backward_op(scope, op, no_grad_set): return backward_op
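A brief sketch of the float16-to-uint16 reinterpretation that the np_dtype_to_fluid_dtype helper above relies on, in plain numpy (the array shape is illustrative):

import numpy as np

# float16 and uint16 share the same 2-byte layout, so viewing one as the
# other relabels the dtype without converting any values; pybind can then
# hand the raw bits through to paddle::platform::float16 unchanged.
x = np.random.uniform(-1, 1, (11, 17)).astype(np.float16)
x_u16 = x.view(np.uint16)  # same buffer, dtype np.uint16
assert x_u16.shape == x.shape and x_u16.nbytes == x.nbytes
# np_dtype_to_fluid_dtype achieves the same effect in place by
# reassigning the array's dtype attribute rather than taking a view.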
+def _reference_testing(x, scale, offset, mean, var, epsilon, data_format): + x_shape = x.shape + if len(x_shape) == 2: + if data_format == "NCHW": + x = np.reshape(x, (x.shape[0], x.shape[1], 1, 1)) + else: + x = np.reshape(x, (x.shape[0], 1, 1, x.shape[1])) + + if data_format == "NCHW": + n, c, h, w = x.shape + mean_tile = np.reshape(mean, (1, c, 1, 1)) + mean_tile = np.tile(mean_tile, (n, 1, h, w)) + var_tile = np.reshape(var, (1, c, 1, 1)) + var_tile = np.tile(var_tile, (n, 1, h, w)) + normalized = (x - mean_tile) / np.sqrt(var_tile + epsilon) + scale_tile = np.reshape(scale, (1, c, 1, 1)) + scale_tile = np.tile(scale_tile, (n, 1, h, w)) + offset_tile = np.reshape(offset, (1, c, 1, 1)) + offset_tile = np.tile(offset_tile, (n, 1, h, w)) + y = normalized * scale_tile + offset_tile + elif data_format == "NHWC": + normalized = (x - mean) / np.sqrt(var + epsilon) + y = normalized * scale + offset + else: + raise ValueError("Unknown data order.") + + if len(x_shape) == 2: + y = np.reshape(y, x_shape) + return y + + def _reference_training(x, scale, offset, epsilon, data_format): x_shape = x.shape if len(x_shape) == 2: @@ -155,11 +186,159 @@ def set_output_grad(scope, outputs, place, feed_dict=None): __set_tensor__(output, data) -class TestBatchNormOp(OpTest): +class TestBatchNormOpInference(OpTest): + def setUp(self): + self.dtype = np.float32 + def __assert_close(self, tensor, np_array, msg, atol=1e-4): self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg) - def test_python(self): + def check_with_place(self, place, data_layout, dtype, shape): + epsilon = 0.00001 + if len(shape) == 2: + x_shape = shape + c = x_shape[1] + else: + n, h, w, c = shape[0], shape[1], shape[2], shape[3] + if data_layout == "NHWC": + x_shape = [n, h, w, c] + elif data_layout == "NCHW": + x_shape = [n, c, h, w] + else: + raise ValueError("Unknown data layout.") + scale_shape = [c] + + x_val = np.random.random_sample(x_shape).astype(dtype) + scale_val = np.random.random_sample(scale_shape).astype(np.float32) + bias_val = np.random.random_sample(scale_shape).astype(np.float32) + + mean = np.zeros(scale_shape).astype(np.float32) + variance = np.ones(scale_shape).astype(np.float32) + + y_out = _reference_testing(x_val, scale_val, bias_val, mean, variance, + epsilon, data_layout).astype(dtype) + + scope = core.Scope() + + # create input + x_tensor = create_or_get_tensor(scope, "x_val", + OpTest.np_dtype_to_fluid_dtype(x_val), + place) + scale_tensor = create_or_get_tensor( + scope, "scale_val", + OpTest.np_dtype_to_fluid_dtype(scale_val), place) + bias_tensor = create_or_get_tensor( + scope, "bias_val", OpTest.np_dtype_to_fluid_dtype(bias_val), place) + mean_tensor = create_or_get_tensor(scope, "mean", + OpTest.np_dtype_to_fluid_dtype(mean), + place) + variance_tensor = create_or_get_tensor( + scope, "variance", OpTest.np_dtype_to_fluid_dtype(variance), place) + + # create output + y_tensor = create_or_get_tensor(scope, "y_out", None, place) + saved_mean_tensor = create_or_get_tensor(scope, "saved_mean", None, + place) + saved_variance_tensor = create_or_get_tensor(scope, "saved_variance", + None, place) + mean_out_tensor = mean_tensor + variance_out_tensor = variance_tensor + + batch_norm_op = Operator( + "batch_norm", + # inputs + X="x_val", + Scale="scale_val", + Bias="bias_val", + Mean="mean", + Variance="variance", + # outputs + Y="y_out", + MeanOut="mean", + VarianceOut="variance", + SavedMean="saved_mean", + SavedVariance="saved_variance", + # attrs + is_test=True, + data_layout=data_layout,
+ epsilon=epsilon) + + batch_norm_op.run(scope, place) + + # check inference result + self.__assert_close( + y_tensor, + y_out, + "inference outputs are different at " + str(place) + ", " + + data_layout + ", " + str(np.dtype(dtype)) + + str(np.array(y_tensor)) + str(y_out), + atol=1e-3) + + def test_check_output(self): + places = [core.CPUPlace()] + if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"): + places.append(core.CUDAPlace(0)) + + for place in places: + for data_format in ["NCHW", "NHWC"]: + self.check_with_place(place, data_format, self.dtype, + [2, 3, 4, 5]) + self.check_with_place(place, data_format, self.dtype, [2, 3]) + + +class TestFP16BatchNormOpInference(TestBatchNormOpInference): + def setUp(self): + self.dtype = np.float16 + + def test_check_output(self): + places = [] + if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + places.append(place) + + for place in places: + for data_format in ["NCHW", "NHWC"]: + self.check_with_place(place, data_format, self.dtype, + [2, 3, 4, 5]) + self.check_with_place(place, data_format, self.dtype, [2, 3]) + + +class TestBatchNormOpTraining(OpTest): + def __assert_close(self, tensor, np_array, msg, atol=1e-4): + self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg) + + def test_python_testing(self): + data_format = "NHWC" + epsilon = 0.00001 + + n, h, w, c = 2, 3, 4, 5 + x_shape = [n, h, w, c] + scale_shape = [c] + + x_val = np.random.random_sample(x_shape).astype(np.float32) + scale_val = np.random.random_sample(scale_shape).astype(np.float32) + bias_val = np.random.random_sample(scale_shape).astype(np.float32) + + mean = np.zeros(scale_shape).astype(np.float32) + variance = np.ones(scale_shape).astype(np.float32) + + y_out = _reference_testing(x_val, scale_val, bias_val, mean, variance, + epsilon, "NHWC") + + # running N, C, H, W case + # should produce the same results + x_shape2 = [n, c, h, w] + x_val2 = np.transpose(x_val, (0, 3, 1, 2)) + y_out2 = _reference_testing(x_val2, scale_val, bias_val, mean, variance, + epsilon, "NCHW") + + # transfer (N, C, H, W) back to (N, H, W, C) + y_out2_trans = np.transpose(y_out2, (0, 2, 3, 1)) + self.__assert_close(y_out, y_out2_trans, "inference output") + print 'python: NHWC, NCHW, inference checking passed' + + def test_python_training(self): data_format = "NHWC" epsilon = 0.00001 momentum = 0.9 @@ -197,7 +376,7 @@ class TestBatchNormOp(OpTest): # transfer (N, C, H, W) back to (N, H, W, C) y_out2_trans = np.transpose(y_out2, (0, 2, 3, 1)) - self.__assert_close(y_out, y_out2_trans, "batch variance") + self.__assert_close(y_out, y_out2_trans, "batch output") print 'python: NHWC, NCHW, forward checking passed' # test backward now diff --git a/python/paddle/fluid/tests/unittests/test_cast_op.py b/python/paddle/fluid/tests/unittests/test_cast_op.py index 8fb8d03828393ccfe57c0848d79b960c641ad39a..b8d3ed3aa3eb0e47e79f46cdf681a3b9cca46036 100644 --- a/python/paddle/fluid/tests/unittests/test_cast_op.py +++ b/python/paddle/fluid/tests/unittests/test_cast_op.py @@ -18,7 +18,7 @@ import numpy as np import paddle.fluid.core as core -class TestCastOp(op_test.OpTest): +class TestCastOp1(op_test.OpTest): def setUp(self): ipt = np.random.random(size=[10, 10]) self.inputs = {'X': ipt.astype('float32')} @@ -36,5 +36,36 @@ class TestCastOp(op_test.OpTest): self.check_grad(['X'], ['Out']) +class TestCastOp2(op_test.OpTest): + def setUp(self): + ipt = np.random.random(size=[10, 10]) + # numpy 
float16 is bound to fluid float16 via uint16 + self.inputs = {'X': ipt.astype('float16').view(np.uint16)} + self.outputs = {'Out': ipt.astype('float32')} + self.attrs = { + 'in_dtype': int(core.VarDesc.VarType.FP16), + 'out_dtype': int(core.VarDesc.VarType.FP32) + } + self.op_type = 'cast' + + def test_check_output(self): + self.check_output(atol=1e-3) + + +class TestCastOp3(op_test.OpTest): + def setUp(self): + ipt = np.random.random(size=[10, 10]) + self.inputs = {'X': ipt.astype('float32')} + self.outputs = {'Out': ipt.astype('float16')} + self.attrs = { + 'in_dtype': int(core.VarDesc.VarType.FP32), + 'out_dtype': int(core.VarDesc.VarType.FP16) + } + self.op_type = 'cast' + + def test_check_output(self): + self.check_output(atol=1e-3) + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_op.py index a49fecf09509f7b1d9f758eebcf90bf9fbf7669f..4b6e3fb69a12095c77f343515fe3b6d1f3fccb14 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_op.py @@ -63,9 +63,11 @@ def conv2d_forward_naive(input, filter, group, conv_param): class TestConv2dOp(OpTest): def setUp(self): + self.op_type = "conv2d" self.use_cudnn = False self.use_mkldnn = False - self.init_op_type() + self.dtype = np.float32 + self.init_kernel_type() self.init_group() self.init_dilation() self.init_test_case() @@ -75,12 +77,16 @@ class TestConv2dOp(OpTest): 'pad': self.pad, 'dilation': self.dilations } - input = np.random.random(self.input_size).astype("float32") - filter = np.random.random(self.filter_size).astype("float32") + + input = np.random.random(self.input_size).astype(self.dtype) + filter = np.random.random(self.filter_size).astype(self.dtype) output = conv2d_forward_naive(input, filter, self.groups, - conv2d_param).astype('float32') + conv2d_param).astype(self.dtype) - self.inputs = {'Input': input, 'Filter': filter} + self.inputs = { + 'Input': OpTest.np_dtype_to_fluid_dtype(input), + 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) + } self.attrs = { 'strides': self.stride, 'paddings': self.pad, @@ -99,6 +105,8 @@ class TestConv2dOp(OpTest): self.check_output() def test_check_grad(self): + if self.dtype == np.float16: + return if self.use_cudnn: place = core.CUDAPlace(0) self.check_grad_with_place( @@ -111,6 +119,8 @@ class TestConv2dOp(OpTest): set(['Input', 'Filter']), 'Output', max_relative_error=0.02) def test_check_grad_no_filter(self): + if self.dtype == np.float16: + return if self.use_cudnn: place = core.CUDAPlace(0) self.check_grad_with_place( @@ -126,6 +136,8 @@ class TestConv2dOp(OpTest): no_grad_set=set(['Filter'])) def test_check_grad_no_input(self): + if self.dtype == np.float16: + return if self.use_cudnn: place = core.CUDAPlace(0) self.check_grad_with_place( @@ -154,8 +166,8 @@ class TestConv2dOp(OpTest): def init_group(self): self.groups = 1 - def init_op_type(self): - self.op_type = "conv2d" + def init_kernel_type(self): + pass class TestWithPad(TestConv2dOp): @@ -227,39 +239,105 @@ class TestWithInput1x1Filter1x1(TestConv2dOp): #----------------Conv2dCUDNN---------------- class TestCUDNN(TestConv2dOp): - def init_op_type(self): + def init_kernel_type(self): self.use_cudnn = True - self.op_type = "conv2d" + + +class TestFP16CUDNN(TestConv2dOp): + def init_kernel_type(self): + self.use_cudnn = True + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if 
core.is_float16_supported(place): + self.check_output_with_place(place, atol=2e-2) class TestCUDNNWithPad(TestWithPad): - def init_op_type(self): + def init_kernel_type(self): self.use_cudnn = True - self.op_type = "conv2d" + + +class TestFP16CUDNNWithPad(TestWithPad): + def init_kernel_type(self): + self.use_cudnn = True + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=2e-2) class TestCUDNNWithStride(TestWithStride): - def init_op_type(self): + def init_kernel_type(self): self.use_cudnn = True - self.op_type = "conv2d" + + +class TestFP16CUDNNWithStride(TestWithStride): + def init_kernel_type(self): + self.use_cudnn = True + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=2e-2) class TestCUDNNWithGroup(TestWithGroup): - def init_op_type(self): + def init_kernel_type(self): self.use_cudnn = True - self.op_type = "conv2d" + + +class TestFP16CUDNNWithGroup(TestWithGroup): + def init_kernel_type(self): + self.use_cudnn = True + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=2e-2) class TestCUDNNWith1x1(TestWith1x1): - def init_op_type(self): + def init_kernel_type(self): self.use_cudnn = True - self.op_type = "conv2d" + + +class TestFP16CUDNNWith1x1(TestWith1x1): + def init_kernel_type(self): + self.use_cudnn = True + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=2e-2) class TestCUDNNWithInput1x1Filter1x1(TestWithInput1x1Filter1x1): - def init_op_type(self): + def init_kernel_type(self): self.use_cudnn = True - self.op_type = "conv2d" + + +class TestFP16CUDNNWithInput1x1Filter1x1(TestWithInput1x1Filter1x1): + def init_kernel_type(self): + self.use_cudnn = True + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=2e-2) class TestDepthwiseConv(TestConv2dOp): @@ -295,21 +373,18 @@ class TestDepthwiseConv2(TestConv2dOp): #----------------Conv2dMKLDNN---------------- class TestMKLDNN(TestConv2dOp): - def init_op_type(self): + def init_kernel_type(self): self.use_mkldnn = True - self.op_type = "conv2d" class TestMKLDNNWithPad(TestWithPad): - def init_op_type(self): + def init_kernel_type(self): self.use_mkldnn = True - self.op_type = "conv2d" class TestMKLDNNWithStride(TestWithStride): - def init_op_type(self): + def init_kernel_type(self): self.use_mkldnn = True - self.op_type = "conv2d" if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_debugger.py b/python/paddle/fluid/tests/unittests/test_debugger.py new file mode 100644 index 0000000000000000000000000000000000000000..2b7bbf9218f9b8fd8f5b29ac3cbc2f9680f471eb --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_debugger.py @@ -0,0 +1,58 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import paddle.fluid as fluid +import paddle.fluid.core as core +from paddle.fluid import debuger +from paddle.fluid.framework import Program + + +class TestDebugger(unittest.TestCase): + def test_debug_str(self): + p = Program() + b = p.current_block() + + #selected_rows + b.create_var( + name='selected_rows', + dtype="float32", + shape=[5, 10], + type=core.VarDesc.VarType.SELECTED_ROWS) + + #tensor array + b.create_var( + name='tensor_array', + shape=[5, 10], + type=core.VarDesc.VarType.LOD_TENSOR_ARRAY) + + #operator + mul_x = b.create_parameter( + dtype="float32", shape=[5, 10], lod_level=0, name="mul.x") + mul_y = b.create_var( + dtype="float32", shape=[10, 8], lod_level=0, name="mul.y") + mul_out = b.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out") + b.append_op( + type="mul", + inputs={"X": mul_x, + "Y": mul_y}, + outputs={"Out": mul_out}, + attrs={"x_num_col_dims": 1}) + + print(debuger.pprint_program_codes(p.desc)) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_detection_output_op.py b/python/paddle/fluid/tests/unittests/test_detection_output_op.py deleted file mode 100644 index 94681319144ee3e0d51b57944f5692183578c01b..0000000000000000000000000000000000000000 --- a/python/paddle/fluid/tests/unittests/test_detection_output_op.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest -import numpy as np -from op_test import OpTest - - -class TestUnpoolOp(OpTest): - def setUp(self): - self.op_type = "detection_output" - self.init_test_case() - - #loc.shape ((1, 4, 4, 1, 1)) - #conf.shape ((1, 4, 2, 1, 1)) - - loc = np.array([[[[[0.1]], [[0.1]], [[0.1]], [[0.1]]], - [[[0.1]], [[0.1]], [[0.1]], [[0.1]]], - [[[0.1]], [[0.1]], [[0.1]], [[0.1]]], - [[[0.1]], [[0.1]], [[0.1]], [[0.1]]]]]) - conf = np.array([[[[[0.1]], [[0.9]]], [[[0.2]], [[0.8]]], - [[[0.3]], [[0.7]]], [[[0.4]], [[0.6]]]]]) - priorbox = np.array([ - 0.1, 0.1, 0.5, 0.5, 0.1, 0.1, 0.2, 0.2, 0.2, 0.2, 0.6, 0.6, 0.1, - 0.1, 0.2, 0.2, 0.3, 0.3, 0.7, 0.7, 0.1, 0.1, 0.2, 0.2, 0.4, 0.4, - 0.8, 0.8, 0.1, 0.1, 0.2, 0.2 - ]) - - output = np.array([ - 0, 1, 0.68997443, 0.099959746, 0.099959746, 0.50804031, 0.50804031 - ]) - self.inputs = { - 'Loc': loc.astype('float32'), - 'Conf': conf.astype('float32'), - 'PriorBox': priorbox.astype('float32') - } - self.attrs = { - 'num_classes': self.num_classes, - 'top_k': self.top_k, - 'nms_top_k': self.nms_top_k, - 'background_label_id': self.background_label_id, - 'nms_threshold': self.nms_threshold, - 'confidence_threshold': self.confidence_threshold, - } - self.outputs = {'Out': output.astype('float32')} - - def test_check_output(self): - self.check_output() - - def init_test_case(self): - self.num_classes = 2 - self.top_k = 10 - self.nms_top_k = 20 - self.background_label_id = 0 - self.nms_threshold = 0.01 - self.confidence_threshold = 0.01 - - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_dropout_op.py b/python/paddle/fluid/tests/unittests/test_dropout_op.py index 60930a612c128cbf18e89711b9246d148e41ec58..eaa3435a86462236a99489749abe877648677053 100644 --- a/python/paddle/fluid/tests/unittests/test_dropout_op.py +++ b/python/paddle/fluid/tests/unittests/test_dropout_op.py @@ -14,6 +14,7 @@ import unittest import numpy as np +import paddle.fluid.core as core from op_test import OpTest @@ -82,5 +83,37 @@ class TestDropoutOp5(OpTest): self.check_output() +class TestFP16DropoutOp(OpTest): + def setUp(self): + self.op_type = "dropout" + self.init_test_case() + + x = np.random.random(self.input_size).astype("float16") + out = x * (1.0 - self.prob) + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.attrs = { + 'dropout_prob': self.prob, + 'fix_seed': self.fix_seed, + 'is_test': True + } + self.outputs = {'Out': out} + + def init_test_case(self): + self.input_size = [32, 64] + self.prob = 0.35 + self.fix_seed = True + + def test_check_output(self): + if core.is_compiled_with_cuda() and core.op_support_gpu("dropout"): + self.check_output_with_place(core.CUDAPlace(0), atol=1e-3) + + +class TestFP16DropoutOp2(TestFP16DropoutOp): + def init_test_case(self): + self.input_size = [32, 64, 3] + self.prob = 0.75 + self.fix_seed = False + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py index 5b2384e94d788342c692fcb8e33f3a2ff663ab53..1f52bd90d0d49bda6c180019e90ebd923c91439c 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py @@ -13,158 +13,243 @@ # limitations under the License. 
import unittest import numpy as np +import paddle.fluid.core as core from op_test import OpTest -class TestElementwiseOp(OpTest): +class TestElementwiseAddOp(OpTest): def setUp(self): self.op_type = "elementwise_add" + self.dtype = np.float32 + self.axis = -1 + self.init_dtype() + self.init_input_output() + self.init_axis() + self.inputs = { - 'X': np.random.uniform(0.1, 1, [13, 17]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [13, 17]).astype("float32") + 'X': OpTest.np_dtype_to_fluid_dtype(self.x), + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) } - self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['Y'])} + self.attrs = {'axis': self.axis} + self.outputs = {'Out': self.out} def test_check_output(self): self.check_output() def test_check_grad_normal(self): + if self.dtype == np.float16: + return self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.005) def test_check_grad_ingore_x(self): + if self.dtype == np.float16: + return self.check_grad( ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X")) def test_check_grad_ingore_y(self): + if self.dtype == np.float16: + return self.check_grad( ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y')) + def init_input_output(self): + self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) + self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) + self.out = np.add(self.x, self.y) -class TestElementwiseAddOp_scalar(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_add" - self.inputs = { - 'X': np.random.rand(2, 3, 4).astype(np.float32), - 'Y': np.random.rand(1).astype(np.float32) - } - self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']} + def init_dtype(self): + pass + def init_axis(self): + pass -class TestElementwiseAddOp_scalar2(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_add" - self.inputs = { - 'X': np.random.rand(2, 3, 4).astype(np.float32), - 'Y': np.random.rand(1, 1).astype(np.float32) - } - self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']} +class TestFP16ElementwiseAddOp(TestElementwiseAddOp): + def init_dtype(self): + self.dtype = np.float16 -class TestElementwiseAddOp_Vector(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_add" - self.inputs = { - 'X': np.random.random((32, )).astype("float32"), - 'Y': np.random.random((32, )).astype("float32") - } - self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['Y'])} + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) -class TestElementwiseAddOp_broadcast_0(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_add" - self.inputs = { - 'X': np.random.rand(2, 3, 4).astype(np.float32), - 'Y': np.random.rand(2).astype(np.float32) - } +class TestElementwiseAddOp_scalar(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4).astype(self.dtype) + self.y = np.random.rand(1).astype(self.dtype) + self.out = self.x + self.y - self.attrs = {'axis': 0} - self.outputs = { - 'Out': self.inputs['X'] + self.inputs['Y'].reshape(2, 1, 1) - } +class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4).astype(self.dtype) + self.y = np.random.rand(1).astype(self.dtype) + self.out = self.x + self.y -class TestElementwiseAddOp_broadcast_1(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_add" - self.inputs = { - 'X': 
np.random.rand(2, 3, 4).astype(np.float32), - 'Y': np.random.rand(3).astype(np.float32) - } - self.attrs = {'axis': 1} - self.outputs = { - 'Out': self.inputs['X'] + self.inputs['Y'].reshape(1, 3, 1) - } +class TestElementwiseAddOp_scalar2(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4).astype(self.dtype) + self.y = np.random.rand(1, 1).astype(self.dtype) + self.out = self.x + self.y -class TestElementwiseAddOp_broadcast_2(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_add" - self.inputs = { - 'X': np.random.rand(2, 3, 4).astype(np.float32), - 'Y': np.random.rand(4).astype(np.float32) - } +class TestFP16ElementwiseAddOp_scalar2(TestFP16ElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4).astype(self.dtype) + self.y = np.random.rand(1, 1).astype(self.dtype) + self.out = self.x + self.y - self.outputs = { - 'Out': self.inputs['X'] + self.inputs['Y'].reshape(1, 1, 4) - } +class TestElementwiseAddOp_Vector(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.random((32, )).astype(self.dtype) + self.y = np.random.random((32, )).astype(self.dtype) + self.out = np.add(self.x, self.y) -class TestElementwiseAddOp_broadcast_3(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_add" - self.inputs = { - 'X': np.random.rand(2, 3, 4, 5).astype(np.float32), - 'Y': np.random.rand(3, 4).astype(np.float32) - } - self.attrs = {'axis': 1} - self.outputs = { - 'Out': self.inputs['X'] + self.inputs['Y'].reshape(1, 3, 4, 1) - } +class TestFP16ElementwiseAddOp_Vector(TestFP16ElementwiseAddOp): + def init_input_output(self): + self.x = np.random.random((32, )).astype(self.dtype) + self.y = np.random.random((32, )).astype(self.dtype) + self.out = np.add(self.x, self.y) -class TestElementwiseAddOp_broadcast_4(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_add" - self.inputs = { - 'X': np.random.rand(2, 3, 4, 5).astype(np.float32), - 'Y': np.random.rand(2, 1).astype(np.float32) - } +class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4).astype(self.dtype) + self.y = np.random.rand(2).astype(self.dtype) + self.out = self.x + self.y.reshape(2, 1, 1) - self.attrs = {'axis': 0} - self.outputs = { - 'Out': self.inputs['X'] + self.inputs['Y'].reshape(2, 1, 1, 1) - } + def init_axis(self): + self.axis = 0 -class TestElementwiseAddOp_rowwise_add_0(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_add" - self.inputs = { - 'X': np.random.rand(2, 3, 4).astype(np.float32), - 'Y': np.random.rand(3, 4).astype(np.float32) - } +class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4).astype(self.dtype) + self.y = np.random.rand(2).astype(self.dtype) + self.out = self.x + self.y.reshape(2, 1, 1) - self.attrs = {'axis': 1} - self.outputs = { - 'Out': self.inputs['X'] + self.inputs['Y'].reshape(1, 3, 4) - } + def init_axis(self): + self.axis = 0 -class TestElementwiseAddOp_rowwise_add_1(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_add" - self.inputs = { - 'X': np.random.rand(2, 1).astype(np.float32), - 'Y': np.random.rand(1).astype(np.float32) - } +class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4).astype(self.dtype) + self.y = np.random.rand(3).astype(self.dtype) + self.out = self.x + self.y.reshape(1, 3, 1) - 
self.attrs = {'axis': 1} - self.outputs = { - 'Out': self.inputs['X'] + self.inputs['Y'].reshape(1, 1) - } + def init_axis(self): + self.axis = 1 + + +class TestFP16ElementwiseAddOp_broadcast_1(TestFP16ElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4).astype(self.dtype) + self.y = np.random.rand(3).astype(self.dtype) + self.out = self.x + self.y.reshape(1, 3, 1) + + def init_axis(self): + self.axis = 1 + + +class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4).astype(self.dtype) + self.y = np.random.rand(4).astype(self.dtype) + self.out = self.x + self.y.reshape(1, 1, 4) + + +class TestFP16ElementwiseAddOp_broadcast_2(TestFP16ElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4).astype(self.dtype) + self.y = np.random.rand(4).astype(self.dtype) + self.out = self.x + self.y.reshape(1, 1, 4) + + +class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype) + self.y = np.random.rand(3, 4).astype(self.dtype) + self.out = self.x + self.y.reshape(1, 3, 4, 1) + + def init_axis(self): + self.axis = 1 + + +class TestFP16ElementwiseAddOp_broadcast_3(TestFP16ElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype) + self.y = np.random.rand(3, 4).astype(self.dtype) + self.out = self.x + self.y.reshape(1, 3, 4, 1) + + def init_axis(self): + self.axis = 1 + + +class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype) + self.y = np.random.rand(2, 1).astype(self.dtype) + self.out = self.x + self.y.reshape(2, 1, 1, 1) + + def init_axis(self): + self.axis = 0 + + +class TestFP16ElementwiseAddOp_broadcast_4(TestFP16ElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype) + self.y = np.random.rand(2, 1).astype(self.dtype) + self.out = self.x + self.y.reshape(2, 1, 1, 1) + + def init_axis(self): + self.axis = 0 + + +class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4).astype(self.dtype) + self.y = np.random.rand(3, 4).astype(self.dtype) + self.out = self.x + self.y.reshape(1, 3, 4) + + def init_axis(self): + self.axis = 1 + + +class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4).astype(self.dtype) + self.y = np.random.rand(3, 4).astype(self.dtype) + self.out = self.x + self.y.reshape(1, 3, 4) + + def init_axis(self): + self.axis = 1 + + +class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 1).astype(self.dtype) + self.y = np.random.rand(1).astype(self.dtype) + self.out = self.x + self.y.reshape(1, 1) + + def init_axis(self): + self.axis = 1 + + +class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 1).astype(self.dtype) + self.y = np.random.rand(1).astype(self.dtype) + self.out = self.x + self.y.reshape(1, 1) + + def init_axis(self): + self.axis = 1 if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_executor_and_mul.py b/python/paddle/fluid/tests/unittests/test_executor_and_mul.py index 
4958bef3ef4d101f934a2776efc21efdd24a9a4d..e1272c1d6dd7131b55ecf33fa0de0fc78a3ac5a7 100644 --- a/python/paddle/fluid/tests/unittests/test_executor_and_mul.py +++ b/python/paddle/fluid/tests/unittests/test_executor_and_mul.py @@ -16,7 +16,6 @@ import unittest import numpy import paddle.fluid.core as core - from paddle.fluid.executor import Executor from paddle.fluid.layers import mul, data diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 6944cca394fbc1ddde09dfeb0bc82e357a3cd225..b5fd59cf3a1bea50b799c3ace8f3b9cea088b9d5 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -181,8 +181,8 @@ class TestBook(unittest.TestCase): with program_guard(program): x = layers.data(name='x', shape=[10], dtype='float32') y = layers.data( - name='y', shape=[10, 20], dtype='float32', lod_level=1) - self.assertIsNotNone(layers.sequence_expand(x=x, y=y)) + name='y', shape=[10, 20], dtype='float32', lod_level=2) + self.assertIsNotNone(layers.sequence_expand(x=x, y=y, ref_level=1)) print(str(program)) def test_lstm_unit(self): @@ -220,7 +220,7 @@ class TestBook(unittest.TestCase): seq_data = layers.data( name='seq_data', shape=[10, 10], dtype='float32', lod_level=1) seq = layers.fc(input=seq_data, size=20) - self.assertIsNotNone(layers.sequence_softmax(x=seq)) + self.assertIsNotNone(layers.sequence_softmax(seq)) print(str(program)) def test_softmax(self): @@ -228,7 +228,7 @@ class TestBook(unittest.TestCase): with program_guard(program): data = layers.data(name='data', shape=[10], dtype='float32') hid = layers.fc(input=data, size=20) - self.assertIsNotNone(layers.softmax(x=hid)) + self.assertIsNotNone(layers.softmax(hid)) print(str(program)) def test_get_places(self): @@ -327,6 +327,15 @@ class TestBook(unittest.TestCase): self.assertIsNotNone(loss) print(str(program)) + def test_lod_reset(self): + program = Program() + with program_guard(program): + x = layers.data(name='x', shape=[10], dtype='float32') + y = layers.data( + name='y', shape=[10, 20], dtype='float32', lod_level=2) + print(layers.lod_reset(x=x, y=y)) + print(str(program)) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_lod_reset_op.py b/python/paddle/fluid/tests/unittests/test_lod_reset_op.py index 3bf8230f8748dd87ec3c85b0cbd78df2e695a96b..6b6d4c824aeae319dacf224408ce96a0d9c5bb35 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_reset_op.py +++ b/python/paddle/fluid/tests/unittests/test_lod_reset_op.py @@ -42,7 +42,7 @@ class TestLodResetOpByInput(OpTest): target_lod_0 = [0, 4, 7, 10] self.inputs = { 'X': (x, lod), - 'TargetLoD': np.array([target_lod_0]).astype('int32') + 'Y': np.array([target_lod_0]).astype('int32') } self.outputs = {'Out': (x, [target_lod_0])} @@ -50,7 +50,7 @@ class TestLodResetOpByInput(OpTest): self.check_output() def test_check_grad(self): - self.check_grad(["X"], "Out", no_grad_set=set("TargetLoD")) + self.check_grad(["X"], "Out", no_grad_set=set("Y")) class TestLodResetOpBoth(OpTest): @@ -62,7 +62,7 @@ class TestLodResetOpBoth(OpTest): target_lod_0_in = [0, 4, 7, 10] self.inputs = { 'X': (x, lod), - 'TargetLoD': np.array(target_lod_0_in).astype('int32') + 'Y': np.array(target_lod_0_in).astype('int32') } self.attrs = {'target_lod': target_lod_0_attr} self.outputs = {'Out': (x, [target_lod_0_in])} @@ -71,7 +71,24 @@ class TestLodResetOpBoth(OpTest): self.check_output() def test_check_grad(self): - self.check_grad(["X"], "Out", 
no_grad_set=set("TargetLoD")) + self.check_grad(["X"], "Out", no_grad_set=set("Y")) + + +class TestLodResetOpYIsLoDTensor(OpTest): + def setUp(self): + self.op_type = "lod_reset" + x = np.random.random((10, 20)).astype("float32") + lod = [[0, 3, 5, 10]] + y = np.random.random((10, 10)).astype("float32") + target_lod_0 = [[0, 4, 7, 10]] + self.inputs = {'X': (x, lod), 'Y': (y, target_lod_0)} + self.outputs = {'Out': (x, target_lod_0)} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(["X"], "Out", no_grad_set=set("Y")) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py index 03a5bd24a133e703855400532517c293196b64f0..ed920ad388ff0e01887404e70fe82565b4cd28fa 100644 --- a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py +++ b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py @@ -15,6 +15,8 @@ import unittest import numpy as np from op_test import OpTest +import paddle.fluid.core as core +from paddle.fluid.op import Operator class TestLookupTableOp(OpTest): @@ -47,5 +49,52 @@ class TestLookupTableOpWithPadding(TestLookupTableOp): pass +class TestLookupTableIdsIsSelectedRows(OpTest): + def check_with_place(self, place): + scope = core.Scope() + + # create and initialize Variable + height = 10 + rows = [0, 4, 4, 7] + row_numel = 12 + + # create and initialize W Variable + W = scope.var('W').get_tensor() + W_array = np.full((height, row_numel), 1.0).astype("float32") + for i in range(height): + W_array[i] *= i + W.set(W_array, place) + + # create and initialize Ids Variable + ids_selected_rows = scope.var('Ids').get_selected_rows() + ids_selected_rows.set_height(len(rows)) + ids_selected_rows.set_rows(rows) + np_array = np.ones((len(rows), row_numel)).astype("float32") + ids_tensor = ids_selected_rows.get_tensor() + ids_tensor.set(np_array, place) + + # create Out Variable + Out = scope.var('Out').get_selected_rows() + + # create and run lookup_table operator + concat_rows_op = Operator("lookup_table", W='W', Ids='Ids', Out='Out') + concat_rows_op.run(scope, place) + + # get result from Out + Out_tensor = Out.get_tensor() + result_array = np.array(Out_tensor) + + # all(): return True if all elements of the iterable are true (or if the iterable is empty) + for idx, row in enumerate(rows): + assert (row == result_array[idx]).all() + + def test_concat_rows(self): + places = [core.CPUPlace()] + if core.is_compiled_with_cuda(): + places.append(core.CUDAPlace(0)) + for place in places: + self.check_with_place(place) + + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_lrn_op.py b/python/paddle/fluid/tests/unittests/test_lrn_op.py index 7f2352c5882ce36d8d681a737806f3ee0e3ace98..2268eafdbd08cd0d6a175d19cedd79b7b984289b 100644 --- a/python/paddle/fluid/tests/unittests/test_lrn_op.py +++ b/python/paddle/fluid/tests/unittests/test_lrn_op.py @@ -41,7 +41,7 @@ class TestLRNOp(OpTest): mid.fill(self.k) for m in range(0, self.N): for i in range(0, self.C): - for c in range(start, end + 1): + for c in range(start, end): ch = i + c if ch < 0 or ch >= self.C: continue @@ -87,5 +87,15 @@ class TestLRNOp(OpTest): self.check_grad(['X'], 'Out', max_relative_error=0.01) +class TestLRNMKLDNNOp(TestLRNOp): + def get_attrs(self): + attrs = TestLRNOp.get_attrs(self) + attrs['use_mkldnn'] = True + return attrs + + def test_check_output(self): + self.check_output(atol=0.002) + + if __name__ == 
"__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_mul_op.py b/python/paddle/fluid/tests/unittests/test_mul_op.py index 9d1da420c7f70bd2a89d183a5f0a2b145f0ff475..40440bea1267112b84b66002a0bf921be3029265 100644 --- a/python/paddle/fluid/tests/unittests/test_mul_op.py +++ b/python/paddle/fluid/tests/unittests/test_mul_op.py @@ -14,6 +14,7 @@ import unittest import numpy as np +import paddle.fluid.core as core from op_test import OpTest @@ -69,5 +70,42 @@ class TestMulOp2(OpTest): ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')) +class TestFP16MulOp1(OpTest): + def setUp(self): + self.op_type = "mul" + x = np.random.random((32, 84)).astype("float16") + y = np.random.random((84, 100)).astype("float16") + self.inputs = {'X': x.view(np.uint16), 'Y': y.view(np.uint16)} + self.outputs = {'Out': np.dot(x, y)} + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-1) + + +class TestFP16MulOp2(OpTest): + def setUp(self): + self.op_type = "mul" + x = np.random.random((15, 4, 12, 10)).astype("float16") + y = np.random.random((4, 30, 8, 2, 9)).astype("float16") + self.inputs = {'X': x.view(np.uint16), 'Y': y.view(np.uint16)} + self.attrs = { + 'x_num_col_dims': 2, + 'y_num_col_dims': 2, + } + result = np.dot( + x.reshape(15 * 4, 12 * 10), y.reshape(4 * 30, 8 * 2 * 9)) + result = result.reshape(15, 4, 8, 2, 9) + self.outputs = {'Out': result} + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=2e-1) + + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_multi_pass_reader.py b/python/paddle/fluid/tests/unittests/test_multi_pass_reader.py new file mode 100644 index 0000000000000000000000000000000000000000..8add353303e3626bbce68199a100306d4858766a --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_multi_pass_reader.py @@ -0,0 +1,65 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import paddle.fluid as fluid +import paddle.v2 as paddle +import paddle.v2.dataset.mnist as mnist + + +class TestMultipleReader(unittest.TestCase): + def setUp(self): + self.batch_size = 64 + self.pass_num = 3 + # Convert mnist to recordio file + with fluid.program_guard(fluid.Program(), fluid.Program()): + data_file = paddle.batch(mnist.train(), batch_size=self.batch_size) + feeder = fluid.DataFeeder( + feed_list=[ + fluid.layers.data( + name='image', shape=[784]), + fluid.layers.data( + name='label', shape=[1], dtype='int64'), + ], + place=fluid.CPUPlace()) + self.num_batch = fluid.recordio_writer.convert_reader_to_recordio_file( + './mnist.recordio', data_file, feeder) + + def test_main(self): + with fluid.program_guard(fluid.Program(), fluid.Program()): + data_file = fluid.layers.open_recordio_file( + filename='./mnist.recordio', + shapes=[(-1, 784), (-1, 1)], + lod_levels=[0, 0], + dtypes=['float32', 'int64']) + data_file = fluid.layers.create_multi_pass_reader( + reader=data_file, pass_num=self.pass_num) + img, label = fluid.layers.read_file(data_file) + + if fluid.core.is_compiled_with_cuda(): + place = fluid.CUDAPlace(0) + else: + place = fluid.CPUPlace() + + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + batch_count = 0 + while not data_file.eof(): + img_val, = exe.run(fetch_list=[img]) + batch_count += 1 + self.assertLessEqual(img_val.shape[0], self.batch_size) + data_file.reset() + self.assertEqual(batch_count, self.num_batch * self.pass_num) diff --git a/python/paddle/fluid/tests/unittests/test_multiple_reader.py b/python/paddle/fluid/tests/unittests/test_multiple_reader.py new file mode 100644 index 0000000000000000000000000000000000000000..69f8acf81efaba8fc0f3df4cfe3a42dc4e477df2 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_multiple_reader.py @@ -0,0 +1,74 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import paddle.fluid as fluid +import paddle.v2 as paddle +import paddle.v2.dataset.mnist as mnist +from shutil import copyfile + + +class TestMultipleReader(unittest.TestCase): + def setUp(self): + self.batch_size = 64 + # Convert mnist to recordio file + with fluid.program_guard(fluid.Program(), fluid.Program()): + reader = paddle.batch(mnist.train(), batch_size=self.batch_size) + feeder = fluid.DataFeeder( + feed_list=[ # order is image and label + fluid.layers.data( + name='image', shape=[784]), + fluid.layers.data( + name='label', shape=[1], dtype='int64'), + ], + place=fluid.CPUPlace()) + self.num_batch = fluid.recordio_writer.convert_reader_to_recordio_file( + './mnist_0.recordio', reader, feeder) + copyfile('./mnist_0.recordio', './mnist_1.recordio') + copyfile('./mnist_0.recordio', './mnist_2.recordio') + + def main(self, thread_num): + file_list = [ + './mnist_0.recordio', './mnist_1.recordio', './mnist_2.recordio' + ] + with fluid.program_guard(fluid.Program(), fluid.Program()): + data_files = fluid.layers.open_files( + filenames=file_list, + thread_num=thread_num, + shapes=[(-1, 784), (-1, 1)], + lod_levels=[0, 0], + dtypes=['float32', 'int64']) + img, label = fluid.layers.read_file(data_files) + + if fluid.core.is_compiled_with_cuda(): + place = fluid.CUDAPlace(0) + else: + place = fluid.CPUPlace() + + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + batch_count = 0 + while not data_files.eof(): + img_val, = exe.run(fetch_list=[img]) + batch_count += 1 + self.assertLessEqual(img_val.shape[0], self.batch_size) + data_files.reset() + self.assertEqual(batch_count, self.num_batch * 3) + + def test_main(self): + self.main(thread_num=3) # thread number equals to file number + self.main(thread_num=10) # thread number is larger than file number + self.main(thread_num=2) # thread number is less than file number diff --git a/python/paddle/fluid/tests/unittests/test_parallel_op.py b/python/paddle/fluid/tests/unittests/test_parallel_op.py index 1a7551c57b26f576ab286e7b18177b9120261623..79bea148f9398152a02d70946cdc5fff1f47ba6b 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_op.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_op.py @@ -15,6 +15,7 @@ import unittest import paddle.fluid as fluid +import paddle.fluid.profiler as profiler import numpy @@ -60,20 +61,23 @@ class BaseParallelForTest(unittest.TestCase): feed=feed, fetch=fetch, place=gpu, - use_parallel=False) + use_parallel=False, + use_gpu=True) result_gpu_parallel = self._run_test_impl_( callback=callback, feed=feed, fetch=fetch, place=gpu, - use_parallel=True) + use_parallel=True, + use_gpu=True) result_gpu_nccl = self._run_test_impl_( callback=callback, feed=feed, fetch=fetch, place=gpu, use_parallel=True, - use_nccl=True) + use_nccl=True, + use_gpu=True) self._assert_same_(fetch, result_cpu, result_cpu_parallel, result_gpu, result_gpu_parallel, result_gpu_nccl) else: @@ -85,7 +89,8 @@ class BaseParallelForTest(unittest.TestCase): fetch, place, use_parallel=False, - use_nccl=False): + use_nccl=False, + use_gpu=False): """ Run a single test, returns the fetch values Args: @@ -132,7 +137,12 @@ class BaseParallelForTest(unittest.TestCase): exe = fluid.Executor(place) exe.run(startup) - return exe.run(main, feed=feed, fetch_list=fetch) + if use_gpu: + profile_type = 'GPU' + else: + profile_type = 'CPU' + with profiler.profiler(profile_type, 'total', '/tmp/profiler'): + return exe.run(main, feed=feed, fetch_list=fetch) def _assert_same_(self, fetch, *args): """ 
diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_op.py b/python/paddle/fluid/tests/unittests/test_pool2d_op.py
index 964d78f1966aa10e36eeaabe943d44e002d50293..764fa575fba1615de3171e848890b3836e640849 100644
--- a/python/paddle/fluid/tests/unittests/test_pool2d_op.py
+++ b/python/paddle/fluid/tests/unittests/test_pool2d_op.py
@@ -78,20 +78,22 @@ def avg_pool2D_forward_naive(x,
 
 
 class TestPool2d_Op(OpTest):
     def setUp(self):
+        self.op_type = "pool2d"
         self.use_cudnn = False
         self.use_mkldnn = False
+        self.dtype = np.float32
         self.init_test_case()
         self.init_global_pool()
-        self.init_op_type()
+        self.init_kernel_type()
         self.init_pool_type()
         self.init_ceil_mode()
         if self.global_pool:
             self.paddings = [0 for _ in range(len(self.paddings))]
-        input = np.random.random(self.shape).astype("float32")
+        input = np.random.random(self.shape).astype(self.dtype)
         output = self.pool2D_forward_naive(input, self.ksize, self.strides,
                                            self.paddings, self.global_pool,
-                                           self.ceil_mode).astype("float32")
-        self.inputs = {'X': input}
+                                           self.ceil_mode).astype(self.dtype)
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)}
 
         self.attrs = {
             'strides': self.strides,
@@ -105,7 +107,7 @@ class TestPool2d_Op(OpTest):
             'data_format': 'AnyLayout'  # TODO(dzhwinter) : should be fix latter
         }
 
-        self.outputs = {'Out': output.astype('float32')}
+        self.outputs = {'Out': output}
 
     def test_check_output(self):
         if self.use_cudnn:
@@ -115,6 +117,8 @@ class TestPool2d_Op(OpTest):
             self.check_output()
 
     def test_check_grad(self):
+        if self.dtype == np.float16:
+            return
         if self.use_cudnn and self.pool_type != "max":
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
@@ -128,8 +132,8 @@ class TestPool2d_Op(OpTest):
         self.strides = [1, 1]
         self.paddings = [0, 0]
 
-    def init_op_type(self):
-        self.op_type = "pool2d"
+    def init_kernel_type(self):
+        pass
 
     def init_pool_type(self):
         self.pool_type = "avg"
@@ -149,9 +153,6 @@ class TestCase1(TestPool2d_Op):
         self.strides = [1, 1]
         self.paddings = [0, 0]
 
-    def init_op_type(self):
-        self.op_type = "pool2d"
-
     def init_pool_type(self):
         self.pool_type = "avg"
         self.pool2D_forward_naive = avg_pool2D_forward_naive
@@ -167,9 +168,6 @@ class TestCase2(TestPool2d_Op):
         self.strides = [1, 1]
         self.paddings = [1, 1]
 
-    def init_op_type(self):
-        self.op_type = "pool2d"
-
     def init_pool_type(self):
         self.pool_type = "avg"
         self.pool2D_forward_naive = avg_pool2D_forward_naive
@@ -179,27 +177,18 @@ class TestCase2(TestPool2d_Op):
 
 
 class TestCase3(TestPool2d_Op):
-    def init_op_type(self):
-        self.op_type = "pool2d"
-
     def init_pool_type(self):
         self.pool_type = "max"
         self.pool2D_forward_naive = max_pool2D_forward_naive
 
 
 class TestCase4(TestCase1):
-    def init_op_type(self):
-        self.op_type = "pool2d"
-
     def init_pool_type(self):
         self.pool_type = "max"
         self.pool2D_forward_naive = max_pool2D_forward_naive
 
 
 class TestCase5(TestCase2):
-    def init_op_type(self):
-        self.op_type = "pool2d"
-
     def init_pool_type(self):
         self.pool_type = "max"
         self.pool2D_forward_naive = max_pool2D_forward_naive
@@ -207,39 +196,105 @@ class TestCase5(TestCase2):
 
 
 #--------------------test pool2d--------------------
 class TestCUDNNCase1(TestPool2d_Op):
-    def init_op_type(self):
+    def init_kernel_type(self):
         self.use_cudnn = True
-        self.op_type = "pool2d"
+
+
+class TestFP16CUDNNCase1(TestPool2d_Op):
+    def init_kernel_type(self):
+        self.use_cudnn = True
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=1e-3)
 
 
 class TestCUDNNCase2(TestCase1):
-    def init_op_type(self):
+    def init_kernel_type(self):
         self.use_cudnn = True
-        self.op_type = "pool2d"
+
+
+class TestFP16CUDNNCase2(TestCase1):
+    def init_kernel_type(self):
+        self.use_cudnn = True
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=1e-3)
 
 
 class TestCUDNNCase3(TestCase2):
-    def init_op_type(self):
+    def init_kernel_type(self):
         self.use_cudnn = True
-        self.op_type = "pool2d"
+
+
+class TestFP16CUDNNCase3(TestCase2):
+    def init_kernel_type(self):
+        self.use_cudnn = True
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=1e-3)
 
 
 class TestCUDNNCase4(TestCase3):
-    def init_op_type(self):
+    def init_kernel_type(self):
         self.use_cudnn = True
-        self.op_type = "pool2d"
+
+
+class TestFP16CUDNNCase4(TestCase3):
+    def init_kernel_type(self):
+        self.use_cudnn = True
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=1e-3)
 
 
 class TestCUDNNCase5(TestCase4):
-    def init_op_type(self):
+    def init_kernel_type(self):
         self.use_cudnn = True
-        self.op_type = "pool2d"
+
+
+class TestFP16CUDNNCase5(TestCase4):
+    def init_kernel_type(self):
+        self.use_cudnn = True
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=1e-3)
 
 
 class TestCUDNNCase6(TestCase5):
-    def init_op_type(self):
+    def init_kernel_type(self):
         self.use_cudnn = True
-        self.op_type = "pool2d"
+
+
+class TestFP16CUDNNCase6(TestCase5):
+    def init_kernel_type(self):
+        self.use_cudnn = True
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=1e-3)
 
 
 class TestCeilModeCase1(TestCUDNNCase1):
@@ -264,39 +319,33 @@ class TestCeilModeCase4(TestCase2):
 
 
 #--------------------test pool2d MKLDNN--------------------
 class TestMKLDNNCase1(TestPool2d_Op):
-    def init_op_type(self):
+    def init_kernel_type(self):
         self.use_mkldnn = True
-        self.op_type = "pool2d"
 
 
 class TestMKLDNNCase2(TestCase1):
-    def init_op_type(self):
+    def init_kernel_type(self):
         self.use_mkldnn = True
-        self.op_type = "pool2d"
 
 
 class TestMKLDNNCase3(TestCase2):
-    def init_op_type(self):
+    def init_kernel_type(self):
         self.use_mkldnn = True
-        self.op_type = "pool2d"
 
 
 class TestMKLDNNCase4(TestCase3):
-    def init_op_type(self):
+    def init_kernel_type(self):
         self.use_mkldnn = True
-        self.op_type = "pool2d"
 
 
 class TestMKLDNNCase5(TestCase4):
-    def init_op_type(self):
+    def init_kernel_type(self):
         self.use_mkldnn = True
-        self.op_type = "pool2d"
 
 
 class TestMKLDNNCase6(TestCase5):
-    def init_op_type(self):
+    def init_kernel_type(self):
         self.use_mkldnn = True
-        self.op_type = "pool2d"
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/test_profiler.py b/python/paddle/fluid/tests/unittests/test_profiler.py
index 1da6b94eea30e65913ce713e0e5e355507534161..cf6fe14a86aa1ab6ea3f60ad15f33d708e9b803a 100644
--- a/python/paddle/fluid/tests/unittests/test_profiler.py
+++ b/python/paddle/fluid/tests/unittests/test_profiler.py
@@ -31,8 +31,22 @@ class TestProfiler(unittest.TestCase):
         with fluid.program_guard(main_program, startup_program):
             image = fluid.layers.data(name='x', shape=[784], dtype='float32')
-            hidden1 = fluid.layers.fc(input=image, size=128, act='relu')
-            hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu')
+            hidden1 = fluid.layers.fc(input=image, size=64, act='relu')
+            i = layers.zeros(shape=[1], dtype='int64')
+            counter = fluid.layers.zeros(
+                shape=[1], dtype='int64', force_cpu=True)
+            until = layers.fill_constant([1], dtype='int64', value=10)
+            data_arr = layers.array_write(hidden1, i)
+            cond = fluid.layers.less_than(x=counter, y=until)
+            while_op = fluid.layers.While(cond=cond)
+            with while_op.block():
+                hidden_n = fluid.layers.fc(input=hidden1, size=64, act='relu')
+                layers.array_write(hidden_n, i, data_arr)
+                fluid.layers.increment(x=counter, value=1, in_place=True)
+                layers.less_than(x=counter, y=until, cond=cond)
+
+            hidden_n = layers.array_read(data_arr, i)
+            hidden2 = fluid.layers.fc(input=hidden_n, size=64, act='relu')
             predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
             label = fluid.layers.data(name='y', shape=[1], dtype='int64')
             cost = fluid.layers.cross_entropy(input=predict, label=label)
diff --git a/python/paddle/fluid/tests/unittests/test_recordio_reader.py b/python/paddle/fluid/tests/unittests/test_recordio_reader.py
new file mode 100644
index 0000000000000000000000000000000000000000..24a0074d9b9621d902d12eb8cb29d9b65be22ed3
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_recordio_reader.py
@@ -0,0 +1,81 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+import paddle.fluid as fluid
+import paddle.v2 as paddle
+import paddle.v2.dataset.mnist as mnist
+
+
+class TestRecordIO(unittest.TestCase):
+    def setUp(self):
+        # Convert mnist to recordio file
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+            reader = paddle.batch(mnist.train(), batch_size=32)
+            feeder = fluid.DataFeeder(
+                feed_list=[  # order is image and label
+                    fluid.layers.data(
+                        name='image', shape=[784]),
+                    fluid.layers.data(
+                        name='label', shape=[1], dtype='int64'),
+                ],
+                place=fluid.CPUPlace())
+            self.num_batches = fluid.recordio_writer.convert_reader_to_recordio_file(
+                './mnist.recordio', reader, feeder)
+
+    def test_main(self, decorator_callback=None):
+        # use new program
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+            data_file = fluid.layers.open_recordio_file(
+                './mnist.recordio',
+                shapes=[[-1, 784], [-1, 1]],
+                lod_levels=[0, 0],
+                dtypes=['float32', 'int64'])
+            if decorator_callback is not None:
+                data_file = decorator_callback(data_file)
+            img, label = fluid.layers.read_file(data_file)
+
+            hidden = fluid.layers.fc(input=img, size=100, act='tanh')
+            prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
+            loss = fluid.layers.cross_entropy(input=prediction, label=label)
+            avg_loss = fluid.layers.mean(loss)
+
+            fluid.optimizer.Adam(learning_rate=1e-3).minimize(avg_loss)
+
+            if fluid.core.is_compiled_with_cuda():
+                place = fluid.CUDAPlace(0)
+            else:
+                place = fluid.CPUPlace()
+
+            exe = fluid.Executor(place)
+            exe.run(fluid.default_startup_program())
+            avg_loss_np = []
+
+            # train a pass
+            batch_id = 0
+            while not data_file.eof():
+                tmp, = exe.run(fetch_list=[avg_loss])
+                avg_loss_np.append(tmp)
+                batch_id += 1
+            data_file.reset()
+            self.assertEqual(batch_id, self.num_batches)
+            self.assertLess(avg_loss_np[-1], avg_loss_np[0])
+
+    def test_shuffle_reader(self):
+        self.test_main(decorator_callback=lambda reader: fluid.layers.create_shuffle_reader(reader, buffer_size=200))
+
+    def test_double_buffer_reader(self):
+        self.test_main(decorator_callback=lambda reader: fluid.layers.create_double_buffer_reader(reader,
+            place='cuda:0' if fluid.core.is_compiled_with_cuda() else 'cpu'))
diff --git a/python/paddle/fluid/tests/unittests/test_reduce_op.py b/python/paddle/fluid/tests/unittests/test_reduce_op.py
index 5e656bddb7927b6e7935255c120e5e858505125a..9b0cc3534dc551e7fdf7ef8111cad1c172f8bfa4 100644
--- a/python/paddle/fluid/tests/unittests/test_reduce_op.py
+++ b/python/paddle/fluid/tests/unittests/test_reduce_op.py
@@ -70,6 +70,19 @@ class TestMinOp(OpTest):
         self.check_output()
 
 
+class TestProdOp(OpTest):
+    def setUp(self):
+        self.op_type = "reduce_prod"
+        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
+        self.outputs = {'Out': self.inputs['X'].prod(axis=0)}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
+
+
 class TestKeepDimReduce(OpTest):
     def setUp(self):
         self.op_type = "reduce_sum"
diff --git a/python/paddle/fluid/tests/unittests/test_scatter_op.py b/python/paddle/fluid/tests/unittests/test_scatter_op.py
index bb02a40d449860cf6c0576662e79a5e36e6e0635..fb1728743630b3ea908ae835444eff7fd71b72c8 100644
--- a/python/paddle/fluid/tests/unittests/test_scatter_op.py
+++ b/python/paddle/fluid/tests/unittests/test_scatter_op.py
@@ -25,7 +25,7 @@ class TestScatterOp(OpTest):
         updates_np = np.random.random((2, 3)).astype("float32")
         output_np = np.copy(ref_np)
         output_np[index_np] = updates_np
-        self.inputs = {'Ref': ref_np, 'Index': index_np, 'Updates': updates_np}
+        self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
         self.outputs = {'Out': output_np}
 
     def test_check_output(self):
diff --git a/python/paddle/fluid/tests/unittests/test_sequence_expand.py b/python/paddle/fluid/tests/unittests/test_sequence_expand.py
index 957fa5d2c4a795cfd01047c1b7845674e4c1d549..7feb509c4d6f5768552fc2515081f7e68f420967 100644
--- a/python/paddle/fluid/tests/unittests/test_sequence_expand.py
+++ b/python/paddle/fluid/tests/unittests/test_sequence_expand.py
@@ -27,12 +27,36 @@ class TestSequenceExpand(OpTest):
     def compute(self):
         x = self.inputs['X']
         x_data, x_lod = x if type(x) == tuple else (x, None)
-        n = 1 + x_data.shape[0] if not x_lod else len(x_lod[0])
         y_data, y_lod = self.inputs['Y']
-        repeats = [((y_lod[-1][i + 1] - y_lod[-1][i]))
-                   for i in range(len(y_lod[-1]) - 1)]
-        out = x_data.repeat(repeats, axis=0)
-        self.outputs = {'Out': out}
+
+        if hasattr(self, 'attrs'):
+            ref_level = self.attrs['ref_level']
+        else:
+            ref_level = len(y_lod) - 1
+
+        out = np.zeros(shape=((0, ) + x_data.shape[1:]), dtype=x_data.dtype)
+
+        if x_lod is None:
+            x_idx = [i for i in xrange(x_data.shape[0] + 1)]
+        else:
+            x_idx = x_lod[0]
+            out_lod = [[0]]
+
+        for i in xrange(1, len(y_lod[ref_level])):
+            repeat_num = y_lod[ref_level][i] - y_lod[ref_level][i - 1]
+            x_len = x_idx[i] - x_idx[i - 1]
+            if repeat_num > 0:
+                x_sub = x_data[x_idx[i - 1]:x_idx[i], :]
+                x_sub = np.repeat(x_sub, repeat_num, axis=0)
+                out = np.vstack((out, x_sub))
+                if x_lod is not None:
+                    for j in xrange(repeat_num):
+                        out_lod[0].append(out_lod[0][-1] + x_len)
+
+        if x_lod is None:
+            self.outputs = {'Out': out}
+        else:
+            self.outputs = {'Out': (out, out_lod)}
 
     def setUp(self):
         self.op_type = 'sequence_expand'
@@ -52,7 +76,8 @@ class TestSequenceExpandCase1(TestSequenceExpand):
         x_lod = [[0, 2, 5]]
         y_data = np.random.uniform(0.1, 1, [13, 1]).astype('float32')
         y_lod = [[0, 2, 5], [0, 2, 4, 7, 10, 13]]
-        self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}
+        self.inputs = {'X': x_data, 'Y': (y_data, y_lod)}
+        self.attrs = {'ref_level': 0}
 
 
 class TestSequenceExpandCase2(TestSequenceExpand):
@@ -60,8 +85,9 @@ class TestSequenceExpandCase2(TestSequenceExpand):
     def set_data(self):
         x_data = np.random.uniform(0.1, 1, [1, 2, 2]).astype('float32')
         x_lod = [[0, 1]]
         y_data = np.random.uniform(0.1, 1, [2, 2, 2]).astype('float32')
-        y_lod = [[0, 2]]
+        y_lod = [[0, 2], [0, 2]]
         self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}
+        self.attrs = {'ref_level': 0}
 
 
 class TestSequenceExpandCase3(TestSequenceExpand):
@@ -75,14 +101,9 @@ class TestSequenceExpandCase4(TestSequenceExpand):
     def set_data(self):
-        x_data = np.array(
-            [0.1, 0.3, 0.2, 0.15, 0.25, 0.2, 0.15, 0.25, 0.1, 0.3]).reshape(
-                [2, 5]).astype('float32')
-        x_lod = [[
-            0,
-            1,
-            2,
-        ]]
+        data = [0.1, 0.3, 0.2, 0.15, 0.25, 0.2, 0.15, 0.25, 0.1, 0.3]
+        x_data = np.array(data).reshape([5, 2]).astype('float32')
+        x_lod = [[0, 2, 5]]
         y_data = np.random.uniform(0.1, 1, [2, 1]).astype('float32')
         y_lod = [[0, 1, 2], [0, 1, 2]]
         self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}
diff --git a/python/paddle/fluid/tests/unittests/test_sequence_softmax_op.py b/python/paddle/fluid/tests/unittests/test_sequence_softmax_op.py
index 9e5c1e7a3d0bdf514de11e797d7139f577002c52..d6dc99bb3106feee33daa52bffb386f07cc16de5 100644
--- a/python/paddle/fluid/tests/unittests/test_sequence_softmax_op.py
+++ b/python/paddle/fluid/tests/unittests/test_sequence_softmax_op.py
@@ -16,11 +16,15 @@
 import unittest
 import numpy as np
 from op_test import OpTest
 from test_softmax_op import stable_softmax
+import paddle.fluid.core as core
 
 
 class TestSequenceSoftmaxOp(OpTest):
     def setUp(self):
         self.op_type = "sequence_softmax"
+        self.use_cudnn = False
+        self.init_op_type()
+
         x = np.random.uniform(0.1, 1, (11, 1)).astype("float32")
         lod = [[0, 4, 5, 8, 11]]
@@ -34,12 +38,31 @@ class TestSequenceSoftmaxOp(OpTest):
 
         self.inputs = {"X": (x, lod)}
         self.outputs = {"Out": out}
+        self.attrs = {'use_cudnn': self.use_cudnn, }
+
+    def init_op_type(self):
+        pass
 
     def test_check_output(self):
-        self.check_output()
+        if self.use_cudnn:
+            place = core.CUDAPlace(0)
+            self.check_output_with_place(place, atol=1e-5)
+        else:
+            self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(["X"], "Out", max_relative_error=0.01)
+        if self.use_cudnn:
+            place = core.CUDAPlace(0)
+            self.check_grad_with_place(
+                place, ["X"], "Out", max_relative_error=0.01)
+        else:
+            self.check_grad(["X"], "Out", max_relative_error=0.01)
+
+
+# ----------------cudnn SequenceSoftmax----------------
+class TestSequenceSoftmaxCUDNNOp(TestSequenceSoftmaxOp):
+    def init_op_type(self):
+        self.use_cudnn = True
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py
index 8f8312edca7e2d98eb4e881f671c6afdda01c57a..33d60c7e31ce0817ad26ea1c1c974339936052d3 100644
--- a/python/paddle/fluid/tests/unittests/test_softmax_op.py
+++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py
@@ -15,6 +15,7 @@
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid.core as core
 
 
 def stable_softmax(x):
@@ -27,18 +28,61 @@
 class TestSoftmaxOp(OpTest):
     def setUp(self):
         self.op_type = "softmax"
-        self.inputs = {
-            'X': np.random.uniform(0.1, 1, [10, 10]).astype("float32")
-        }
-        self.outputs = {
-            'Out': np.apply_along_axis(stable_softmax, 1, self.inputs['X'])
+        self.use_cudnn = False
+        self.use_mkldnn = False
+        self.dtype = np.float32
+        self.init_kernel_type()
+
+        x = np.random.uniform(0.1, 1, [10, 10]).astype(self.dtype)
+        out = np.apply_along_axis(stable_softmax, 1, x)
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.outputs = {'Out': out}
+        self.attrs = {
+            'use_cudnn': self.use_cudnn,
+            'use_mkldnn': self.use_mkldnn
         }
 
+    def init_kernel_type(self):
+        pass
+
     def test_check_output(self):
-        self.check_output()
+        if self.use_cudnn:
+            place = core.CUDAPlace(0)
+            self.check_output_with_place(place, atol=1e-5)
+        else:
+            self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        if self.dtype == np.float16:
+            return
+        if self.use_cudnn:
+            place = core.CUDAPlace(0)
+            self.check_grad_with_place(
+                place, ["X"], "Out", max_relative_error=0.01)
+        else:
+            self.check_grad(["X"], "Out", max_relative_error=0.01)
+
+
+class TestSoftmaxCUDNNOp(TestSoftmaxOp):
+    def init_kernel_type(self):
+        self.use_cudnn = True
+
+
+class TestSoftmaxFP16CUDNNOp(TestSoftmaxOp):
+    def init_kernel_type(self):
+        self.use_cudnn = True
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=1e-3)
+
+
+class TestSoftmaxMKLDNNOp(TestSoftmaxOp):
+    def init_kernel_type(self):
+        self.use_mkldnn = True
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
index 889fea2ce66e64d439b51498722e571f48cd1f96..c0d9fc8f22a7c4f791d80a9cad87d003b5d54299 100644
--- a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
+++ b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
@@ -26,7 +26,7 @@ class TestSoftmaxWithCrossEntropyOp(OpTest):
 
     def setUp(self):
         self.op_type = "softmax_with_cross_entropy"
-        batch_size = 2
+        batch_size = 41
         class_num = 37
 
         logits = np.random.uniform(0.1, 1.0,
@@ -59,7 +59,7 @@ class TestSoftmaxWithCrossEntropyOp2(OpTest):
 
     def setUp(self):
         self.op_type = "softmax_with_cross_entropy"
-        batch_size = 2
+        batch_size = 41
         class_num = 37
 
         logits = np.random.uniform(0.1, 1.0,
diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py
index eac2cb316835fda0a52ac9895eaa80914d0f1e5b..3684d1e8f73a21d9c6f2a5985f8b40ed6984057b 100644
--- a/python/paddle/trainer_config_helpers/layers.py
+++ b/python/paddle/trainer_config_helpers/layers.py
@@ -2747,17 +2747,17 @@ def img_pool_layer(input,
 
     .. math::
 
-       w & = 1 + \\frac{ceil(input\_width + 2 * padding - pool\_size)}{stride}
+       w & = 1 + ceil(\\frac{input\_width + 2 * padding - pool\_size}{stride})
 
-       h & = 1 + \\frac{ceil(input\_height + 2 * padding\_y - pool\_size\_y)}{stride\_y}
+       h & = 1 + ceil(\\frac{input\_height + 2 * padding\_y - pool\_size\_y}{stride\_y})
 
     - ceil_mode=False:
 
     .. math::
 
-       w & = 1 + \\frac{floor(input\_width + 2 * padding - pool\_size)}{stride}
+       w & = 1 + floor(\\frac{input\_width + 2 * padding - pool\_size}{stride})
 
-       h & = 1 + \\frac{floor(input\_height + 2 * padding\_y - pool\_size\_y)}{stride\_y}
+       h & = 1 + floor(\\frac{input\_height + 2 * padding\_y - pool\_size\_y}{stride\_y})
 
     The example usage is:
diff --git a/python/setup.py.in b/python/setup.py.in
index f830039a3af581d593d510326f15139377cb25f1..4cb5409524457b7bc5a99c88a0dbbfc8834923fa 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -62,20 +62,22 @@ write_version_py(filename='@PADDLE_SOURCE_DIR@/python/paddle/version.py')
 
 
 packages=['paddle',
-          'paddle.proto',
-          'paddle.trainer',
-          'paddle.trainer_config_helpers',
           'paddle.utils',
-          'paddle.v2',
-          'paddle.v2.dataset',
-          'paddle.v2.reader',
-          'paddle.v2.master',
-          'paddle.v2.plot',
           'paddle.fluid',
           'paddle.fluid.proto',
           'paddle.fluid.proto.profiler',
-          'paddle.fluid.layers',
-          'py_paddle']
+          'paddle.fluid.layers']
+
+if '${WITH_FLUID}'== 'OFF':
+    packages+=['paddle.proto',
+               'paddle.trainer',
+               'paddle.trainer_config_helpers',
+               'paddle.v2',
+               'paddle.v2.dataset',
+               'paddle.v2.reader',
+               'paddle.v2.master',
+               'paddle.v2.plot',
+               'py_paddle']
 
 with open('@PADDLE_SOURCE_DIR@/python/requirements.txt') as f:
     setup_requires = f.read().splitlines()
@@ -84,11 +86,29 @@ if '${CMAKE_SYSTEM_PROCESSOR}' not in ['arm', 'armv7-a', 'aarch64']:
     setup_requires+=['opencv-python']
 
 # the prefix is sys.prefix which should always be usr
-paddle_bin_dir = 'opt/paddle/bin'
-paddle_bins = ['${PADDLE_BINARY_DIR}/paddle/trainer/paddle_trainer',
-               '${PADDLE_BINARY_DIR}/paddle/trainer/paddle_merge_model',
-               '${PADDLE_BINARY_DIR}/paddle/pserver/paddle_pserver_main',
-               '${PADDLE_BINARY_DIR}/paddle/scripts/paddle']
+paddle_bins = ''
+if '${WITH_FLUID}'== 'OFF':
+    paddle_bin_dir = 'opt/paddle/bin'
+    paddle_bins = ['${PADDLE_BINARY_DIR}/paddle/trainer/paddle_trainer',
+                   '${PADDLE_BINARY_DIR}/paddle/trainer/paddle_merge_model',
+                   '${PADDLE_BINARY_DIR}/paddle/pserver/paddle_pserver_main',
+                   '${PADDLE_BINARY_DIR}/paddle/scripts/paddle']
+
+package_data={'paddle.fluid': ['core.so']}
+if '${WITH_FLUID}'== 'OFF':
+    package_data['paddle.v2.master']=['libpaddle_master.so']
+    package_data['py_paddle']=['*.py','_swig_paddle.so']
+
+package_dir={
+    '': '${CMAKE_CURRENT_SOURCE_DIR}',
+    # The paddle.fluid.proto packages are generated while compiling,
+    # so their entries point into the build directory.
+    'paddle.fluid.proto.profiler': '${PADDLE_BINARY_DIR}/paddle/fluid/platform',
+    'paddle.fluid.proto': '${PADDLE_BINARY_DIR}/paddle/fluid/framework',
+}
+if '${WITH_FLUID}'== 'OFF':
+    package_dir['py_paddle']='${PADDLE_SOURCE_DIR}/paddle/py_paddle'
+
 
 paddle_rt_lib_dir = 'lib'
 paddle_rt_libs = ['${WARPCTC_LIBRARIES}']
@@ -101,19 +121,8 @@ setup(name='${PACKAGE_NAME}',
       install_requires=setup_requires,
       packages=packages,
       ext_modules=[Extension('_foo', ['stub.cc'])],
-      package_data={
-          'paddle.v2.master': ['libpaddle_master.so'],
-          'paddle.fluid': ['core.so'],
-          'py_paddle':['*.py','_swig_paddle.so']
-      },
-      package_dir={
-          '': '${CMAKE_CURRENT_SOURCE_DIR}',
-          # The paddle.fluid.proto will be generated while compiling.
-          # So that package points to other directory.
-          'paddle.fluid.proto.profiler': '${PADDLE_BINARY_DIR}/paddle/fluid/platform',
-          'paddle.fluid.proto': '${PADDLE_BINARY_DIR}/paddle/fluid/framework',
-          'py_paddle': '${PADDLE_SOURCE_DIR}/paddle/py_paddle'
-      },
+      package_data=package_data,
+      package_dir=package_dir,
       scripts=paddle_bins,
       data_files=[(paddle_rt_lib_dir, paddle_rt_libs)]
 )
diff --git a/tools/timeline.py b/tools/timeline.py
index ee83a1baecdd4243bb6c546486a837393980fb65..f4083c824e7333a74661d096d4954609f767c83e 100644
--- a/tools/timeline.py
+++ b/tools/timeline.py
@@ -121,27 +121,34 @@ class Timeline(object):
 
     def _allocate_pids(self):
         for event in self._profile_pb.events:
-            if event.device_id not in self._devices:
-                pid = self._allocate_pid()
-                self._devices[event.device_id] = pid
-                if event.device_id >= 0:
-                    self._chrome_trace.emit_pid("gpu:%s:stream:%d" %
-                                                (pid, event.stream_id), pid)
-                elif event.device_id == -1:
-                    self._chrome_trace.emit_pid("cpu:thread_hash:%d" %
-                                                event.stream_id, pid)
+            if event.type == profiler_pb2.Event.CPU:
+                if (event.device_id, "CPU") not in self._devices:
+                    pid = self._allocate_pid()
+                    self._devices[(event.device_id, "CPU")] = pid
+                    self._chrome_trace.emit_pid("cpu:block:%d" %
+                                                (event.device_id), pid)
+            elif event.type == profiler_pb2.Event.GPUKernel:
+                if (event.device_id, "GPUKernel") not in self._devices:
+                    pid = self._allocate_pid()
+                    self._devices[(event.device_id, "GPUKernel")] = pid
+                    self._chrome_trace.emit_pid("gpu:%d" % (event.device_id),
+                                                pid)
 
     def _allocate_events(self):
         for event in self._profile_pb.events:
-            pid = self._devices[event.device_id]
+            if event.type == profiler_pb2.Event.CPU:
+                type = "CPU"
+            elif event.type == profiler_pb2.Event.GPUKernel:
+                type = "GPUKernel"
+            pid = self._devices[(event.device_id, type)]
             args = {'name': event.name}
             if event.memcopy.bytes > 0:
                 args = {'mem_bytes': event.memcopy.bytes}
             # TODO(panyx0718): Chrome tracing only handles ms. However, some
             # ops takes micro-seconds. Hence, we keep the ns here.
-            self._chrome_trace.emit_region(event.start_ns,
-                                           (event.end_ns - event.start_ns) /
-                                           1.0, pid, 0, 'Op', event.name, args)
+            self._chrome_trace.emit_region(
+                event.start_ns, (event.end_ns - event.start_ns) / 1.0, pid,
+                event.sub_device_id, 'Op', event.name, args)
 
     def generate_chrome_trace(self):
         self._allocate_pids()
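The tools/timeline.py hunks above stop keying Chrome-trace pids by device_id alone and instead key them by (device_id, event kind), so a CPU block and a GPU kernel that happen to share a numeric device id no longer collapse into one trace row; event.sub_device_id also replaces the constant 0 previously passed to emit_region. A self-contained sketch of the new pid-allocation scheme, using plain (device_id, kind) tuples as illustrative stand-ins for the profiler protobuf events:

    def allocate_pids(events):
        # One pid per (device_id, kind) pair, mirroring _allocate_pids above.
        devices = {}  # (device_id, kind) -> pid
        labels = {}   # pid -> row label emitted into the Chrome trace
        for device_id, kind in events:
            key = (device_id, kind)
            if key in devices:
                continue
            pid = len(devices)  # stands in for Timeline._allocate_pid()
            devices[key] = pid
            if kind == 'CPU':
                labels[pid] = 'cpu:block:%d' % device_id
            elif kind == 'GPUKernel':
                labels[pid] = 'gpu:%d' % device_id
        return devices, labels

    # A CPU block and a GPU kernel on device 0 now get distinct trace rows.
    devices, labels = allocate_pids([(0, 'CPU'), (0, 'GPUKernel'), (1, 'CPU')])
    assert devices[(0, 'CPU')] != devices[(0, 'GPUKernel')]
    assert labels[devices[(0, 'GPUKernel')]] == 'gpu:0'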