diff --git a/CMakeLists.txt b/CMakeLists.txt
index eabacbf7ccd02252bc5e1bc19391ae75722242ee..f6bf506662da638988111d4dcdf1d7974b3ea725 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -38,7 +38,6 @@ if(NOT CMAKE_CROSSCOMPILING)
 endif(NOT CMAKE_CROSSCOMPILING)
 find_package(Git REQUIRED)
 find_package(Threads REQUIRED)
-
 include(simd)
 
 ################################ Configurations #######################################
@@ -77,7 +76,7 @@ option(WITH_INFERENCE_API_TEST "Test fluid inference high-level api interface"
 option(WITH_SYSTEM_BLAS "Use system blas library" OFF)
 option(PY_VERSION "Compile PaddlePaddle with python3 support" ${PY_VERSION})
 option(WITH_FAST_MATH "Make use of fast math library, might affect the precision to some extent" ON)
-option(WITH_PREBUILD_OPENBLAS "Make use of the pre-built openblas library" ON)
+option(WITH_PREBUILD_OPENBLAS "Make use of the pre-built openblas library" ${WIN32})
 
 # PY_VERSION
 if(NOT PY_VERSION)
diff --git a/cmake/cuda.cmake b/cmake/cuda.cmake
index 964d5fd45b350db2e5948574f53a427e53484ff4..00b7d5bfbec301555641a60007f8894d72135e7a 100644
--- a/cmake/cuda.cmake
+++ b/cmake/cuda.cmake
@@ -172,18 +172,21 @@ set(CUDA_PROPAGATE_HOST_FLAGS OFF)
 # Release/Debug flags set by cmake. Such as -O3 -g -DNDEBUG etc.
 # So, don't set these flags here.
+
 if (NOT WIN32) # windows msvc2015 support c++11 natively.
-# -std=c++11 -fPIC not recoginize by msvc, -Xcompiler will be added by cmake.
+# -std=c++11 -fPIC not recoginize by msvc
 list(APPEND CUDA_NVCC_FLAGS "-std=c++11")
-list(APPEND CUDA_NVCC_FLAGS "-Xcompiler -fPIC")
+# in cuda9, suppress cuda warning on eigen with "-w"
+list(APPEND CUDA_NVCC_FLAGS "-w" "-Xcompiler -fPIC")
+else(NOT WIN32)
+list(APPEND CUDA_NVCC_FLAGS "-w" "-Xcompiler -fPIC" "-Xcompiler /w")
 endif(NOT WIN32)
 
 if(WITH_FAST_MATH)
   # Make use of fast math library. https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html
   list(APPEND CUDA_NVCC_FLAGS "--use_fast_math")
-endif()
-# in cuda9, suppress cuda warning on eigen
-list(APPEND CUDA_NVCC_FLAGS "-w")
+endif(WITH_FAST_MATH)
+
 # Set :expt-relaxed-constexpr to suppress Eigen warnings
 list(APPEND CUDA_NVCC_FLAGS "--expt-relaxed-constexpr")
diff --git a/cmake/cudnn.cmake b/cmake/cudnn.cmake
index 09bec347dbd569203103eccc7dbc0521c291bc0a..6720e1f3008c8fb9944781c4ba75d629b51c2b28 100644
--- a/cmake/cudnn.cmake
+++ b/cmake/cudnn.cmake
@@ -53,7 +53,6 @@ find_library(CUDNN_LIBRARY NAMES ${CUDNN_LIB_NAME} # libcudnn_static.a
   NO_DEFAULT_PATH
   DOC "Path to cuDNN library.")
 
-
 if(CUDNN_INCLUDE_DIR AND CUDNN_LIBRARY)
   set(CUDNN_FOUND ON)
 else()
@@ -88,7 +87,7 @@ if(CUDNN_FOUND)
 
   if(NOT CUDNN_MAJOR_VERSION)
     set(CUDNN_VERSION "???")
-  else()
+  else()
     math(EXPR CUDNN_VERSION
       "${CUDNN_MAJOR_VERSION} * 1000 +
        ${CUDNN_MINOR_VERSION} * 100 + ${CUDNN_PATCHLEVEL_VERSION}")
diff --git a/cmake/external/boost.cmake b/cmake/external/boost.cmake
index 7c19183df45d325343d0cecf82152773ffa57f39..8b9b66958a7c86e260089a393ab16aded1ab488c 100644
--- a/cmake/external/boost.cmake
+++ b/cmake/external/boost.cmake
@@ -33,42 +33,23 @@ MESSAGE(STATUS "BOOST_TAR: ${BOOST_TAR}, BOOST_URL: ${BOOST_URL}")
 set(BOOST_SOURCES_DIR ${THIRD_PARTY_PATH}/boost)
 set(BOOST_DOWNLOAD_DIR "${BOOST_SOURCES_DIR}/src/${BOOST_PROJECT}")
-if (WIN32)
-  set(BOOST_INCLUDE_DIR "${BOOST_DOWNLOAD_DIR}" CACHE PATH "boost include directory." FORCE)
-else(WIN32)
-  set(BOOST_INCLUDE_DIR "${BOOST_DOWNLOAD_DIR}/${BOOST_TAR}" CACHE PATH "boost include directory." FORCE)
-endif (WIN32)
-set_directory_properties(PROPERTIES CLEAN_NO_CUSTOM 1)
+set(BOOST_INCLUDE_DIR "${BOOST_DOWNLOAD_DIR}" CACHE PATH "boost include directory." FORCE)
+set_directory_properties(PROPERTIES CLEAN_NO_CUSTOM 1)
 
 include_directories(${BOOST_INCLUDE_DIR})
 
-if (WIN32)
-  ExternalProject_Add(
-      ${BOOST_PROJECT}
-      ${EXTERNAL_PROJECT_LOG_ARGS}
-      DOWNLOAD_DIR          ${BOOST_DOWNLOAD_DIR}
-      URL                   ${BOOST_URL}
-      DOWNLOAD_NO_PROGRESS  0
-      PREFIX                ${BOOST_SOURCES_DIR}
-      CONFIGURE_COMMAND     ""
-      BUILD_COMMAND         ""
-      INSTALL_COMMAND       ""
-      UPDATE_COMMAND        ""
-      )
-else()
-  ExternalProject_Add(
-      ${BOOST_PROJECT}
-      ${EXTERNAL_PROJECT_LOG_ARGS}
-      DOWNLOAD_DIR          ${BOOST_DOWNLOAD_DIR}
-      DOWNLOAD_COMMAND      "wget --no-check-certificate ${BOOST_URL} -c -q -O ${BOOST_TAR}.tar.gz
-                             && tar zxf ${BOOST_TAR}.tar.gz"
-      DOWNLOAD_NO_PROGRESS  0
-      PREFIX                ${BOOST_SOURCES_DIR}
-      CONFIGURE_COMMAND     ""
-      BUILD_COMMAND         ""
-      INSTALL_COMMAND       ""
-      UPDATE_COMMAND        ""
-      )
-endif ()
+
+ExternalProject_Add(
+    ${BOOST_PROJECT}
+    ${EXTERNAL_PROJECT_LOG_ARGS}
+    DOWNLOAD_DIR          ${BOOST_DOWNLOAD_DIR}
+    URL                   ${BOOST_URL}
+    DOWNLOAD_NO_PROGRESS  0
+    PREFIX                ${BOOST_SOURCES_DIR}
+    CONFIGURE_COMMAND     ""
+    BUILD_COMMAND         ""
+    INSTALL_COMMAND       ""
+    UPDATE_COMMAND        ""
+    )
 
 if (${CMAKE_VERSION} VERSION_LESS "3.3.0" OR NOT WIN32)
   set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/boost_dummy.c)
diff --git a/cmake/external/gflags.cmake b/cmake/external/gflags.cmake
index 7a0369b9dfcf5b156d34962e3a60a8f569a4b86b..5ed78bcf7545cc336aed60e4a024a895c7913c81 100644
--- a/cmake/external/gflags.cmake
+++ b/cmake/external/gflags.cmake
@@ -50,6 +50,10 @@ ExternalProject_Add(
     -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
     -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
 )
+
+ADD_LIBRARY(gflags STATIC IMPORTED GLOBAL)
+SET_PROPERTY(TARGET gflags PROPERTY IMPORTED_LOCATION ${GFLAGS_LIBRARIES})
+ADD_DEPENDENCIES(gflags extern_gflags)
 IF(WIN32)
   IF(NOT EXISTS "${GFLAGS_INSTALL_DIR}/lib/libgflags.lib")
     add_custom_command(TARGET extern_gflags POST_BUILD
@@ -57,9 +61,6 @@ IF(WIN32)
       )
   ENDIF()
 ENDIF(WIN32)
-ADD_LIBRARY(gflags STATIC IMPORTED GLOBAL)
-SET_PROPERTY(TARGET gflags PROPERTY IMPORTED_LOCATION ${GFLAGS_LIBRARIES})
-ADD_DEPENDENCIES(gflags extern_gflags)
 
 LIST(APPEND external_project_dependencies gflags)
diff --git a/cmake/external/gtest.cmake b/cmake/external/gtest.cmake
index d335298742c73bf1fe44e6a778ab3c142711063d..bfb04916dc9adeb62e7310b9e4d168a74b67a989 100644
--- a/cmake/external/gtest.cmake
+++ b/cmake/external/gtest.cmake
@@ -51,6 +51,7 @@ IF(WITH_TESTING)
                       -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
                       -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
                       -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
+                      -DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE}
                       -DCMAKE_INSTALL_PREFIX=${GTEST_INSTALL_DIR}
                       -DCMAKE_POSITION_INDEPENDENT_CODE=ON
                       -DBUILD_GMOCK=ON
@@ -70,6 +71,5 @@ IF(WITH_TESTING)
     ADD_LIBRARY(gtest_main STATIC IMPORTED GLOBAL)
     SET_PROPERTY(TARGET gtest_main PROPERTY IMPORTED_LOCATION ${GTEST_MAIN_LIBRARIES})
     ADD_DEPENDENCIES(gtest_main extern_gtest)
-
     LIST(APPEND external_project_dependencies gtest gtest_main)
 ENDIF(WITH_TESTING)
diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake
index 2b46936c18a104506bf3b9d4b16a2fd0c6d2551e..844863a4258992ff2acf1acb9a37540b56cb9a1f 100644
--- a/cmake/external/openblas.cmake
+++ b/cmake/external/openblas.cmake
@@ -147,6 +147,7 @@ INCLUDE_DIRECTORIES(${CBLAS_INC_DIR})
 # linear algebra libraries for cc_library(xxx SRCS xxx.c DEPS cblas)
 SET(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/cblas_dummy.c)
 FILE(WRITE ${dummyfile} "const char *dummy_cblas = \"${dummyfile}\";")
\"${dummyfile}\";") + ADD_LIBRARY(cblas STATIC ${dummyfile}) IF("${CBLAS_PROVIDER}" STREQUAL "MKLML") diff --git a/cmake/flags.cmake b/cmake/flags.cmake index 343e44ab4bc21c1a656048b675062f1b897bbc77..a652b844c656ddac3b20557d3b0cc67980a8597e 100644 --- a/cmake/flags.cmake +++ b/cmake/flags.cmake @@ -144,11 +144,14 @@ set(GPU_COMMON_FLAGS -Wno-error=unused-function # Warnings in Numpy Header. -Wno-error=array-bounds # Warnings in Eigen::array ) - else(NOT WIN32) set(COMMON_FLAGS + -fPIC + -fno-omit-frame-pointer "/w") #disable all warnings. set(GPU_COMMON_FLAGS + -fPIC + -fno-omit-frame-pointer "/w") #disable all warnings endif(NOT WIN32) @@ -164,8 +167,8 @@ endif(APPLE) if(LINUX) set(GPU_COMMON_FLAGS -Wall - -Wextra -Werror + -Wextra ${GPU_COMMON_FLAGS}) endif(LINUX) diff --git a/cmake/generic.cmake b/cmake/generic.cmake index 174e5b2d175e58369781febc568ce8f82b605af4..1d702d1f8fd43f575c8508892e50aa806a858b24 100644 --- a/cmake/generic.cmake +++ b/cmake/generic.cmake @@ -238,6 +238,7 @@ function(cc_library TARGET_NAME) # add libxxx.lib prefix in windows set(${TARGET_NAME}_LIB_NAME "${CMAKE_STATIC_LIBRARY_PREFIX}${TARGET_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}" CACHE STRING "output library name for target ${TARGET_NAME}") endif(WIN32) + if(cc_library_SRCS) if(cc_library_SHARED OR cc_library_shared) # build *.so add_library(${TARGET_NAME} SHARED ${cc_library_SRCS}) @@ -304,12 +305,6 @@ function(sep_library TARGET_NAME) set(options STATIC static SHARED shared) set(oneValueArgs "") set(multiValueArgs SRCS DEPS) - set(${TARGET_NAME}_dummy_flag "") - if(${sep_library_STATIC}) - set(${TARGET_NAME}_dummy_flag "STATIC") - elseif(${sep_library_SHARED}) - set(${TARGET_NAME}_dummy_flag "SHARED") - endif() cmake_parse_arguments(sep_library "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) set(dummy_index 1) set(dummy_offset 1) @@ -321,10 +316,7 @@ function(sep_library TARGET_NAME) list(LENGTH dummy_list listlen ) if ((${listlen} GREATER ${dummy_limit}) OR (${dummy_offset} EQUAL ${sep_all_len})) message("create dummy library ${TARGET_NAME}_dummy_lib_${dummy_index} for ${TARGET_NAME}") - # set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/${TARGET_NAME}_dummy_${dummy_index}.c) - # file(WRITE ${dummyfile} "const char *dummy_${TARGET_NAME}_${dummy_index} = \"${dummyfile}\";") - # cc_library(${TARGET_NAME}_dummy_lib_${dummy_index} ${${TARGET_NAME}_dummy_flag} SRCS ${dummyfile} DEPS ${dummy_list}) - cc_library(${TARGET_NAME}_dummy_lib_${dummy_index} ${${TARGET_NAME}_dummy_flag} DEPS ${dummy_list}) + cc_library(${TARGET_NAME}_dummy_lib_${dummy_index} STATIC DEPS ${dummy_list}) foreach(i ${dummy_list}) list(REMOVE_AT dummy_list 0) endforeach() @@ -333,7 +325,11 @@ function(sep_library TARGET_NAME) endif() MATH(EXPR dummy_offset "${dummy_offset}+1") endforeach() - cc_library(${TARGET_NAME} ${${TARGET_NAME}_dummy_flag} SRCS ${sep_library_SRCS} DEPS ${${TARGET_NAME}_dummy_list}) + if(${sep_library_SHARED}) + cc_library(${TARGET_NAME} SHARED SRCS ${sep_library_SRCS} DEPS ${${TARGET_NAME}_dummy_list}) + else(${sep_library_SHARED}) + cc_library(${TARGET_NAME} STATIC SRCS ${sep_library_SRCS} DEPS ${${TARGET_NAME}_dummy_list}) + endif(${sep_library_SHARED}) endfunction(sep_library) function(cc_binary TARGET_NAME) @@ -355,7 +351,11 @@ function(cc_test TARGET_NAME) set(multiValueArgs SRCS DEPS ARGS) cmake_parse_arguments(cc_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) add_executable(${TARGET_NAME} ${cc_test_SRCS}) + if(WIN32) # in windows deps. shlwapi library. 
+      target_link_libraries(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog shlwapi)
+    else(WIN32)
       target_link_libraries(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog)
+    endif(WIN32)
     add_dependencies(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog)
     add_test(NAME ${TARGET_NAME}
              COMMAND ${TARGET_NAME} ${cc_test_ARGS}
@@ -426,7 +426,11 @@ function(nv_test TARGET_NAME)
     set(multiValueArgs SRCS DEPS)
     cmake_parse_arguments(nv_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
     cuda_add_executable(${TARGET_NAME} ${nv_test_SRCS})
+    if(WIN32)
+      target_link_libraries(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog shlwapi)
+    else(WIN32)
     target_link_libraries(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog)
+    endif(WIN32)
     add_dependencies(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog)
     add_test(${TARGET_NAME} ${TARGET_NAME})
     if (nv_test_SERIAL)
diff --git a/cmake/inference_lib.cmake b/cmake/inference_lib.cmake
index 8af88833dbac81a97a8f3c7b4ad48e3216a15254..6b89ec4b4198638999350ca8ed81b6b3cbbab00c 100644
--- a/cmake/inference_lib.cmake
+++ b/cmake/inference_lib.cmake
@@ -31,8 +31,7 @@ function(copy TARGET)
     foreach(index RANGE ${len})
         list(GET copy_lib_SRCS ${index} src)
         list(GET copy_lib_DSTS ${index} dst)
-
-        if (WIN32)
+        if (WIN32)
             # windows cmd shell will not expand wildcard automatically.
             # below expand the files,libs and copy them by rules.
             file(GLOB header_files ${src} "*.h")
@@ -47,14 +46,14 @@ function(copy TARGET)
                     COMMAND ${CMAKE_COMMAND} -E make_directory "${dst}"
                     )
             foreach(src_file ${src_files})
-                add_custom_command(TARGET ${TARGET} PRE_BUILD
+                add_custom_command(TARGET ${TARGET} PRE_BUILD
                         COMMAND ${CMAKE_COMMAND} -E copy "${src_file}" "${dst}"
                         COMMENT "copying ${src_file} -> ${dst}")
             endforeach()
-        else() # not windows
-            add_custom_command(TARGET ${TARGET} PRE_BUILD
-                    COMMAND ${CMAKE_COMMAND} -E make_directory "${dst}"
-                    COMMAND ${CMAKE_COMMAND} -E copy "${src_files}" "${dst}"
+        else(WIN32) # not windows
+            add_custom_command(TARGET ${TARGET} PRE_BUILD
+                    COMMAND mkdir -p "${dst}"
+                    COMMAND cp -r "${src}" "${dst}"
                     COMMENT "copying ${src} -> ${dst}")
         endif(WIN32)
     endforeach()
diff --git a/cmake/version.cmake b/cmake/version.cmake
index ac10bdf067be549fe90112aef73fd6e1fbe0ac48..fbf559f76bb73db4f90896817cecab91e5915c78 100644
--- a/cmake/version.cmake
+++ b/cmake/version.cmake
@@ -44,5 +44,5 @@ while ("${PADDLE_VERSION}" STREQUAL "")
   endif()
 endwhile()
 
-add_definitions(-DPADDLE_VERSION=${PADDLE_VERSION})
+add_definitions(-DPADDLE_VERSION="${PADDLE_VERSION}")
 message(STATUS "Paddle version is ${PADDLE_VERSION}")
diff --git a/doc/fluid/dev/contribute_to_paddle_cn.md b/doc/fluid/dev/contribute_to_paddle_cn.md
new file mode 120000
index 0000000000000000000000000000000000000000..bcb71b3da1f1fad3def2d820b6f9b7330a6e4d54
--- /dev/null
+++ b/doc/fluid/dev/contribute_to_paddle_cn.md
@@ -0,0 +1 @@
+../../v2/dev/contribute_to_paddle_cn.md
diff --git a/doc/fluid/dev/contribute_to_paddle_en.md b/doc/fluid/dev/contribute_to_paddle_en.md
new file mode 120000
index 0000000000000000000000000000000000000000..16679a406334cadce45f7a838773ea8b781c67de
--- /dev/null
+++ b/doc/fluid/dev/contribute_to_paddle_en.md
@@ -0,0 +1 @@
+../../v2/dev/contribute_to_paddle_en.md
diff --git a/doc/fluid/new_docs/advanced_usage/development/contribute_to_paddle.md b/doc/fluid/new_docs/advanced_usage/development/contribute_to_paddle.md
new file mode 120000
index 0000000000000000000000000000000000000000..9f1af6133fdaa6d82b4e0bda0efaca10c750f80e
--- /dev/null
+++ b/doc/fluid/new_docs/advanced_usage/development/contribute_to_paddle.md
@@ -0,0 +1 @@
+../../../dev/contribute_to_paddle_cn.md
diff --git a/doc/fluid/new_docs/advanced_usage/development/cpu_profiling_cn.md b/doc/fluid/new_docs/advanced_usage/development/cpu_profiling_cn.md
new file mode 120000
index 0000000000000000000000000000000000000000..8c13564629a4e0851bb7540f23df468a78b7bae2
--- /dev/null
+++ b/doc/fluid/new_docs/advanced_usage/development/cpu_profiling_cn.md
@@ -0,0 +1 @@
+../../../howto/optimization/cpu_profiling_cn.md
diff --git a/doc/fluid/new_docs/advanced_usage/development/host_memory_profiling_cn.md b/doc/fluid/new_docs/advanced_usage/development/host_memory_profiling_cn.md
new file mode 120000
index 0000000000000000000000000000000000000000..5501686e9823c1b270b46f38c946ddd18d8f5c05
--- /dev/null
+++ b/doc/fluid/new_docs/advanced_usage/development/host_memory_profiling_cn.md
@@ -0,0 +1 @@
+../../../howto/optimization/host_memory_profiling_cn.md
diff --git a/doc/fluid/new_docs/advanced_usage/development/new_op.md b/doc/fluid/new_docs/advanced_usage/development/new_op.md
new file mode 120000
index 0000000000000000000000000000000000000000..a0d1af57ba6f836cec151501a6fdb68bc3756d8d
--- /dev/null
+++ b/doc/fluid/new_docs/advanced_usage/development/new_op.md
@@ -0,0 +1 @@
+../../../dev/new_op_cn.md
diff --git a/doc/fluid/new_docs/advanced_usage/development/timeline_cn.md b/doc/fluid/new_docs/advanced_usage/development/timeline_cn.md
new file mode 120000
index 0000000000000000000000000000000000000000..1a782fd363a70b28cff18ef1dc5a2c084b3aa3ae
--- /dev/null
+++ b/doc/fluid/new_docs/advanced_usage/development/timeline_cn.md
@@ -0,0 +1 @@
+../../../howto/optimization/timeline_cn.md
diff --git a/doc/v2/dev/contribute_to_paddle_en.md b/doc/v2/dev/contribute_to_paddle_en.md
index c97564d93a7f0a753a23cd97d2467d595bd154ff..72723396444c0a6cc0516f6f2379b2d868ba59f7 120000
--- a/doc/v2/dev/contribute_to_paddle_en.md
+++ b/doc/v2/dev/contribute_to_paddle_en.md
@@ -1 +1 @@
-../../../CONTRIBUTING.md
\ No newline at end of file
+../../../CONTRIBUTING.md
diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index bda2069d14307e4d2f9a0e369ef8b9d16911fdb6..88a2c740e08f7c2c6ac831c65a0ef992064b3a61 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -174,6 +174,7 @@ paddle.fluid.layers.mean ArgSpec(args=['x', 'name'], varargs=None, keywords=None
 paddle.fluid.layers.mul ArgSpec(args=['x', 'y', 'x_num_col_dims', 'y_num_col_dims', 'name'], varargs=None, keywords=None, defaults=(1, 1, None))
 paddle.fluid.layers.sigmoid_cross_entropy_with_logits ArgSpec(args=['x', 'label', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.maxout ArgSpec(args=['x', 'groups', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.space_to_depth ArgSpec(args=['x', 'blocksize', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.affine_grid ArgSpec(args=['theta', 'out_shape', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.sequence_reverse ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.affine_channel ArgSpec(args=['x', 'scale', 'bias', 'data_layout', 'name'], varargs=None, keywords=None, defaults=(None, None, 'NCHW', None))
diff --git a/paddle/fluid/framework/details/broadcast_op_handle_test.h b/paddle/fluid/framework/details/broadcast_op_handle_test.h
index 1a2a9ac328c4a9b89bfb89106af81b9fb3ed3028..4305eb65733a7c871450949ce2c48cab013bac81 100644
--- a/paddle/fluid/framework/details/broadcast_op_handle_test.h
+++ b/paddle/fluid/framework/details/broadcast_op_handle_test.h
@@ -37,8 +37,9 @@ struct TestBroadcastOpHandle {
   std::vector<Scope *> local_scopes_;
   std::vector<Scope *> param_scopes_;
   Scope g_scope_;
-  std::unique_ptr<OpHandleBase> op_handle_;
-  std::vector<std::unique_ptr<VarHandleBase>> vars_;
+  OpHandleBase* op_handle_;
+  std::vector<VarHandleBase*> vars_;
+  std::vector<std::unique_ptr<ir::Node>> nodes_;
   std::vector<platform::Place> place_list_;
   bool use_gpu_;
 #ifdef PADDLE_WITH_CUDA
@@ -90,6 +91,7 @@ struct TestBroadcastOpHandle {
   }
 
   void InitBroadcastOp(size_t input_scope_idx) {
+    nodes_.clear();
     for (size_t j = 0; j < place_list_.size(); ++j) {
       local_scopes_.push_back(&(g_scope_.NewScope()));
       Scope& local_scope = local_scopes_.back()->NewScope();
@@ -101,39 +103,39 @@ struct TestBroadcastOpHandle {
     }
     param_scopes_[input_scope_idx]->Var("input");
 
-    std::unique_ptr<ir::Node> n =
-        ir::CreateNodeForTest("node0", ir::Node::Type::kOperation);
+    nodes_.emplace_back(
+        ir::CreateNodeForTest("node0", ir::Node::Type::kOperation));
     if (use_gpu_) {
 #ifdef PADDLE_WITH_CUDA
-      op_handle_.reset(new BroadcastOpHandle(n.get(), local_scopes_,
-                                             place_list_, nccl_ctxs_.get()));
+      op_handle_ = new BroadcastOpHandle(nodes_.back().get(), local_scopes_,
+                                         place_list_, nccl_ctxs_.get());
 #else
       PADDLE_THROW("CUDA is not support.");
 #endif
     } else {
 #ifdef PADDLE_WITH_CUDA
-      op_handle_.reset(new BroadcastOpHandle(n.get(), local_scopes_,
-                                             place_list_, nccl_ctxs_.get()));
+      op_handle_ = new BroadcastOpHandle(nodes_.back().get(), local_scopes_,
+                                         place_list_, nccl_ctxs_.get());
 #else
-      op_handle_.reset(
-          new BroadcastOpHandle(n.get(), local_scopes_, place_list_));
+      op_handle_ = new BroadcastOpHandle(nodes_.back().get(), local_scopes_,
+                                         place_list_);
 #endif
     }
 
-    std::unique_ptr<ir::Node> v =
-        ir::CreateNodeForTest("node1", ir::Node::Type::kVariable);
-    auto* in_var_handle = new VarHandle(v.get(), 1, input_scope_idx, "input",
-                                        place_list_[input_scope_idx]);
+    nodes_.emplace_back(
+        ir::CreateNodeForTest("node1", ir::Node::Type::kVariable));
+    auto* in_var_handle = new VarHandle(nodes_.back().get(), 1, input_scope_idx,
+                                        "input", place_list_[input_scope_idx]);
     vars_.emplace_back(in_var_handle);
     op_handle_->AddInput(in_var_handle);
 
     // add dummy var
-    std::unique_ptr<ir::Node> v2 =
-        ir::CreateNodeForTest("node2", ir::Node::Type::kVariable);
-    vars_.emplace_back(new DummyVarHandle(v2.get()));
+    nodes_.emplace_back(
+        ir::CreateNodeForTest("node2", ir::Node::Type::kVariable));
+    vars_.emplace_back(new DummyVarHandle(nodes_.back().get()));
     DummyVarHandle* dummy_var_handle =
-        static_cast<DummyVarHandle*>(vars_.back().get());
+        static_cast<DummyVarHandle*>(vars_.back());
     dummy_var_handle->ClearGeneratedOp();
     op_handle_->AddInput(dummy_var_handle);
 
@@ -141,20 +143,20 @@ struct TestBroadcastOpHandle {
       if (!use_gpu_) {
         op_handle_->SetDeviceContext(place_list_[j], ctxs_[j].get());
       }
-      std::unique_ptr<ir::Node> v3 =
-          ir::CreateNodeForTest("node3", ir::Node::Type::kVariable);
+      nodes_.emplace_back(
+          ir::CreateNodeForTest("node3", ir::Node::Type::kVariable));
       VarHandle* out_var_handle =
-          new VarHandle(v3.get(), 2, j, "out", place_list_[j]);
+          new VarHandle(nodes_.back().get(), 2, j, "out", place_list_[j]);
       vars_.emplace_back(out_var_handle);
       op_handle_->AddOutput(out_var_handle);
     }
 
     // add dummy var
-    std::unique_ptr<ir::Node> v4 =
-        ir::CreateNodeForTest("node4", ir::Node::Type::kVariable);
-    vars_.emplace_back(new DummyVarHandle(v4.get()));
+    nodes_.emplace_back(
+        ir::CreateNodeForTest("node4", ir::Node::Type::kVariable));
+    vars_.emplace_back(new DummyVarHandle(nodes_.back().get()));
     DummyVarHandle* out_dummy_var_handle =
-        static_cast<DummyVarHandle*>(vars_.back().get());
+        static_cast<DummyVarHandle*>(vars_.back());
     out_dummy_var_handle->ClearGeneratedOp();
     op_handle_->AddOutput(out_dummy_var_handle);
   }
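The test-fixture changes above all follow one pattern. A hedged sketch with illustrative stand-in types (not the real Paddle classes): `vars_` stops owning anything, while `nodes_` holds the `unique_ptr`s that transitively keep every handle alive, because each handle ends up owned by the node that wraps it.

```cpp
// Minimal sketch of the fixture ownership pattern, assuming node-owned handles.
#include <memory>
#include <string>
#include <vector>

struct VarHandleLike { std::string name; };

struct NodeLike {
  // stands in for ir::Node, which owns its wrapped handle in the real code
  std::unique_ptr<VarHandleLike> wrapped;
};

struct FixtureLike {
  std::vector<VarHandleLike*> vars_;              // raw: non-owning views
  std::vector<std::unique_ptr<NodeLike>> nodes_;  // the actual owners

  void Init() {
    nodes_.clear();  // frees the handles from any previous Init() run
    vars_.clear();
    nodes_.emplace_back(new NodeLike);
    nodes_.back()->wrapped.reset(new VarHandleLike{"input"});
    vars_.emplace_back(nodes_.back()->wrapped.get());
  }
};

int main() {
  FixtureLike f;
  f.Init();
  return 0;
}
```

The `nodes_.clear()` at the top of each `Init*` in the diff mirrors this: dropping the nodes releases the previous run's handles before new ones are built.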
diff --git a/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc
index 2b2329b9698908fdbe3385f1d555d756c47fc5c0..ca11c0083961f9b3d04e33113a0d685d508918f9 100644
--- a/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc
+++ b/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc
@@ -16,6 +16,7 @@
 #include <vector>
 #include "paddle/fluid/framework/details/fetch_op_handle.h"
 #include "paddle/fluid/framework/details/multi_devices_helper.h"
+#include "paddle/fluid/framework/ir/graph_helper.h"
 
 namespace paddle {
 namespace framework {
@@ -32,13 +33,11 @@ FastThreadedSSAGraphExecutor::FastThreadedSSAGraphExecutor(
       pool_(strategy.num_threads_ +
             1),  // add one more thread for generate op_deps
       fetch_ctxs_(places) {
-  auto &ops = graph_->Get<GraphOps>("ops");
-
-  for (auto &op : ops) {
+  for (auto &op : ir::FilterByNodeWrapper<OpHandleBase>(*graph_)) {
     int dep = static_cast<int>(op->NotReadyInputSize());
-    op_deps_.emplace(op.get(), dep);
+    op_deps_.emplace(op, dep);
     if (dep == 0) {
-      bootstrap_ops_.emplace_back(op.get());
+      bootstrap_ops_.emplace_back(op);
     }
   }
 
@@ -54,13 +53,13 @@ FeedFetchList FastThreadedSSAGraphExecutor::Run(
   paddle::framework::FeedFetchList fetches;
   fetches.resize(fetch_tensors.size());
   std::unordered_map<std::string, std::vector<VarHandleBase *>> fetched_vars;
-  std::vector<std::unique_ptr<FetchOpHandle>> fetch_ops;
+  std::vector<FetchOpHandle *> fetch_ops;
 
   for (auto &fetch_var_name : fetch_tensors) {
     for (auto &var_map : graph_->Get<GraphVars>("vars")) {
       auto it = var_map.find(fetch_var_name);
       if (it != var_map.end()) {
-        fetched_vars[fetch_var_name].push_back(it->second.rbegin()->get());
+        fetched_vars[fetch_var_name].push_back(*it->second.rbegin());
       }
     }
   }
@@ -110,7 +109,10 @@ FeedFetchList FastThreadedSSAGraphExecutor::Run(
         complete_q->Pop();
       }
     }
-    exception_.ReThrow();
+    if (exception_.IsCaught()) {
+      ClearFetchOp(graph_.get(), &fetch_ops);
+      exception_.ReThrow();
+    }
   }
   num_complete += num_comp;
 }
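`ir::FilterByNodeWrapper` replaces reads of the old `kGraphOps` graph attribute here and in the passes below. A hedged sketch of the idea, with simplified stand-in `Graph`/`Node` types (the real helper lives in `ir/graph_helper.h` and its exact signature may differ): walk the graph's nodes and collect every wrapped object that is, or derives from, the requested type.

```cpp
// Sketch: recompute "all ops of type T" from the nodes instead of caching it.
#include <vector>

struct OpBase { virtual ~OpBase() = default; };

struct Node {
  OpBase* wrapped = nullptr;  // set via Node::WrappedBy(...) in the real code
};

struct Graph {
  std::vector<Node*> nodes;
};

template <typename T>
std::vector<T*> FilterByNodeWrapper(const Graph& g) {
  std::vector<T*> ret;
  for (Node* n : g.nodes) {
    // dynamic_cast filters nodes whose wrapper is (or derives from) T
    if (auto* t = dynamic_cast<T*>(n->wrapped)) ret.push_back(t);
  }
  return ret;
}
// usage: for (auto* op : FilterByNodeWrapper<OpBase>(graph)) { /* ... */ }
```

The trade is deliberate: a linear scan per call instead of a second container that every pass must keep consistent with the node set.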
Unexpceted Error"); diff --git a/paddle/fluid/framework/details/fused_broadcast_op_handle_test.cc b/paddle/fluid/framework/details/fused_broadcast_op_handle_test.cc index 0f12bd2b4e857648342aeb5ad33b6c0fe01c9c73..541993c74332cc483a8b854a6b8f227c7c9a19a9 100644 --- a/paddle/fluid/framework/details/fused_broadcast_op_handle_test.cc +++ b/paddle/fluid/framework/details/fused_broadcast_op_handle_test.cc @@ -22,8 +22,10 @@ namespace details { struct TestFusedBroadcastOpHandle : TestBroadcastOpHandle { std::vector out_varnames_; + std::vector> nodes_; void InitFusedBroadcastOp(std::vector input_scope_idxes) { + nodes_.clear(); // initialize scope and var for (size_t i = 0; i < place_list_.size(); ++i) { local_scopes_.push_back(&(g_scope_.NewScope())); @@ -39,41 +41,41 @@ struct TestFusedBroadcastOpHandle : TestBroadcastOpHandle { } // create op handle node - std::unique_ptr n = - ir::CreateNodeForTest("fused_broadcast", ir::Node::Type::kOperation); + nodes_.emplace_back( + ir::CreateNodeForTest("fused_broadcast", ir::Node::Type::kOperation)); if (use_gpu_) { #ifdef PADDLE_WITH_CUDA - op_handle_.reset(new FusedBroadcastOpHandle( - n.get(), local_scopes_, place_list_, nccl_ctxs_.get())); + op_handle_ = new FusedBroadcastOpHandle( + nodes_.back().get(), local_scopes_, place_list_, nccl_ctxs_.get()); #else PADDLE_THROW("CUDA is not supported."); #endif } else { #ifdef PADDLE_WITH_CUDA - op_handle_.reset(new FusedBroadcastOpHandle( - n.get(), local_scopes_, place_list_, nccl_ctxs_.get())); + op_handle_ = new FusedBroadcastOpHandle( + nodes_.back().get(), local_scopes_, place_list_, nccl_ctxs_.get()); #else - op_handle_.reset( - new FusedBroadcastOpHandle(n.get(), local_scopes_, place_list_)); + op_handle_ = new FusedBroadcastOpHandle(nodes_.back().get(), + local_scopes_, place_list_); #endif } for (size_t i = 0; i < input_scope_idxes.size(); ++i) { // add input var handle - std::unique_ptr in_node = - ir::CreateNodeForTest("in_node" + i, ir::Node::Type::kVariable); + nodes_.emplace_back( + ir::CreateNodeForTest("in_node" + i, ir::Node::Type::kVariable)); VarHandle* in_var_handle = - new VarHandle(in_node.get(), 1, input_scope_idxes[i], "in_var" + i, - place_list_[input_scope_idxes[i]]); + new VarHandle(nodes_.back().get(), 1, input_scope_idxes[i], + "in_var" + i, place_list_[input_scope_idxes[i]]); vars_.emplace_back(in_var_handle); op_handle_->AddInput(in_var_handle); // add output var handle for (size_t j = 0; j < place_list_.size(); ++j) { - std::unique_ptr out_node = - ir::CreateNodeForTest("out_node" + i, ir::Node::Type::kVariable); - VarHandle* out_var_handle = - new VarHandle(out_node.get(), 2, j, "out_var" + i, place_list_[j]); + nodes_.emplace_back( + ir::CreateNodeForTest("out_node" + i, ir::Node::Type::kVariable)); + VarHandle* out_var_handle = new VarHandle( + nodes_.back().get(), 2, j, "out_var" + i, place_list_[j]); vars_.emplace_back(out_var_handle); op_handle_->AddOutput(out_var_handle); } diff --git a/paddle/fluid/framework/details/gather_op_handle_test.cc b/paddle/fluid/framework/details/gather_op_handle_test.cc index ed67e88ff6a7fe9efd93e5dfd4d7bdf4c43aac2e..e8cb7feb8bea92a7486b8a9d84ba4b9e2b93dbfb 100644 --- a/paddle/fluid/framework/details/gather_op_handle_test.cc +++ b/paddle/fluid/framework/details/gather_op_handle_test.cc @@ -31,9 +31,10 @@ struct TestGatherOpHandle { std::vector local_scopes_; std::vector param_scopes_; Scope g_scope_; - std::unique_ptr op_handle_; - std::vector> vars_; + OpHandleBase* op_handle_; + std::vector vars_; std::vector gpu_list_; + std::vector> 
diff --git a/paddle/fluid/framework/details/gather_op_handle_test.cc b/paddle/fluid/framework/details/gather_op_handle_test.cc
index ed67e88ff6a7fe9efd93e5dfd4d7bdf4c43aac2e..e8cb7feb8bea92a7486b8a9d84ba4b9e2b93dbfb 100644
--- a/paddle/fluid/framework/details/gather_op_handle_test.cc
+++ b/paddle/fluid/framework/details/gather_op_handle_test.cc
@@ -31,9 +31,10 @@ struct TestGatherOpHandle {
   std::vector<Scope *> local_scopes_;
   std::vector<Scope *> param_scopes_;
   Scope g_scope_;
-  std::unique_ptr<OpHandleBase> op_handle_;
-  std::vector<std::unique_ptr<VarHandleBase>> vars_;
+  OpHandleBase* op_handle_;
+  std::vector<VarHandleBase*> vars_;
   std::vector<platform::Place> gpu_list_;
+  std::vector<std::unique_ptr<ir::Node>> nodes_;
 
   void WaitAll() {
     for (size_t j = 0; j < ctxs_.size(); ++j) {
@@ -70,7 +71,7 @@ struct TestGatherOpHandle {
   }
 
   void InitGatherOp(size_t input_scope_idx) {
-    std::vector<std::unique_ptr<ir::Node>> nodes;
+    nodes_.clear();
     for (size_t j = 0; j < gpu_list_.size(); ++j) {
       local_scopes_.push_back(&(g_scope_.NewScope()));
       Scope& local_scope = local_scopes_.back()->NewScope();
@@ -82,44 +83,45 @@ struct TestGatherOpHandle {
     }
     param_scopes_[input_scope_idx]->Var("out");
 
-    nodes.emplace_back(
+    nodes_.emplace_back(
         ir::CreateNodeForTest("node", ir::Node::Type::kOperation).release());
-    op_handle_.reset(
-        new GatherOpHandle(nodes.back().get(), local_scopes_, gpu_list_));
+    op_handle_ =
+        new GatherOpHandle(nodes_.back().get(), local_scopes_, gpu_list_);
     // add input
     for (size_t j = 0; j < gpu_list_.size(); ++j) {
       op_handle_->SetDeviceContext(gpu_list_[j], ctxs_[j].get());
-      nodes.emplace_back(
+      nodes_.emplace_back(
           ir::CreateNodeForTest("node1", ir::Node::Type::kVariable).release());
       auto* in_var_handle =
-          new VarHandle(nodes.back().get(), 1, j, "input", gpu_list_[j]);
+          new VarHandle(nodes_.back().get(), 1, j, "input", gpu_list_[j]);
       vars_.emplace_back(in_var_handle);
       op_handle_->AddInput(in_var_handle);
     }
 
     // add dummy var
-    nodes.emplace_back(
+    nodes_.emplace_back(
         ir::CreateNodeForTest("node2", ir::Node::Type::kVariable).release());
-    vars_.emplace_back(new DummyVarHandle(nodes.back().get()));
+    vars_.emplace_back(new DummyVarHandle(nodes_.back().get()));
     DummyVarHandle* in_dummy_var_handle =
-        static_cast<DummyVarHandle*>(vars_.back().get());
+        static_cast<DummyVarHandle*>(vars_.back());
     in_dummy_var_handle->ClearGeneratedOp();
     op_handle_->AddInput(in_dummy_var_handle);
 
     // add output
-    nodes.emplace_back(
+    nodes_.emplace_back(
         ir::CreateNodeForTest("node3", ir::Node::Type::kVariable).release());
-    auto* out_var_handle = new VarHandle(nodes.back().get(), 2, input_scope_idx,
-                                         "out", gpu_list_[input_scope_idx]);
+    auto* out_var_handle =
+        new VarHandle(nodes_.back().get(), 2, input_scope_idx, "out",
+                      gpu_list_[input_scope_idx]);
     vars_.emplace_back(out_var_handle);
     op_handle_->AddOutput(out_var_handle);
 
     // add dummy var
-    nodes.emplace_back(
+    nodes_.emplace_back(
         ir::CreateNodeForTest("node4", ir::Node::Type::kVariable).release());
-    vars_.emplace_back(new DummyVarHandle(nodes.back().get()));
+    vars_.emplace_back(new DummyVarHandle(nodes_.back().get()));
     DummyVarHandle* dummy_var_handle =
-        static_cast<DummyVarHandle*>(vars_.back().get());
+        static_cast<DummyVarHandle*>(vars_.back());
     op_handle_->AddOutput(dummy_var_handle);
   }
diff --git a/paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.cc b/paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.cc
index 169ce3ae7ca497e40d99b1c16633e35e1e4f1009..67aad9f94f088f4b50e1ce2728d83de98a3c60ad 100644
--- a/paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.cc
+++ b/paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.cc
@@ -16,6 +16,7 @@
 #include "paddle/fluid/framework/details/computation_op_handle.h"
 #include "paddle/fluid/framework/details/multi_devices_helper.h"
 #include "paddle/fluid/framework/details/op_graph_view.h"
+#include "paddle/fluid/framework/ir/graph_helper.h"
 
 namespace paddle {
 namespace framework {
@@ -35,10 +36,10 @@ static bool IsLockAndRecordEventFreeComputationOpHandle(
 
 std::unique_ptr<ir::Graph> ModifyOpLockAndRecordEventPass::ApplyImpl(
     std::unique_ptr<ir::Graph> ir_graph) const {
-  auto &all_ops = ir_graph->Get<GraphOps>(kGraphOps);
+  auto all_ops = ir::FilterByNodeWrapper<OpHandleBase>(*ir_graph);
   OpGraphView graph_view(all_ops);
   for (auto &op : all_ops) {
-    auto *compute_op = dynamic_cast<ComputationOpHandle *>(op.get());
+    auto *compute_op = dynamic_cast<ComputationOpHandle *>(op);
     if (compute_op == nullptr) continue;
     bool is_lock_and_record_event_free =
         IsLockAndRecordEventFreeComputationOpHandle(compute_op, graph_view);
diff --git a/paddle/fluid/framework/details/multi_devices_graph_check_pass.cc b/paddle/fluid/framework/details/multi_devices_graph_check_pass.cc
index c9c255864a2477ed29873f8521acce37fa928c06..c8ea18804630fea4ada98062256730dbf4c24860 100644
--- a/paddle/fluid/framework/details/multi_devices_graph_check_pass.cc
+++ b/paddle/fluid/framework/details/multi_devices_graph_check_pass.cc
@@ -15,6 +15,7 @@
 #include "paddle/fluid/framework/details/multi_devices_graph_check_pass.h"
 #include <string>
 #include "paddle/fluid/framework/ir/graph.h"
+#include "paddle/fluid/framework/ir/graph_helper.h"
 
 namespace paddle {
 namespace framework {
@@ -36,20 +37,20 @@ bool SSAGraghBuilderWithChecker::IsValidGraph(const ir::Graph *graph) const {
   for (auto &var_map : graph->Get<GraphVars>(kGraphVars)) {
     for (auto &name_pair : var_map) {
       for (auto &version_pair : name_pair.second) {
-        insert_pending_var(version_pair.get());
+        insert_pending_var(version_pair);
       }
     }
   }
 
   for (auto &var : graph->Get<GraphDepVars>(kGraphDepVars)) {
-    insert_pending_var(var.get());
+    insert_pending_var(var);
   }
 
-  for (auto &op : graph->Get<GraphOps>(kGraphOps)) {
+  for (OpHandleBase *op : ir::FilterByNodeWrapper<OpHandleBase>(*graph)) {
     if (op->Inputs().empty()) {
-      ready_ops.insert(op.get());
+      ready_ops.insert(op);
     } else {
-      pending_ops.insert({op.get(), op.get()->NoDupInputSize()});
+      pending_ops.insert({op, op->NoDupInputSize()});
     }
   }
 
@@ -89,6 +90,4 @@ bool SSAGraghBuilderWithChecker::IsValidGraph(const ir::Graph *graph) const {
 REGISTER_PASS(multi_devices_check_pass,
               paddle::framework::details::SSAGraghBuilderWithChecker)
     .RequireGraphAttr(paddle::framework::details::kGraphVars)
-    .RequireGraphAttr(paddle::framework::details::kGraphDepVars)
-    .RequireGraphAttr(paddle::framework::details::kGraphOps)
-    .RequireGraphAttr(paddle::framework::details::kShardedVarDevice);
+    .RequireGraphAttr(paddle::framework::details::kGraphDepVars);
diff --git a/paddle/fluid/framework/details/multi_devices_graph_pass.cc b/paddle/fluid/framework/details/multi_devices_graph_pass.cc
index f3819887a196a7c8bf35897467bb9d68b428094e..67d29a42d750441cfe422db0d7b75c0064f3c9ac 100644
--- a/paddle/fluid/framework/details/multi_devices_graph_pass.cc
+++ b/paddle/fluid/framework/details/multi_devices_graph_pass.cc
@@ -34,7 +34,14 @@
 namespace paddle {
 namespace framework {
 namespace details {
+
 namespace {
+// TODO(panyx0718): Clean this up as well.
+// all operators. NOTE that even though we use a vector here, the operators
+// are unordered.
+typedef std::vector<OpHandleBase *> GraphOps;
+const char kGraphOps[] = "ops";
+
 void PolishGraphToSupportDataHazards(ir::Graph *graph) {
   for (auto &var_map : graph->Get<GraphVars>(kGraphVars)) {
     for (auto &name_pair : var_map) {
@@ -92,7 +99,7 @@ VarHandle *CreateOrGetLatestVarHandle(ir::Graph *graph, ir::Node *node,
     }
     var_holder.emplace_back(var);
   } else {
-    var = var_holder.rbegin()->get();
+    var = *var_holder.rbegin();
   }
   return var;
 }
@@ -154,7 +161,7 @@ void MultiDevSSAGraphBuilder::CreateOpHandleIOs(ir::Graph *result,
                                                 ir::Node *node,
                                                 size_t place_id) const {
   auto p = places_[place_id];
-  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
+  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
   op_handle->SetDeviceContext(p,
                               platform::DeviceContextPool::Instance().Get(p));
 
@@ -303,7 +310,6 @@ std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilder::ApplyImpl(
   result.Set(kGraphVars, new GraphVars(places_.size()));
   result.Set(kGraphDepVars, new GraphDepVars);
   result.Set(kGraphOps, new GraphOps);
-  result.Set(kShardedVarDevice, new ShardedVarDevice);
 
   // find send/recv vars so that we can place the distributed training
   // related op in the place 0
@@ -317,11 +323,13 @@ std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilder::ApplyImpl(
   bool is_forwarding = true;
   bool is_dist_train = false;
 
+  std::unordered_map<std::string, int> sharded_var_device;
+
   for (ir::Node *node : sorted_ops) {
     if (boost::get<int>(
             node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
         static_cast<int>(OpRole::kRPC)) {
-      int op_dev_id = CreateRPCOp(&result, node);
+      int op_dev_id = CreateRPCOp(&result, node, &sharded_var_device);
       PADDLE_ENFORCE(op_dev_id != -1,
                      "Can not schedule the RPC operator to the right place.");
       if (node->Op()->Type() == "recv") {
@@ -337,7 +345,7 @@ std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilder::ApplyImpl(
     } else if (boost::get<int>(node->Op()->GetAttr(
                    OpProtoAndCheckerMaker::OpRoleAttrName())) ==
                static_cast<int>(OpRole::kDist)) {
-      int op_dev_id = CreateDistTrainOp(&result, node);
+      int op_dev_id = CreateDistTrainOp(&result, node, &sharded_var_device);
       if (node->Op()->Type() == "concat") {
         auto origin_param_name = node->Op()->OutputArgumentNames()[0];
         bcast_var_name_set[op_dev_id].emplace(origin_param_name);
@@ -356,12 +364,11 @@ std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilder::ApplyImpl(
         // the block.
         is_forwarding = false;
       } else {
-        int op_dev_id = GetOpDeviceID(result, node);
+        int op_dev_id = GetOpDeviceID(result, node, sharded_var_device);
         if (op_dev_id != -1) {  // This op only runs on one specific device.
           CreateComputationalOp(&result, node, op_dev_id);
           for (ir::Node *n : node->outputs) {
-            graph->Get<ShardedVarDevice>(kShardedVarDevice)
-                .emplace(n->Name(), op_dev_id);
+            sharded_var_device.emplace(n->Name(), op_dev_id);
           }
         } else {
           // This op runs on all devices, and its output may have parameter's
@@ -398,8 +405,7 @@ std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilder::ApplyImpl(
               case BuildStrategy::ReduceStrategy::kReduce:
                 cur_device_id = GetAppropriateDeviceID({g_name});
                 CreateReduceOp(&result, g_name, cur_device_id);
-                graph->Get<ShardedVarDevice>(kShardedVarDevice)
-                    .emplace(g_name, cur_device_id);
+                sharded_var_device.emplace(g_name, cur_device_id);
                 if (!is_dist_train) {
                   bcast_var_name_set[cur_device_id].emplace(p_name);
                 }
@@ -458,7 +464,7 @@ std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilder::ApplyImpl(
    * Only variables should be the leaves of graph.
    */
   AddOutputToLeafOps(&result);
 
-  PADDLE_ENFORCE(!ir::HasCircle(result));
+  result.Erase(kGraphOps);
   return graph;
 }
@@ -498,7 +504,7 @@ void MultiDevSSAGraphBuilder::CreateBroadcastOp(ir::Graph *result,
   result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);
 
   auto *in =
-      result->Get<GraphVars>(kGraphVars).at(src_dev_id).at(p_name).back().get();
+      result->Get<GraphVars>(kGraphVars).at(src_dev_id).at(p_name).back();
   op_handle->AddInput(in);
 
   for (size_t i = 0; i < places_.size(); ++i) {
@@ -535,7 +541,7 @@ void MultiDevSSAGraphBuilder::CreateFusedBroadcastOp(
   for (size_t dev_id = 0; dev_id < bcast_varnames.size(); ++dev_id) {
     for (auto &p_name : bcast_varnames[dev_id]) {
       auto *in =
-          result->Get<GraphVars>(kGraphVars).at(dev_id).at(p_name).back().get();
+          result->Get<GraphVars>(kGraphVars).at(dev_id).at(p_name).back();
       op_handle->AddInput(in);
       for (size_t out_dev_id = 0; out_dev_id < places_.size(); ++out_dev_id) {
         auto &p = places_[out_dev_id];
@@ -571,7 +577,7 @@ void MultiDevSSAGraphBuilder::InsertAllReduceOp(ir::Graph *result,
           result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
           local_scopes_, places_));
 #endif
-  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
+  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
 
   for (size_t i = 0; i < places_.size(); ++i) {
     auto &p = places_[i];
@@ -579,7 +585,7 @@ void MultiDevSSAGraphBuilder::InsertAllReduceOp(ir::Graph *result,
     auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
     PADDLE_ENFORCE(!vars.empty());
     auto &prev_grad = vars.back();
-    op_handle->AddInput(prev_grad.get());
+    op_handle->AddInput(prev_grad);
 
     auto var =
         new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
@@ -600,14 +606,14 @@ void MultiDevSSAGraphBuilder::InsertDataBalanceOp(
           result->CreateEmptyNode("data_balance", ir::Node::Type::kOperation),
           local_scopes_, places_));
 #endif
-  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
+  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
   for (size_t i = 0; i < places_.size(); ++i) {
     auto &p = places_[i];
     SetCommunicationContext(op_handle, p);
     for (const std::string &d_name : datas) {
       auto &vars = result->Get<GraphVars>(kGraphVars)[i][d_name];
       PADDLE_ENFORCE(!vars.empty());
-      op_handle->AddInput(vars.back().get());
+      op_handle->AddInput(vars.back());
       auto var = new VarHandle(
           result->CreateEmptyNode(d_name, ir::Node::Type::kVariable),
           vars.size(), i, d_name, p);
@@ -617,8 +623,9 @@ void MultiDevSSAGraphBuilder::InsertDataBalanceOp(
   }
 }
 
-int MultiDevSSAGraphBuilder::GetOpDeviceID(const ir::Graph &graph,
-                                           ir::Node *node) const {
+int MultiDevSSAGraphBuilder::GetOpDeviceID(
+    const ir::Graph &graph, ir::Node *node,
+    const std::unordered_map<std::string, int> &sharded_var_device) const {
   if (strategy_.reduce_ != BuildStrategy::ReduceStrategy::kReduce) {
     return -1;
   }
@@ -631,15 +638,15 @@ int MultiDevSSAGraphBuilder::GetOpDeviceID(const ir::Graph &graph,
       node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
   PADDLE_ENFORCE_EQ(param_grad.size(), 2U);
 
-  int dev_id = GetVarDeviceID(graph, param_grad[1]);
+  int dev_id = GetVarDeviceID(graph, param_grad[1], sharded_var_device);
   PADDLE_ENFORCE_NE(dev_id, -1, "dev_id should not be -1.[%s, %s, %s]",
                     node->Op()->Type(), param_grad[0], param_grad[1]);
   return dev_id;
 }
 
-int MultiDevSSAGraphBuilder::GetVarDeviceID(const ir::Graph &graph,
-                                            const std::string &varname) const {
-  auto &sharded_var_device = graph.Get<ShardedVarDevice>(kShardedVarDevice);
+int MultiDevSSAGraphBuilder::GetVarDeviceID(
+    const ir::Graph &graph, const std::string &varname,
+    const std::unordered_map<std::string, int> &sharded_var_device) const {
   auto got = sharded_var_device.find(varname);
   return got == sharded_var_device.end() ? -1 : got->second;
 }
@@ -690,7 +697,7 @@ VarHandle *MultiDevSSAGraphBuilder::CreateReduceOp(ir::Graph *result,
           result->CreateEmptyNode("reduce", ir::Node::Type::kOperation),
           local_scopes_, places_));
 #endif
-  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
+  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
 
   for (size_t i = 0; i < places_.size(); ++i) {
     auto &p = places_[i];
@@ -698,7 +705,7 @@ VarHandle *MultiDevSSAGraphBuilder::CreateReduceOp(ir::Graph *result,
     auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
     PADDLE_ENFORCE(!vars.empty());
     auto &prev_grad = vars.back();
-    op_handle->AddInput(prev_grad.get());
+    op_handle->AddInput(prev_grad);
   }
   auto &vars = result->Get<GraphVars>(kGraphVars)[dst_dev_id][og];
   auto var =
@@ -709,8 +716,9 @@ VarHandle *MultiDevSSAGraphBuilder::CreateReduceOp(ir::Graph *result,
   return var;
 }
 
-int MultiDevSSAGraphBuilder::CreateDistTrainOp(ir::Graph *result,
-                                               ir::Node *node) const {
+int MultiDevSSAGraphBuilder::CreateDistTrainOp(
+    ir::Graph *result, ir::Node *node,
+    std::unordered_map<std::string, int> *sharded_var_device) const {
   int op_dev_id = -1;
   std::vector<std::string> input_var_names;
   std::vector<std::string> output_var_names;
@@ -725,23 +733,22 @@ int MultiDevSSAGraphBuilder::CreateDistTrainOp(ir::Graph *result,
       node->Op()->Type() == "split_selected_rows" ||
       node->Op()->Type() == "split_ids") {
     // TODO(paddle-dev): getting the first var is not safe.
-    op_dev_id = GetVarDeviceID(*result, input_var_names[0]);
+    op_dev_id =
+        GetVarDeviceID(*result, input_var_names[0], *sharded_var_device);
     if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
       op_dev_id = GetAppropriateDeviceID(input_var_names);
       for (auto &varname : input_var_names) {
-        result->Get<ShardedVarDevice>(kShardedVarDevice)
-            .emplace(varname, op_dev_id);
+        sharded_var_device->emplace(varname, op_dev_id);
       }
     }
     for (auto &varname : output_var_names) {
-      result->Get<ShardedVarDevice>(kShardedVarDevice)
-          .emplace(varname, op_dev_id);
+      sharded_var_device->emplace(varname, op_dev_id);
    }
   } else if (node->Op()->Type() == "concat") {
-    op_dev_id = GetVarDeviceID(*result, input_var_names[0]);
+    op_dev_id =
+        GetVarDeviceID(*result, input_var_names[0], *sharded_var_device);
     for (auto &varname : output_var_names) {
-      result->Get<ShardedVarDevice>(kShardedVarDevice)
-          .emplace(varname, op_dev_id);
+      sharded_var_device->emplace(varname, op_dev_id);
     }
   } else {
     LOG(ERROR) << "got unexpected dist op: " << node->Op()->Type();
@@ -759,14 +766,14 @@ int MultiDevSSAGraphBuilder::CreateDistTrainOp(ir::Graph *result,
 }
 
 void SetOpInputsAllPlaces(ir::Graph *result, ir::Node *node, int num_places) {
-  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
+  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
   for (ir::Node *input : node->inputs) {
     VarHandle *var = nullptr;
     for (int place_offset = 0; place_offset < num_places; ++place_offset) {
       auto &var_holders = result->Get<GraphVars>(kGraphVars)[place_offset];
       auto &var_holder = var_holders[input->Name()];
       if (!var_holder.empty()) {
-        var = var_holder.rbegin()->get();
+        var = *var_holder.rbegin();
         op_handle->AddInput(var);
       }
     }
@@ -774,12 +781,14 @@ void SetOpInputsAllPlaces(ir::Graph *result, ir::Node *node, int num_places) {
 }
 
 // Create RPC related op handles that connects its in ops and out ops.
-int MultiDevSSAGraphBuilder::CreateRPCOp(ir::Graph *result,
-                                         ir::Node *node) const {
+int MultiDevSSAGraphBuilder::CreateRPCOp(
+    ir::Graph *result, ir::Node *node,
+    std::unordered_map<std::string, int> *sharded_var_device) const {
   int op_dev_id = -1;
   if (node->Op()->Type() == "send") {
     // TODO(paddle-dev): getting the first var is not safe.
-    op_dev_id = GetVarDeviceID(*result, node->inputs[0]->Name());
+    op_dev_id =
+        GetVarDeviceID(*result, node->inputs[0]->Name(), *sharded_var_device);
     PADDLE_ENFORCE(!ir::IsControlDepVar(*node->inputs[0]),
                    "This hack no longer holds, please fix.");
     // the variable name which contains .block means it was splited by
@@ -797,11 +806,9 @@ int MultiDevSSAGraphBuilder::CreateRPCOp(ir::Graph *result,
       VLOG(10) << "send grad " << input_var_names[0] << " origin "
                << send_param_grad[1] << " place: " << op_dev_id;
       for (auto &varname : input_var_names) {
-        result->Get<ShardedVarDevice>(kShardedVarDevice)
-            .emplace(varname, op_dev_id);
+        sharded_var_device->emplace(varname, op_dev_id);
       }
-      result->Get<ShardedVarDevice>(kShardedVarDevice)
-          .emplace(send_param_grad[1], op_dev_id);
+      sharded_var_device->emplace(send_param_grad[1], op_dev_id);
     }
   } else if (node->Op()->Type() == "recv") {
     std::vector<std::string> output_var_names;
@@ -811,7 +818,8 @@ int MultiDevSSAGraphBuilder::CreateRPCOp(ir::Graph *result,
     auto recv_param_grad = boost::get<std::vector<std::string>>(
         node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
     if (recv_param_grad.size() == 2U) {
-      op_dev_id = GetVarDeviceID(*result, recv_param_grad[1]);
+      op_dev_id =
+          GetVarDeviceID(*result, recv_param_grad[1], *sharded_var_device);
       VLOG(10) << "recv param " << recv_param_grad[0]
                << " get grad place: " << recv_param_grad[1]
                << " place: " << op_dev_id;
@@ -819,8 +827,7 @@ int MultiDevSSAGraphBuilder::CreateRPCOp(ir::Graph *result,
       op_dev_id = GetAppropriateDeviceID(output_var_names);
     }
     for (auto &varname : output_var_names) {
-      result->Get<ShardedVarDevice>(kShardedVarDevice)
-          .emplace(varname, op_dev_id);
+      sharded_var_device->emplace(varname, op_dev_id);
     }
   } else {
     // send_barrier, fetch_barrier will run on place 0;
@@ -839,7 +846,7 @@ int MultiDevSSAGraphBuilder::CreateRPCOp(ir::Graph *result,
   // send_barrier, recv, fetch_barrier's inputs are deps var, get them from
   // all places
   auto p = places_[op_dev_id];
-  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
+  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
   op_handle->SetDeviceContext(p,
                               platform::DeviceContextPool::Instance().Get(p));
 
@@ -847,7 +854,8 @@ int MultiDevSSAGraphBuilder::CreateRPCOp(ir::Graph *result,
   for (ir::Node *output : node->outputs) {
     int outvar_dev_id = op_dev_id;
     if (node->Op()->Type() == "fetch_barrier") {
-      outvar_dev_id = GetVarDeviceID(*result, output->Name());
+      outvar_dev_id =
+          GetVarDeviceID(*result, output->Name(), *sharded_var_device);
       PADDLE_ENFORCE_NE(outvar_dev_id, -1);
     }
     p = places_[outvar_dev_id];
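The recurring edit in this file is mechanical: the variable-to-device map moves from a mutable graph attribute (`kShardedVarDevice`) to a local `std::unordered_map<std::string, int>` threaded explicitly through `CreateRPCOp`, `CreateDistTrainOp`, `GetOpDeviceID`, and `GetVarDeviceID`, so it dies with `ApplyImpl()`. A hedged, self-contained sketch of the lookup contract (function and variable names illustrative):

```cpp
// Sketch of the "-1 means unplaced" lookup against an explicit local map.
#include <string>
#include <unordered_map>

static int GetVarDeviceID(
    const std::string& varname,
    const std::unordered_map<std::string, int>& sharded_var_device) {
  auto got = sharded_var_device.find(varname);
  return got == sharded_var_device.end() ? -1 : got->second;
}

int main() {
  std::unordered_map<std::string, int> sharded_var_device;
  // a create-op helper records the placement as it builds ops
  sharded_var_device.emplace("w@GRAD.block0", 1);
  return GetVarDeviceID("w@GRAD.block0", sharded_var_device) == 1 ? 0 : 1;
}
```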
diff --git a/paddle/fluid/framework/details/multi_devices_graph_pass.h b/paddle/fluid/framework/details/multi_devices_graph_pass.h
index 03b2de2f04da4bac8d342a76c80fd12beaeba4b7..f3ec2d29415240b7012f458070223469d0947166 100644
--- a/paddle/fluid/framework/details/multi_devices_graph_pass.h
+++ b/paddle/fluid/framework/details/multi_devices_graph_pass.h
@@ -44,12 +44,18 @@ class MultiDevSSAGraphBuilder : public ir::Pass {
   mutable platform::NCCLContextMap *nccl_ctxs_;
 #endif
 
-  int GetVarDeviceID(const ir::Graph &graph, const std::string &varname) const;
+  int GetVarDeviceID(
+      const ir::Graph &graph, const std::string &varname,
+      const std::unordered_map<std::string, int> &sharded_var_device) const;
 
   bool IsScaleLossOp(ir::Node *node) const;
 
-  int CreateRPCOp(ir::Graph *result, ir::Node *node) const;
-  int CreateDistTrainOp(ir::Graph *result, ir::Node *node) const;
+  int CreateRPCOp(
+      ir::Graph *result, ir::Node *node,
+      std::unordered_map<std::string, int> *sharded_var_device) const;
+  int CreateDistTrainOp(
+      ir::Graph *result, ir::Node *node,
+      std::unordered_map<std::string, int> *sharded_var_device) const;
 
   std::vector<std::string> FindDistTrainSendVars(
       const std::vector<ir::Node *> &nodes) const;
@@ -69,7 +75,9 @@ class MultiDevSSAGraphBuilder : public ir::Pass {
   void CreateComputationalOp(ir::Graph *result, ir::Node *node,
                              int dev_id) const;
 
-  int GetOpDeviceID(const ir::Graph &graph, ir::Node *node) const;
+  int GetOpDeviceID(
+      const ir::Graph &graph, ir::Node *node,
+      const std::unordered_map<std::string, int> &sharded_var_device) const;
 
   void InsertAllReduceOp(ir::Graph *result, const std::string &og) const;
diff --git a/paddle/fluid/framework/details/multi_devices_graph_print_pass.cc b/paddle/fluid/framework/details/multi_devices_graph_print_pass.cc
index 361c91dc78c08a2cbf84ee88211d389c1e2312e5..8f92f0948d7d397ab0f20c01eae9e313f739adec 100644
--- a/paddle/fluid/framework/details/multi_devices_graph_print_pass.cc
+++ b/paddle/fluid/framework/details/multi_devices_graph_print_pass.cc
@@ -15,6 +15,7 @@
 #include "paddle/fluid/framework/details/multi_devices_graph_print_pass.h"
 #include <string>
 #include "paddle/fluid/framework/ir/graph.h"
+#include "paddle/fluid/framework/ir/graph_helper.h"
 
 namespace paddle {
 namespace framework {
@@ -62,7 +63,7 @@ void GraphvizSSAGraphPrinter::Print(const ir::Graph &graph,
       });
 
   size_t op_id = 0;
-  for (auto &op : graph.Get<GraphOps>(kGraphOps)) {
+  for (auto &op : ir::FilterByNodeWrapper<OpHandleBase>(graph)) {
     std::string op_name = "op_" + std::to_string(op_id++);
     sout << op_name << " [label=\"" << op->Name() << "\", shape=rect]"
          << std::endl;
diff --git a/paddle/fluid/framework/details/multi_devices_helper.h b/paddle/fluid/framework/details/multi_devices_helper.h
index 175c5a9950be69d7bf6ae9e386af762007a18a51..1a2b75fbc0c28984ce5cf00e0a2ce0f804349bb1 100644
--- a/paddle/fluid/framework/details/multi_devices_helper.h
+++ b/paddle/fluid/framework/details/multi_devices_helper.h
@@ -35,23 +35,14 @@ namespace details {
 // The outside vector is the device vector. Each element of this vector is a
 // map from variable name to variables. The variables, who have the same name,
 // will have a differsent version. The offset in the
-// `std::vector<std::unique_ptr<VarHandle>>` is the version of varaibles.
-typedef std::vector<
-    std::unordered_map<std::string, std::vector<std::unique_ptr<VarHandle>>>>
+// `std::vector<VarHandle *>` is the version of variables.
+typedef std::vector<std::unordered_map<std::string, std::vector<VarHandle *>>>
     GraphVars;
 const char kGraphVars[] = "vars";
 
 // aux variables to represent dependency. Useful to resolve data hazard.
-typedef std::unordered_set<std::unique_ptr<VarHandleBase>> GraphDepVars;
+typedef std::unordered_set<VarHandleBase *> GraphDepVars;
 const char kGraphDepVars[] = "dep_vars";
-
-// all operators. NOTE that even we use a vector here, the operators is
-// unordered.
-typedef std::vector<std::unique_ptr<OpHandleBase>> GraphOps;
-const char kGraphOps[] = "ops";
-
-typedef std::unordered_map<std::string, int> ShardedVarDevice;
-const char kShardedVarDevice[] = "sharded_var_device";
 }  // namespace details
 }  // namespace framework
 }  // namespace paddle
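For context on what `kGraphVars` and friends are: `ir::Graph` stores named, typed attributes that passes `Set` and `Get`. A simplified, hedged sketch of that mechanism (the real `Graph` additionally type-checks attributes and owns deleters; this is not its actual implementation):

```cpp
// Sketch of a named, typed attribute store, assuming the caller manages
// lifetimes (the sketch leaks on purpose to stay short).
#include <string>
#include <unordered_map>

class AttrGraph {
 public:
  template <typename T>
  void Set(const std::string& name, T* value) {
    attrs_[name] = value;  // real Graph also registers a typed deleter
  }

  template <typename T>
  T& Get(const std::string& name) const {
    return *static_cast<T*>(attrs_.at(name));  // real Graph checks the type
  }

 private:
  std::unordered_map<std::string, void*> attrs_;
};

int main() {
  AttrGraph g;
  g.Set("vars", new int(42));
  return g.Get<int>("vars") == 42 ? 0 : 1;
}
```

Deleting `kGraphOps` and `kShardedVarDevice` shrinks this shared mutable surface: "ops" is recomputed from the nodes on demand, and the device map became a local passed by argument.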
diff --git a/paddle/fluid/framework/details/op_graph_view.cc b/paddle/fluid/framework/details/op_graph_view.cc
index 65dafd376f7c687410270e35f105ff595fe78f59..4838c4198ff35ba3fb562f3a7c0563ee60179e3b 100644
--- a/paddle/fluid/framework/details/op_graph_view.cc
+++ b/paddle/fluid/framework/details/op_graph_view.cc
@@ -20,19 +20,16 @@ namespace paddle {
 namespace framework {
 namespace details {
 
-OpGraphView::OpGraphView(
-    const std::vector<std::unique_ptr<OpHandleBase>> &ops) {
-  Build(ops);
-}
+OpGraphView::OpGraphView(const std::vector<OpHandleBase *> &ops) { Build(ops); }
 
-void OpGraphView::Build(const std::vector<std::unique_ptr<OpHandleBase>> &ops) {
+void OpGraphView::Build(const std::vector<OpHandleBase *> &ops) {
   for (auto &op : ops) {
-    preceding_ops_[op.get()];
-    pending_ops_[op.get()];
+    preceding_ops_[op];
+    pending_ops_[op];
     for (auto &var : op->Outputs()) {
       for (auto &pending_op : var->PendingOps()) {
-        preceding_ops_[pending_op].insert(op.get());
-        pending_ops_[op.get()].insert(pending_op);
+        preceding_ops_[pending_op].insert(op);
+        pending_ops_[op].insert(pending_op);
       }
     }
   }
@@ -41,8 +38,6 @@ void OpGraphView::Build(const std::vector<OpHandleBase *> &ops) {
                  "There are duplicate ops in graph.");
 }
 
-size_t OpGraphView::OpNumber() const { return preceding_ops_.size(); }
-
 std::unordered_set<OpHandleBase *> OpGraphView::AllOps() const {
   std::unordered_set<OpHandleBase *> ret;
   for (auto &pair : preceding_ops_) {
@@ -60,12 +55,6 @@ void OpGraphView::EnforceHasOp(OpHandleBase *op) const {
                  op == nullptr ? "nullptr" : op->DebugString());
 }
 
-const std::unordered_set<OpHandleBase *> &OpGraphView::PrecedingOps(
-    OpHandleBase *op) const {
-  EnforceHasOp(op);
-  return preceding_ops_.at(op);
-}
-
 const std::unordered_set<OpHandleBase *> &OpGraphView::PendingOps(
     OpHandleBase *op) const {
   EnforceHasOp(op);
diff --git a/paddle/fluid/framework/details/op_graph_view.h b/paddle/fluid/framework/details/op_graph_view.h
index 398c019be00a6ff5f5b39fdcbe97339341b1685b..afb3e8e59461eeba10d7027fc70b89cc170c1805 100644
--- a/paddle/fluid/framework/details/op_graph_view.h
+++ b/paddle/fluid/framework/details/op_graph_view.h
@@ -26,21 +26,16 @@ namespace details {
 
 class OpGraphView {
  public:
-  explicit OpGraphView(const std::vector<std::unique_ptr<OpHandleBase>> &ops);
-
-  size_t OpNumber() const;
+  explicit OpGraphView(const std::vector<OpHandleBase *> &ops);
 
   std::unordered_set<OpHandleBase *> AllOps() const;
 
-  const std::unordered_set<OpHandleBase *> &PrecedingOps(
-      OpHandleBase *op) const;
-
   const std::unordered_set<OpHandleBase *> &PendingOps(OpHandleBase *op) const;
 
   bool HasOp(OpHandleBase *op) const;
 
 private:
-  void Build(const std::vector<std::unique_ptr<OpHandleBase>> &ops);
+  void Build(const std::vector<OpHandleBase *> &ops);
   void EnforceHasOp(OpHandleBase *op) const;
 
   std::unordered_map<OpHandleBase *, std::unordered_set<OpHandleBase *>>
diff --git a/paddle/fluid/framework/details/op_handle_base.h b/paddle/fluid/framework/details/op_handle_base.h
index d09b94a3fd32952985a37cf4246c7640d2db4f56..ba12ca3c61c05b3e856fffa8353d4ec5bf79bc39 100644
--- a/paddle/fluid/framework/details/op_handle_base.h
+++ b/paddle/fluid/framework/details/op_handle_base.h
@@ -31,7 +31,10 @@ constexpr char kLocalExecScopeName[] = "@LCOAL_SCOPE@";
 // It's responsible for populating necessary fields of ir::Node.
 class OpHandleBase {
  public:
-  explicit OpHandleBase(ir::Node *node) : node_(node) {}
+  // Owned by `node`. No need to be deleted explicitly.
+  explicit OpHandleBase(ir::Node *node) : node_(node) {
+    node_->WrappedBy(this);
+  }
 
   virtual ~OpHandleBase();
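The one-line comment in `OpHandleBase`'s constructor carries the whole ownership story of this diff: `node_->WrappedBy(this)` hands the handle to its `ir::Node`, which is why raw pointers suffice everywhere else. A hedged sketch of what such a wrapping contract can look like (stand-in types; the real `ir::Node` API may differ):

```cpp
// Sketch: the constructor transfers ownership of the wrapper to its node.
#include <memory>

class OpHandleBase;

class Node {
 public:
  ~Node();
  void WrappedBy(OpHandleBase* op);  // node takes ownership of the wrapper

 private:
  std::unique_ptr<OpHandleBase> op_;
};

class OpHandleBase {
 public:
  explicit OpHandleBase(Node* node) : node_(node) {
    node_->WrappedBy(this);  // from now on, deleting the node deletes us
  }
  virtual ~OpHandleBase() = default;

 private:
  Node* node_;
};

// definitions after OpHandleBase is complete, so unique_ptr can delete it
Node::~Node() = default;
void Node::WrappedBy(OpHandleBase* op) { op_.reset(op); }

int main() {
  Node n;
  OpHandleBase* h = new OpHandleBase(&n);  // ownership moves in the ctor
  (void)h;  // no explicit delete: ~Node() releases the handle
  return 0;
}
```

One consequence, visible in the graph passes above: whoever destroys the nodes destroys the handles, so containers of handles must never delete through their raw pointers.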
diff --git a/paddle/fluid/framework/details/reduce_op_handle_test.cc b/paddle/fluid/framework/details/reduce_op_handle_test.cc
index 3a9a58412391b188c5e804b41fa47b3607a36bd1..72299c0bfa916d3b92e1c5020ddd69dadad3701d 100644
--- a/paddle/fluid/framework/details/reduce_op_handle_test.cc
+++ b/paddle/fluid/framework/details/reduce_op_handle_test.cc
@@ -30,8 +30,8 @@ struct TestReduceOpHandle {
   Scope g_scope_;
   std::vector<Scope *> local_scopes_;
   std::vector<Scope *> param_scopes_;
-  std::unique_ptr<OpHandleBase> op_handle_;
-  std::vector<std::unique_ptr<VarHandleBase>> vars_;
+  OpHandleBase *op_handle_;
+  std::vector<VarHandleBase *> vars_;
   std::vector<platform::Place> gpu_list_;
   std::vector<std::unique_ptr<platform::DeviceContext>> ctxs_;
diff --git a/paddle/fluid/framework/details/reference_count_pass.cc b/paddle/fluid/framework/details/reference_count_pass.cc
index 0b994ced7f751f056fec076e3dea8d14d0bed991..08783fb5f8b18329c9167edb0dac39b7dd42a746 100644
--- a/paddle/fluid/framework/details/reference_count_pass.cc
+++ b/paddle/fluid/framework/details/reference_count_pass.cc
@@ -19,6 +19,7 @@
 #include "paddle/fluid/framework/details/computation_op_handle.h"
 #include "paddle/fluid/framework/details/multi_devices_helper.h"
 #include "paddle/fluid/framework/details/reference_count_pass.h"
+#include "paddle/fluid/framework/ir/graph_helper.h"
 
 namespace paddle {
 namespace framework {
@@ -71,14 +72,13 @@ std::unique_ptr<ir::Graph> ReferenceCountPass::ApplyImpl(
   // Step 2: Find all variables in non-computation ops which refers to variables
   // in computation ops
   std::unordered_set<std::string> names;
-  std::unordered_map<OpHandleBase *, std::unique_ptr<ReferenceCountOpHandle>>
+  std::unordered_map<OpHandleBase *, ReferenceCountOpHandle *>
       compute_ref_cnt_map;
 
   auto get_ref_cnts_from_compute_op = [&](
-      const std::unique_ptr<OpHandleBase> &op,
-      const std::vector<VarHandleBase *> &vars) {
+      OpHandleBase *op, const std::vector<VarHandleBase *> &vars) {
     std::vector<std::string> var_names_in_op;
-    auto *compute_op = dynamic_cast<ComputationOpHandle *>(op.get());
+    auto *compute_op = dynamic_cast<ComputationOpHandle *>(op);
     if (compute_op == nullptr ||
         !platform::is_gpu_place(compute_op->GetPlace()))
       return var_names_in_op;
@@ -121,9 +121,8 @@ std::unique_ptr<ir::Graph> ReferenceCountPass::ApplyImpl(
   };
 
   auto update_ref_cnts_from_non_compute_op = [&](
-      const std::unique_ptr<OpHandleBase> &op,
-      const std::vector<VarHandleBase *> &vars) {
-    if (dynamic_cast<ComputationOpHandle *>(op.get()) != nullptr) return;
+      OpHandleBase *op, const std::vector<VarHandleBase *> &vars) {
+    if (dynamic_cast<ComputationOpHandle *>(op) != nullptr) return;
     for (VarHandleBase *var_handle_base : vars) {
       auto *var_handle = dynamic_cast<VarHandle *>(var_handle_base);
       if (var_handle == nullptr || !var_handle->Node()->IsVar()) continue;
@@ -151,21 +150,21 @@ std::unique_ptr<ir::Graph> ReferenceCountPass::ApplyImpl(
             ref_cnt_node, next_compute_op->GetScope(), place, {var_name},
             gcs[place.device].get(), cur_ref_cnts[place.device].get());
         AddDependencyBetween(next_compute_op, ref_cnt_handle, graph.get());
-        compute_ref_cnt_map[next_compute_op].reset(ref_cnt_handle);
+        compute_ref_cnt_map[next_compute_op] = ref_cnt_handle;
       }
     }
   }
 };
 
-  auto &all_ops = graph->Get<GraphOps>(kGraphOps);
+  auto all_ops = ir::FilterByNodeWrapper<OpHandleBase>(*graph);
   for (auto &op : all_ops) {
     auto in_var_names = get_ref_cnts_from_compute_op(op, op->Inputs());
     auto out_var_names = get_ref_cnts_from_compute_op(op, op->Outputs());
     if (in_var_names.empty() && out_var_names.empty()) continue;
     in_var_names.insert(in_var_names.end(), out_var_names.begin(),
                         out_var_names.end());
-    auto *compute_op = dynamic_cast<ComputationOpHandle *>(op.get());
+    auto *compute_op = dynamic_cast<ComputationOpHandle *>(op);
     auto place = boost::get<platform::CUDAPlace>(compute_op->GetPlace());
     ir::Node *ref_cnt_node =
         graph->CreateEmptyNode("reference_count", ir::Node::Type::kOperation);
     auto *ref_cnt_handle = new ReferenceCountOpHandle(
ref_cnt_node, compute_op->GetScope(), place, in_var_names, gcs[place.device].get(), cur_ref_cnts[place.device].get()); AddDependencyBetween(compute_op, ref_cnt_handle, graph.get()); - compute_ref_cnt_map[compute_op].reset(ref_cnt_handle); + compute_ref_cnt_map[compute_op] = ref_cnt_handle; } for (auto &op : all_ops) { @@ -181,11 +180,11 @@ std::unique_ptr<ir::Graph> ReferenceCountPass::ApplyImpl( update_ref_cnts_from_non_compute_op(op, op->Outputs()); } - std::vector<std::unique_ptr<OpHandleBase>> new_all_ops; + std::vector<OpHandleBase *> new_all_ops; new_all_ops.reserve(compute_ref_cnt_map.size() + all_ops.size()); for (auto &op : all_ops) { new_all_ops.emplace_back(std::move(op)); - auto it = compute_ref_cnt_map.find(new_all_ops.back().get()); + auto it = compute_ref_cnt_map.find(new_all_ops.back()); if (it != compute_ref_cnt_map.end()) { // Add LeafNode to ReferenceCountOpHandle auto *dummy_leaf = new DummyVarHandle(graph->CreateControlDepVar()); diff --git a/paddle/fluid/framework/details/ssa_graph_executor.cc b/paddle/fluid/framework/details/ssa_graph_executor.cc index 780da5478ff34ecd7096d0ef62b72bf1088dd221..af2cbd5c876fdd7c27cd679f7e9412d1b0604ecc 100644 --- a/paddle/fluid/framework/details/ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/ssa_graph_executor.cc @@ -19,14 +19,16 @@ namespace framework { namespace details { SSAGraphExecutor::~SSAGraphExecutor() {} -void ClearFetchOp(ir::Graph* graph, - std::vector<std::unique_ptr<FetchOpHandle>>* fetch_ops) { +void ClearFetchOp(ir::Graph* graph, std::vector<FetchOpHandle*>* fetch_ops) { if (fetch_ops->empty()) return; for (auto& op : *fetch_ops) { for (auto& out_var : op->Node()->outputs) { graph->RemoveNode(out_var); } + for (auto& in_var : op->Inputs()) { + in_var->RemoveOutput(op, op->Node()); + } graph->RemoveNode(op->Node()); } fetch_ops->clear(); diff --git a/paddle/fluid/framework/details/ssa_graph_executor.h b/paddle/fluid/framework/details/ssa_graph_executor.h index d5cf7737d565c523995e6685b73c57e5a6f0197b..860eaa25b58e4579ad792ff18618de3b90707e8d 100644 --- a/paddle/fluid/framework/details/ssa_graph_executor.h +++ b/paddle/fluid/framework/details/ssa_graph_executor.h @@ -38,8 +38,7 @@ class SSAGraphExecutor { virtual FeedFetchList Run(const std::vector<std::string>& fetch_tensors) = 0; }; -void ClearFetchOp(ir::Graph* graph, - std::vector<std::unique_ptr<FetchOpHandle>>* fetch_ops); +void ClearFetchOp(ir::Graph* graph, std::vector<FetchOpHandle*>* fetch_ops); } // namespace details } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc index 2d2bdb604f2d08adbaa0b38d04b8e377b2e6ab6c..677a2937945b03fa577317cb4f26e09354d06957 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc @@ -15,6 +15,7 @@ #include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h" #include "paddle/fluid/framework/details/multi_devices_helper.h" +#include "paddle/fluid/framework/ir/graph_helper.h" #include "paddle/fluid/platform/profiler.h" namespace paddle { @@ -51,25 +52,25 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( for (auto &var_map : graph_->Get<details::GraphVars>(details::kGraphVars)) { for (auto &name_pair : var_map) { for (auto &version_pair : name_pair.second) { - InsertPendingVar(&pending_vars, ready_vars.get(), version_pair.get()); + InsertPendingVar(&pending_vars, ready_vars.get(), version_pair); } } } for (auto &var : graph_->Get<details::GraphDepVars>(details::kGraphDepVars)) { - InsertPendingVar(&pending_vars, ready_vars.get(), var.get()); + InsertPendingVar(&pending_vars, ready_vars.get(), var); }
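// A minimal sketch of the traversal pattern this patch moves to (hedged:
// FilterByNodeWrapper is the helper added to ir/graph_helper.h later in this
// patch; `g` is a hypothetical ir::Graph). Op handles are no longer pulled out
// of a kGraphOps attribute as unique_ptrs; they are recovered from the graph
// nodes that own them, so only raw pointers circulate in the executors:
//
//   std::vector<OpHandleBase *> ops = ir::FilterByNodeWrapper<OpHandleBase>(g);
//   for (OpHandleBase *op : ops) {
//     // Each handle is owned by its ir::Node and is freed when that node is
//     // removed from the graph; no unique_ptr bookkeeping is needed here.
//   }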
- for (auto &op : graph_->Get<details::GraphOps>(details::kGraphOps)) { + for (auto &op : ir::FilterByNodeWrapper<OpHandleBase>(*graph_)) { if (op->Inputs().empty()) { // Special case, Op has no input. - ready_ops.insert(op.get()); + ready_ops.insert(op); } else { - InsertPendingOp(&pending_ops, op.get()); + InsertPendingOp(&pending_ops, op); } } // Step 2. Insert FetchOps - std::vector<std::unique_ptr<FetchOpHandle>> fetch_ops; - std::unordered_set<std::unique_ptr<VarHandleBase>> fetch_dependencies; + std::vector<FetchOpHandle *> fetch_ops; + std::unordered_set<VarHandleBase *> fetch_dependencies; FeedFetchList fetch_data(fetch_tensors.size()); InsertFetchOps(fetch_tensors, &fetch_ops, &fetch_dependencies, &pending_ops, @@ -109,6 +110,7 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( for (auto &run_op_future : run_op_futures_) { run_op_future.wait(); } + ClearFetchOp(graph_.get(), &fetch_ops); exception_holder_.ReThrow(); } else { continue; @@ -140,8 +142,8 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( void ThreadedSSAGraphExecutor::InsertFetchOps( const std::vector<std::string> &fetch_tensors, - std::vector<std::unique_ptr<FetchOpHandle>> *fetch_ops, - std::unordered_set<std::unique_ptr<VarHandleBase>> *fetch_dependencies, + std::vector<FetchOpHandle *> *fetch_ops, + std::unordered_set<VarHandleBase *> *fetch_dependencies, std::unordered_map<OpHandleBase *, size_t> *pending_ops, std::unordered_set<VarHandleBase *> *pending_vars, BlockingQueue<VarHandleBase *> *ready_vars, FeedFetchList *fetch_data) { @@ -151,7 +153,7 @@ void ThreadedSSAGraphExecutor::InsertFetchOps( for (auto &var_map : graph_->Get<details::GraphVars>(details::kGraphVars)) { auto it = var_map.find(fetch_var_name); if (it != var_map.end()) { - fetched_vars[fetch_var_name].push_back(it->second.rbegin()->get()); + fetched_vars[fetch_var_name].push_back(*it->second.rbegin()); } } } diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h index 5c0bc169eaf3f54596eb8e08b7bf80a82253c9b2..24da56c09e3e0f3894d58e5af8838c98e3e1e67c 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h @@ -70,13 +70,13 @@ class ThreadedSSAGraphExecutor : public SSAGraphExecutor { BlockingQueue<VarHandleBase *> *ready_vars, VarHandleBase *var) const; - void InsertFetchOps( - const std::vector<std::string> &fetch_tensors, - std::vector<std::unique_ptr<FetchOpHandle>> *fetch_ops, - std::unordered_set<std::unique_ptr<VarHandleBase>> *fetch_dependencies, - std::unordered_map<OpHandleBase *, size_t> *pending_ops, - std::unordered_set<VarHandleBase *> *pending_vars, - BlockingQueue<VarHandleBase *> *ready_vars, FeedFetchList *fetch_data); + void InsertFetchOps(const std::vector<std::string> &fetch_tensors, + std::vector<FetchOpHandle *> *fetch_ops, + std::unordered_set<VarHandleBase *> *fetch_dependencies, + std::unordered_map<OpHandleBase *, size_t> *pending_ops, + std::unordered_set<VarHandleBase *> *pending_vars, + BlockingQueue<VarHandleBase *> *ready_vars, + FeedFetchList *fetch_data); private: ExecutionStrategy strategy_; diff --git a/paddle/fluid/framework/details/var_handle.cc b/paddle/fluid/framework/details/var_handle.cc index 5457870e9ff5d7cf67c9c7076b9aae94eeada779..30da029ca2a90e7faa6288557ff2f1aeb21cc1c6 100644 --- a/paddle/fluid/framework/details/var_handle.cc +++ b/paddle/fluid/framework/details/var_handle.cc @@ -20,6 +20,8 @@ namespace details { VarHandleBase::~VarHandleBase() {} +VarHandle::~VarHandle() { VLOG(4) << "deleting var handle " << DebugString(); } + std::string VarHandle::DebugString() const { std::stringstream ss; ss << name_ << ":" << place_; @@ -27,6 +29,10 @@ std::string VarHandle::DebugString() const { } std::string DummyVarHandle::DebugString() const { return node_->Name(); } + +DummyVarHandle::~DummyVarHandle() { + VLOG(4) << "deleting dummy var handle " << DebugString(); +} } // namespace details } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/var_handle.h
b/paddle/fluid/framework/details/var_handle.h index a1f458c660ce9f73bc9ac2ed194091ad0b8f8400..3b007d7b1a52df765a2dbd41939f8f865123cb43 100644 --- a/paddle/fluid/framework/details/var_handle.h +++ b/paddle/fluid/framework/details/var_handle.h @@ -35,7 +35,10 @@ class OpHandleBase; // A variable can only be generated by a single operator. i.e. // This is a single assignment graph. struct VarHandleBase { - explicit VarHandleBase(ir::Node* node) : node_(node) {} + // Owned by `node`. No need to be deleted explicitly. + explicit VarHandleBase(ir::Node* node) : node_(node) { + node_->WrappedBy(this); + } virtual ~VarHandleBase(); @@ -94,6 +97,8 @@ struct VarHandleBase { struct VarHandle : public VarHandleBase { explicit VarHandle(ir::Node* node) : VarHandleBase(node) {} + virtual ~VarHandle(); + std::string DebugString() const override; VarHandle(ir::Node* node, size_t version, size_t scope_index, @@ -121,6 +126,8 @@ struct VarHandle : public VarHandleBase { struct DummyVarHandle : public VarHandleBase { explicit DummyVarHandle(ir::Node* node) : VarHandleBase(node) {} + virtual ~DummyVarHandle(); + std::string DebugString() const override; }; diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 8ed0ba1dfa68b3e22f370c3f2dd0f83c3e5506b0..93624b76ec6a0a197e0613fc5017779450bd1dd8 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include + #include "paddle/fluid/framework/executor.h" #include "paddle/fluid/framework/feed_fetch_method.h" @@ -46,6 +48,7 @@ ExecutorPrepareContext::~ExecutorPrepareContext() { VLOG(5) << "destroy ExecutorPrepareContext"; } +#ifndef _WIN32 template static void DeleteUnusedTensors(const Scope& scope, const OperatorBase* op, GarbageCollector* gc, @@ -80,6 +83,7 @@ static void DeleteUnusedTensors(const Scope& scope, const OperatorBase* op, gc->Add(erase_tensors); } } +#endif Executor::Executor(const platform::Place& place) : place_(place) {} @@ -367,6 +371,7 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, CreateVariables(ctx->prog_, local_scope, ctx->block_id_); } +#ifndef _WIN32 int64_t max_memory_size = GetEagerDeletionThreshold(); std::unique_ptr> gc; // WhileOp would set keep_kids to false @@ -408,6 +413,16 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, } else { platform::DeviceContextPool::Instance().Get(place_)->Wait(); } +#else // WIN32 + for (auto& op : ctx->ops_) { + op->Run(*local_scope, place_); + if (FLAGS_benchmark) { + VLOG(2) << "Memory used after operator " + op->Type() + " running: " + << memory::memory_usage(place_); + } + } + platform::DeviceContextPool::Instance().Get(place_)->Wait(); +#endif // NOT WIN32 if (local_scope != scope) { scope->DeleteScope(local_scope); diff --git a/paddle/fluid/framework/executor.h b/paddle/fluid/framework/executor.h index 36b36d49c2728dbef93042158dffa26d8f56d529..a2a6c6bfb13be8a18e2a65343743f8b3e4518eb1 100644 --- a/paddle/fluid/framework/executor.h +++ b/paddle/fluid/framework/executor.h @@ -17,12 +17,14 @@ limitations under the License. 
*/ #include #include #include -#include "paddle/fluid/framework/garbage_collector.h" #include "paddle/fluid/framework/op_info.h" #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/platform/device_context.h" +#ifndef _WIN32 +#include "paddle/fluid/framework/garbage_collector.h" +#endif namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt index 28231a53bad50fe9f19cfe3e73c3dc09aa3762cf..4cf973253cc4f1f22d2fc578a1ac3a8c95e479c9 100644 --- a/paddle/fluid/framework/ir/CMakeLists.txt +++ b/paddle/fluid/framework/ir/CMakeLists.txt @@ -53,6 +53,7 @@ set(GLOB_PASS_LIB ${PASS_LIBRARY} CACHE INTERNAL "Global PASS library") cc_library(pass_builder SRCS pass_builder.cc DEPS pass) +cc_test(node_test SRCS node_test.cc DEPS node) cc_test(pass_test SRCS pass_test.cc DEPS graph pass graph_helper) cc_test(graph_test SRCS graph_test.cc DEPS graph graph_helper op_registry) cc_test(graph_helper_test SRCS graph_helper_test.cc DEPS graph graph_helper op_registry) diff --git a/paddle/fluid/framework/ir/graph.h b/paddle/fluid/framework/ir/graph.h index 9d7aa5d32deb274fbf29481b0d4754c05d1e21b5..8830638ec8b70c3fcaaa83c2c3c819e2cc8ab795 100644 --- a/paddle/fluid/framework/ir/graph.h +++ b/paddle/fluid/framework/ir/graph.h @@ -102,6 +102,15 @@ class Graph { attr_dels_[attr_name] = []() {}; } + template <typename AttrType> + void Erase(const std::string &attr_name) { + PADDLE_ENFORCE(attrs_.count(attr_name) != 0, "%s not set in the graph", + attr_name); + attr_dels_[attr_name](); + attrs_.erase(attr_name); + attr_dels_.erase(attr_name); + } + const std::unordered_set<ir::Node *> &Nodes() const { return node_set_; } // Create a normal variable with non-null VarDesc. diff --git a/paddle/fluid/framework/ir/graph_helper.h b/paddle/fluid/framework/ir/graph_helper.h index ec46b38c01b8c369ab37b4fbd5497ec120d8db91..8d92c406689ab3a97596a8666ceb452aec4be170 100644 --- a/paddle/fluid/framework/ir/graph_helper.h +++ b/paddle/fluid/framework/ir/graph_helper.h @@ -37,6 +37,15 @@ std::vector<ir::Node *> TopologySortOperations(const Graph &graph); std::map<ir::Node *, std::unordered_set<ir::Node *>> BuildOperationAdjList( const Graph &graph); +template <typename T> +std::vector<T *> FilterByNodeWrapper(const Graph &graph) { + std::vector<T *> ret; + for (ir::Node *n : graph.Nodes()) { + if (n->IsWrappedBy<T>()) ret.push_back(&n->Wrapper<T>()); + } + return ret; +} + } // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/ir/node.cc b/paddle/fluid/framework/ir/node.cc index 9277abe8c1b79c5f76f4610d0554bf337f329518..30879b1f36e30d429cbbfd6a6a8d8c588ae90333 100644 --- a/paddle/fluid/framework/ir/node.cc +++ b/paddle/fluid/framework/ir/node.cc @@ -17,7 +17,12 @@ limitations under the License. */ namespace paddle { namespace framework { namespace ir { +// msvc15 doesn't support constexpr correctly. +#if !defined(_WIN32) constexpr char Node::kControlDepVarName[]; +#else +const char Node::kControlDepVarName[] = "__control_var"; +#endif int Node::count_ = 0; std::unique_ptr<Node> CreateNodeForTest(const std::string& name, diff --git a/paddle/fluid/framework/ir/node.h b/paddle/fluid/framework/ir/node.h index 2565fc2ab8f23b5a29149a409732b5f1a3e8fe77..b92245d862ab82affa89a2c3afef1d742bfcc1f0 100644 --- a/paddle/fluid/framework/ir/node.h +++ b/paddle/fluid/framework/ir/node.h @@ -15,7 +15,10 @@ limitations under the License.
*/ #pragma once #include <string> +#include <typeindex> +#include <typeinfo> #include <vector> + #include "paddle/fluid/framework/op_desc.h" #include "paddle/fluid/framework/var_desc.h" #include "paddle/fluid/platform/macros.h" @@ -24,11 +27,39 @@ namespace paddle { namespace framework { namespace ir { -// Node should normally created by Graph::CreateXXXNode(). +// Node should only be created by Graph::CreateXXXNode(). +// 1. Every Node should be part of a graph. No dangling Node exists. +// 2. Node only contains members necessary for building graph structure. +// It doesn't contain other unrelated members, such as device, etc. +// +// Sometimes, for specific usages, Node needs to have additional members, +// such as device_placement, version in order to be executed. It is suggested +// to use composition pattern. +// +// class RunnableOp { +//   RunnableOp(ir::Node* n) : n_(n) { n_->WrappedBy(this); } +// +//   int any_thing_; +// } +// +// RunnableOp is owned by the ir::Node that composes it. In other words, +// ir::Node is responsible for deleting RunnableOp, say, when the ir::Node +// is deleted from the graph. class Node { public: + virtual ~Node() { + if (!wrapper_.empty()) { + VLOG(4) << "ir::Node deleting a wrapper node " << Name(); + wrapper_deleter_(); + } + } + enum class Type { kOperation, kVariable }; - static constexpr const char kControlDepVarName[] = "__control_var"; +#if !defined(_WIN32) // msvc does not support constexpr correctly. + static constexpr char kControlDepVarName[] = "__control_var"; +#else + static const char kControlDepVarName[]; +#endif Type NodeType() const { return type_; } @@ -44,6 +75,29 @@ class Node { return op_desc_.get(); } + // Set the `wrapper` that wraps the Node. `wrapper` is owned by Node. + template <typename T> + void WrappedBy(T* wrapper) { + if (!wrapper_.empty()) { + wrapper_deleter_(); + } + wrapper_ = wrapper; + wrapper_deleter_ = [wrapper]() { delete wrapper; }; + wrapper_type_ = std::type_index(typeid(T)); + } + + // Return a reference to the `wrapper`. + template <typename T> + T& Wrapper() { + return *boost::any_cast<T*>(wrapper_); + } + + // Test if the Node is wrapped by type T. + template <typename T> + bool IsWrappedBy() { + return std::type_index(typeid(T)) == wrapper_type_; + } + // Please don't use this API! int id() const { return id_; } @@ -95,6 +149,11 @@ class Node { static int count_; // Please don't use this API or make this public. static void ResetId() { count_ = 0; } + + boost::any wrapper_; + std::function<void(void)> wrapper_deleter_; + std::type_index wrapper_type_ = std::type_index(typeid(void)); + DISABLE_COPY_AND_ASSIGN(Node); }; diff --git a/paddle/fluid/framework/ir/node_test.cc b/paddle/fluid/framework/ir/node_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..694efadda078169c993457181c00f7b357a09e87 --- /dev/null +++ b/paddle/fluid/framework/ir/node_test.cc @@ -0,0 +1,80 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
*/ + +#include <memory> +#include "gtest/gtest.h" +#include "paddle/fluid/framework/ir/graph.h" +#include "paddle/fluid/framework/ir/pass.h" + +namespace paddle { +namespace framework { +namespace ir { + +class RunnableOp { + public: + RunnableOp(Node* node, bool* alive) : node_(node), alive_(alive) { + node_->WrappedBy(this); + } + + virtual ~RunnableOp() { *alive_ = false; } + + private: + Node* node_; + bool* alive_; +}; + +class RunnableOp2 { + public: + RunnableOp2(Node* node, bool* alive) : node_(node), alive_(alive) { + node_->WrappedBy(this); + } + + virtual ~RunnableOp2() { *alive_ = false; } + + private: + Node* node_; + bool* alive_; +}; + +TEST(NodeTest, Basic) { + bool alive1 = true; + bool alive2 = true; + std::unique_ptr<Node> n1(CreateNodeForTest("n1", Node::Type::kVariable)); + std::unique_ptr<Node> n2(CreateNodeForTest("n2", Node::Type::kVariable)); + + EXPECT_FALSE(n1->IsWrappedBy<RunnableOp>()); + EXPECT_FALSE(n1->IsWrappedBy<RunnableOp2>()); + EXPECT_FALSE(n2->IsWrappedBy<RunnableOp>()); + EXPECT_FALSE(n2->IsWrappedBy<RunnableOp2>()); + + new RunnableOp(n1.get(), &alive1); + new RunnableOp2(n2.get(), &alive2); + + EXPECT_TRUE(n1->IsWrappedBy<RunnableOp>()); + EXPECT_FALSE(n1->IsWrappedBy<RunnableOp2>()); + EXPECT_FALSE(n2->IsWrappedBy<RunnableOp>()); + EXPECT_TRUE(n2->IsWrappedBy<RunnableOp2>()); + + EXPECT_TRUE(alive1); + EXPECT_TRUE(alive2); + + n1.reset(nullptr); + n2.reset(nullptr); + EXPECT_FALSE(alive1); + EXPECT_FALSE(alive2); +} + +} // namespace ir +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/ir/pass.h b/paddle/fluid/framework/ir/pass.h index e1767337abdb260ce703aa9f55057888f08799b0..1eca1207ca61c65f8b42aedcba00af6e32c45f63 100644 --- a/paddle/fluid/framework/ir/pass.h +++ b/paddle/fluid/framework/ir/pass.h @@ -21,6 +21,7 @@ limitations under the License. */ #include "paddle/fluid/framework/ir/graph.h" #include "paddle/fluid/framework/ir/node.h" #include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/platform/port.h" #include "paddle/fluid/platform/variant.h" namespace paddle { @@ -195,6 +196,7 @@ struct PassRegistrar : public Registrar { __test_global_namespace_##uniq_name##__>::value, \ msg) +#if !defined(_WIN32) // Register a new pass that can be applied on the IR.
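// A hedged usage sketch for the registration macros defined below (MyPass and
// my_pass are hypothetical names): a pass is registered once in its .cc file,
// at global namespace scope, and is then requested by name via the registry.
//
//   class MyPass : public Pass {
//    protected:
//     std::unique_ptr<ir::Graph> ApplyImpl(
//         std::unique_ptr<ir::Graph> graph) const override;
//   };
//   REGISTER_PASS(my_pass, MyPass);  // in my_pass.cc
//
//   USE_PASS(my_pass);               // in code that links the pass
//   auto pass = PassRegistry::Instance().Get("my_pass");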
#define REGISTER_PASS(pass_type, pass_class) \ STATIC_ASSERT_PASS_GLOBAL_NAMESPACE( \ @@ -217,7 +219,30 @@ struct PassRegistrar : public Registrar { extern int TouchPassRegistrar_##pass_type(); \ static int use_pass_itself_##pass_type##_ __UNUSED__() = \ TouchPassRegistrar_##pass_type() +#else +#define REGISTER_PASS(pass_type, pass_class) \ + STATIC_ASSERT_PASS_GLOBAL_NAMESPACE( \ + __reg_pass__##pass_type, \ + "REGISTER_PASS must be called in global namespace"); \ + static ::paddle::framework::ir::PassRegistrar \ + __pass_registrar_##pass_type##__(#pass_type); \ + int TouchPassRegistrar_##pass_type() { \ + __pass_registrar_##pass_type##__.Touch(); \ + return 0; \ + } \ + static ::paddle::framework::ir::PassRegistrar UNUSED( \ + &__pass_tmp_registrar_##pass_type##__) = \ + __pass_registrar_##pass_type##__ + +#define USE_PASS(pass_type) \ + STATIC_ASSERT_PASS_GLOBAL_NAMESPACE( \ + __use_pass_itself_##pass_type, \ + "USE_PASS must be called in global namespace"); \ + extern int TouchPassRegistrar_##pass_type(); \ + static int UNUSED(use_pass_itself_##pass_type##_) = \ + TouchPassRegistrar_##pass_type() +#endif // !_WIN32 } // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/tensor.h b/paddle/fluid/framework/tensor.h index f1d268548578fea12082e2edb213a3749eccbfaf..dd984445dba147eaa568fe0daa0cc609208b0e00 100644 --- a/paddle/fluid/framework/tensor.h +++ b/paddle/fluid/framework/tensor.h @@ -20,6 +20,11 @@ limitations under the License. */ #include #include +#if defined(_WIN32) +#define GLOG_NO_ABBREVIATED_SEVERITIES // msvc conflict logging with windows.h +#define GOOGLE_GLOG_DLL_DECL +#endif + #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/framework/ddim.h" #include "paddle/fluid/memory/memory.h" diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt index da1711fc18b308f354887f831c044ed4489f87dd..332924b11a497c03945e0798d237640801205b9a 100644 --- a/paddle/fluid/inference/CMakeLists.txt +++ b/paddle/fluid/inference/CMakeLists.txt @@ -17,6 +17,10 @@ cc_library(paddle_fluid_api get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES) get_property(cuda_modules GLOBAL PROPERTY CUDA_MODULES) +get_property(fluid_third_partys GLOBAL PROPERTY FLUID_THRID_PARTYS) +if (WIN32) +list(APPEND fluid_third_partys gflags glog protobuf cblas) +endif(WIN32) # paddle_fluid_origin exclude inference api interface if(WIN32) @@ -60,6 +64,7 @@ endif() if(WIN32) sep_library(paddle_fluid_shared SHARED SRCS ${SHARED_INFERENCE_SRCS} DEPS ${fluid_modules} paddle_fluid_api reset_tensor_array) + target_link_libraries(paddle_fluid_shared shlwapi) if(WITH_GPU AND NOT WITH_DSO) target_link_libraries(paddle_fluid_origin ${cuda_modules}) endif(WITH_GPU AND NOT WITH_DSO) diff --git a/paddle/fluid/inference/analysis/argument.h b/paddle/fluid/inference/analysis/argument.h index e8fb0775b45761f64fd6fd28306c35b76d1e40c4..3242aced39e82099f838a2adb612868ebc47c888 100644 --- a/paddle/fluid/inference/analysis/argument.h +++ b/paddle/fluid/inference/analysis/argument.h @@ -26,6 +26,7 @@ #include #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/inference/analysis/data_flow_graph.h" +#include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/variant.h" namespace paddle { @@ -102,7 +103,6 @@ struct Argument { std::unordered_map> attr_deleters_; }; -#define UNLIKELY(condition) __builtin_expect(static_cast(condition), 0) #define ANALYSIS_ARGUMENT_CHECK_FIELD(field__) \ if (UNLIKELY(!(field__))) { \ 
LOG(ERROR) << "field " << #field__ << " should be set."; \ diff --git a/paddle/fluid/inference/analysis/helper.h b/paddle/fluid/inference/analysis/helper.h index fe96d8604ca34bd5b31858cce0686aa24c2acf07..e20ddfa24fcc7c3c7214e6cd62466c13d95ad949 100644 --- a/paddle/fluid/inference/analysis/helper.h +++ b/paddle/fluid/inference/analysis/helper.h @@ -14,7 +14,6 @@ limitations under the License. */ #pragma once -#include #include #include #include @@ -26,6 +25,7 @@ limitations under the License. */ #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/framework/variable.h" #include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/port.h" namespace paddle { namespace inference { @@ -124,24 +124,6 @@ T &GetFromScope(const framework::Scope &scope, const std::string &name) { return *var->GetMutable(); } -static void ExecShellCommand(const std::string &cmd, std::string *message) { - char buffer[128]; -#if !defined(_WIN32) - std::shared_ptr pipe(popen(cmd.c_str(), "r"), pclose); -#else - std::shared_ptr pipe(_popen(cmd.c_str(), "r"), _pclose); -#endif // _WIN32 - if (!pipe) { - LOG(ERROR) << "error running command: " << cmd; - return; - } - while (!feof(pipe.get())) { - if (fgets(buffer, 128, pipe.get()) != nullptr) { - *message += buffer; - } - } -} - static framework::proto::ProgramDesc LoadProgramDesc( const std::string &model_path) { std::ifstream fin(model_path, std::ios::in | std::ios::binary); @@ -163,16 +145,6 @@ static bool FileExists(const std::string &filepath) { return exists; } -static bool PathExists(const std::string &path) { - struct stat statbuf; - if (stat(path.c_str(), &statbuf) != -1) { - if (S_ISDIR(statbuf.st_mode)) { - return true; - } - } - return false; -} - } // namespace analysis } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/api/CMakeLists.txt b/paddle/fluid/inference/api/CMakeLists.txt index 49a9ebe3ddec1e4fd59ae1155a706859e249d25c..5e55acf892718223263e9c689d64316dc6682780 100644 --- a/paddle/fluid/inference/api/CMakeLists.txt +++ b/paddle/fluid/inference/api/CMakeLists.txt @@ -24,6 +24,7 @@ if(WITH_GPU AND TENSORRT_FOUND) endif() cc_library(reset_tensor_array SRCS details/reset_tensor_array.cc DEPS lod_tensor scope) +cc_library(helper SRCS helper.cc DEPS reset_tensor_array lod_tensor scope) cc_library(paddle_inference_api SRCS api.cc api_impl.cc helper.cc DEPS reset_tensor_array lod_tensor scope) cc_library(analysis_predictor SRCS analysis_predictor.cc DEPS paddle_inference_api analysis naive_executor zero_copy_tensor) cc_library(zero_copy_tensor SRCS details/zero_copy_tensor.cc DEPS paddle_inference_api) diff --git a/paddle/fluid/inference/api/api.cc b/paddle/fluid/inference/api/api.cc index 01ea942d3c8d20180cfc9664b8601ba87a898e86..20fab8078fedf837564496aa296648bf5970a348 100644 --- a/paddle/fluid/inference/api/api.cc +++ b/paddle/fluid/inference/api/api.cc @@ -16,7 +16,6 @@ #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/inference/api/paddle_inference_api.h" #include "paddle/fluid/platform/enforce.h" -#include "paddle_inference_api.h" namespace paddle { diff --git a/paddle/fluid/inference/api/api_impl.cc b/paddle/fluid/inference/api/api_impl.cc index a576ab13df08cb4bf13ab0a9007ef9410d1f7ee3..27f272f2d82fc7fc854f039b71db6c8343f153ef 100644 --- a/paddle/fluid/inference/api/api_impl.cc +++ b/paddle/fluid/inference/api/api_impl.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include +#include #include #include #include @@ -24,6 +25,7 @@ limitations under the License. */ #include "paddle/fluid/inference/api/api_impl.h" #include "paddle/fluid/inference/api/details/reset_tensor_array.h" #include "paddle/fluid/inference/api/helper.h" +#include "paddle/fluid/inference/api/timer.h" #include "paddle/fluid/platform/cpu_helper.h" #include "paddle/fluid/platform/profiler.h" @@ -31,16 +33,6 @@ DEFINE_bool(profile, false, "Turn on profiler for fluid"); DECLARE_int32(paddle_num_threads); namespace paddle { -namespace { -using paddle::inference::Timer; - -template -std::string num2str(T a) { - std::stringstream istr; - istr << a; - return istr.str(); -} -} // namespace void NativePaddlePredictor::PrepareFeedFetch() { for (auto *op : inference_program_->Block(0).AllOps()) { @@ -63,7 +55,6 @@ void NativePaddlePredictor::PrepareFeedFetch() { bool NativePaddlePredictor::Init( std::shared_ptr parent_scope) { - VLOG(3) << "Predictor::init()"; #if !defined(_WIN32) if (FLAGS_profile) { LOG(WARNING) << "Profiler is actived, might affect the performance"; @@ -75,10 +66,6 @@ bool NativePaddlePredictor::Init( } #endif - // windows has no support for openblas multi-thread -#ifdef _WIN32 - FLAGS_paddle_num_threads = 1; -#endif // no matter with or without MKLDNN paddle::platform::SetNumThreads(FLAGS_paddle_num_threads); @@ -95,21 +82,21 @@ bool NativePaddlePredictor::Init( paddle::framework::InitDevices(false); scope_.reset(new paddle::framework::Scope()); } - executor_.reset(new paddle::framework::Executor(place_)); - // Initialize the inference program if (!config_.model_dir.empty()) { // Parameters are saved in separate files sited in // the specified `dirname`. inference_program_ = paddle::inference::Load(executor_.get(), scope_.get(), config_.model_dir); + } else if (!config_.prog_file.empty() && !config_.param_file.empty()) { // All parameters are saved in a single file. // The file names should be consistent with that used // in Python API `fluid.io.save_inference_model`. 
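// A configuration sketch covering the two loading branches above and below
// (paths are placeholders, not part of this patch): either `model_dir` points
// at a directory of separate parameter files, or `prog_file`/`param_file`
// point at a combined save produced by fluid.io.save_inference_model.
//
//   paddle::NativeConfig cfg;
//   cfg.model_dir = "my_model";              // separate parameter files, or
//   cfg.prog_file = "my_model/__model__";    // ... a single combined
//   cfg.param_file = "my_model/__params__";  // parameter file
//   auto predictor = paddle::CreatePaddlePredictor<paddle::NativeConfig>(cfg);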
inference_program_ = paddle::inference::Load( executor_.get(), scope_.get(), config_.prog_file, config_.param_file); + } else { LOG(ERROR) << "fail to load inference model from " << config_.model_dir; return false; @@ -139,7 +126,7 @@ NativePaddlePredictor::~NativePaddlePredictor() { bool NativePaddlePredictor::Run(const std::vector &inputs, std::vector *output_data, int batch_size) { - VLOG(3) << "Predictor::predict"; + using Timer = paddle::inference::Timer; Timer timer; timer.tic(); // set feed variable @@ -151,11 +138,9 @@ bool NativePaddlePredictor::Run(const std::vector &inputs, } // Run the inference program // if share variables, we need not create variables - VLOG(4) << "Run prepared context"; executor_->RunPreparedContext(ctx_.get(), scope, false, /* don't create local scope each time*/ false /* don't create variable each time */); - VLOG(4) << "Finish prepared context"; // get fetch variable if (!GetFetch(output_data, scope)) { LOG(ERROR) << "fail to get fetches"; @@ -170,7 +155,6 @@ bool NativePaddlePredictor::Run(const std::vector &inputs, } std::unique_ptr NativePaddlePredictor::Clone() { - VLOG(3) << "Predictor::clone"; std::unique_ptr cls(new NativePaddlePredictor(config_)); if (!dynamic_cast(cls.get())->Init(scope_)) { @@ -188,7 +172,6 @@ std::unique_ptr NativePaddlePredictor::Clone() { bool NativePaddlePredictor::SetFeed(const std::vector &inputs, framework::Scope *scope) { - VLOG(3) << "Predictor::set_feed"; if (inputs.size() != feeds_.size()) { LOG(ERROR) << "wrong feed input size, need " << feeds_.size() << " but get " << inputs.size(); @@ -248,7 +231,6 @@ void NativePaddlePredictor::GetFetchOne(const framework::LoDTensor &fetch, bool NativePaddlePredictor::GetFetch(std::vector *outputs, framework::Scope *scope) { - VLOG(3) << "Predictor::get_fetch"; outputs->resize(fetchs_.size()); for (size_t i = 0; i < fetchs_.size(); ++i) { int idx = boost::get(fetchs_[i]->GetAttr("col")); @@ -273,25 +255,22 @@ bool NativePaddlePredictor::GetFetch(std::vector *outputs, template <> std::unique_ptr CreatePaddlePredictor< NativeConfig, PaddleEngineKind::kNative>(const NativeConfig &config) { - VLOG(3) << "create NativePaddlePredictor"; if (config.use_gpu) { // 1. GPU memeroy PADDLE_ENFORCE_GT( config.fraction_of_gpu_memory, 0.f, - "fraction_of_gpu_memory in the config should be set to range (0., 1.]"); + "fraction_of_gpu_memory in the config should be set to range (0.,1.]"); PADDLE_ENFORCE_GE(config.device, 0, "Invalid device id %d", config.device); std::vector flags; if (config.fraction_of_gpu_memory >= 0.0f || config.fraction_of_gpu_memory <= 0.95f) { flags.push_back("dummpy"); std::string flag = "--fraction_of_gpu_memory_to_use=" + - num2str(config.fraction_of_gpu_memory); + std::to_string(config.fraction_of_gpu_memory); flags.push_back(flag); - VLOG(3) << "set flag: " << flag; framework::InitGflags(flags); } } - std::unique_ptr predictor(new NativePaddlePredictor(config)); if (!dynamic_cast(predictor.get())->Init(nullptr)) { return nullptr; diff --git a/paddle/fluid/inference/api/api_impl.h b/paddle/fluid/inference/api/api_impl.h index 4e4ab47ca9c5e37f2714ebd48d250c23c7e9b117..ed3bdd8de7f59b4c086eef48bf5b51da635ab572 100644 --- a/paddle/fluid/inference/api/api_impl.h +++ b/paddle/fluid/inference/api/api_impl.h @@ -31,10 +31,10 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/lod_tensor_array.h" #include "paddle/fluid/framework/naive_executor.h" #include "paddle/fluid/inference/api/details/reset_tensor_array.h" -#include "paddle/fluid/inference/api/paddle_inference_api.h" #include "paddle/fluid/inference/io.h" #include "paddle/fluid/platform/init.h" #include "paddle/fluid/platform/profiler.h" +#include "paddle_inference_api.h" // NOLINT namespace paddle { diff --git a/paddle/fluid/inference/api/demo_ci/CMakeLists.txt b/paddle/fluid/inference/api/demo_ci/CMakeLists.txt index 49683eab07a2f5bc008272038a27bdb277396284..a742ba71eea5bf3439c1156d97a143ed7febd098 100644 --- a/paddle/fluid/inference/api/demo_ci/CMakeLists.txt +++ b/paddle/fluid/inference/api/demo_ci/CMakeLists.txt @@ -6,13 +6,13 @@ option(WITH_STATIC_LIB "Compile demo with static/shared library, default use sta option(USE_TENSORRT "Compile demo with TensorRT." OFF) macro(safe_set_static_flag) - foreach(flag_var - CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE - CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) - if(${flag_var} MATCHES "/MD") - string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}") - endif(${flag_var} MATCHES "/MD") - endforeach(flag_var) + foreach(flag_var + CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE + CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) + if(${flag_var} MATCHES "/MD") + string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}") + endif(${flag_var} MATCHES "/MD") + endforeach(flag_var) endmacro() if (WIN32) @@ -37,26 +37,25 @@ if(NOT DEFINED DEMO_NAME) endif() -if(WITH_GPU) +if(WITH_GPU) # default gpu path if(NOT WIN32) set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library") else() if(CUDA_LIB STREQUAL "") - set(CUDA_LIB "C:\\Program\ Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v8.0\\lib\\x64") + set(CUDA_LIB "C:\\Program\ Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v8.0\\lib\\x64") endif() endif(NOT WIN32) endif() -include_directories("D:/Paddle/") include_directories("${PADDLE_LIB}") include_directories("${PADDLE_LIB}/third_party/install/protobuf/include") include_directories("${PADDLE_LIB}/third_party/install/glog/include") include_directories("${PADDLE_LIB}/third_party/install/gflags/include") include_directories("${PADDLE_LIB}/third_party/install/xxhash/include") if (NOT WIN32) -include_directories("${PADDLE_LIB}/third_party/install/snappy/include") -include_directories("${PADDLE_LIB}/third_party/install/snappystream/include") -include_directories("${PADDLE_LIB}/third_party/install/zlib/include") + include_directories("${PADDLE_LIB}/third_party/install/snappy/include") + include_directories("${PADDLE_LIB}/third_party/install/snappystream/include") + include_directories("${PADDLE_LIB}/third_party/install/zlib/include") endif(NOT WIN32) include_directories("${PADDLE_LIB}/third_party/boost") @@ -64,15 +63,15 @@ include_directories("${PADDLE_LIB}/third_party/eigen3") if (NOT WIN32) if (USE_TENSORRT AND WITH_GPU) - include_directories("${TENSORRT_INCLUDE_DIR}") - link_directories("${TENSORRT_LIB_DIR}") + include_directories("${TENSORRT_INCLUDE_DIR}") + link_directories("${TENSORRT_LIB_DIR}") endif() endif(NOT WIN32) if (NOT WIN32) -link_directories("${PADDLE_LIB}/third_party/install/snappy/lib") -link_directories("${PADDLE_LIB}/third_party/install/snappystream/lib") -link_directories("${PADDLE_LIB}/third_party/install/zlib/lib") + link_directories("${PADDLE_LIB}/third_party/install/snappy/lib") + link_directories("${PADDLE_LIB}/third_party/install/snappystream/lib") 
+ link_directories("${PADDLE_LIB}/third_party/install/zlib/lib") endif(NOT WIN32) link_directories("${PADDLE_LIB}/third_party/install/protobuf/lib") @@ -86,7 +85,7 @@ add_executable(${DEMO_NAME} ${DEMO_NAME}.cc) if(WITH_MKL) include_directories("${PADDLE_LIB}/third_party/install/mklml/include") set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} - ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX}) + ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX}) set(MKLDNN_PATH "${PADDLE_LIB}/third_party/install/mkldnn") if(EXISTS ${MKLDNN_PATH}) include_directories("${MKLDNN_PATH}/include") @@ -99,25 +98,25 @@ endif() # Note: libpaddle_inference_api.so/a must put before libpaddle_fluid.so/a if(WITH_STATIC_LIB) set(DEPS - ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX}) + ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX}) else() set(DEPS - ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX}) + ${PADDLE_LIB}/paddle/lib/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() if (NOT WIN32) -set(EXTERNAL_LIB "-lrt -ldl -lpthread") -set(DEPS ${DEPS} + set(EXTERNAL_LIB "-lrt -ldl -lpthread") + set(DEPS ${DEPS} ${MATH_LIB} ${MKLDNN_LIB} glog gflags protobuf snappystream snappy z xxhash ${EXTERNAL_LIB}) else() -set(DEPS ${DEPS} + set(DEPS ${DEPS} ${MATH_LIB} ${MKLDNN_LIB} ${CMAKE_STATIC_LIBRARY_PREFIX}glog ${CMAKE_STATIC_LIBRARY_PREFIX}gflags ${CMAKE_STATIC_LIBRARY_PREFIX}protobuf ${EXTERNAL_LIB}) -# NOTE(dzhwinter) shlwapi is deprecated. -set(DEPS ${DEPS} libcmt shlwapi) + # NOTE(dzhwinter) shlwapi will be deprecated. + set(DEPS ${DEPS} libcmt shlwapi) endif(NOT WIN32) if(WITH_GPU) @@ -129,8 +128,8 @@ if(WITH_GPU) set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX}) else() set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} ) - set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} ) - set(DEPS ${DEPS} ${CUDA_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX} ) + set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} ) + set(DEPS ${DEPS} ${CUDA_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX} ) endif() endif() diff --git a/paddle/fluid/inference/api/demo_ci/inference_icnet.cc b/paddle/fluid/inference/api/demo_ci/inference_icnet.cc new file mode 100644 index 0000000000000000000000000000000000000000..88e220c0b62a7737c614da0e2c46bb5af59f10f9 --- /dev/null +++ b/paddle/fluid/inference/api/demo_ci/inference_icnet.cc @@ -0,0 +1,99 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#define GOOGLE_GLOG_DLL_DECL +#include +#include +#include <chrono>  // NOLINT +#include +#include +#include <thread>  // NOLINT +#include +#include "paddle/fluid/inference/paddle_inference_api.h" + +namespace paddle { + +NativeConfig GetConfig() { + NativeConfig config; + config.prog_file = "hs_lb_without_bn_cudnn/__model__"; + config.param_file = "hs_lb_without_bn_cudnn/__params__"; + config.fraction_of_gpu_memory = 0.0; + config.use_gpu = true; + config.device = 0; + return config; +} + +using Time = decltype(std::chrono::high_resolution_clock::now()); +Time TimeNow() { return std::chrono::high_resolution_clock::now(); } +double TimeDiff(Time t1, Time t2) { + typedef std::chrono::microseconds ms; + auto diff = t2 - t1; + ms counter = std::chrono::duration_cast<ms>(diff); + return counter.count() / 1000.0; +} + +// Build a zero-filled NCHW input batch for the demo model. +std::vector<PaddleTensor> PrepareData(int batch_size) { + int height = 449; + int width = 581; + std::vector<float> data; + for (int i = 0; i < 3 * height * width; ++i) { + data.push_back(0.0); + } + PaddleTensor tensor; + tensor.shape = std::vector<int>({batch_size, 3, height, width}); + tensor.data.Resize(sizeof(float) * batch_size * 3 * height * width); + std::copy(data.begin(), data.end(), static_cast<float*>(tensor.data.data())); + tensor.dtype = PaddleDType::FLOAT32; + std::vector<PaddleTensor> paddle_tensor_feeds(1, tensor); + return paddle_tensor_feeds; +} + +void TestNaive(int batch_size, int thread_num) { + NativeConfig config = GetConfig(); + + int num_jobs = thread_num;    // parallel jobs. + constexpr int epoches = 10;   // each job runs `epoches` iterations. + std::vector<std::thread> threads; + std::vector<std::unique_ptr<PaddlePredictor>> predictors; + for (int tid = 0; tid < num_jobs; ++tid) { + auto pred = CreatePaddlePredictor<NativeConfig>(config); + predictors.emplace_back(std::move(pred)); + } + + auto time1 = TimeNow(); + for (int tid = 0; tid < num_jobs; ++tid) { + threads.emplace_back([&, tid]() { + auto& predictor = predictors[tid]; + auto paddle_tensor_feeds = PrepareData(batch_size); + PaddleTensor tensor_out; + std::vector<PaddleTensor> outputs(1, tensor_out); + for (int i = 0; i < epoches; ++i) { + // glog CHECK is used instead of gtest ASSERT_*: this demo is a + // standalone binary, not a unit test. + CHECK(predictor->Run(paddle_tensor_feeds, &outputs)); + VLOG(3) << "tid : " << tid << " run: " << i << " finished"; + CHECK_EQ(outputs.size(), 1UL); + } + }); + } + for (int i = 0; i < num_jobs; ++i) { + threads[i].join(); + } + auto time2 = TimeNow(); + VLOG(3) << "Thread num " << thread_num << ", total time cost " + << TimeDiff(time1, time2) << " ms"; +} +} // namespace paddle + +int main(int argc, char** argv) { + paddle::TestNaive(1, 1); // single thread. + paddle::TestNaive(1, 5); // 5 threads.
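// An alternative sharing pattern (sketch, not part of the demo): rather than
// building one predictor per thread up front, each worker can clone a master
// predictor; Clone() is part of the PaddlePredictor interface used above.
//
//   auto master = paddle::CreatePaddlePredictor<paddle::NativeConfig>(
//       paddle::GetConfig());
//   auto worker = master->Clone();  // per-thread instance of the same model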
+ return 0; +} diff --git a/paddle/fluid/inference/api/helper.h b/paddle/fluid/inference/api/helper.h index 83910585b7eddc688d238127321cda65088341e8..9420c437501813963018d221bab9c5b5692b7622 100644 --- a/paddle/fluid/inference/api/helper.h +++ b/paddle/fluid/inference/api/helper.h @@ -14,40 +14,26 @@ #pragma once +#define GLOG_NO_ABBREVIATED_SEVERITIES +#define GOOGLE_GLOG_DLL_DECL #include #if !defined(_WIN32) #include #else #endif +#include #include <chrono>  // NOLINT #include #include #include #include #include -#include "paddle/fluid/string/printf.h" -#include "paddle_inference_api.h" +#include "paddle/fluid/inference/api/timer.h" +#include "paddle_inference_api.h"  // NOLINT namespace paddle { namespace inference { -// Timer for timer -class Timer { - public: - std::chrono::high_resolution_clock::time_point start; - std::chrono::high_resolution_clock::time_point startu; - - void tic() { start = std::chrono::high_resolution_clock::now(); } - double toc() { - startu = std::chrono::high_resolution_clock::now(); - std::chrono::duration<double> time_span = - std::chrono::duration_cast<std::chrono::duration<double>>(startu - - start); - double used_time_ms = static_cast<double>(time_span.count()) * 1000.0; - return used_time_ms; - } -}; - static void split(const std::string &str, char sep, std::vector<std::string> *pieces) { pieces->clear(); diff --git a/paddle/fluid/inference/api/timer.h b/paddle/fluid/inference/api/timer.h new file mode 100644 index 0000000000000000000000000000000000000000..2df5274dc1f2e7ad8e434f1da9d5ae6aee94c784 --- /dev/null +++ b/paddle/fluid/inference/api/timer.h @@ -0,0 +1,39 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#pragma once + +#include <chrono>  // NOLINT + +namespace paddle { +namespace inference { + +// A simple wall-clock timer. +class Timer { + public: + std::chrono::high_resolution_clock::time_point start; + std::chrono::high_resolution_clock::time_point startu; + + void tic() { start = std::chrono::high_resolution_clock::now(); } + double toc() { + startu = std::chrono::high_resolution_clock::now(); + std::chrono::duration<double> time_span = + std::chrono::duration_cast<std::chrono::duration<double>>(startu - + start); + double used_time_ms = static_cast<double>(time_span.count()) * 1000.0; + return used_time_ms; + } +}; + +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/memory/detail/buddy_allocator.cc b/paddle/fluid/memory/detail/buddy_allocator.cc index 26ef27c3caafadb4801b0ae52133f6175655ce0a..ce283f0621b164d8b66d281065779fcdb0e48077 100644 --- a/paddle/fluid/memory/detail/buddy_allocator.cc +++ b/paddle/fluid/memory/detail/buddy_allocator.cc @@ -11,7 +11,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/ - +#define GLOG_NO_ABBREVIATED_SEVERITIES +#define GOOGLE_GLOG_DLL_DECL #include "paddle/fluid/memory/detail/buddy_allocator.h" #include "glog/logging.h" diff --git a/paddle/fluid/memory/detail/meta_cache.cc b/paddle/fluid/memory/detail/meta_cache.cc index b86e4f38c42a26e155f276f9b73cbed1d0d83f7d..2a283733f5c25b0f666b1067c3e5151ce9f75873 100644 --- a/paddle/fluid/memory/detail/meta_cache.cc +++ b/paddle/fluid/memory/detail/meta_cache.cc @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#define GLOG_NO_ABBREVIATED_SEVERITIES +#define GOOGLE_GLOG_DLL_DECL #include "glog/logging.h" #include "paddle/fluid/memory/detail/memory_block.h" #include "paddle/fluid/platform/assert.h" diff --git a/paddle/fluid/memory/detail/system_allocator.cc b/paddle/fluid/memory/detail/system_allocator.cc index 1b96798d23cec34a1863f56c1e4027ce32b2eec5..92849bc2c081a4d3454e7d0a725387a2ee4a5db8 100644 --- a/paddle/fluid/memory/detail/system_allocator.cc +++ b/paddle/fluid/memory/detail/system_allocator.cc @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #define GLOG_NO_ABBREVIATED_SEVERITIES +#define GOOGLE_GLOG_DLL_DECL #include "paddle/fluid/memory/detail/system_allocator.h" diff --git a/paddle/fluid/operators/accuracy_op.h b/paddle/fluid/operators/accuracy_op.h index 803244dd48efc634bf5e654a35cb3dd572842882..8d3313db9687937db9b6f326348fcb273a615d65 100644 --- a/paddle/fluid/operators/accuracy_op.h +++ b/paddle/fluid/operators/accuracy_op.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once #include + #include "paddle/fluid/framework/op_registry.h" namespace paddle { diff --git a/paddle/fluid/operators/cast_op.h b/paddle/fluid/operators/cast_op.h index 8fa0416049f8fa128d7ab61f8350b41960f07263..ea710aaad5cf94aeb0db0d905c13decf742f60f9 100644 --- a/paddle/fluid/operators/cast_op.h +++ b/paddle/fluid/operators/cast_op.h @@ -54,6 +54,7 @@ class CastOpKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& context) const override { auto* in = context.Input("X"); auto* out = context.Output("Out"); + framework::VisitDataType( static_cast( context.Attr("out_dtype")), diff --git a/paddle/fluid/operators/detection/roi_perspective_transform_op.cu b/paddle/fluid/operators/detection/roi_perspective_transform_op.cu index 862d664d42e03d2ae968ea0bdec8ae8e50bf7fb3..1f660d54d6de7cf987bff7e7c4a619c60086f016 100644 --- a/paddle/fluid/operators/detection/roi_perspective_transform_op.cu +++ b/paddle/fluid/operators/detection/roi_perspective_transform_op.cu @@ -35,12 +35,12 @@ namespace operators { template __device__ bool GT_E(T a, T b) { - return (a > b) || Eigen::numext::abs(a - b) < 1e-4; + return (a > b) || fabsf(static_cast(a - b)) < 1e-4; } template __device__ bool LT_E(T a, T b) { - return (a < b) || Eigen::numext::abs(a - b) < 1e-4; + return (a < b) || fabsf(static_cast(a - b)) < 1e-4; } template diff --git a/paddle/fluid/operators/elementwise_op_function.h b/paddle/fluid/operators/elementwise_op_function.h index d7444bcfe0b876b17a52a38628cbc526af50cb31..96ba6aa79cf3cb98cbd3bdcfbb818a10f19dec04 100644 --- a/paddle/fluid/operators/elementwise_op_function.h +++ b/paddle/fluid/operators/elementwise_op_function.h @@ -14,7 +14,6 @@ limitations under the License. 
*/ #pragma once -#include #include #include #include diff --git a/paddle/fluid/operators/load_combine_op.cc b/paddle/fluid/operators/load_combine_op.cc index 0522a94195786c767194ec727d982a60451e7c62..59f44b112cddddff5ff423f462650615710856a7 100644 --- a/paddle/fluid/operators/load_combine_op.cc +++ b/paddle/fluid/operators/load_combine_op.cc @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include +#include <memory> #include "paddle/fluid/framework/data_type_transform.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/device_context.h" @@ -32,9 +33,15 @@ class LoadCombineOp : public framework::OperatorBase { const platform::Place &place) const override { auto filename = Attr<std::string>("file_path"); auto load_as_fp16 = Attr<bool>("load_as_fp16"); - - std::ifstream fin(filename); - PADDLE_ENFORCE(static_cast<bool>(fin), + auto format = Attr<std::string>("format"); + std::unique_ptr<std::ifstream> fin; + if (format == "windows") { + fin.reset(new std::ifstream(filename, + std::ios_base::in | std::ios_base::binary)); + } else { + fin.reset(new std::ifstream(filename)); + } + PADDLE_ENFORCE(static_cast<bool>(*fin), "Cannot open file %s for load_combine op", filename); auto out_var_names = Outputs("Out"); @@ -54,11 +61,11 @@ class LoadCombineOp : public framework::OperatorBase { auto *tensor = out_var->GetMutable<framework::LoDTensor>(); // Error checking - PADDLE_ENFORCE(static_cast<bool>(fin), "Cannot read more from file %s", + PADDLE_ENFORCE(static_cast<bool>(*fin), "Cannot read more from file %s", filename); // Get data from fin to tensor - DeserializeFromStream(fin, tensor, dev_ctx); + DeserializeFromStream(*fin, tensor, dev_ctx); auto in_dtype = framework::ToDataType(tensor->type()); auto out_dtype = @@ -103,6 +110,18 @@ class LoadCombineOpProtoMaker : public framework::OpProtoAndCheckerMaker { "LoDTensors will be loaded from \"file_path\".") .AddCustomChecker( [](const std::string &path) { return !path.empty(); }); + AddAttr<std::string>("format", + R"DOC((windows|linux), the format in which the model file was saved. Windows and Linux use different newline conventions: Windows uses "\r\n" while Linux uses "\n", so a file saved in text mode on one platform may not load on the other. If this attribute is "windows", the file is opened in binary mode, which is byte-exact and portable across both platforms; if it is "linux", the file is opened in text mode. Note that the two formats are not inter-compatible.)DOC") + .SetDefault("linux") + .AddCustomChecker([](const std::string &s) { + return s == "windows" || s == "linux"; + }); AddComment(R"DOC( LoadCombine Operator. diff --git a/paddle/fluid/operators/load_op.cc b/paddle/fluid/operators/load_op.cc index 51219504ffa2a778b56351f759e8a8dfb951ad91..e0e2c3dc4fa0af6bd6a58106364e21099d7bc517 100644 --- a/paddle/fluid/operators/load_op.cc +++ b/paddle/fluid/operators/load_op.cc @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include +#include <memory> #include "paddle/fluid/framework/data_type_transform.h" #include "paddle/fluid/framework/op_registry.h" @@ -34,8 +35,15 @@ class LoadOp : public framework::OperatorBase { // FIXME(yuyang18): We save variable to local file now, but we should change // it to save an output stream.
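// Why the "windows" format opens streams in binary mode (a standalone sketch;
// the file name is hypothetical): on Windows a text-mode stream translates
// '\n' to "\r\n" on write and back on read, which corrupts serialized tensor
// bytes, while binary mode reads and writes bytes verbatim on every platform.
//
//   std::ofstream text_out("vars.bin");                        // text mode
//   std::ofstream bin_out("vars.bin", std::ios_base::binary);  // byte-exact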
auto filename = Attr<std::string>("file_path"); - std::ifstream fin(filename); - PADDLE_ENFORCE(static_cast<bool>(fin), "Cannot open file %s for load op", + auto format = Attr<std::string>("format"); + std::unique_ptr<std::ifstream> fin; + if (format == "windows") { + fin.reset(new std::ifstream(filename, + std::ios_base::in | std::ios_base::binary)); + } else { + fin.reset(new std::ifstream(filename)); + } + PADDLE_ENFORCE(static_cast<bool>(*fin), "Cannot open file %s for load op", filename); auto out_var_name = Output("Out"); @@ -44,9 +52,9 @@ class LoadOp : public framework::OperatorBase { out_var_name); if (out_var->IsType<framework::LoDTensor>()) { - LoadLodTensor(fin, place, out_var); + LoadLodTensor(*fin, place, out_var); } else if (out_var->IsType<framework::SelectedRows>()) { - LoadSelectedRows(fin, place, out_var); + LoadSelectedRows(*fin, place, out_var); } else { PADDLE_ENFORCE( false, @@ -110,6 +118,18 @@ class LoadOpProtoMaker : public framework::OpProtoAndCheckerMaker { R"(Variable will be loaded from "file_path")") .AddCustomChecker( [](const std::string &path) { return !path.empty(); }); + AddAttr<std::string>("format", + R"DOC((windows|linux), the format in which the file was saved. Windows and Linux use different newline conventions: Windows uses "\r\n" while Linux uses "\n", so a file saved in text mode on one platform may not load on the other. If this attribute is "windows", the file is opened in binary mode, which is byte-exact and portable across both platforms; if it is "linux", the file is opened in text mode. Note that the two formats are not inter-compatible.)DOC") + .SetDefault("linux") + .AddCustomChecker([](const std::string &s) { + return s == "windows" || s == "linux"; + }); AddComment( "Load operator will load a LoDTensor / SelectedRows variable from disk " "file."); diff --git a/paddle/fluid/operators/lstm_unit_op.h b/paddle/fluid/operators/lstm_unit_op.h index 4ead9c22934dde6e42f9ede47cc1ddf502948fc4..5d1d667fe1ec9a3d8978ae1dcbb4d92ad24fe96e 100644 --- a/paddle/fluid/operators/lstm_unit_op.h +++ b/paddle/fluid/operators/lstm_unit_op.h @@ -4,7 +4,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/paddle/fluid/operators/math/CMakeLists.txt b/paddle/fluid/operators/math/CMakeLists.txt index c7c09ec153ca5df9937083e1018265a420d55bcb..7f9a55acf84a7c2a85f57cfbd6390686150e7bdf 100644 --- a/paddle/fluid/operators/math/CMakeLists.txt +++ b/paddle/fluid/operators/math/CMakeLists.txt @@ -57,9 +57,6 @@ math_library(sequence_padding) math_library(sequence_pooling DEPS math_function) math_library(sequence_scale) math_library(softmax DEPS math_function) -if (NOT WIN32) - math_library(matrix_bit_code) -endif (NOT WIN32) math_library(unpooling) math_library(vol2col) @@ -76,12 +73,11 @@ endif() cc_test(concat_test SRCS concat_test.cc DEPS concat_and_split) cc_test(cpu_vec_test SRCS cpu_vec_test.cc DEPS blas cpu_info) if (NOT WIN32) + math_library(matrix_bit_code) set(JIT_KERNEL_SRCS jit_kernel.cc jit_kernel_blas.cc jit_kernel_exp.cc jit_kernel_rnn.cc jit_kernel_crf_decode.cc) set(JIT_KERNEL_DEPS cpu_info cblas gflags enforce) if(WITH_XBYAK) list(APPEND JIT_KERNEL_SRCS jit_gen.cc jit_code.cc) list(APPEND JIT_KERNEL_DEPS xbyak) endif() - cc_library(jit_kernel SRCS ${JIT_KERNEL_SRCS} DEPS ${JIT_KERNEL_DEPS}) - cc_test(jit_kernel_test SRCS jit_kernel_test.cc DEPS jit_kernel) -endif() +endif (NOT WIN32) diff --git a/paddle/fluid/operators/math/cpu_vec.h b/paddle/fluid/operators/math/cpu_vec.h index 0aed253c80fc28560716cbcfa70f74ef9c84f9b6..38df5776bfaa54fc4018cd592cc9cf0478132cf2 100644 --- a/paddle/fluid/operators/math/cpu_vec.h +++ b/paddle/fluid/operators/math/cpu_vec.h @@ -18,10 +18,6 @@ limitations under the License. */ #include #include "paddle/fluid/platform/cpu_info.h" #include "paddle/fluid/platform/enforce.h" -#ifdef __AVX__ -#include -#endif - #ifdef PADDLE_WITH_MKLML #include "paddle/fluid/platform/dynload/mklml.h" #endif diff --git a/paddle/fluid/operators/math/detail/activation_functions.h b/paddle/fluid/operators/math/detail/activation_functions.h index b127fbe8c8515e7fe57b07ea1d4291675ec4efca..24df1f93edd85145d703ed3277b0d1ca06e67009 100644 --- a/paddle/fluid/operators/math/detail/activation_functions.h +++ b/paddle/fluid/operators/math/detail/activation_functions.h @@ -15,13 +15,10 @@ limitations under the License. */ #pragma once #include #include +#include "paddle/fluid/platform/cpu_info.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/hostdevice.h" -#ifdef __AVX__ -#include -#endif - namespace paddle { namespace operators { namespace math { diff --git a/paddle/fluid/operators/math/jit_kernel_blas.cc b/paddle/fluid/operators/math/jit_kernel_blas.cc index f976953a245e424e6cb26bbf1cff2f120f84c133..73089a4f0c846122987298b824ccee3f0fe05a5f 100644 --- a/paddle/fluid/operators/math/jit_kernel_blas.cc +++ b/paddle/fluid/operators/math/jit_kernel_blas.cc @@ -25,10 +25,6 @@ limitations under the License. 
*/ #include "paddle/fluid/platform/dynload/mklml.h" #endif -#ifdef __AVX__ -#include -#endif - namespace paddle { namespace operators { namespace math { diff --git a/paddle/fluid/operators/math/jit_kernel_crf_decode.cc b/paddle/fluid/operators/math/jit_kernel_crf_decode.cc index a4861c347e44ad86a066861d3375b556302a84bc..4626ff5cb3ab6d1756119c1cd9ecc9102e8a0cae 100644 --- a/paddle/fluid/operators/math/jit_kernel_crf_decode.cc +++ b/paddle/fluid/operators/math/jit_kernel_crf_decode.cc @@ -16,9 +16,6 @@ limitations under the License. */ #include #include #include "paddle/fluid/operators/math/jit_kernel_macro.h" -#ifdef __AVX__ -#include -#endif namespace paddle { namespace operators { @@ -263,6 +260,7 @@ class CRFDecodeKernelImpl : public CRFDecodeKernel { } \ } +#ifndef _WIN32 // commented out crf decoding #ifdef __AVX__ INTRIAVX_FLOAT(kEQ8); INTRIAVX_FLOAT(kGT8LT16); @@ -275,6 +273,7 @@ INTRIAVX2_FLOAT(jit::avx2, kGT8LT16); INTRIAVX2_FLOAT(jit::avx2, kEQ16); INTRIAVX2_FLOAT(jit::avx2, kGT16); #endif +#endif // WIN32 #ifdef __AVX512F__ INTRIAVX2_FLOAT(jit::avx512f, kEQ8); INTRIAVX2_FLOAT(jit::avx512f, kGT8LT16); diff --git a/paddle/fluid/operators/math/jit_kernel_exp.cc b/paddle/fluid/operators/math/jit_kernel_exp.cc index d7c177e6782e19e199542e10e1d62587ee0df4cf..131c226589a0a233f3f78e89dfe56e6f017798d4 100644 --- a/paddle/fluid/operators/math/jit_kernel_exp.cc +++ b/paddle/fluid/operators/math/jit_kernel_exp.cc @@ -20,10 +20,6 @@ limitations under the License. */ #include "paddle/fluid/platform/dynload/mklml.h" #endif -#ifdef __AVX__ -#include -#endif - namespace paddle { namespace operators { namespace math { @@ -66,14 +62,18 @@ namespace detail { #ifdef __AVX__ +#if defined(_WIN32) +#define ALIGN32 __declspec(align(32)) +#else #define ALIGN32 __attribute__((aligned(32))) +#endif // _WIN32 #define _PS256_CONST(Name, Val) \ - static const float _ps256_##Name[8] ALIGN32 = {Val, Val, Val, Val, \ + static const float ALIGN32 _ps256_##Name[8] = {Val, Val, Val, Val, \ Val, Val, Val, Val} #define _PI256_CONST(Name, Val) \ - static const int _pi256_##Name[8] ALIGN32 = {Val, Val, Val, Val, \ + static const int ALIGN32 _pi256_##Name[8] = {Val, Val, Val, Val, \ Val, Val, Val, Val} _PI256_CONST(0x7f, 0x7f); @@ -98,7 +98,7 @@ typedef union imm_xmm_union { #define COPY_IMM_TO_XMM(imm_, xmm0_, xmm1_) \ { \ - imm_xmm_union u ALIGN32; \ + imm_xmm_union ALIGN32 u; \ u.imm = imm_; \ xmm0_ = u.xmm[0]; \ xmm1_ = u.xmm[1]; \ @@ -106,7 +106,7 @@ typedef union imm_xmm_union { #define COPY_XMM_TO_IMM(xmm0_, xmm1_, imm_) \ { \ - imm_xmm_union u ALIGN32; \ + imm_xmm_union ALIGN32 u; \ u.xmm[0] = xmm0_; \ u.xmm[1] = xmm1_; \ imm_ = u.imm; \ @@ -508,12 +508,14 @@ class VTanhKernelImpl : public VTanhKernel { vaddbias_->Compute(-1.f, y, y); \ } +#ifndef __WIN32 #ifdef __AVX__ INTRI8_FLOAT(jit::avx, detail::ExpAVX); INTRI16_FLOAT(jit::avx, detail::ExpAVX); INTRI_GT8LT16_FLOAT(jit::avx, detail::ExpAVX); INTRI_GT16_FLOAT(jit::avx, detail::ExpAVX); -#endif +#endif // AVX +#endif // WIN32 #ifdef __AVX2__ INTRI8_FLOAT(jit::avx2, detail::ExpAVX2); INTRI16_FLOAT(jit::avx2, detail::ExpAVX2); diff --git a/paddle/fluid/operators/math/jit_kernel_rnn.cc b/paddle/fluid/operators/math/jit_kernel_rnn.cc index ba3e917377cf12192a068a9d71238442e12d5e5e..fc6a3caef00765957ce0627d0ae4e6446c1d6355 100644 --- a/paddle/fluid/operators/math/jit_kernel_rnn.cc +++ b/paddle/fluid/operators/math/jit_kernel_rnn.cc @@ -18,10 +18,6 @@ limitations under the License. 
*/ #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/macros.h" -#ifdef __AVX__ -#include -#endif - namespace paddle { namespace operators { namespace math { diff --git a/paddle/fluid/operators/math/selected_rows_functor.cu b/paddle/fluid/operators/math/selected_rows_functor.cu index c4fccdbf862fda8a599869c30ae598573ca367aa..ddd6b2a531c1f1c05255f0fe89995990b4184763 100644 --- a/paddle/fluid/operators/math/selected_rows_functor.cu +++ b/paddle/fluid/operators/math/selected_rows_functor.cu @@ -16,6 +16,7 @@ limitations under the License. */ #include #include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/math_function_impl.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/float16.h" diff --git a/paddle/fluid/operators/math/sequence_pooling.cu b/paddle/fluid/operators/math/sequence_pooling.cu index e468cd23e8f3519cac9f5a91d5e9f90a156a6d0a..51da6de26e2a47da2c22a1c2e2e1a9412badc58f 100644 --- a/paddle/fluid/operators/math/sequence_pooling.cu +++ b/paddle/fluid/operators/math/sequence_pooling.cu @@ -16,18 +16,12 @@ limitations under the License. */ #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/sequence_pooling.h" #include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/macros.h" namespace paddle { namespace operators { namespace math { -#if defined(__FLT_MAX__) -#define FLT_MAX __FLT_MAX__ -#else -#include -#include -#endif - template struct MaxPoolFunctor { HOSTDEVICE void operator()(const T* input, const size_t start, diff --git a/paddle/fluid/operators/print_op.cc b/paddle/fluid/operators/print_op.cc index e7f1caf4d3a81dc7633139933c6a4c3d51a4e2a0..e18bc17fd642478a03e0a28fa448b19738bc27e0 100644 --- a/paddle/fluid/operators/print_op.cc +++ b/paddle/fluid/operators/print_op.cc @@ -13,6 +13,7 @@ limitations under the License. */ #include +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/var_type.h" diff --git a/paddle/fluid/operators/save_combine_op.cc b/paddle/fluid/operators/save_combine_op.cc index 5b05f757c0355ed15617dea925b5d4929fcbfee0..f1cd7c6ff64e43c7c2ddc25e8965e577c357894d 100644 --- a/paddle/fluid/operators/save_combine_op.cc +++ b/paddle/fluid/operators/save_combine_op.cc @@ -14,6 +14,7 @@ limitations under the License. 
*/
 #include <stdint.h>
 #include <fstream>
+#include <memory>
 #include <numeric>
 #include "paddle/fluid/framework/data_type.h"
@@ -41,6 +42,7 @@ class SaveCombineOp : public framework::OperatorBase {
     auto filename = Attr<std::string>("file_path");
     auto overwrite = Attr<bool>("overwrite");
     auto save_as_fp16 = Attr<bool>("save_as_fp16");
+    auto format = Attr<std::string>("format");
     bool is_present = FileExists(filename);
     if (is_present && !overwrite) {
@@ -49,8 +51,14 @@ class SaveCombineOp : public framework::OperatorBase {
     }
     MkDirRecursively(DirName(filename).c_str());
-    std::ofstream fout(filename);
-    PADDLE_ENFORCE(static_cast<bool>(fout), "Cannot open %s to write",
+    std::unique_ptr<std::ofstream> fout;
+    if (format == "windows") {
+      fout.reset(new std::ofstream(filename,
+                                   std::ios_base::out | std::ios_base::binary));
+    } else {
+      fout.reset(new std::ofstream(filename));
+    }
+    PADDLE_ENFORCE(static_cast<bool>(*fout), "Cannot open %s to write",
                    filename);
     auto inp_var_names = Inputs("X");
@@ -86,12 +94,11 @@ class SaveCombineOp : public framework::OperatorBase {
         // copy LoD info to the new tensor
         out.set_lod(tensor.lod());
         framework::TransDataType(in_kernel_type, out_kernel_type, tensor, &out);
-        framework::SerializeToStream(fout, out, dev_ctx);
+        framework::SerializeToStream(*fout, out, dev_ctx);
       } else {
-        framework::SerializeToStream(fout, tensor, dev_ctx);
+        framework::SerializeToStream(*fout, tensor, dev_ctx);
       }
     }
-    fout.close();
   }
 };
@@ -124,6 +131,18 @@ to a file on disk.
              "The \"file_path\" where the LoDTensor variables will be saved.")
         .AddCustomChecker(
             [](const std::string &path) { return !path.empty(); });
+    AddAttr<std::string>("format",
+                         R"DOC((windows|linux), the saved model file format.
+Windows and Linux use different newline conventions: "\r\n" on Windows and
+"\n" on Linux. If format is set to "windows", the model file is written in
+binary mode, so it can be loaded on both Linux and Windows. If format is set
+to "linux", the file is written in text mode. Note that the two formats are
+not interchangeable.)DOC")
+        .SetDefault("linux")
+        .AddCustomChecker([](const std::string &s) {
+          return s == "windows" || s == "linux";
+        });
   }
 };
diff --git a/paddle/fluid/operators/save_op.cc b/paddle/fluid/operators/save_op.cc
index e79cffcf498c52ed14db235f6221cfdf08399c9d..9eea9e1a9517e84edcb11695ca33c5b7bfdc66f1 100644
--- a/paddle/fluid/operators/save_op.cc
+++ b/paddle/fluid/operators/save_op.cc
@@ -14,6 +14,7 @@ limitations under the License. */
 #include <stdint.h>
 #include <fstream>
+#include <memory>
 #include <numeric>
 #include "paddle/fluid/framework/data_type.h"
@@ -64,6 +65,7 @@ class SaveOp : public framework::OperatorBase {
                      framework::Variable *var) const {
     auto filename = Attr<std::string>("file_path");
     auto overwrite = Attr<bool>("overwrite");
+    auto format = Attr<std::string>("format");
     if (FileExists(filename) && !overwrite) {
       PADDLE_THROW("%s is existed, cannot save to it when overwrite=false",
@@ -80,8 +82,14 @@ class SaveOp : public framework::OperatorBase {
     // FIXME(yuyang18): We save variable to local file now, but we should change
     // it to save an output stream.
-    std::ofstream fout(filename);
-    PADDLE_ENFORCE(static_cast<bool>(fout), "Cannot open %s to write",
+    std::unique_ptr<std::ofstream> fout;
+    if (format == "windows") {
+      fout.reset(new std::ofstream(filename,
+                                   std::ios_base::out | std::ios_base::binary));
+    } else {
+      fout.reset(new std::ofstream(filename));
+    }
+    PADDLE_ENFORCE(static_cast<bool>(*fout), "Cannot open %s to write",
                    filename);
     auto save_as_fp16 = Attr<bool>("save_as_fp16");
@@ -95,11 +103,10 @@ class SaveOp : public framework::OperatorBase {
       framework::TransDataType(in_kernel_type, out_kernel_type, tensor, &out);
       // copy LoD info to the new tensor
       out.set_lod(tensor.lod());
-      framework::SerializeToStream(fout, out, dev_ctx);
+      framework::SerializeToStream(*fout, out, dev_ctx);
     } else {
-      framework::SerializeToStream(fout, tensor, dev_ctx);
+      framework::SerializeToStream(*fout, tensor, dev_ctx);
     }
-    fout.close();
   }

   void SaveSelectedRows(const framework::Scope &scope,
@@ -110,6 +117,7 @@ class SaveOp : public framework::OperatorBase {
         lt_var != nullptr,
         "Can not find variable kLookupTablePath for SaveSelectedRows");
     std::string filename = lt_var->data();
+    auto format = Attr<std::string>("format");
     VLOG(4) << "SaveSelectedRows get File name: " << filename;
     MkDirRecursively(DirName(filename).c_str());
@@ -122,11 +130,16 @@ class SaveOp : public framework::OperatorBase {
     // FIXME(yuyang18): We save variable to local file now, but we should change
     // it to save an output stream.
-    std::ofstream fout(filename);
-    PADDLE_ENFORCE(static_cast<bool>(fout), "Cannot open %s to write",
+    std::unique_ptr<std::ofstream> fout;
+    if (format == "windows") {
+      fout.reset(new std::ofstream(filename,
+                                   std::ios_base::out | std::ios_base::binary));
+    } else {
+      fout.reset(new std::ofstream(filename));
+    }
+    PADDLE_ENFORCE(static_cast<bool>(*fout), "Cannot open %s to write",
                    filename);
-    framework::SerializeToStream(fout, selectedRows, dev_ctx);
-    fout.close();
+    framework::SerializeToStream(*fout, selectedRows, dev_ctx);
   }
 };
@@ -154,6 +167,18 @@ This operator will serialize and write LoDTensor / SelectedRows variable to file
              "The \"file_path\" where the variable will be saved.")
         .AddCustomChecker(
             [](const std::string &path) { return !path.empty(); });
+    AddAttr<std::string>("format",
+                         R"DOC((windows|linux), the saved model file format.
+Windows and Linux use different newline conventions: "\r\n" on Windows and
+"\n" on Linux. If format is set to "windows", the model file is written in
+binary mode, so it can be loaded on both Linux and Windows. If format is set
+to "linux", the file is written in text mode. Note that the two formats are
+not interchangeable.)DOC")
+        .SetDefault("linux")
+        .AddCustomChecker([](const std::string &s) {
+          return s == "windows" || s == "linux";
+        });
   }
 };
diff --git a/paddle/fluid/operators/space_to_depth_op.cc b/paddle/fluid/operators/space_to_depth_op.cc
new file mode 100644
index 0000000000000000000000000000000000000000..f109dd685c87ab1b0776a855bb5f510eab1f5526
--- /dev/null
+++ b/paddle/fluid/operators/space_to_depth_op.cc
@@ -0,0 +1,131 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/operators/space_to_depth_op.h"
+#include <string>
+#include <vector>
+
+namespace paddle {
+namespace operators {
+
+class SpaceToDepthOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+  void InferShape(framework::InferShapeContext* ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("X"),
+                   "Input(X) of SpaceToDepthOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("Out"),
+                   "Output(Out) of SpaceToDepthOp should not be null.");
+
+    auto x_dims = ctx->GetInputDim("X");
+    PADDLE_ENFORCE_EQ(x_dims.size(), 4, "input should be a 4D tensor");
+    auto blocksize = ctx->Attrs().Get<int64_t>("blocksize");
+
+    PADDLE_ENFORCE_GT(blocksize, 1, "The blocksize should be greater than 1");
+    PADDLE_ENFORCE_GT(x_dims[1], 0, "input channel should be greater than 0");
+    PADDLE_ENFORCE_GT(x_dims[2], 0, "input height should be greater than 0");
+    PADDLE_ENFORCE_GT(x_dims[3], 0, "input width should be greater than 0");
+
+    PADDLE_ENFORCE_EQ(x_dims[1] % (blocksize * blocksize), 0,
+                      "input channel should be divisible by the square of "
+                      "SpaceToDepthOp blocksize");
+    PADDLE_ENFORCE_EQ(x_dims[2] % (blocksize), 0,
+                      "input height should be divisible by "
+                      "SpaceToDepthOp blocksize");
+    PADDLE_ENFORCE_EQ(x_dims[3] % (blocksize), 0,
+                      "input width should be divisible by "
+                      "SpaceToDepthOp blocksize");
+
+    VLOG(3) << "SpaceToDepthOp operator x.shape=" << x_dims
+            << ", attribute blocksize=" << blocksize << std::endl;
+
+    std::vector<int64_t> output_shape(4, 0);  // [B,C,H,W]
+    output_shape[0] = x_dims[0];
+    output_shape[1] = x_dims[1] * blocksize * blocksize;
+    output_shape[2] = x_dims[2] / blocksize;
+    output_shape[3] = x_dims[3] / blocksize;
+
+    auto out_dims = framework::make_ddim(output_shape);
+
+    ctx->SetOutputDim("Out", out_dims);
+
+    if (x_dims[0] == out_dims[0]) {
+      // Only pass LoD when the first dimension of output and Input(X)
+      // are the same.
+      ctx->ShareLoD("X", /*->*/ "Out");
+    }
+  }
+};
+
+class SpaceToDepthOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput("X",
+             "(Tensor). The input should be a 4D tensor B * C * H * W of "
+             "SpaceToDepthOp "
+             "operator.");
+    AddOutput("Out",
+              "(Tensor), The output should be a 4D tensor B * C2 * H2 * W2 of "
+              "SpaceToDepthOp operator.");
+    AddAttr<int64_t>(
+        "blocksize",
+        "(int64_t, default 2) blocksize used to rearrange space to depth.")
+        .SetDefault(2)
+        .GreaterThan(1);
+    AddComment(R"DOC(
+        reorg operator used in Yolo v2.
+        The shape transform is: C2 = C1 * blocksize * blocksize, H2 = H1 / blocksize, W2 = W1 / blocksize.
+
+        Rearranges Input(X) into the output shape according to Attr(blocksize). The
+        data in Input(X) are unchanged.
+
+        Examples:
+
+        1. Given a 4-D tensor Input(X) with a shape [128, 2048, 26, 26], and the blocksize is 2, the reorg operator will transform Input(X)
+        into a 4-D tensor with shape [128, 8192, 13, 13] while leaving Input(X)'s data unchanged.
+
+  )DOC");
+  }
+};
+
+class SpaceToDepthGradOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+  void InferShape(framework::InferShapeContext* ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) shouldn't be null.");
+    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
+                   "Input(Out@GRAD) shouldn't be null.");
+    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
+  }
+};
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+
+REGISTER_OPERATOR(space_to_depth, ops::SpaceToDepthOp, ops::SpaceToDepthOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(space_to_depth_grad, ops::SpaceToDepthGradOp);
+REGISTER_OP_CPU_KERNEL(
+    space_to_depth,
+    ops::SpaceToDepthKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::SpaceToDepthKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::SpaceToDepthKernel<paddle::platform::CPUDeviceContext, int64_t>);
+REGISTER_OP_CPU_KERNEL(
+    space_to_depth_grad,
+    ops::SpaceToDepthGradKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::SpaceToDepthGradKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::SpaceToDepthGradKernel<paddle::platform::CPUDeviceContext, int64_t>);
diff --git a/paddle/fluid/operators/space_to_depth_op.cu b/paddle/fluid/operators/space_to_depth_op.cu
new file mode 100644
index 0000000000000000000000000000000000000000..38d0a662733222386b8ecd68d064f3d1abe56c3b
--- /dev/null
+++ b/paddle/fluid/operators/space_to_depth_op.cu
@@ -0,0 +1,30 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/operators/space_to_depth_op.h"
+
+namespace plat = paddle::platform;
+namespace ops = paddle::operators;
+
+REGISTER_OP_CUDA_KERNEL(
+    space_to_depth,
+    ops::SpaceToDepthKernel<plat::CUDADeviceContext, float>,
+    ops::SpaceToDepthKernel<plat::CUDADeviceContext, double>,
+    ops::SpaceToDepthKernel<plat::CUDADeviceContext, int64_t>);
+
+REGISTER_OP_CUDA_KERNEL(
+    space_to_depth_grad,
+    ops::SpaceToDepthGradKernel<plat::CUDADeviceContext, float>,
+    ops::SpaceToDepthGradKernel<plat::CUDADeviceContext, double>,
+    ops::SpaceToDepthGradKernel<plat::CUDADeviceContext, int64_t>);
diff --git a/paddle/fluid/operators/space_to_depth_op.h b/paddle/fluid/operators/space_to_depth_op.h
new file mode 100644
index 0000000000000000000000000000000000000000..a71662b4813ab27b65f5c7a918e2bb6fb15a1993
--- /dev/null
+++ b/paddle/fluid/operators/space_to_depth_op.h
@@ -0,0 +1,127 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+#ifndef PADDLE_FLUID_OPERATORS_SPACE_TO_DEPTH_OP_H_
+#define PADDLE_FLUID_OPERATORS_SPACE_TO_DEPTH_OP_H_
+
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/platform/for_range.h"
+
+namespace paddle {
+namespace operators {
+
+template <typename T>
+class space_to_depth_compute {
+ public:
+  HOSTDEVICE space_to_depth_compute(const T *x, int64_t w, int64_t h,
+                                    int64_t c, int64_t batch,
+                                    int64_t blocksize, int64_t forward, T *out)
+      : x_(x),
+        w_(w),
+        h_(h),
+        c_(c),
+        batch_(batch),
+        blocksize_(blocksize),
+        forward_(forward),
+        out_(out) {}
+
+  HOSTDEVICE void operator()(int64_t in_index) {
+    int64_t out_c = c_ / (blocksize_ * blocksize_);
+    // calculate each dim position with index of tensor
+    int64_t b = in_index / (c_ * h_ * w_);
+    int64_t k = (in_index % (c_ * h_ * w_)) / (h_ * w_);
+    int64_t j = ((in_index % (c_ * h_ * w_)) % (h_ * w_)) / w_;
+    int64_t i = ((in_index % (c_ * h_ * w_)) % (h_ * w_)) % w_;
+
+    int64_t c2 = k % out_c;
+    int64_t offset = k / out_c;
+    int64_t w2 = i * blocksize_ + offset % blocksize_;
+    int64_t h2 = j * blocksize_ + offset / blocksize_;
+    int64_t out_index =
+        w2 + w_ * blocksize_ * (h2 + h_ * blocksize_ * (c2 + out_c * b));
+    if (forward_)
+      out_[out_index] = x_[in_index];
+    else
+      out_[in_index] = x_[out_index];
+  }
+
+ private:
+  const T *x_;
+  int64_t w_, h_, c_, batch_, blocksize_, forward_;
+  T *out_;
+};
+
+template <typename DeviceContext, typename T>
+class SpaceToDepthKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext &context) const override {
+    auto *out = context.Output<framework::LoDTensor>("Out");
+    auto *x = context.Input<framework::LoDTensor>("X");
+    auto blocksize = context.Attr<int64_t>("blocksize");
+    auto in_dims = x->dims();
+    out->mutable_data(context.GetPlace(), x->type());
+
+    auto out_dims = out->dims();
+    auto B = in_dims[0];
+    auto C = in_dims[1];
+    auto H = in_dims[2];
+    auto W = in_dims[3];
+    platform::ForRange<DeviceContext> for_range(
+        context.template device_context<DeviceContext>(),
+        static_cast<size_t>(x->numel()));
+
+    auto *x_data = x->data<T>();
+    auto *out_data = out->data<T>();
+    paddle::operators::space_to_depth_compute<T> computer(
+        x_data, W, H, C, B, blocksize, 1, out_data);
+    for_range(computer);
+
+    out->Resize(out_dims);
+  }
+};
+
+template <typename DeviceContext, typename T>
+class SpaceToDepthGradKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext &context) const override {
+    auto *d_out =
+        context.Input<framework::LoDTensor>(framework::GradVarName("Out"));
+    auto *d_x =
+        context.Output<framework::LoDTensor>(framework::GradVarName("X"));
+    auto blocksize = context.Attr<int64_t>("blocksize");
+    auto in_dims = d_x->dims();
+    d_x->mutable_data(context.GetPlace(), d_out->type());
+
+    auto B = in_dims[0];
+    auto C = in_dims[1];
+    auto H = in_dims[2];
+    auto W = in_dims[3];
+
+    platform::ForRange<DeviceContext> for_range(
+        context.template device_context<DeviceContext>(),
+        static_cast<size_t>(d_x->numel()));
+
+    auto *dx_data = d_x->data<T>();
+    auto *dout_data = d_out->data<T>();
+
+    paddle::operators::space_to_depth_compute<T> computer(
+        dout_data, W, H, C, B, blocksize, 0, dx_data);
+    for_range(computer);
+
+    d_x->Resize(in_dims);
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+#endif  // PADDLE_FLUID_OPERATORS_SPACE_TO_DEPTH_OP_H_
diff --git a/paddle/fluid/operators/split_lod_tensor_op.cc b/paddle/fluid/operators/split_lod_tensor_op.cc
index 767449cde981e5925b7144ff1038560c67651f3e..cfe491f4c59b71fef5381d8caabefdebfd2fd719 100644
--- a/paddle/fluid/operators/split_lod_tensor_op.cc
+++ b/paddle/fluid/operators/split_lod_tensor_op.cc
@@ -15,6 +15,7 @@ limitations under the License.
*/ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/port.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/tensorrt_engine_op.h b/paddle/fluid/operators/tensorrt_engine_op.h index 673f86da76ee0712b4d941f5b33594f89926b973..f30668fd21e5cf1b000e273cf853f0e07527f6f5 100644 --- a/paddle/fluid/operators/tensorrt_engine_op.h +++ b/paddle/fluid/operators/tensorrt_engine_op.h @@ -34,7 +34,7 @@ namespace operators { using FluidDT = framework::proto::VarType_Type; using TRT_DT = nvinfer1::DataType; -namespace { +namespace { // NOLINT TRT_DT FluidDataType2TRT(FluidDT type) { switch (type) { @@ -60,7 +60,7 @@ nvinfer1::Dims Vec2TRT_Dims(const std::vector& shape) { return nvinfer1::DimsCHW(shape[1], 1, 1); } -} // namespace +} // NOLINT // namespace using inference::Singleton; using inference::tensorrt::TRT_EngineManager; diff --git a/paddle/fluid/platform/cpu_info.h b/paddle/fluid/platform/cpu_info.h index 6810a1651a14cdb2080af846b21cad242b70bf35..bc0204e579d717062eb9754d1d0531649d119c04 100644 --- a/paddle/fluid/platform/cpu_info.h +++ b/paddle/fluid/platform/cpu_info.h @@ -16,6 +16,18 @@ limitations under the License. */ #include +#ifdef _WIN32 +#if defined(__AVX2__) +#include //avx2 +#elif defined(__AVX__) +#include //avx +#endif // AVX +#else // WIN32 +#ifdef __AVX__ +#include +#endif +#endif // WIN32 + namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/cudnn_helper.h b/paddle/fluid/platform/cudnn_helper.h index 07bb02be1962f758e50cab1f27de43e89f3953c3..0ec3a2a8595f743d77511b8cb81896608e2b4718 100644 --- a/paddle/fluid/platform/cudnn_helper.h +++ b/paddle/fluid/platform/cudnn_helper.h @@ -59,6 +59,7 @@ inline const char* cudnnGetErrorString(cudnnStatus_t status) { #define CUDNN_VERSION_MIN(major, minor, patch) \ (CUDNN_VERSION >= ((major)*1000 + (minor)*100 + (patch))) +#if !defined(_WIN32) #define CUDNN_ENFORCE(condition) \ do { \ cudnnStatus_t status = condition; \ @@ -66,6 +67,16 @@ inline const char* cudnnGetErrorString(cudnnStatus_t status) { PADDLE_THROW(::paddle::platform::cudnnGetErrorString(status)); \ } \ } while (false) +#else +// windows +#define CUDNN_ENFORCE(condition) \ + do { \ + cudnnStatus_t status = condition; \ + if (status != CUDNN_STATUS_SUCCESS) { \ + std::cerr << ::paddle::platform::cudnnGetErrorString(status); \ + } \ + } while (false) +#endif enum class DataLayout { // Not use kNHWC, diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc index ff49a1d57fd977a6d6b4502b44e48aad34cde872..b95e25e2c146daf2ed78c85f6a4315b1acf962bd 100644 --- a/paddle/fluid/platform/device_context.cc +++ b/paddle/fluid/platform/device_context.cc @@ -55,7 +55,6 @@ DeviceContextPool::DeviceContextPool( for (auto& p : places) { set.insert(p); } - for (auto& p : set) { if (platform::is_cpu_place(p)) { #ifdef PADDLE_WITH_MKLDNN @@ -205,7 +204,9 @@ CUDADeviceContext::CUDADeviceContext(CUDAPlace place) << ", Runtime Version: " << runtime_version_ / 1000 << "." 
<< (runtime_version_ % 100) / 10; +#ifndef _WIN32 callback_manager_.reset(new StreamCallbackManager(stream_)); +#endif // NOT WIN32 } CUDADeviceContext::~CUDADeviceContext() { diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h index df248f9bb15591d5015ad01278797ec7e31ef9d1..51cac83961d7571727e6b6d41f964ec60500910f 100644 --- a/paddle/fluid/platform/device_context.h +++ b/paddle/fluid/platform/device_context.h @@ -32,7 +32,7 @@ limitations under the License. */ #include "glog/logging.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/place.h" -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) #include "paddle/fluid/platform/stream_callback_manager.h" #endif #include "unsupported/Eigen/CXX11/Tensor" @@ -173,6 +173,7 @@ class CUDADeviceContext : public DeviceContext { PADDLE_ENFORCE(cudaEventRecord(ev, stream_)); } +#ifndef _WIN32 template void AddStreamCallback(Callback&& callback) const { std::lock_guard guard(callback_mtx_); @@ -183,6 +184,16 @@ class CUDADeviceContext : public DeviceContext { std::lock_guard guard(callback_mtx_); callback_manager_->Wait(); } +#else + template + void AddStreamCallback(Callback&& callback) const { + // ugly empty functor. + } + + void WaitStreamCallback() const { + // ugly empty functor. + } +#endif private: CUDAPlace place_; @@ -201,10 +212,12 @@ class CUDADeviceContext : public DeviceContext { mutable std::mutex mtx_; +#ifndef _WIN32 // This lock is only used by callback // If we use mtx_ for StreamCallbackManager, deadlock may occur sometimes mutable std::mutex callback_mtx_; std::unique_ptr callback_manager_; +#endif }; template <> diff --git a/paddle/fluid/platform/enforce.h b/paddle/fluid/platform/enforce.h index a251bfcd9914422cb6300adbbcdef3dfa79f441c..23f64170eb2e936ca3af1c467c5d4a621045528d 100644 --- a/paddle/fluid/platform/enforce.h +++ b/paddle/fluid/platform/enforce.h @@ -127,7 +127,7 @@ struct EOFException : public std::exception { #define UNLIKELY(condition) __builtin_expect(static_cast(condition), 0) #else // there is no equivalent intrinsics in msvc. -#define UNLIKELY(condition) (condition == 0) +#define UNLIKELY(condition) ((condition) == 0) #endif #if !defined(_WIN32) diff --git a/paddle/fluid/platform/init.cc b/paddle/fluid/platform/init.cc index f61abfc43d4acb89d6cd522106e7093296a036b6..03ec832c6b461157be3434470b53109fe8bb1a62 100644 --- a/paddle/fluid/platform/init.cc +++ b/paddle/fluid/platform/init.cc @@ -112,6 +112,14 @@ void InitDevices(bool init_p2p, const std::vector devices) { } places.emplace_back(platform::CPUPlace()); platform::DeviceContextPool::Init(places); + +// windows has no support for openblas multi-thread +#ifdef _WIN32 + if (FLAGS_paddle_num_threads > 1) { + FLAGS_paddle_num_threads = 1; + } +#endif + #ifndef PADDLE_WITH_MKLDNN platform::SetNumThreads(FLAGS_paddle_num_threads); #endif @@ -167,7 +175,7 @@ void InitGLOG(const std::string &prog_name) { // glog will not hold the ARGV[0] inside. // Use strdup to alloc a new string. google::InitGoogleLogging(strdup(prog_name.c_str())); -#ifndef _WIN32 +#if !defined(_WIN32) google::InstallFailureSignalHandler(); #endif } diff --git a/paddle/fluid/platform/macros.h b/paddle/fluid/platform/macros.h index 32b7efc04c1f2ecc22f93c08387aec69ded4930a..906ed6e8258d4157762d1cc2f26de2d6da1707e1 100644 --- a/paddle/fluid/platform/macros.h +++ b/paddle/fluid/platform/macros.h @@ -28,3 +28,16 @@ limitations under the License. 
*/
 #if defined(__FLT_MAX__)
 #define FLT_MAX __FLT_MAX__
 #endif  // __FLT_MAX__
+
+#ifdef _WIN32
+#if defined(PADDLE_COMPILE)
+// By default, MSVC predefines the macro _LIB for static libraries.
+// Only a shared library needs to export and import symbols;
+// a static library exports all symbols by default.
+#define PADDLE_DLL __declspec(dllexport)
+#else
+#define PADDLE_DLL __declspec(dllimport)
+#endif
+#else
+#define PADDLE_DLL
+#endif
diff --git a/paddle/fluid/platform/port.h b/paddle/fluid/platform/port.h
index cf9f4aa95bc1cb79d95b79331fbc09e11af64194..8f1e3bdd317507c246b3851e200e4acf7c58f675 100644
--- a/paddle/fluid/platform/port.h
+++ b/paddle/fluid/platform/port.h
@@ -15,12 +15,13 @@
 #pragma once
 #include
-#include
-
 #include
+#include  // NOLINT
+#include
 #include
 #define GLOG_NO_ABBREVIATED_SEVERITIES  // msvc conflict logging with windows.h
+#define GOOGLE_GLOG_DLL_DECL
 #include "glog/logging.h"
 #if !defined(_WIN32)
@@ -61,7 +62,6 @@ static void *dlopen(const char *filename, int flag) {
   }
   return reinterpret_cast<void *>(hModule);
 }
-
 #endif  // !_WIN32
 static void ExecShellCommand(const std::string &cmd, std::string *message) {
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 0b9f20987535e0405be50ea9a7d1d3afa7161bb6..512ac83994bd4d7630248becb194ce9ea193a486 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -152,6 +152,7 @@ __all__ = [
     'mul',
     'sigmoid_cross_entropy_with_logits',
     'maxout',
+    'space_to_depth',
     'affine_grid',
     'sequence_reverse',
     'affine_channel',
@@ -3064,7 +3065,7 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
         x = fluid.layers.data(name='y', shape=[10, 5], dtype='float32', lod_level=1)
         pad_value = fluid.layers.assign(
-            input=numpy.array([0], dtype=numpy.float32))
+            input=numpy.array([0.0], dtype=numpy.float32))
         out = fluid.layers.sequence_pad(x=x, pad_value=pad_value)
     """
@@ -7679,6 +7680,66 @@ def maxout(x, groups, name=None):
     return out
+
+def space_to_depth(x, blocksize, name=None):
+    """
+    Rearranges blocks of spatial data into depth for an input LoDTensor with
+    layout [batch, channel, height, width].
+
+    This op outputs a copy of the input LoDTensor where values from the height
+    and width dimensions are moved to the channel dimension. The attr blocksize
+    indicates the spatial block size.
+
+    space_to_depth reorganizes the elements of an input with shape
+    [batch, channel, height, width] according to blocksize to construct an
+    output with shape [batch, channel * blocksize * blocksize,
+    height/blocksize, width/blocksize]:
+
+    This operation is useful for resizing the activations between convolutions
+    while keeping all data.
+
+    - Non-overlapping blocks of size blocksize x blocksize are rearranged into depth at each location.
+    - The depth of the output tensor is blocksize * blocksize * input channel.
+    - The Y, X coordinates within each block of the input become the high-order component of the output channel index.
+    - channel should be divisible by the square of blocksize.
+    - height and width should be divisible by blocksize.
+
+    Args:
+        x(Variable): The input LoDTensor.
+        blocksize(int): The blocksize to select sub-blocks on each feature map; must be greater than 1.
+
+    Returns:
+        Variable: The output LoDTensor.
+
+    Raises:
+        ValueError: If blocksize is not a Python int.
+
+    Examples:
+        .. code-block:: python
+
+            data = fluid.layers.data(
+                name='data', shape=[1, 4, 2, 2], dtype='float32')
+            space_to_depthed = fluid.layers.space_to_depth(
+                x=data, blocksize=2)
+    """
+
+    helper = LayerHelper("space_to_depth", **locals())
+
+    if not isinstance(blocksize, int):
+        raise ValueError("blocksize must be a Python int")
+
+    if name is None:
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+    else:
+        out = helper.create_variable(
+            name=name, dtype=x.dtype, persistable=False)
+
+    helper.append_op(
+        type="space_to_depth",
+        inputs={"X": x},
+        attrs={"blocksize": blocksize},
+        outputs={"Out": out})
+    return out
+
+
 @templatedoc()
 def sequence_reverse(x, name=None):
     """
diff --git a/python/paddle/fluid/op.py b/python/paddle/fluid/op.py
index 4e1d1450dea85fe4eb3e68713250836e4beac992..b8bb3db1eedcf25c9b6a02ad3b4f261e8be8efce 100644
--- a/python/paddle/fluid/op.py
+++ b/python/paddle/fluid/op.py
@@ -108,6 +108,8 @@ class OpDescCreationMethod(object):
                 new_attr.i = user_defined_attr
             elif attr.type == framework_pb2.FLOAT:
                 new_attr.f = user_defined_attr
+            elif attr.type == framework_pb2.LONG:
+                new_attr.l = user_defined_attr
             elif attr.type == framework_pb2.STRING:
                 new_attr.s = user_defined_attr
             elif attr.type == framework_pb2.BOOLEAN:
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index c4ecc2c2c2563fcad09821453ee73e41f81407d5..49ba41e6fc908e9713414120bbeb45ca715042c3 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -248,6 +248,17 @@ class TestBook(unittest.TestCase):
         self.assertIsNotNone(layers.softmax(hid))
         print(str(program))
+    def test_space_to_depth(self):
+        program = Program()
+        with program_guard(program):
+            data = layers.data(
+                name='data',
+                shape=[32, 9, 6, 6],
+                append_batch_size=False,
+                dtype='float32')
+            self.assertIsNotNone(layers.space_to_depth(data, 3))
+        print(str(program))
+
     def test_sequence_unsqueeze(self):
         program = Program()
         with program_guard(program):
diff --git a/python/paddle/fluid/tests/unittests/test_space_to_depth_op.py b/python/paddle/fluid/tests/unittests/test_space_to_depth_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..5fdad44f1242b9ee99040b43d7ce2cf84664eed1
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_space_to_depth_op.py
@@ -0,0 +1,135 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +from __future__ import print_function +import unittest +import numpy as np +import paddle.fluid as fluid +from op_test import OpTest + + +class TestSpaceToDepthOp(OpTest): + @staticmethod + def helper(in_, width, height, channel, batch, blocksize, forward, out_): + channel_out = channel // (blocksize * blocksize) + for b in range(batch): + for k in range(channel): + for j in range(height): + for i in range(width): + in_index = i + width * (j + height * (k + channel * b)) + channel2 = k % channel_out + offset = k // channel_out + width2 = i * blocksize + offset % blocksize + height2 = j * blocksize + offset // blocksize + out_index = width2 + width * blocksize * ( + height2 + height * blocksize * + (channel2 + channel_out * b)) + if forward: + out_[out_index] = in_[in_index] + else: + out_[in_index] = in_[out_index] + + def setUp(self): + self.init_data() + + self.op_type = "space_to_depth" + self.inputs = {"X": self.x} + self.helper(self.x_1d, self.x.shape[3], self.x.shape[2], + self.x.shape[1], self.x.shape[0], self.blocksize, + self.forward, self.out_1d) + self.out = np.reshape(self.out_1d, self.infered_shape) + self.attrs = {"blocksize": self.blocksize} + self.outputs = {"Out": self.out} + + def init_data(self): + self.ori_shape = (32, 12, 6, 6) + self.infered_shape = (32, 48, 3, 3) + self.one_d_len = 32 * 48 * 3 * 3 + + self.blocksize = 2 + self.x = np.random.random(self.ori_shape).astype('float32') + self.x_1d = np.reshape(self.x, self.one_d_len) + self.out = np.zeros(self.infered_shape).astype('float32') + self.out_1d = np.reshape(self.out, self.one_d_len) + self.forward = 1 + + def test_check_output(self): + place = fluid.core.CUDAPlace(0) if fluid.core.is_compiled_with_cuda( + ) else fluid.core.CPUPlace() + self.check_output_with_place(place, 1e-5, None, False) + + def test_check_grad(self): + place = fluid.core.CUDAPlace(0) if fluid.core.is_compiled_with_cuda( + ) else fluid.core.CPUPlace() + self.check_grad_with_place(place, ['X'], 'Out') + + +class TestSpaceToDepthOpBasic(TestSpaceToDepthOp): + def init_data(self): + self.ori_shape = (32, 8, 6, 6) + self.infered_shape = (32, 32, 3, 3) + self.one_d_len = 32 * 32 * 3 * 3 + + self.blocksize = 2 + self.x = np.random.random(self.ori_shape).astype('float32') + self.x_1d = np.reshape(self.x, self.one_d_len) + self.out = np.zeros(self.infered_shape).astype('float32') + self.out_1d = np.reshape(self.out, self.one_d_len) + self.forward = 1 + + +class TestSpaceToDepthOpDoubleBasic(TestSpaceToDepthOp): + def init_data(self): + self.ori_shape = (32, 8, 6, 6) + self.infered_shape = (32, 32, 3, 3) + self.one_d_len = 32 * 32 * 3 * 3 + + self.blocksize = 2 + self.x = np.random.random(self.ori_shape).astype('float64') + self.x_1d = np.reshape(self.x, self.one_d_len) + self.out = np.zeros(self.infered_shape).astype('float64') + self.out_1d = np.reshape(self.out, self.one_d_len) + self.forward = 1 + + +class TestSpaceToDepthOpWithStride3(TestSpaceToDepthOp): + def init_data(self): + self.ori_shape = (32, 9, 6, 6) + self.infered_shape = (32, 81, 2, 2) + self.one_d_len = 32 * 81 * 2 * 2 + + self.blocksize = 3 + self.x = np.random.random(self.ori_shape).astype('float32') + self.x_1d = np.reshape(self.x, self.one_d_len) + self.out = np.zeros(self.infered_shape).astype('float32') + self.out_1d = np.reshape(self.out, self.one_d_len) + self.forward = 1 + + +class TestSpaceToDepthOpWithNotSquare(TestSpaceToDepthOp): + def init_data(self): + self.ori_shape = (32, 9, 9, 6) + self.infered_shape = (32, 81, 3, 2) + self.one_d_len = 32 * 81 * 3 * 2 + + 
self.blocksize = 3 + self.x = np.random.random(self.ori_shape).astype('float32') + self.x_1d = np.reshape(self.x, self.one_d_len) + self.out = np.zeros(self.infered_shape).astype('float32') + self.out_1d = np.reshape(self.out, self.one_d_len) + self.forward = 1 + + +if __name__ == '__main__': + unittest.main()
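
For reference, the reorg transform implemented by space_to_depth_compute above (and mirrored by the Python test helper) can be sketched in a few lines of NumPy. This is an illustrative sketch only, not part of the patch; the helper name reorg_nchw is hypothetical, and it assumes the same NCHW layout and darknet-style index mapping as the C++ functor with forward_ = 1.

import numpy as np


def reorg_nchw(x, blocksize, forward=True):
    # Mirrors space_to_depth_compute: walk every flat NCHW index of the
    # input and scatter it to the reorg'd position in the output buffer.
    b, c, h, w = x.shape
    c_out = c // (blocksize * blocksize)
    src = x.reshape(-1)
    dst = np.zeros_like(src)
    for in_index in range(src.size):
        # recover (batch, channel, row, col) from the flat index
        bi = in_index // (c * h * w)
        k = in_index % (c * h * w) // (h * w)
        j = in_index % (h * w) // w
        i = in_index % w
        c2 = k % c_out
        offset = k // c_out
        w2 = i * blocksize + offset % blocksize
        h2 = j * blocksize + offset // blocksize
        out_index = w2 + w * blocksize * (
            h2 + h * blocksize * (c2 + c_out * bi))
        if forward:
            dst[out_index] = src[in_index]
        else:
            dst[in_index] = src[out_index]
    return dst.reshape(b, c * blocksize * blocksize, h // blocksize,
                       w // blocksize)


# With blocksize 2, a (32, 12, 6, 6) input yields a (32, 48, 3, 3) output,
# matching TestSpaceToDepthOp.init_data.
x = np.random.random((32, 12, 6, 6)).astype('float32')
assert reorg_nchw(x, 2).shape == (32, 48, 3, 3)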