diff --git a/.gitignore b/.gitignore index fa0c8882606b76ac71b43dcde7e1df6770c46c31..10a4262aa7e129c48d79fbe7d978720b28f4bcea 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ paddle/operators/tensor.save python/paddle/v2/fluid/tests/book/image_classification_resnet.inference.model/ python/paddle/v2/fluid/tests/book/image_classification_vgg.inference.model/ python/paddle/v2/fluid/tests/book/label_semantic_roles.inference.model/ +paddle/fluid/operators/distributed/send_recv.proto *.DS_Store *.vs build/ @@ -28,4 +29,5 @@ third_party/ build_* # clion workspace. cmake-build-* +paddle/fluid/operators/distributed/send_recv.proto model_test diff --git a/CMakeLists.txt b/CMakeLists.txt index 9cfec8e70b4a3d166e3b45048408d7f5e45ce6e4..c62cc9bfd70d72d926eeee5eb52a69428855eb9b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -302,6 +302,14 @@ set(PADDLE_PYTHON_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/python/build") set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG") set(CMAKE_C_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG") +if (ON_INFER) + message(STATUS "On inference mode, will take place some specific optimization.") + add_definitions(-DPADDLE_ON_INFERENCE) +else() + #TODO(luotao), combine this warning with `make inference_lib_dist` command. + message(WARNING "On inference mode, will take place some specific optimization. Turn on the ON_INFER flag when building inference_lib only.") +endif() + add_subdirectory(paddle) if(WITH_PYTHON) add_subdirectory(python) @@ -312,10 +320,3 @@ if(WITH_DOC) find_python_module(recommonmark REQUIRED) add_subdirectory(doc) endif() - -if (ON_INFER) - message(STATUS "On inference mode, will take place some specific optimization.") -else() - #TODO(luotao), combine this warning with `make inference_lib_dist` command. - message(WARNING "On inference mode, will take place some specific optimization. Turn on the ON_INFER flag when building inference_lib only.") -endif() diff --git a/cmake/operators.cmake b/cmake/operators.cmake new file mode 100644 index 0000000000000000000000000000000000000000..ba9c266d133b637fd99f128bbfe42253a2400aaf --- /dev/null +++ b/cmake/operators.cmake @@ -0,0 +1,219 @@ +set(PART_CUDA_KERNEL_FILES) +function(op_library TARGET) + # op_library is a function to create op library. The interface is same as + # cc_library. But it handle split GPU/CPU code and link some common library + # for ops. + set(cc_srcs) + set(cu_srcs) + set(hip_cu_srcs) + set(miopen_hip_cc_srcs) + set(cu_cc_srcs) + set(cudnn_cu_cc_srcs) + set(CUDNN_FILE) + set(mkldnn_cc_srcs) + set(MKLDNN_FILE) + set(op_common_deps operator op_registry math_function) + set(options "") + set(oneValueArgs "") + set(multiValueArgs SRCS DEPS) + set(pybind_flag 0) + cmake_parse_arguments(op_library "${options}" "${oneValueArgs}" + "${multiValueArgs}" ${ARGN}) + + list(LENGTH op_library_SRCS op_library_SRCS_len) + if (${op_library_SRCS_len} EQUAL 0) + if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.cc) + list(APPEND cc_srcs ${TARGET}.cc) + endif() + if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.cu.cc) + list(APPEND cu_cc_srcs ${TARGET}.cu.cc) + endif() + if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.cu) + list(APPEND cu_srcs ${TARGET}.cu) + endif() + if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.part.cu) + set(PART_CUDA_KERNEL_FILES ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.part.cu + ${PART_CUDA_KERNEL_FILES} PARENT_SCOPE) + list(APPEND cu_srcs ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.part.cu) + endif() + + if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.hip.cu) + list(APPEND hip_cu_srcs ${TARGET}.hip.cu) + endif() + string(REPLACE "_op" "_cudnn_op" CUDNN_FILE "${TARGET}") + if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${CUDNN_FILE}.cu.cc) + list(APPEND cudnn_cu_cc_srcs ${CUDNN_FILE}.cu.cc) + endif() + if(WITH_AMD_GPU) + string(REPLACE "_op" "_miopen_op" MIOPEN_FILE "${TARGET}") + if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${MIOPEN_FILE}.hip.cc) + list(APPEND miopen_hip_cc_srcs ${MIOPEN_FILE}.hip.cc) + endif() + endif() + if(WITH_MKLDNN) + string(REPLACE "_op" "_mkldnn_op" MKLDNN_FILE "${TARGET}") + if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${MKLDNN_FILE}.cc) + list(APPEND mkldnn_cc_srcs ${MKLDNN_FILE}.cc) + endif() + endif() + else() + foreach(src ${op_library_SRCS}) + if (${src} MATCHES ".*\\.hip.cu$") + list(APPEND hip_cu_srcs ${src}) + elseif (${src} MATCHES ".*\\.cu$") + list(APPEND cu_srcs ${src}) + elseif(${src} MATCHES ".*_cudnn_op.cu.cc$") + list(APPEND cudnn_cu_cc_srcs ${src}) + elseif(WITH_AMD_GPU AND ${src} MATCHES ".*_miopen_op.hip.cc$") + list(APPEND miopen_hip_cc_srcs ${src}) + elseif(WITH_MKLDNN AND ${src} MATCHES ".*_mkldnn_op.cc$") + list(APPEND mkldnn_cc_srcs ${src}) + elseif(${src} MATCHES ".*\\.cu.cc$") + list(APPEND cu_cc_srcs ${src}) + elseif(${src} MATCHES ".*\\.cc$") + list(APPEND cc_srcs ${src}) + else() + message(FATAL_ERROR "${TARGET} Source file ${src} should only be .cc or .cu") + endif() + endforeach() + endif() + + list(LENGTH cc_srcs cc_srcs_len) + if (${cc_srcs_len} EQUAL 0) + message(FATAL_ERROR "The op library ${TARGET} should contains at least one .cc file") + endif() + if (WIN32) + # remove windows unsupported op, because windows has no nccl, no warpctc such ops. + foreach(windows_unsupport_op "nccl_op" "gen_nccl_id_op" "warpctc_op" "hierarchical_sigmoid_op" + "crf_decoding_op" "select_op" "lstmp_op" "gru_op" "fusion_gru_op" "lstm_op" "fusion_lstm_op" "cumsum_op" + "fusion_seqconv_eltadd_relu_op" "channel_send_op" "channel_create_op" "channel_close_op" "channel_recv_op") + if ("${TARGET}" STREQUAL "${windows_unsupport_op}") + return() + endif() + endforeach() + endif(WIN32) + set(OP_LIBRARY ${TARGET} ${OP_LIBRARY} CACHE INTERNAL "op libs") + + list(LENGTH op_library_DEPS op_library_DEPS_len) + if (${op_library_DEPS_len} GREATER 0) + set(DEPS_OPS ${TARGET} ${DEPS_OPS} PARENT_SCOPE) + endif() + if (WITH_GPU) + nv_library(${TARGET} SRCS ${cc_srcs} ${cu_cc_srcs} ${cudnn_cu_cc_srcs} ${mkldnn_cc_srcs} ${cu_srcs} DEPS ${op_library_DEPS} + ${op_common_deps}) + elseif (WITH_AMD_GPU) + hip_library(${TARGET} SRCS ${cc_srcs} ${hip_cu_srcs} ${miopen_hip_cc_srcs} ${mkldnn_cc_srcs} DEPS ${op_library_DEPS} + ${op_common_deps}) + else() + cc_library(${TARGET} SRCS ${cc_srcs} ${mkldnn_cc_srcs} DEPS ${op_library_DEPS} + ${op_common_deps}) + endif() + + # Define operators that don't need pybind here. + foreach(manual_pybind_op "compare_op" "logical_op" "nccl_op" +"tensor_array_read_write_op" "tensorrt_engine_op" "conv_fusion_op") + if ("${TARGET}" STREQUAL "${manual_pybind_op}") + set(pybind_flag 1) + endif() + endforeach() + + # The registration of USE_OP, please refer to paddle/fluid/framework/op_registry.h. + # Note that it's enough to just adding one operator to pybind in a *_op.cc file. + # And for detail pybind information, please see generated paddle/pybind/pybind.h. + file(READ ${TARGET}.cc TARGET_CONTENT) + string(REGEX MATCH "REGISTER_OPERATOR\\(.*REGISTER_OPERATOR\\(" multi_register "${TARGET_CONTENT}") + string(REGEX MATCH "REGISTER_OPERATOR\\([a-z0-9_]*," one_register "${multi_register}") + if (one_register STREQUAL "") + string(REPLACE "_op" "" TARGET "${TARGET}") + else () + string(REPLACE "REGISTER_OPERATOR(" "" TARGET "${one_register}") + string(REPLACE "," "" TARGET "${TARGET}") + endif() + + # pybind USE_NO_KERNEL_OP + # HACK: if REGISTER_OP_CPU_KERNEL presents the operator must have kernel + string(REGEX MATCH "REGISTER_OP_CPU_KERNEL" regex_result "${TARGET_CONTENT}") + string(REPLACE "_op" "" TARGET "${TARGET}") + if (${pybind_flag} EQUAL 0 AND regex_result STREQUAL "") + file(APPEND ${pybind_file} "USE_NO_KERNEL_OP(${TARGET});\n") + set(pybind_flag 1) + endif() + + # pybind USE_CPU_ONLY_OP + list(LENGTH cu_srcs cu_srcs_len) + list(LENGTH cu_cc_srcs cu_cc_srcs_len) + list(LENGTH mkldnn_cc_srcs mkldnn_cc_srcs_len) + list(LENGTH hip_cu_srcs hip_cu_srcs_len) + list(LENGTH miopen_hip_cc_srcs miopen_hip_cc_srcs_len) + if (${pybind_flag} EQUAL 0 AND ${mkldnn_cc_srcs_len} EQUAL 0 AND ${cu_srcs_len} EQUAL 0 AND ${cu_cc_srcs_len} EQUAL 0 AND + ${hip_cu_srcs_len} EQUAL 0 AND ${miopen_hip_cc_srcs_len} EQUAL 0) + file(APPEND ${pybind_file} "USE_CPU_ONLY_OP(${TARGET});\n") + set(pybind_flag 1) + endif() + + # pybind USE_OP_DEVICE_KERNEL for CUDNN + list(LENGTH cudnn_cu_cc_srcs cudnn_cu_cc_srcs_len) + if (WITH_GPU AND ${cudnn_cu_cc_srcs_len} GREATER 0) + file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(${TARGET}, CUDNN);\n") + endif() + + # pybind USE_OP_DEVICE_KERNEL for MIOPEN + if (WITH_AMD_GPU AND ${miopen_hip_cc_srcs_len} GREATER 0) + file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(${TARGET}, MIOPEN);\n") + endif() + + # pybind USE_OP_DEVICE_KERNEL for MKLDNN + if (WITH_MKLDNN AND ${mkldnn_cc_srcs_len} GREATER 0) + # Append first implemented MKLDNN activation operator + if (${MKLDNN_FILE} STREQUAL "activation_mkldnn_op") + file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(relu, MKLDNN);\n") + else() + file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(${TARGET}, MKLDNN);\n") + endif() + endif() + + # pybind USE_OP + if (${pybind_flag} EQUAL 0) + # NOTE(*): activation use macro to regist the kernels, set use_op manually. + if(${TARGET} STREQUAL "activation") + file(APPEND ${pybind_file} "USE_OP(relu);\n") + elseif(${TARGET} STREQUAL "fake_dequantize") + file(APPEND ${pybind_file} "USE_OP(fake_dequantize_max_abs);\n") + elseif(${TARGET} STREQUAL "fake_quantize") + file(APPEND ${pybind_file} "USE_OP(fake_quantize_abs_max);\n") + elseif(${TARGET} STREQUAL "tensorrt_engine_op") + message(STATUS "Pybind skips [tensorrt_engine_op], for this OP is only used in inference") + elseif(${TARGET} STREQUAL "fc") + # HACK: fc only have mkldnn and cpu, which would mismatch the cpu only condition + file(APPEND ${pybind_file} "USE_CPU_ONLY_OP(${TARGET});\n") + else() + file(APPEND ${pybind_file} "USE_OP(${TARGET});\n") + endif() + endif() +endfunction() + + +function(register_operators) + set(options "") + set(oneValueArgs "") + set(multiValueArgs EXCLUDES DEPS) + cmake_parse_arguments(register_operators "${options}" "${oneValueArgs}" + "${multiValueArgs}" ${ARGN}) + + file(GLOB OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*_op.cc") + string(REPLACE "_mkldnn" "" OPS "${OPS}") + string(REPLACE ".cc" "" OPS "${OPS}") + list(REMOVE_DUPLICATES OPS) + list(LENGTH register_operators_DEPS register_operators_DEPS_len) + + foreach(src ${OPS}) + list(FIND register_operators_EXCLUDES ${src} _index) + if (${_index} EQUAL -1) + if (${register_operators_DEPS_len} GREATER 0) + op_library(${src} DEPS ${register_operators_DEPS}) + else() + op_library(${src}) + endif() + endif() + endforeach() +endfunction() diff --git a/paddle/fluid/framework/data_device_transform_test.cu b/paddle/fluid/framework/data_device_transform_test.cu index 21e0cb3f91cc0ae05513c3bbd470650ca71194d7..2d2323edc3a6636bec72ea2ae7329ebd4e619348 100644 --- a/paddle/fluid/framework/data_device_transform_test.cu +++ b/paddle/fluid/framework/data_device_transform_test.cu @@ -17,7 +17,7 @@ limitations under the License. */ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_info.h" #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/operators/elementwise_op_function.h" +#include "paddle/fluid/operators/elementwise/elementwise_op_function.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/init.h" diff --git a/paddle/fluid/framework/details/exception_holder.h b/paddle/fluid/framework/details/exception_holder.h index c97b364de1ecae21e97351196389615187932b5e..1b1afce04ebbf803f543f839eadc26c522cc89ef 100644 --- a/paddle/fluid/framework/details/exception_holder.h +++ b/paddle/fluid/framework/details/exception_holder.h @@ -30,6 +30,8 @@ class ExceptionHolder { Catch(exp); } catch (platform::EnforceNotMet exp) { Catch(exp); + } catch (std::exception& ex) { + LOG(FATAL) << "std::exception caught, " << ex.what(); } catch (...) { LOG(FATAL) << "Unknown exception caught"; } diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 0313a6a1e3d11b9c43714544db15b092bbc586b3..7ce08b728d9436c3b6e678faf328ddf1c45b7080 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -418,11 +418,6 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, DeleteUnusedTensors(*local_scope, op.get(), gc.get(), &(ctx->cur_ref_cnts_)); } - - if (FLAGS_benchmark) { - VLOG(20) << "Memory used after operator " + op->Type() + " running: " - << memory::memory_usage(place_); - } } if (gc != nullptr) { @@ -444,13 +439,6 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, scope->DropKids(); } } - - if (FLAGS_benchmark) { - VLOG(20) << "-------------------------------------------------------"; - VLOG(20) << "Memory used after deleting local scope: " - << memory::memory_usage(place_); - VLOG(20) << "-------------------------------------------------------"; - } } void Executor::RunPreparedContext( diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc index 8d0035ae98b093979eb8bbcc0a8d6ae5356d951f..5376fc163e259e5049955052baf02fd614aa511e 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -14,14 +14,15 @@ #include "paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h" #include -#include +#include +#include +#include #include "paddle/fluid/framework/ir/graph_traits.h" namespace paddle { namespace framework { namespace ir { -namespace { // The function keeps the graph consistent by replacing // a node 'from' in the set of inputs nodes @@ -51,99 +52,382 @@ void CorrectGraphEdges(Graph* graph, Node* from, Node* to) { } } } -} // namespace -using graph_ptr = std::unique_ptr; -graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { - FusePassBase::Init(name_scope_, graph.get()); +bool IsReachable(ir::Graph* graph, Node* from, Node* to) { + auto find_node = [](ir::Graph* graph, const Node* node) -> Node* { + for (auto n : graph->Nodes()) { + if (n == node) { + return n; + } + } - GraphPatternDetector gpd; - auto pattern = gpd.mutable_pattern(); + return nullptr; + }; - patterns::Conv conv_pattern{pattern, name_scope_}; - auto conv_output = conv_pattern(); + if (from == to) { + return true; + } - patterns::ElementwiseAdd elementwise_add_pattern{pattern, name_scope_}; - elementwise_add_pattern(conv_output); + std::map visited; - conv_output->AsIntermediate(); + for (auto& node : GraphTraits::DFS(*graph)) { + visited[&node] = false; + } - auto conv_op_has_bias = [](const Node& conv_op) -> std::pair { - auto bias_input_names = conv_op.Op()->Inputs(); - auto bias_it = bias_input_names.find("Bias"); - - if (bias_it != std::end(bias_input_names)) { - bool has_bias = !bias_it->second.empty(); - - if (has_bias) { - auto conv_bias_names = bias_it->second; - auto conv_bias_names_it = - std::find_if(std::begin(conv_op.inputs), std::end(conv_op.inputs), - [&conv_bias_names](Node* n) -> bool { - return n->Name() == conv_bias_names[0]; - }); - return std::make_pair(has_bias, *conv_bias_names_it); - } - } + visited[from] = true; - return std::make_pair(false, nullptr); - }; + std::list queue; + queue.push_back(from); - auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph, - Graph* g) { - GET_IR_NODE_FROM_SUBGRAPH(conv_op, conv_op, conv_pattern); - GET_IR_NODE_FROM_SUBGRAPH(conv_input, conv_input, conv_pattern); - GET_IR_NODE_FROM_SUBGRAPH(conv_filter, conv_filter, conv_pattern); - GET_IR_NODE_FROM_SUBGRAPH(conv_output, conv_output, conv_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, - elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_x, elementwise_add_x, - elementwise_add_pattern); - GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, - elementwise_add_pattern); + while (!queue.empty()) { + auto cur = find_node(graph, queue.front()); + queue.pop_front(); - if (FindFuseOption(*conv_op, *elementwise_add_op) != FUSE_MKLDNN) return; + if (!cur) return false; - OpDesc op_desc; - op_desc.SetType("conv2d"); + for (auto n : cur->outputs) { + if (n == to) { + return true; + } - op_desc.SetInput("Input", {conv_input->Name()}); - op_desc.SetInput("Filter", {conv_filter->Name()}); - op_desc.SetInput("ResidualData", {elementwise_add_x->Name()}); - op_desc.SetOutput("Output", {conv_output->Name()}); + if (!visited[n]) { + visited[n] = true; + queue.push_back(n); + } + } + } + return false; +} - bool has_bias; - Node* conv_bias; +boost::optional HasBias(const Node& op, const std::string& bias_name) { + auto bias_input_names = op.Op()->Inputs(); + auto bias_it = bias_input_names.find(bias_name); - std::tie(has_bias, conv_bias) = conv_op_has_bias(*conv_op); + if (bias_it != std::end(bias_input_names)) { + bool has_bias = !bias_it->second.empty(); if (has_bias) { - op_desc.SetInput("Bias", {conv_bias->Name()}); + auto bias_names = bias_it->second; + auto bias_names_it = + std::find_if(std::begin(op.inputs), std::end(op.inputs), + [&bias_names](Node* n) -> bool { + return n->Name() == bias_names[0]; + }); + return *bias_names_it; } + } - for (const auto& attr : conv_op->Op()->GetAttrMap()) { - op_desc.SetAttr(attr.first, attr.second); - } + return boost::none; +} - op_desc.SetAttr("fuse_residual_connection", true); +ResidualConnectionMKLDNNFusePass::IdentityFuseHandle::IdentityFuseHandle( + const ResidualConnectionMKLDNNFusePass::CanFuseFunc& can_fuse_func, + const ResidualConnectionMKLDNNFusePass::IdentityConvFunc& + get_node_from_conv_op, + const ResidualConnectionMKLDNNFusePass::IdentityElementwiseAddFunc& + get_node_from_elementwise_add_op) + : fusion_stats{std::make_shared(0)}, + can_fuse_func{can_fuse_func}, + get_node_from_conv_op{get_node_from_conv_op}, + get_node_from_elementwise_add_op{get_node_from_elementwise_add_op} {} + +void ResidualConnectionMKLDNNFusePass::IdentityFuseHandle::operator()( + const GraphPatternDetector::subgraph_t& subgraph, Graph* graph) { + Node* conv_op; + Node* conv_input; + Node* conv_filter; + Node* conv_output; + + Node* elementwise_add_op; + Node* elementwise_add_identity; + Node* elementwise_add_out; + + std::tie(conv_op, conv_input, conv_filter, conv_output) = + get_node_from_conv_op(subgraph); + std::tie(elementwise_add_op, elementwise_add_identity, elementwise_add_out) = + get_node_from_elementwise_add_op(subgraph); + + if (!can_fuse_func(conv_op, elementwise_add_op)) return; + + if (!IsReachable(graph, elementwise_add_identity, conv_output)) return; + + OpDesc op_desc; + op_desc.SetType("conv2d"); + + op_desc.SetInput("Input", {conv_input->Name()}); + op_desc.SetInput("Filter", {conv_filter->Name()}); + op_desc.SetInput("ResidualData", {elementwise_add_identity->Name()}); + op_desc.SetOutput("Output", {conv_output->Name()}); + + auto conv_bias = HasBias(*conv_op, "Bias"); + + if (conv_bias) { + op_desc.SetInput("Bias", {(*conv_bias)->Name()}); + } - auto fused_conv_op = g->CreateOpNode(&op_desc); + for (const auto& attr : conv_op->Op()->GetAttrMap()) { + op_desc.SetAttr(attr.first, attr.second); + } - IR_NODE_LINK_TO(conv_input, fused_conv_op); - IR_NODE_LINK_TO(conv_filter, fused_conv_op); - IR_NODE_LINK_TO(elementwise_add_x, fused_conv_op); - IR_NODE_LINK_TO(fused_conv_op, conv_output); + op_desc.SetAttr("fuse_residual_connection", true); - if (has_bias) { - IR_NODE_LINK_TO(conv_bias, fused_conv_op); - } + auto fused_conv_op = graph->CreateOpNode(&op_desc); - CorrectGraphEdges(g, elementwise_add_out, conv_output); - GraphSafeRemoveNodes(g, {elementwise_add_out, conv_op, elementwise_add_op}); - }; + IR_NODE_LINK_TO(conv_input, fused_conv_op); + IR_NODE_LINK_TO(conv_filter, fused_conv_op); + IR_NODE_LINK_TO(elementwise_add_identity, fused_conv_op); + IR_NODE_LINK_TO(fused_conv_op, conv_output); - gpd(graph.get(), handler); + if (conv_bias) { + IR_NODE_LINK_TO((*conv_bias), fused_conv_op); + } + CorrectGraphEdges(graph, elementwise_add_out, conv_output); + GraphSafeRemoveNodes(graph, + {elementwise_add_out, conv_op, elementwise_add_op}); + (*fusion_stats)++; +} + +ResidualConnectionMKLDNNFusePass::ProjectionFuseHandle::ProjectionFuseHandle( + const ResidualConnectionMKLDNNFusePass::CanFuseFunc& can_fuse_func, + const ResidualConnectionMKLDNNFusePass::ProjectionConvFunc& + get_node_from_conv_x_op, + const ResidualConnectionMKLDNNFusePass::ProjectionConvFunc& + get_node_from_conv_y_op, + const ResidualConnectionMKLDNNFusePass::ProjectionElementwiseAddFunc& + get_node_from_elementwise_add_op) + : fusion_stats{std::make_shared(0)}, + can_fuse_func{can_fuse_func}, + get_node_from_conv_x_op{get_node_from_conv_x_op}, + get_node_from_conv_y_op{get_node_from_conv_y_op}, + get_node_from_elementwise_add_op{get_node_from_elementwise_add_op} {} + +void ResidualConnectionMKLDNNFusePass::ProjectionFuseHandle::operator()( + const GraphPatternDetector::subgraph_t& subgraph, Graph* graph) { + Node* conv_x_op; + Node* conv_x_input; + Node* conv_x_filter; + Node* conv_x_output; + + Node* conv_y_op; + Node* conv_y_input; + Node* conv_y_filter; + Node* conv_y_output; + + Node* elementwise_add_op; + Node* elementwise_add_out; + + std::tie(conv_x_op, conv_x_input, conv_x_filter, conv_x_output) = + get_node_from_conv_x_op(subgraph); + std::tie(conv_y_op, conv_y_input, conv_y_filter, conv_y_output) = + get_node_from_conv_y_op(subgraph); + std::tie(elementwise_add_op, elementwise_add_out) = + get_node_from_elementwise_add_op(subgraph); + + if (!can_fuse_func(conv_x_op, elementwise_add_op)) return; + if (!can_fuse_func(conv_y_op, elementwise_add_op)) return; + + Node* projection_node; + Node* residual_conv_op; + Node* residual_conv_input; + Node* residual_conv_filter; + Node* residual_conv_output; + + if (IsReachable(graph, conv_x_input, conv_y_output)) { + projection_node = conv_x_output; + residual_conv_op = conv_y_op; + residual_conv_input = conv_y_input; + residual_conv_filter = conv_y_filter; + residual_conv_output = conv_y_output; + } else if (IsReachable(graph, conv_y_input, conv_x_output)) { + projection_node = conv_y_output; + residual_conv_op = conv_x_op; + residual_conv_input = conv_x_input; + residual_conv_filter = conv_x_filter; + residual_conv_output = conv_x_output; + } else { + return; + } + + OpDesc op_desc; + op_desc.SetType("conv2d"); + + op_desc.SetInput("Input", {residual_conv_input->Name()}); + op_desc.SetInput("Filter", {residual_conv_filter->Name()}); + op_desc.SetInput("ResidualData", {projection_node->Name()}); + op_desc.SetOutput("Output", {residual_conv_output->Name()}); + + auto residual_conv_bias = HasBias(*residual_conv_op, "Bias"); + + if (residual_conv_bias) { + op_desc.SetInput("Bias", {(*residual_conv_bias)->Name()}); + } + + for (const auto& attr : residual_conv_op->Op()->GetAttrMap()) { + op_desc.SetAttr(attr.first, attr.second); + } + + op_desc.SetAttr("fuse_residual_connection", true); + + auto fused_conv_op = graph->CreateOpNode(&op_desc); + + IR_NODE_LINK_TO(residual_conv_input, fused_conv_op); + IR_NODE_LINK_TO(residual_conv_filter, fused_conv_op); + IR_NODE_LINK_TO(projection_node, fused_conv_op); + IR_NODE_LINK_TO(fused_conv_op, residual_conv_output); + + if (residual_conv_bias) { + IR_NODE_LINK_TO((*residual_conv_bias), fused_conv_op); + } + + CorrectGraphEdges(graph, elementwise_add_out, residual_conv_output); + GraphSafeRemoveNodes( + graph, {elementwise_add_out, residual_conv_op, elementwise_add_op}); + (*fusion_stats)++; +} + +std::tuple +ResidualConnectionMKLDNNFusePass::GetNodesFromConv( + const patterns::Conv& conv_pattern, + const GraphPatternDetector::subgraph_t& subgraph) const { + GET_IR_NODE_FROM_SUBGRAPH(conv_op, conv_op, conv_pattern); + GET_IR_NODE_FROM_SUBGRAPH(conv_input, conv_input, conv_pattern); + GET_IR_NODE_FROM_SUBGRAPH(conv_filter, conv_filter, conv_pattern); + GET_IR_NODE_FROM_SUBGRAPH(conv_output, conv_output, conv_pattern); + + return std::make_tuple(conv_op, conv_input, conv_filter, conv_output); +} + +GraphWithStats ResidualConnectionMKLDNNFusePass::FuseConvAsX( + const std::string& name_scope, + const GraphWithStats& graph_with_stats) const { + ir::Graph* graph; + int stats; + + std::tie(graph, stats) = graph_with_stats; + + GraphPatternDetector gpd; + auto pattern = gpd.mutable_pattern(); + + patterns::Conv conv_pattern{pattern, name_scope}; + auto conv_output = conv_pattern(); + + patterns::ElementwiseAdd elementwise_add_pattern{pattern, name_scope}; + elementwise_add_pattern( + conv_output, + pattern->NewNode(elementwise_add_pattern.elementwise_add_y_repr())); + conv_output->AsIntermediate(); + + auto get_node_from_elementwise_add = [&elementwise_add_pattern]( + const GraphPatternDetector::subgraph_t& subgraph) + -> std::tuple { + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_y, elementwise_add_y, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, + elementwise_add_pattern); + + return std::make_tuple(elementwise_add_op, elementwise_add_y, + elementwise_add_out); + }; + + return ExecuteHandleOnGraph( + &gpd, graph_with_stats, + [this, &conv_pattern](const GraphPatternDetector::subgraph_t& subgraph) { + return GetNodesFromConv(conv_pattern, subgraph); + }, + get_node_from_elementwise_add); +} + +GraphWithStats ResidualConnectionMKLDNNFusePass::FuseConvAsY( + const std::string& name_scope, + const GraphWithStats& graph_with_stats) const { + GraphPatternDetector gpd; + auto pattern = gpd.mutable_pattern(); + + patterns::Conv conv_pattern{pattern, name_scope}; + auto conv_output = conv_pattern(); + + patterns::ElementwiseAdd elementwise_add_pattern{pattern, name_scope}; + elementwise_add_pattern( + pattern->NewNode(elementwise_add_pattern.elementwise_add_x_repr()), + conv_output); + conv_output->AsIntermediate(); + + auto get_node_from_elementwise_add = [&elementwise_add_pattern]( + const GraphPatternDetector::subgraph_t& subgraph) + -> std::tuple { + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_x, elementwise_add_x, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, + elementwise_add_pattern); + + return std::make_tuple(elementwise_add_op, elementwise_add_x, + elementwise_add_out); + }; + + return ExecuteHandleOnGraph( + &gpd, graph_with_stats, + [this, &conv_pattern](const GraphPatternDetector::subgraph_t& subgraph) { + return GetNodesFromConv(conv_pattern, subgraph); + }, + get_node_from_elementwise_add); +} + +GraphWithStats ResidualConnectionMKLDNNFusePass::FuseProjectionConv( + const std::string& name_scope, + const GraphWithStats& graph_with_stats) const { + GraphPatternDetector gpd; + auto pattern = gpd.mutable_pattern(); + + patterns::Conv conv_x_pattern{pattern, name_scope}; + auto conv_x_output = conv_x_pattern(); + + patterns::Conv conv_y_pattern{pattern, name_scope}; + auto conv_y_output = conv_y_pattern(); + + patterns::ElementwiseAdd elementwise_add_pattern{pattern, name_scope}; + elementwise_add_pattern(conv_x_output, conv_y_output); + conv_x_output->AsIntermediate(); + conv_y_output->AsIntermediate(); + + auto get_node_from_elementwise_add = [&elementwise_add_pattern]( + const GraphPatternDetector::subgraph_t& subgraph) + -> std::tuple { + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, + elementwise_add_pattern); + + return std::make_tuple(elementwise_add_op, elementwise_add_out); + }; + + return ExecuteHandleOnGraph( + &gpd, graph_with_stats, + [this, + &conv_x_pattern](const GraphPatternDetector::subgraph_t& subgraph) { + return GetNodesFromConv(conv_x_pattern, subgraph); + }, + [this, + &conv_y_pattern](const GraphPatternDetector::subgraph_t& subgraph) { + return GetNodesFromConv(conv_y_pattern, subgraph); + }, + get_node_from_elementwise_add); +} + +graph_ptr ResidualConnectionMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { + FusePassBase::Init(name_scope_, graph.get()); + auto fused_graph_with_stats = FuseConvAsY( + name_scope_, + FuseConvAsX( + name_scope_, + FuseProjectionConv(name_scope_, std::make_pair(graph.get(), 0)))); + + std::cout << "Fused graph " << fused_graph_with_stats.second << std::endl; + AddStatis(fused_graph_with_stats.second); return graph; } } // namespace ir @@ -151,4 +435,4 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { } // namespace paddle REGISTER_PASS(conv_elementwise_add_mkldnn_fuse_pass, - paddle::framework::ir::ConvElementwiseAddMKLDNNFusePass); + paddle::framework::ir::ResidualConnectionMKLDNNFusePass); diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h index f4a899f1adb5e993895a40a8cfb846a67b41bb22..6629dae425ae85446fe2f6c8c172ca53f5ae8bea 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h @@ -15,24 +15,119 @@ #pragma once #include +#include +#include #include "paddle/fluid/framework/ir/fuse_pass_base.h" #include "paddle/fluid/framework/ir/graph.h" #include "paddle/fluid/framework/ir/graph_pattern_detector.h" +#include + namespace paddle { namespace framework { namespace ir { -class ConvElementwiseAddMKLDNNFusePass : public FusePassBase { +using graph_ptr = std::unique_ptr; +using GraphWithStats = std::pair; + +void CorrectGraphEdges(Graph* graph, Node* from, Node* to); +bool IsReachable(ir::Graph* graph, Node* from, Node* to); +boost::optional HasBias(const Node& op, const std::string& bias_name); + +class ResidualConnectionMKLDNNFusePass : public FusePassBase { + private: + GraphWithStats FuseConvAsX(const std::string& name_scope, + const GraphWithStats& graph_with_stats) const; + GraphWithStats FuseConvAsY(const std::string& name_scope, + const GraphWithStats& graph_with_stats) const; + GraphWithStats FuseProjectionConv( + const std::string& name_scope, + const GraphWithStats& graph_with_stats) const; + + template + using GetNodeFunc = + std::function; + using IdentityConvFunc = GetNodeFunc>; + using IdentityElementwiseAddFunc = + GetNodeFunc>; + + using ProjectionConvFunc = IdentityConvFunc; + using ProjectionElementwiseAddFunc = GetNodeFunc>; + + using CanFuseFunc = std::function; + + std::tuple GetNodesFromConv( + const patterns::Conv& conv_pattern, + const GraphPatternDetector::subgraph_t& subgraph) const; + + std::tuple GetNodesFromProjectionConv( + const patterns::Conv& conv_pattern, + const GraphPatternDetector::subgraph_t& subgraph) const; + + template + GraphWithStats ExecuteHandleOnGraph(GraphPatternDetector* gpd, + const GraphWithStats& graph_with_stats, + OpFuncs&&... op_funcs) const { + ir::Graph* graph; + int stats; + + std::tie(graph, stats) = graph_with_stats; + + auto can_fuse = [this](Node* op1, Node* op2) -> bool { + return this->FindFuseOption(*op1, *op2) == FUSE_MKLDNN; + }; + + auto fuse_handle = HandleType{can_fuse, std::forward(op_funcs)...}; + + (*gpd)(graph, fuse_handle); + + return std::make_pair(graph, stats + fuse_handle.get_stats()); + } + + struct IdentityFuseHandle { + IdentityFuseHandle( + const CanFuseFunc& can_fuse_func, + const IdentityConvFunc& get_node_from_conv_op, + const IdentityElementwiseAddFunc& get_node_from_elementwise_add_op); + + void operator()(const GraphPatternDetector::subgraph_t& subgraph, + Graph* graph); + int get_stats() const { return *fusion_stats; } + + private: + std::shared_ptr fusion_stats; + CanFuseFunc can_fuse_func; + IdentityConvFunc get_node_from_conv_op; + IdentityElementwiseAddFunc get_node_from_elementwise_add_op; + }; + + struct ProjectionFuseHandle { + ProjectionFuseHandle( + const CanFuseFunc& can_fuse_func, + const ProjectionConvFunc& get_node_from_conv_x_op, + const ProjectionConvFunc& get_node_from_conv_y_op, + const ProjectionElementwiseAddFunc& get_node_from_elementwise_add_op); + + void operator()(const GraphPatternDetector::subgraph_t& subgraph, + Graph* graph); + int get_stats() const { return *fusion_stats; } + + private: + std::shared_ptr fusion_stats; + CanFuseFunc can_fuse_func; + ProjectionConvFunc get_node_from_conv_x_op; + ProjectionConvFunc get_node_from_conv_y_op; + ProjectionElementwiseAddFunc get_node_from_elementwise_add_op; + }; + public: - virtual ~ConvElementwiseAddMKLDNNFusePass() {} + virtual ~ResidualConnectionMKLDNNFusePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl(graph_ptr graph) const; - const std::string name_scope_{"residual_connections_fuse_pass"}; + const std::string name_scope_{"residual_connection_fuse_pass"}; }; - } // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc index 348a3dfc5da78e860742595a60a0b7a8b2d92243..61ba097fd8cb55e25bda1947ea97d53308c55bd3 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc @@ -40,7 +40,7 @@ void SetOp(ProgramDesc* prog, const std::string& type, op->SetOutput(output.first, {output.second}); } -struct IsReachable { +struct TestIsReachable { using func = std::function; auto operator()(const std::unique_ptr& graph) -> func { @@ -89,7 +89,9 @@ struct IsReachable { } }; -void AssertOpsCount(const std::unique_ptr& graph) { +void AssertOpsCount(const std::unique_ptr& graph, + int expected_conv_count, + int expected_elementwise_add_count = 0) { int conv_count = 0; int elementwise_add_count = 0; @@ -101,8 +103,8 @@ void AssertOpsCount(const std::unique_ptr& graph) { ++elementwise_add_count; } } - EXPECT_EQ(conv_count, 1); - EXPECT_EQ(elementwise_add_count, 0); + EXPECT_EQ(conv_count, expected_conv_count); + EXPECT_EQ(elementwise_add_count, expected_elementwise_add_count); } ProgramDesc BuildProgramDesc(const std::vector& transient_vars, @@ -127,22 +129,13 @@ ProgramDesc BuildProgramDesc(const std::vector& transient_vars, return prog; } -} // namespace - -TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionWithElementwiseAddRelu) { - auto prog = - BuildProgramDesc({"a", "b", "c", "d", "e", "f"}, {"bias", "weights"}); - - SetOp(&prog, "conv2d", - {{"Input", "a"}, {"Bias", "bias"}, {"Filter", "weights"}}, - {"Output", "b"}); - SetOp(&prog, "elementwise_add", {{"X", "b"}, {"Y", "c"}}, {"Out", "d"}); - SetOp(&prog, "relu", {{"X", "d"}}, {"Out", "e"}); - std::unique_ptr graph(new ir::Graph(prog)); +void RunPassAndAssert(ProgramDesc* prog, const std::string& from, + const std::string& to, int expected_conv_num) { + std::unique_ptr graph(new ir::Graph(*prog)); - IsReachable is_reachable; - EXPECT_TRUE(is_reachable(graph)("a", "relu")); + TestIsReachable is_reachable; + EXPECT_TRUE(is_reachable(graph)(from, to)); auto pass = PassRegistry::Instance().Get("conv_elementwise_add_mkldnn_fuse_pass"); @@ -150,82 +143,87 @@ TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionWithElementwiseAddRelu) { graph = pass->Apply(std::move(graph)); int current_nodes_num = graph->Nodes().size(); - EXPECT_TRUE(is_reachable(graph)("a", "relu")); + EXPECT_TRUE(is_reachable(graph)(from, to)); EXPECT_EQ(original_nodes_num - nodes_removed + nodes_added, current_nodes_num); - AssertOpsCount(graph); + AssertOpsCount(graph, expected_conv_num); } +} // namespace -TEST(ConvElementwiseAddMKLDNNFusePass, - ConvolutionWithElementwiseAddReluNoBias) { - auto prog = BuildProgramDesc({"a", "b", "c", "d", "e"}, {"weights"}); - SetOp(&prog, "conv2d", {{"Input", "a"}, {"Filter", "weights"}}, - {"Output", "b"}); - SetOp(&prog, "elementwise_add", {{"X", "b"}, {"Y", "c"}}, {"Out", "d"}); - SetOp(&prog, "relu", {{"X", "d"}}, {"Out", "e"}); - - std::unique_ptr graph(new ir::Graph(prog)); +TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionAsYWithElementwiseAddRelu) { + auto prog = BuildProgramDesc({"a", "b", "c", "d", "e"}, {"bias", "weights"}); - IsReachable is_reachable; + SetOp(&prog, "sigmoid", {{"X", "a"}}, {"Out", "b"}); + SetOp(&prog, "conv2d", + {{"Input", "b"}, {"Bias", "bias"}, {"Filter", "weights"}}, + {"Output", "c"}); - EXPECT_TRUE(is_reachable(graph)("a", "relu")); + SetOp(&prog, "elementwise_add", {{"X", "a"}, {"Y", "c"}}, {"Out", "d"}); + SetOp(&prog, "relu", {{"X", "d"}}, {"Out", "e"}); - auto pass = - PassRegistry::Instance().Get("conv_elementwise_add_mkldnn_fuse_pass"); - int original_nodes_num = graph->Nodes().size(); - graph = pass->Apply(std::move(graph)); - int current_nodes_num = graph->Nodes().size(); + RunPassAndAssert(&prog, "a", "relu", 1); +} - EXPECT_TRUE(is_reachable(graph)("a", "relu")); +TEST(ConvElementwiseAddMKLDNNFusePass, + ConvolutionAsYWithElementwiseAddReluNoBias) { + auto prog = BuildProgramDesc({"a", "b", "c", "d", "e"}, {"weights"}); - EXPECT_EQ(original_nodes_num - nodes_removed + nodes_added, - current_nodes_num); + SetOp(&prog, "sigmoid", {{"X", "a"}}, {"Out", "b"}); + SetOp(&prog, "conv2d", {{"Input", "b"}, {"Filter", "weights"}}, + {"Output", "c"}); + SetOp(&prog, "elementwise_add", {{"X", "a"}, {"Y", "c"}}, {"Out", "d"}); + SetOp(&prog, "relu", {{"X", "d"}}, {"Out", "e"}); - AssertOpsCount(graph); + RunPassAndAssert(&prog, "a", "relu", 1); } -TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionElementwiseAdd) { - auto prog = BuildProgramDesc({"a", "b", "c", "d"}, {"bias", "weights"}); +TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionAsXWithElementwiseAddRelu) { + auto prog = BuildProgramDesc({"a", "b", "c", "d", "e"}, {"bias", "weights"}); + + SetOp(&prog, "sigmoid", {{"X", "a"}}, {"Out", "b"}); SetOp(&prog, "conv2d", - {{"Input", "a"}, {"Bias", "bias"}, {"Filter", "weights"}}, - {"Output", "b"}); - SetOp(&prog, "elementwise_add", {{"X", "b"}, {"Y", "c"}}, {"Out", "d"}); + {{"Input", "b"}, {"Bias", "bias"}, {"Filter", "weights"}}, + {"Output", "c"}); - std::unique_ptr graph(new ir::Graph(prog)); + SetOp(&prog, "elementwise_add", {{"X", "c"}, {"Y", "a"}}, {"Out", "d"}); + SetOp(&prog, "relu", {{"X", "d"}}, {"Out", "e"}); - IsReachable is_reachable; - EXPECT_TRUE(is_reachable(graph)("a", "d")); + RunPassAndAssert(&prog, "a", "relu", 1); +} - auto pass = - PassRegistry::Instance().Get("conv_elementwise_add_mkldnn_fuse_pass"); - int original_nodes_num = graph->Nodes().size(); - graph = pass->Apply(std::move(graph)); - int current_nodes_num = graph->Nodes().size(); +TEST(ConvElementwiseAddMKLDNNFusePass, + ConvolutionAsXWithElementwiseAddReluNoBias) { + auto prog = BuildProgramDesc({"a", "b", "c", "d", "e"}, {"weights"}); - EXPECT_FALSE(is_reachable(graph)("a", "d")); + SetOp(&prog, "sigmoid", {{"X", "a"}}, {"Out", "b"}); + SetOp(&prog, "conv2d", {{"Input", "b"}, {"Filter", "weights"}}, + {"Output", "c"}); + SetOp(&prog, "elementwise_add", {{"X", "c"}, {"Y", "a"}}, {"Out", "d"}); + SetOp(&prog, "relu", {{"X", "d"}}, {"Out", "e"}); - EXPECT_EQ(original_nodes_num - nodes_removed + nodes_added, - current_nodes_num); - AssertOpsCount(graph); + RunPassAndAssert(&prog, "a", "relu", 1); } -TEST(ConvElementwiseAddMKLDNNFusePass, SigmoidConvolutionAddElementwiseRelu) { +TEST(ConvElementwiseAddMKLDNNFusePass, NoFusion) { auto prog = - BuildProgramDesc({"a", "b", "c", "d", "e", "f"}, {"bias", "weights"}); + BuildProgramDesc({"a", "b", "c", "d", "e", "f", "g"}, {"weights"}); + SetOp(&prog, "sigmoid", {{"X", "a"}}, {"Out", "b"}); - SetOp(&prog, "conv2d", - {{"Input", "b"}, {"Bias", "bias"}, {"Filter", "weights"}}, + SetOp(&prog, "conv2d", {{"Input", "b"}, {"Filter", "weights"}}, {"Output", "c"}); - SetOp(&prog, "elementwise_add", {{"X", "c"}, {"Y", "d"}}, {"Out", "e"}); - SetOp(&prog, "relu", {{"X", "e"}}, {"Out", "f"}); - std::unique_ptr graph(new ir::Graph(prog)); + SetOp(&prog, "conv2d", {{"Input", "d"}, {"Filter", "weights"}}, + {"Output", "e"}); - IsReachable is_reachable; + SetOp(&prog, "elementwise_add", {{"X", "c"}, {"Y", "e"}}, {"Out", "f"}); + SetOp(&prog, "relu", {{"X", "f"}}, {"Out", "g"}); - EXPECT_TRUE(is_reachable(graph)("a", "f")); + std::unique_ptr graph(new ir::Graph(prog)); + + TestIsReachable is_reachable; + EXPECT_TRUE(is_reachable(graph)("a", "g")); auto pass = PassRegistry::Instance().Get("conv_elementwise_add_mkldnn_fuse_pass"); @@ -233,11 +231,10 @@ TEST(ConvElementwiseAddMKLDNNFusePass, SigmoidConvolutionAddElementwiseRelu) { graph = pass->Apply(std::move(graph)); int current_nodes_num = graph->Nodes().size(); - EXPECT_TRUE(is_reachable(graph)("a", "f")); + EXPECT_TRUE(is_reachable(graph)("a", "g")); + EXPECT_EQ(original_nodes_num, current_nodes_num); - EXPECT_EQ(original_nodes_num - nodes_removed + nodes_added, - current_nodes_num); - AssertOpsCount(graph); + AssertOpsCount(graph, 2, 1); } } // namespace ir diff --git a/paddle/fluid/framework/ir/graph_helper.cc b/paddle/fluid/framework/ir/graph_helper.cc index 98112c1ed317c230cb5150e7cbc6d0d173256601..963179192fa6cc959db66f76e0f48393143be0da 100644 --- a/paddle/fluid/framework/ir/graph_helper.cc +++ b/paddle/fluid/framework/ir/graph_helper.cc @@ -15,8 +15,15 @@ limitations under the License. */ #include "paddle/fluid/framework/ir/graph_helper.h" #include #include +#include +#include +#include #include +DEFINE_string(print_sub_graph_dir, "", + "FLAGS_print_sub_graph_dir is used " + "to print the nodes of sub_graphs."); + namespace paddle { namespace framework { namespace ir { @@ -164,12 +171,15 @@ size_t GraphNum(const Graph &graph) { graph_nodes.emplace_back(g_nodes); } - if (VLOG_IS_ON(100)) { - VLOG(100) << "graph_num: " << graph_nodes.size(); - for (auto &g_n : graph_nodes) { - VLOG(100) << "graph_nodes: " << g_n.size(); - if (g_n.size() < 10) { - std::stringstream out; + if (FLAGS_print_sub_graph_dir.size()) { + if (graph_nodes.size() > 1) { + std::stringstream out; + for (auto &g_n : graph_nodes) { + out << "graph_nodes: " << g_n.size() << "\n"; + } + out << "\n\n"; + for (auto &g_n : graph_nodes) { + out << "graph_nodes: " << g_n.size(); for (auto &node : g_n) { out << "\nNode: " << node->Name() << " in ["; for (auto &n : node->inputs) { @@ -181,8 +191,12 @@ size_t GraphNum(const Graph &graph) { } out << "]"; } - VLOG(100) << out.str(); + out << "\n\n\n"; } + std::unique_ptr fout( + new std::ofstream(FLAGS_print_sub_graph_dir)); + PADDLE_ENFORCE(fout->good()); + *fout << out.str(); } } diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc index b534a5509279ef7bfc5fc92ec726224e6c5ed16f..f1f971656ae6ab6bbf66c4a75dd7cf68b5848b7b 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.cc +++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc @@ -1084,16 +1084,12 @@ PDNode *patterns::Conv::operator()() { return output_var; } -PDNode *patterns::ElementwiseAdd::operator()(PDNode *x_var) { +PDNode *patterns::ElementwiseAdd::operator()(PDNode *x_var, PDNode *y_var) { auto elementwise_add_op = pattern->NewNode(elementwise_add_op_repr()) ->assert_is_op("elementwise_add"); - x_var->assert_is_op_input("elementwise_add", "X"); - - auto y_var = pattern->NewNode(elementwise_add_x_repr()) - ->AsInput() - ->assert_is_op_input("elementwise_add", "Y"); - + x_var->AsInput()->assert_is_op_input("elementwise_add", "X"); + y_var->AsInput()->assert_is_op_input("elementwise_add", "Y"); auto out_var = pattern->NewNode(elementwise_add_out_repr()) ->AsOutput() ->assert_is_op_output("elementwise_add", "Out"); diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.h b/paddle/fluid/framework/ir/graph_pattern_detector.h index 1c5155df7867f95fb403d51bf633084a6c400f12..c12b9503fd817757ec8d1e988be3e449fc63c6ff 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.h +++ b/paddle/fluid/framework/ir/graph_pattern_detector.h @@ -664,7 +664,7 @@ struct ElementwiseAdd : public PatternBase { ElementwiseAdd(PDPattern* pattern, const std::string& name_scope) : PatternBase(pattern, name_scope, "elementwise_add") {} - PDNode* operator()(PDNode* x_var); + PDNode* operator()(PDNode* x_var, PDNode* y_var); PATTERN_DECL_NODE(elementwise_add_op); PATTERN_DECL_NODE(elementwise_add_x); diff --git a/paddle/fluid/framework/lod_tensor.h b/paddle/fluid/framework/lod_tensor.h index e9b473d547252e80ed26ec61e1a33fbe1742dbe0..fb6e781fd07b9033bea547118b8338ad8b705c5e 100644 --- a/paddle/fluid/framework/lod_tensor.h +++ b/paddle/fluid/framework/lod_tensor.h @@ -111,9 +111,6 @@ class LoDTensor : public Tensor { public: LoDTensor() : Tensor() {} - /* Constructor with place should only be used in pybind */ - explicit LoDTensor(const platform::Place& place) : Tensor(place) {} - explicit LoDTensor(const LoD& lod) : lod_(lod) {} void set_lod(const LoD& lod) { lod_ = lod; } diff --git a/paddle/fluid/framework/mixed_vector.h b/paddle/fluid/framework/mixed_vector.h index e1aac6dc5a92fb616f00de5806f044b83c2f503f..6940250c3f9663bbb734d5a6eb78135aecbc3a3b 100644 --- a/paddle/fluid/framework/mixed_vector.h +++ b/paddle/fluid/framework/mixed_vector.h @@ -23,6 +23,7 @@ #include "paddle/fluid/framework/details/cow_ptr.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor_util.h" +#include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/memory/memcpy.h" #include "glog/logging.h" @@ -31,46 +32,6 @@ namespace paddle { namespace framework { #if defined(PADDLE_WITH_CUDA) -namespace details { -struct CUDABuffer { - void *data_{nullptr}; - size_t size_{0}; - platform::CUDAPlace place_; - - CUDABuffer() {} - CUDABuffer(platform::Place place, size_t size) - : size_(size), place_(boost::get(place)) { - data_ = memory::Alloc(place_, size); - } - - ~CUDABuffer() { ClearMemory(); } - - CUDABuffer(const CUDABuffer &o) = delete; - CUDABuffer &operator=(const CUDABuffer &o) = delete; - - void Resize(platform::Place place, size_t size) { - ClearMemory(); - place_ = boost::get(place); - data_ = memory::Alloc(place_, size); - PADDLE_ENFORCE_NOT_NULL(data_); - size_ = size; - } - - void Swap(CUDABuffer &o) { - std::swap(data_, o.data_); - std::swap(place_, o.place_); - std::swap(size_, o.size_); - } - - private: - void ClearMemory() const { - if (data_ != nullptr) { - memory::Free(place_, data_); - } - } -}; -} // namespace details - // Vector implements the std::vector interface, and can get Data or // MutableData from any place. The data will be synced implicitly inside. template @@ -103,8 +64,6 @@ class Vector { o.ImmutableCPU(); cpu_ = o.cpu_; flag_ = kDataInCPU; - details::CUDABuffer null; - gpu_.Swap(null); return *this; } @@ -199,7 +158,7 @@ class Vector { PADDLE_ENFORCE(platform::is_gpu_place(place), "CUDA Data must on CUDA place"); ImmutableCUDA(place); - return reinterpret_cast(gpu_.data_); + return reinterpret_cast(gpu_->ptr()); } // get cuda ptr. mutable @@ -234,13 +193,11 @@ class Vector { std::mutex &Mutex() const { return mtx_; } - std::unique_ptr CUDAPlace() const { - if (gpu_.data_ == nullptr) { - return nullptr; - } else { - return std::unique_ptr( - new platform::CUDAPlace(gpu_.place_)); - } + boost::optional CUDAPlace() const { + return gpu_ == nullptr + ? boost::none + : boost::optional( + boost::get(gpu_->place())); } private: @@ -254,13 +211,12 @@ class Vector { void CopyToCPU() const { // COPY GPU Data To CPU auto *dev_ctx = static_cast( - platform::DeviceContextPool::Instance().Get( - platform::Place(gpu_.place_))); + platform::DeviceContextPool::Instance().Get(gpu_->place())); auto stream = dev_ctx->stream(); - void *src = gpu_.data_; + void *src = gpu_->ptr(); void *dst = cpu_.data(); - memory::Copy(platform::CPUPlace(), dst, gpu_.place_, src, gpu_.size_, - stream); + memory::Copy(platform::CPUPlace(), dst, CUDAPlace().get(), src, + gpu_->size(), stream); dev_ctx->Wait(); } @@ -277,8 +233,7 @@ class Vector { CopyCPUDataToCUDA(place); UnsetFlag(kDirty); SetFlag(kDataInCUDA); - } else if (IsInCUDA() && - !(boost::get(place) == gpu_.place_)) { + } else if (IsInCUDA() && !(place == gpu_->place())) { PADDLE_THROW("This situation should not happen"); // Still dirty } else { @@ -290,7 +245,7 @@ class Vector { // Even data is not dirty. However, data is not in CUDA. Copy data. CopyCPUDataToCUDA(place); SetFlag(kDataInCUDA); - } else if (!(boost::get(place) == gpu_.place_)) { + } else if (!(place == gpu_->place())) { PADDLE_THROW("This situation should not happen."); } else { // Not Dirty && DataInCUDA && Device is same @@ -301,13 +256,13 @@ class Vector { void CopyCPUDataToCUDA(const platform::Place &place) const { void *src = cpu_.data(); - gpu_.Resize(place, cpu_.size() * sizeof(T)); - void *dst = gpu_.data_; + gpu_ = memory::Alloc(place, cpu_.size() * sizeof(T)); + void *dst = gpu_->ptr(); auto *dev_ctx = static_cast( platform::DeviceContextPool::Instance().Get(place)); auto stream = dev_ctx->stream(); - memory::Copy(gpu_.place_, dst, platform::CPUPlace(), src, gpu_.size_, - stream); + memory::Copy(CUDAPlace().get(), dst, platform::CPUPlace(), src, + gpu_->size(), stream); } void ImmutableCPU() const { @@ -329,7 +284,7 @@ class Vector { bool IsInCPU() const { return flag_ & kDataInCPU; } mutable std::vector cpu_; - mutable details::CUDABuffer gpu_; + mutable memory::AllocationPtr gpu_; mutable int flag_; mutable std::mutex mtx_; @@ -428,8 +383,8 @@ class Vector { auto &mtx = m_.Data().Mutex(); std::lock_guard guard(mtx); auto cuda_place = m_.Data().CUDAPlace(); - if (cuda_place == nullptr || - *cuda_place == boost::get(place)) { + if (cuda_place == boost::none || + cuda_place == boost::get(place)) { return m_.Data().CUDAData(place); } } @@ -444,8 +399,8 @@ class Vector { auto &mtx = m_.Data().Mutex(); std::lock_guard guard(mtx); auto cuda_place = m_.Data().CUDAPlace(); - if (cuda_place == nullptr || - *cuda_place == boost::get(place)) { + if (cuda_place == boost::none || + cuda_place == boost::get(place)) { return m_.MutableData()->CUDAMutableData(place); } } diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index 39b47415ff7e378cabc79e668fe2be63eb71d87f..2c6e337568306502fbaa362015e51f81efc0a5ff 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -171,8 +171,17 @@ ParallelExecutor::ParallelExecutor( } // If the loss_var_name is given, the number of graph should be only one. if (loss_var_name.size()) { - PADDLE_ENFORCE_EQ(ir::GraphNum(*graph), 1, - "The number of graph should be only one"); + size_t graph_num = ir::GraphNum(*graph); + if (graph_num > 1) { + LOG(WARNING) + << "The number of graph should be only one, " + "but the current graph has " + << ir::GraphNum(*graph) + << " sub_graphs. If you want to see the nodes of the " + "sub_graphs, you should use 'FLAGS_print_sub_graph_dir' " + "to specify the output dir. NOTES: if you not do training, " + "please don't pass loss_var_name."; + } } if (exec_strategy.type_ == ExecutionStrategy::kDefault) { diff --git a/paddle/fluid/framework/tensor.cc b/paddle/fluid/framework/tensor.cc index b6ba0df033af12d48e88eb57a3b97b559077250d..41566800e5781d576120ccf5dfbb3024bf4bea24 100644 --- a/paddle/fluid/framework/tensor.cc +++ b/paddle/fluid/framework/tensor.cc @@ -32,10 +32,9 @@ size_t Tensor::memory_size() const { } void* Tensor::mutable_data(platform::Place place, std::type_index type, + memory::Allocator::Attr attr, size_t requested_size) { - if (holder_ != nullptr) { - holder_->set_type(type); - } + type_ = type; PADDLE_ENFORCE_GE(numel(), 0, "When calling this method, the Tensor's numel must be " "equal or larger than zero. " @@ -48,35 +47,18 @@ void* Tensor::mutable_data(platform::Place place, std::type_index type, /* some versions of boost::variant don't have operator!= */ if (holder_ == nullptr || !(holder_->place() == place) || holder_->size() < size + offset_) { - if (platform::is_cpu_place(place)) { - holder_.reset(new PlaceholderImpl( - boost::get(place), size, type)); - } else if (platform::is_gpu_place(place) || - platform::is_cuda_pinned_place(place)) { -#ifndef PADDLE_WITH_CUDA - PADDLE_THROW( - "CUDAPlace or CUDAPinnedPlace is not supported in CPU-only mode."); - } -#else - if (platform::is_gpu_place(place)) { - holder_.reset(new PlaceholderImpl( - boost::get(place), size, type)); - } else if (platform::is_cuda_pinned_place(place)) { - holder_.reset(new PlaceholderImpl( - boost::get(place), size, type)); - } - } -#endif + holder_ = memory::AllocShared(place, size, attr); offset_ = 0; } return reinterpret_cast(reinterpret_cast(holder_->ptr()) + offset_); } -void* Tensor::mutable_data(platform::Place place, size_t requested_size) { +void* Tensor::mutable_data(platform::Place place, memory::Allocator::Attr attr, + size_t requested_size) { PADDLE_ENFORCE(this->holder_ != nullptr, "Cannot invoke mutable data if current hold nothing."); - return mutable_data(place, holder_->type(), requested_size); + return mutable_data(place, type_, attr, requested_size); } Tensor& Tensor::ShareDataWith(const Tensor& src) { @@ -101,6 +83,7 @@ Tensor Tensor::Slice(int begin_idx, int end_idx) const { Tensor dst; dst.holder_ = holder_; dst.set_layout(layout_); + dst.type_ = type_; DDim dst_dims = dims_; dst_dims[0] = end_idx - begin_idx; dst.Resize(dst_dims); diff --git a/paddle/fluid/framework/tensor.h b/paddle/fluid/framework/tensor.h index f1d268548578fea12082e2edb213a3749eccbfaf..71e8badd4b6b08e7d380fd45d93a33176172081d 100644 --- a/paddle/fluid/framework/tensor.h +++ b/paddle/fluid/framework/tensor.h @@ -67,12 +67,7 @@ class Tensor { friend struct EigenVector; public: - Tensor() : offset_(0) {} - - /*! Constructor with place should only be used in pybind. */ - explicit Tensor(const platform::Place& place) : offset_(0) { - holder_->set_place(place); - } + Tensor() : type_(typeid(float)), offset_(0) {} /*! Return a pointer to mutable memory block. */ template @@ -89,12 +84,17 @@ class Tensor { * @note If not exist, then allocation. */ template - T* mutable_data(platform::Place place, size_t requested_size = 0); + T* mutable_data(platform::Place place, + memory::Allocator::Attr attr = memory::Allocator::kDefault, + size_t requested_size = 0); void* mutable_data(platform::Place place, std::type_index type, + memory::Allocator::Attr attr = memory::Allocator::kDefault, size_t requested_size = 0); - void* mutable_data(platform::Place place, size_t requested_size = 0); + void* mutable_data(platform::Place place, + memory::Allocator::Attr attr = memory::Allocator::kDefault, + size_t requested_size = 0); /** * @brief Return a pointer to mutable memory block. @@ -106,7 +106,9 @@ class Tensor { * @note If not exist, then allocation. */ template - T* mutable_data(DDim dims, platform::Place place, size_t requested_size = 0); + T* mutable_data(DDim dims, platform::Place place, + memory::Allocator::Attr attr = memory::Allocator::kDefault, + size_t requested_size = 0); /*! Return the dimensions of the memory block. */ const DDim& dims() const; @@ -139,7 +141,7 @@ class Tensor { std::type_index type() const { PADDLE_ENFORCE_NOT_NULL( holder_, "Tensor not initialized yet when Tensor::type() is called."); - return holder_->type(); + return type_; } // memory size returns the holding memory size in byte. @@ -153,56 +155,13 @@ class Tensor { void clear() { holder_ = nullptr; } - private: - /** - * @note Placeholder hides type T, so it doesn't appear as a template - * parameter of Variable. - */ - struct Placeholder { - virtual ~Placeholder() = default; - virtual void* ptr() const = 0; - virtual size_t size() const = 0; - virtual std::type_index type() const = 0; - virtual platform::Place place() const = 0; - virtual void set_type(std::type_index type) = 0; - virtual void set_place(platform::Place place) = 0; - }; - - template - struct PlaceholderImpl : public Placeholder { - PlaceholderImpl(Place place, size_t size, std::type_index type) - : ptr_(static_cast(memory::Alloc(place, size)), - memory::PODDeleter(place)), - place_(place), - size_(size), - type_(type) { - PADDLE_ENFORCE_NOT_NULL(ptr_, "Insufficient %s memory to allocation.", - (is_cpu_place(place_) ? "CPU" : "GPU")); - } - - virtual size_t size() const { return size_; } - virtual platform::Place place() const { return place_; } - virtual void* ptr() const { return static_cast(ptr_.get()); } - virtual std::type_index type() const { return type_; } - virtual void set_type(std::type_index type) { type_ = type; } - virtual void set_place(platform::Place place) { place_ = place; } - - /*! the pointer of memory block. */ - std::unique_ptr> ptr_; - - /*! the place of memory block. */ - platform::Place place_; - - /*! the size of memory block. */ - size_t size_; - - /* the current type of memory */ - std::type_index type_; - }; + const std::shared_ptr& Holder() const { return holder_; } + size_t offset() const { return offset_; } + private: /*! holds the memory block if allocated. */ - std::shared_ptr holder_; - + std::shared_ptr holder_; + std::type_index type_; /** * @brief points to elements dimensions. * diff --git a/paddle/fluid/framework/tensor_impl.h b/paddle/fluid/framework/tensor_impl.h index 6d3047c95d6cf30c2a5308d4f69ded367066d78c..0c9c0d782fc73bd8278b82bebf7fd84a4f297b94 100644 --- a/paddle/fluid/framework/tensor_impl.h +++ b/paddle/fluid/framework/tensor_impl.h @@ -23,10 +23,10 @@ namespace framework { template inline const T* Tensor::data() const { check_memory_size(); - bool valid = std::is_same::value || - holder_->type() == std::type_index(typeid(T)); + bool valid = + std::is_same::value || type_ == std::type_index(typeid(T)); PADDLE_ENFORCE(valid, "Tensor holds the wrong type, it holds %s", - this->holder_->type().name()); + type_.name()); return reinterpret_cast( reinterpret_cast(holder_->ptr()) + offset_); @@ -37,26 +37,30 @@ inline bool Tensor::IsInitialized() const { return holder_ != nullptr; } template inline T* Tensor::data() { check_memory_size(); - bool valid = std::is_same::value || - holder_->type() == std::type_index(typeid(T)); + bool valid = + std::is_same::value || type_ == std::type_index(typeid(T)); PADDLE_ENFORCE(valid, "Tensor holds the wrong type, it holds %s", - this->holder_->type().name()); + type_.name()); return reinterpret_cast(reinterpret_cast(holder_->ptr()) + offset_); } template inline T* Tensor::mutable_data(DDim dims, platform::Place place, + memory::Allocator::Attr attr, size_t requested_size) { static_assert(std::is_pod::value, "T must be POD"); Resize(dims); - return mutable_data(place, requested_size); + return mutable_data(place, attr, requested_size); } template -inline T* Tensor::mutable_data(platform::Place place, size_t requested_size) { +inline T* Tensor::mutable_data(platform::Place place, + memory::Allocator::Attr attr, + size_t requested_size) { static_assert(std::is_pod::value, "T must be POD"); - return reinterpret_cast(mutable_data(place, typeid(T), requested_size)); + return reinterpret_cast( + mutable_data(place, typeid(T), attr, requested_size)); } inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) { diff --git a/paddle/fluid/framework/tensor_util_test.cc b/paddle/fluid/framework/tensor_util_test.cc index 793ccfc79fe56707f226477b9d50b1d972ab6a59..17c55378178325b40e394f4b422c22c1c10bd130 100644 --- a/paddle/fluid/framework/tensor_util_test.cc +++ b/paddle/fluid/framework/tensor_util_test.cc @@ -379,7 +379,9 @@ TEST(Tensor, FromAndToStream) { TensorToStream(oss, gpu_tensor, gpu_ctx); std::istringstream iss(oss.str()); - TensorFromStream(iss, &dst_tensor, gpu_ctx); + TensorFromStream( + iss, &dst_tensor, + *platform::DeviceContextPool::Instance().Get(platform::CPUPlace())); int* dst_ptr = dst_tensor.mutable_data(platform::CPUPlace()); for (int i = 0; i < 6; ++i) { diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt index fc656613010d18608c2780c96212f2d7bb674704..2c5364b72402befd2c34e5f542ce5c6b2add621d 100644 --- a/paddle/fluid/inference/CMakeLists.txt +++ b/paddle/fluid/inference/CMakeLists.txt @@ -13,7 +13,7 @@ set(FLUID_CORE_MODULES proto_desc memory lod_tensor executor) # TODO(panyx0718): Should this be called paddle_fluid_inference_api_internal? cc_library(paddle_fluid_api SRCS io.cc - DEPS ${FLUID_CORE_MODULES} ${GLOB_OP_LIB}) + DEPS ${FLUID_CORE_MODULES} ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS}) get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES) get_property(cuda_modules GLOBAL PROPERTY CUDA_MODULES) diff --git a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc index 21fd8d2df49698d7fa38d906f7921f092ca916a3..c6b7c05f784b7c44fe30dd69529fe48405538ab6 100644 --- a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc +++ b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc @@ -114,7 +114,7 @@ void TensorRtSubgraphPass::CreateTensorRTOp(framework::ir::Node *node, // it is either an OP's input or an OP's output. auto &subgraph_nodes = *Agent(node).subgraph(); - for (size_t index = 0; index < block_desc.OpSize(); index++) { + for (size_t index = 0; index < block_desc.OpSize(); ++index) { framework::proto::OpDesc *op = block_desc.Op(index)->Proto(); auto correspond_node = subgraph_nodes[index]; PADDLE_ENFORCE_EQ(correspond_node->Name(), op->type()); diff --git a/paddle/fluid/inference/analysis/passes/ir_analysis_compose_pass.cc b/paddle/fluid/inference/analysis/passes/ir_analysis_compose_pass.cc index 38e9b1c5e7c19c89f94ce55324507b02da0c5160..267737e95cb51f9a2d72b70b171be779d6f52c8d 100644 --- a/paddle/fluid/inference/analysis/passes/ir_analysis_compose_pass.cc +++ b/paddle/fluid/inference/analysis/passes/ir_analysis_compose_pass.cc @@ -45,7 +45,8 @@ void IrAnalysisComposePass::InitTensorRTAttrs(Argument *argument) { std::unordered_set teller_set( {"mul", "conv2d", "pool2d", "relu", "softmax", "sigmoid", "depthwise_conv2d", "batch_norm", "concat", "tanh", "pad", - "elementwise_add", "dropout", "split", "prelu", "conv2d_transpose"}); + "elementwise_add", "elementwise_mul", "dropout", "split", "prelu", + "conv2d_transpose"}); if (!node->IsOp()) return false; if (teller_set.count(node->Op()->Type())) { diff --git a/paddle/fluid/inference/tensorrt/CMakeLists.txt b/paddle/fluid/inference/tensorrt/CMakeLists.txt index e09705e3c69eb2b2370bd1ad2d9cf178ef041ee6..17f6c6d9f10abf99fd93364d1356e2b3ef1b3934 100644 --- a/paddle/fluid/inference/tensorrt/CMakeLists.txt +++ b/paddle/fluid/inference/tensorrt/CMakeLists.txt @@ -1,4 +1,4 @@ -nv_library(tensorrt_engine SRCS engine.cc DEPS framework_proto device_context) +nv_library(tensorrt_engine SRCS engine.cc DEPS ${GLOB_OPERATOR_DEPS} framework_proto device_context) nv_test(test_tensorrt SRCS test_tensorrt.cc DEPS dynload_cuda device_context dynamic_loader) nv_test(test_tensorrt_engine SRCS test_engine.cc DEPS dynload_cuda tensorrt_engine) add_subdirectory(plugin) diff --git a/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt b/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt index aa4126392bf1f4b16a123403b68f506d6f76b5a2..8dd6e8453f971b980a3dcb95c10734537e8333dd 100644 --- a/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt +++ b/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt @@ -1,39 +1,40 @@ # Add TRT tests nv_library(tensorrt_converter - SRCS mul_op.cc conv2d_op.cc fc_op.cc pool2d_op.cc elementwise_op.cc -batch_norm_op.cc activation_op.cc softmax_op.cc concat_op.cc dropout_op.cc -pad_op.cc split_op.cc prelu_op.cc - DEPS tensorrt_engine tensorrt_plugin operator scope framework_proto op_registry) + SRCS mul_op.cc conv2d_op.cc fc_op.cc pool2d_op.cc elementwise_op.cc + batch_norm_op.cc activation_op.cc softmax_op.cc concat_op.cc dropout_op.cc + pad_op.cc split_op.cc prelu_op.cc + DEPS tensorrt_engine tensorrt_plugin operator scope framework_proto op_registry) nv_test(test_op_converter SRCS test_op_converter.cc DEPS - ${FLUID_CORE_MODULES} tensorrt_engine tensorrt_converter) + ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine tensorrt_converter) nv_test(test_io_converter SRCS test_io_converter.cc io_converter.cc DEPS dynload_cuda dynamic_loader lod_tensor) nv_test(test_trt_mul_op SRCS test_mul_op.cc mul_op.cc - DEPS ${FLUID_CORE_MODULES} tensorrt_engine mul_op SERIAL) + DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine mul_op SERIAL) nv_test(test_trt_fc_op SRCS test_fc_op.cc fc_op.cc - DEPS ${FLUID_CORE_MODULES} tensorrt_engine mul_op SERIAL) + DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine mul_op SERIAL) nv_test(test_trt_activation_op SRCS test_activation_op.cc activation_op.cc - DEPS ${FLUID_CORE_MODULES} tensorrt_engine activation_op SERIAL) + DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine activation_op SERIAL) nv_test(test_trt_conv_op SRCS test_conv2d_op.cc conv2d_op.cc - DEPS ${FLUID_CORE_MODULES} tensorrt_engine conv_op conv_transpose_op SERIAL) + DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine conv_op conv_transpose_op SERIAL) nv_test(test_trt_pool2d_op SRCS test_pool2d_op.cc pool2d_op.cc - DEPS ${FLUID_CORE_MODULES} tensorrt_engine pool_op SERIAL) + DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine pool_op SERIAL) nv_test(test_trt_elementwise_op SRCS test_elementwise_op.cc elementwise_op.cc - DEPS ${FLUID_CORE_MODULES} tensorrt_engine elementwise_add_op SERIAL) + DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine tensorrt_plugin + elementwise_add_op elementwise_mul_op SERIAL) nv_test(test_trt_softmax_op SRCS test_softmax_op.cc softmax_op.cc - DEPS ${FLUID_CORE_MODULES} tensorrt_engine softmax_op SERIAL) + DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine softmax_op SERIAL) nv_test(test_trt_batch_norm_op SRCS test_batch_norm_op.cc batch_norm_op.cc - DEPS ${FLUID_CORE_MODULES} tensorrt_engine batch_norm_op SERIAL) + DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine batch_norm_op SERIAL) nv_test(test_trt_concat_op SRCS test_concat_op.cc concat_op.cc - DEPS ${FLUID_CORE_MODULES} tensorrt_engine concat_op SERIAL) + DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine concat_op SERIAL) nv_test(test_trt_dropout_op SRCS test_dropout_op.cc dropout_op.cc - DEPS ${FLUID_CORE_MODULES} tensorrt_engine dropout_op SERIAL) + DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine dropout_op SERIAL) nv_test(test_trt_pad_op SRCS test_pad_op.cc pad_op.cc - DEPS ${FLUID_CORE_MODULES} tensorrt_engine pad_op SERIAL) + DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine pad_op SERIAL) nv_test(test_trt_split_op SRCS test_split_op.cc split_op.cc - DEPS ${FLUID_CORE_MODULES} tensorrt_engine tensorrt_plugin - split_op concat_op SERIAL) + DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine tensorrt_plugin + split_op concat_op SERIAL) nv_test(test_trt_prelu_op SRCS test_prelu_op.cc prelu_op.cc - DEPS ${FLUID_CORE_MODULES} tensorrt_engine tensorrt_plugin + DEPS ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS} tensorrt_engine tensorrt_plugin prelu_op SERIAL) diff --git a/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc b/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc index 1af091fabd2aea03a85b2d19fd556b18cdd65e3b..6975086193d991dc9f53b2d9d988f960c8ad118d 100644 --- a/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc @@ -4,7 +4,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at -http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -13,11 +13,25 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/inference/tensorrt/convert/op_converter.h" +#include "paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.h" namespace paddle { namespace inference { namespace tensorrt { +static bool CheckDims(const nvinfer1::Dims& dims_x, + const nvinfer1::Dims& dims_y) { + if (dims_x.nbDims != dims_y.nbDims) { + return false; + } + for (int i = 0; i < dims_x.nbDims; i++) { + if (dims_x.d[i] != dims_y.d[i]) { + return false; + } + } + return true; +} + class ElementwiseWeightOpConverter : public OpConverter { public: ElementwiseWeightOpConverter() {} @@ -26,7 +40,7 @@ class ElementwiseWeightOpConverter : public OpConverter { // Here the two nullptr looks strange, that's because the // framework::OpDesc's constructor is strange. framework::OpDesc op_desc(op, nullptr); - VLOG(3) << "convert a fluid elementwise op to tensorrt IScaleLayer"; + VLOG(3) << "Convert a fluid elementwise op to TensorRT IScaleLayer"; PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); PADDLE_ENFORCE_EQ(op_desc.Input("Y").size(), 1); // Y is a weight @@ -106,10 +120,12 @@ class ElementwiseTensorOpConverter : public OpConverter { ElementwiseTensorOpConverter() {} void operator()(const framework::proto::OpDesc& op, const framework::Scope& scope, bool test_mode) override { + auto op_pair = ops.find(op_type_); + PADDLE_ENFORCE(op_pair != ops.end(), "Wrong elementwise op type!"); + // Here the two nullptr looks strange, that's because the // framework::OpDesc's constructor is strange. framework::OpDesc op_desc(op, nullptr); - VLOG(3) << "convert a fluid elementwise op to tensorrt IScaleLayer"; PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); PADDLE_ENFORCE_EQ(op_desc.Input("Y").size(), 1); // Y is a weight @@ -120,29 +136,35 @@ class ElementwiseTensorOpConverter : public OpConverter { nvinfer1::Dims dims_x = X->getDimensions(); nvinfer1::Dims dims_y = Y->getDimensions(); - // The two input tensor should have the same dims - PADDLE_ENFORCE(dims_x.nbDims >= 3); - if (dims_x.nbDims == dims_y.nbDims) { - for (int i = 0; i < dims_x.nbDims; i++) { - if (dims_x.d[i] != dims_y.d[i]) - PADDLE_THROW("TensorRT unsupported tensor shape for Elementwise op!"); - } - } else { - PADDLE_THROW("TensorRT unsupported tensor shape for Elementwise op!"); - } + int axis = boost::get(op_desc.GetAttr("axis")); + auto output_name = op_desc.Output("Out")[0]; + if (CheckDims(dims_x, dims_y)) { + // The two input tensor should have the same dims + VLOG(3) << "Convert a fluid elementwise op to TensorRT IElementWiseLayer"; - auto op_pair = ops.find(op_type_); - if (op_pair == ops.end()) { - PADDLE_THROW("Wrong elementwise op type!"); - } - nvinfer1::IElementWiseLayer* layer = TRT_ENGINE_ADD_LAYER( - engine_, ElementWise, *const_cast(X), - *const_cast(Y), op_pair->second); + nvinfer1::IElementWiseLayer* layer = TRT_ENGINE_ADD_LAYER( + engine_, ElementWise, *const_cast(X), + *const_cast(Y), op_pair->second); - auto output_name = op_desc.Output("Out")[0]; - layer->setName(("elementwise (Output: " + output_name + ")").c_str()); - layer->getOutput(0)->setName(output_name.c_str()); - engine_->SetITensor(output_name, layer->getOutput(0)); + layer->setName(("elementwise (Output: " + output_name + ")").c_str()); + layer->getOutput(0)->setName(output_name.c_str()); + engine_->SetITensor(output_name, layer->getOutput(0)); + } else { + VLOG(3) << "Convert a fluid elementwise op to TensorRT " + "ElementWisePluginLayer"; + + plugin::ElementWisePlugin* plugin = + new plugin::ElementWisePlugin(op_pair->second, dims_x, dims_y, axis); + plugin->AddInput(X); + plugin->AddInput(Y); + nvinfer1::IPluginLayer* layer = engine_->AddPlugin( + const_cast(plugin->GetInputs().data()), 2, + reinterpret_cast(plugin)); + + layer->setName(("elementwise (Output: " + output_name + ")").c_str()); + layer->getOutput(0)->setName(output_name.c_str()); + engine_->SetITensor(output_name, layer->getOutput(0)); + } if (test_mode) { // the test framework can not determine which is the // output, so place the declaration inside. engine_->DeclareOutput(output_name); diff --git a/paddle/fluid/inference/tensorrt/convert/op_converter.h b/paddle/fluid/inference/tensorrt/convert/op_converter.h index d309d94c560f2b484fac6b6cd40cc2704d641069..d61d635ed707bc455d495f2420925a3585234b5c 100644 --- a/paddle/fluid/inference/tensorrt/convert/op_converter.h +++ b/paddle/fluid/inference/tensorrt/convert/op_converter.h @@ -61,7 +61,7 @@ class OpConverter { // TODO(xingzhaolong): all mul, sub, div // static std::unordered_set add_weight_op_set {"add", "mul", // "sub", "div"}; - static std::unordered_set add_weight_op_set{"add"}; + static std::unordered_set add_weight_op_set{"add", "mul"}; PADDLE_ENFORCE_EQ(op_desc.Input("Y").size(), 1UL); int op_type_len = op_desc.Type().size(); std::string op_type = op_desc.Type().substr(op_type_len - 3, op_type_len); diff --git a/paddle/fluid/inference/tensorrt/convert/prelu_op.cc b/paddle/fluid/inference/tensorrt/convert/prelu_op.cc index 337885e6baa578d1f733e40f09f0586eba393333..dbdff85ddebc85bc51938a204a48affe485b8240 100644 --- a/paddle/fluid/inference/tensorrt/convert/prelu_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/prelu_op.cc @@ -54,7 +54,7 @@ class PReluOpConverter : public OpConverter { TensorRTEngine::Weight alpha_rt(nvinfer1::DataType::kFLOAT, static_cast(alpha_data), alpha_tensor_device->numel()); - PReluPlugin* plugin = new PReluPlugin(alpha_rt, mode); + plugin::PReluPlugin* plugin = new plugin::PReluPlugin(alpha_rt, mode); nvinfer1::IPluginLayer* layer = engine_->AddPlugin(&input, input_num, plugin); // keep alpha tensor to avoid release it's memory diff --git a/paddle/fluid/inference/tensorrt/convert/split_op.cc b/paddle/fluid/inference/tensorrt/convert/split_op.cc index 159854ab593fbbfa1e08a9ca148f1b3a636d668c..6620c76318f99092236d4009037f2ce01b295164 100644 --- a/paddle/fluid/inference/tensorrt/convert/split_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/split_op.cc @@ -50,7 +50,7 @@ class SplitOpConverter : public OpConverter { PADDLE_ENFORCE(output_lengths.size() == output_num); // - SplitPlugin* plugin = new SplitPlugin(axis, output_lengths); + plugin::SplitPlugin* plugin = new plugin::SplitPlugin(axis, output_lengths); nvinfer1::IPluginLayer* layer = engine_->AddPlugin(&input, input_num, plugin); diff --git a/paddle/fluid/inference/tensorrt/convert/test_elementwise_op.cc b/paddle/fluid/inference/tensorrt/convert/test_elementwise_op.cc index 7537d02a35b66a41c158cd8eb1b1e5d4107e7d84..cc967464a5f29151a061e99cda6870f9f370ec1b 100644 --- a/paddle/fluid/inference/tensorrt/convert/test_elementwise_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/test_elementwise_op.cc @@ -20,13 +20,12 @@ namespace paddle { namespace inference { namespace tensorrt { -TEST(elementwise_op, add_weight_test) { +TEST(elementwise_op, add_weight) { std::unordered_set parameters({"elementwise_add-Y"}); framework::Scope scope; TRTConvertValidation validator(10, parameters, scope, 1 << 15); validator.DeclInputVar("elementwise_add-X", nvinfer1::DimsCHW(10, 3, 3)); validator.DeclParamVar("elementwise_add-Y", nvinfer1::Dims3(10, 1, 1)); - // validator.DeclParamVar("mul-Y", nvinfer1::Dims2(8, 2)); validator.DeclOutputVar("elementwise_add-Out", nvinfer1::DimsCHW(10, 3, 3)); // Prepare Op description @@ -44,30 +43,65 @@ TEST(elementwise_op, add_weight_test) { validator.Execute(8); } -TEST(elementwise_op, add_tensor_test) { - std::unordered_set parameters; - framework::Scope scope; - TRTConvertValidation validator(8, parameters, scope, 1 << 15); - validator.DeclInputVar("elementwise_add-X", nvinfer1::DimsCHW(10, 3, 3)); - validator.DeclInputVar("elementwise_add-Y", nvinfer1::Dims3(10, 3, 3)); - // validator.DeclParamVar("mul-Y", nvinfer1::Dims2(8, 2)); - validator.DeclOutputVar("elementwise_add-Out", nvinfer1::DimsCHW(10, 3, 3)); - - // Prepare Op description - framework::OpDesc desc; - desc.SetType("elementwise_add"); - desc.SetInput("X", {"elementwise_add-X"}); - desc.SetInput("Y", {"elementwise_add-Y"}); - desc.SetOutput("Out", {"elementwise_add-Out"}); - - // the defalut axis of elementwise op is -1 - - validator.SetOp(*desc.Proto()); +TEST(elementwise_op, native) { + for (std::string type : {"add", "mul"}) { + int batch_size = 8; + std::unordered_set parameters; + framework::Scope scope; + TRTConvertValidation validator(batch_size, parameters, scope, 1 << 15); + validator.DeclInputVar("elementwise_" + type + "-X", + nvinfer1::DimsCHW(10, 3, 3)); + validator.DeclInputVar("elementwise_" + type + "-Y", + nvinfer1::Dims3(10, 3, 3)); + validator.DeclOutputVar("elementwise_" + type + "-Out", + nvinfer1::DimsCHW(10, 3, 3)); + + // Prepare Op description + framework::OpDesc desc; + desc.SetType("elementwise_" + type); + desc.SetInput("X", {"elementwise_" + type + "-X"}); + desc.SetInput("Y", {"elementwise_" + type + "-Y"}); + desc.SetOutput("Out", {"elementwise_" + type + "-Out"}); + + int axis = -1; + desc.SetAttr("axis", axis); + + validator.SetOp(*desc.Proto()); + validator.Execute(batch_size); + } +} - validator.Execute(8); +TEST(elementwise_op, plugin) { + for (std::string type : {"add", "mul"}) { + int batch_size = 8; + std::unordered_set parameters; + framework::Scope scope; + TRTConvertValidation validator(batch_size, parameters, scope, 1 << 15); + validator.DeclInputVar("elementwise_" + type + "-X", + nvinfer1::DimsCHW(10, 3, 3)); + validator.DeclInputVar("elementwise_" + type + "-Y", + nvinfer1::Dims3(10, 1, 1)); + validator.DeclOutputVar("elementwise_" + type + "-Out", + nvinfer1::DimsCHW(10, 3, 3)); + + // Prepare Op description + framework::OpDesc desc; + desc.SetType("elementwise_" + type); + desc.SetInput("X", {"elementwise_" + type + "-X"}); + desc.SetInput("Y", {"elementwise_" + type + "-Y"}); + desc.SetOutput("Out", {"elementwise_" + type + "-Out"}); + + int axis = -1; + desc.SetAttr("axis", axis); + + validator.SetOp(*desc.Proto()); + validator.Execute(batch_size); + } } } // namespace tensorrt } // namespace inference } // namespace paddle + USE_OP(elementwise_add); +USE_OP(elementwise_mul); diff --git a/paddle/fluid/inference/tensorrt/convert/test_mul_op.cc b/paddle/fluid/inference/tensorrt/convert/test_mul_op.cc index 3d34cd7d5d0deca4d83a3f5b5ed0fb396c6acd56..282f53559aa75b2c7c252450e392e1996f9b1d81 100644 --- a/paddle/fluid/inference/tensorrt/convert/test_mul_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/test_mul_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include #include "paddle/fluid/framework/op_registry.h" diff --git a/paddle/fluid/inference/tensorrt/convert/ut_helper.h b/paddle/fluid/inference/tensorrt/convert/ut_helper.h index 0a6f171fc40a838fd81d6a51aca0430d5526f188..f313beb73bb0d21cab1d62859a46fcc76a373548 100644 --- a/paddle/fluid/inference/tensorrt/convert/ut_helper.h +++ b/paddle/fluid/inference/tensorrt/convert/ut_helper.h @@ -4,7 +4,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at -http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc index 208bd12b83aa19f01de9bcf4ada630c87defad5d..f739752cbc44805cb0fb3246385609cf16ba744a 100644 --- a/paddle/fluid/inference/tensorrt/engine.cc +++ b/paddle/fluid/inference/tensorrt/engine.cc @@ -257,9 +257,10 @@ void TensorRTEngine::freshDeviceId() { } nvinfer1::IPluginLayer *TensorRTEngine::AddPlugin( - nvinfer1::ITensor *const *inputs, int nbInputs, PluginTensorRT *plugin) { + nvinfer1::ITensor *const *inputs, int num_inputs, + plugin::PluginTensorRT *plugin) { owned_plugin_.emplace_back(plugin); - return infer_network_.get()->addPluginExt(inputs, nbInputs, *plugin); + return infer_network_.get()->addPluginExt(inputs, num_inputs, *plugin); } } // namespace tensorrt diff --git a/paddle/fluid/inference/tensorrt/engine.h b/paddle/fluid/inference/tensorrt/engine.h index 99420f19ba17d0bebf6dde3800d57c912256dc6b..f5b2c28ba9e6fefc1d6c14640d696c3bf3ac8249 100644 --- a/paddle/fluid/inference/tensorrt/engine.h +++ b/paddle/fluid/inference/tensorrt/engine.h @@ -128,7 +128,7 @@ class TensorRTEngine : public EngineBase { int GetRuntimeBatch(); int GetDevice() { return device_; } nvinfer1::IPluginLayer* AddPlugin(nvinfer1::ITensor* const* inputs, - int nbInputs, PluginTensorRT*); + int num_inputs, plugin::PluginTensorRT*); // A pointer to CPU memory is needed of the TRT weight. // Before TRT runs, fluid loads weight into GPU storage. @@ -171,7 +171,7 @@ class TensorRTEngine : public EngineBase { // The specific GPU id that the TensorRTEngine bounded to. int device_; - std::vector> owned_plugin_; + std::vector> owned_plugin_; // TensorRT related internal members template diff --git a/paddle/fluid/inference/tensorrt/plugin/CMakeLists.txt b/paddle/fluid/inference/tensorrt/plugin/CMakeLists.txt index b6811f9183aaa2313157bc5a8b2de1b7e447480f..40902694995f487343e3cfc727ad95daafa6b415 100644 --- a/paddle/fluid/inference/tensorrt/plugin/CMakeLists.txt +++ b/paddle/fluid/inference/tensorrt/plugin/CMakeLists.txt @@ -1 +1,3 @@ -nv_library(tensorrt_plugin SRCS trt_plugin.cc split_op_plugin.cu prelu_op_plugin.cu DEPS enforce device_context) +nv_library(tensorrt_plugin + SRCS trt_plugin.cc split_op_plugin.cu elementwise_op_plugin.cu prelu_op_plugin.cu + DEPS enforce device_context) diff --git a/paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.cu new file mode 100644 index 0000000000000000000000000000000000000000..9cd9026b7328083389b5af484bbb15c07b4908b0 --- /dev/null +++ b/paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.cu @@ -0,0 +1,138 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.h" + +namespace paddle { +namespace inference { +namespace tensorrt { +namespace plugin { + +namespace details { + +template +struct Add { + __device__ T operator()(const T& a, const T& b) const { return a + b; } +}; + +template +struct Mul { + __device__ T operator()(const T& a, const T& b) const { return a * b; } +}; + +template +__global__ void ColumnWiseKernel(Operator op, const T* x, const T* y, T* out, + int batch_size, int num_rows, int num_cols) { + for (int batch_id = 0; batch_id < batch_size; ++batch_id) { + int row = blockIdx.x; + for (; row < num_rows; row += gridDim.x) { + T value_y = y[batch_id * num_rows + row]; + int col = threadIdx.x; + int offset = (batch_id * num_rows + row) * num_cols; + for (; col < num_cols; col += blockDim.x) { + T value_x = x[offset + col]; + out[offset + col] = op(value_x, value_y); + } + } + } +} + +template +static void ElementWise(Operator op, const T* x, const T* y, T* out, + int batch_size, int prev, int midd, int post, + cudaStream_t stream) { + const int kThreadsPerBlock = 1024; + const int kMaximumBlocks = 65535; + if (prev == 1) { + int num_threads = (post > kThreadsPerBlock) ? kThreadsPerBlock + : (((post + 31) >> 5) << 5); + int num_blocks = (midd < kMaximumBlocks) ? midd : kMaximumBlocks; + ColumnWiseKernel<<>>( + op, x, y, out, batch_size, midd, post); + } else if (post == 1) { + PADDLE_THROW("Not implemented."); + } else { + PADDLE_THROW("Not implemented."); + } +} + +} // namespace details + +nvinfer1::Dims ElementWisePlugin::getOutputDimensions( + int index, const nvinfer1::Dims* input_dims, int num_inputs) { + PADDLE_ENFORCE_EQ(index, 0); + PADDLE_ENFORCE_EQ(num_inputs, 2); + PADDLE_ENFORCE_NOT_NULL(input_dims); + return input_dims[0]; +} + +int ElementWisePlugin::initialize() { + PADDLE_ENFORCE_GT(dims_y_.nbDims, 0); + + axis_ = (axis_ == -1) ? dims_x_.nbDims - dims_y_.nbDims : axis_; + int trimed_nb_dims = dims_y_.nbDims; + for (; trimed_nb_dims > 0; --trimed_nb_dims) { + if (dims_y_.d[trimed_nb_dims - 1] != 1) { + break; + } + } + dims_y_.nbDims = trimed_nb_dims; + + PADDLE_ENFORCE_GE(dims_x_.nbDims, dims_y_.nbDims + axis_); + PADDLE_ENFORCE_LT(axis_, dims_x_.nbDims); + + prev_size_ = 1; + midd_size_ = 1; + post_size_ = 1; + for (int i = 0; i < axis_; ++i) { + prev_size_ *= dims_x_.d[i]; + } + + for (int i = 0; i < dims_y_.nbDims; ++i) { + PADDLE_ENFORCE_EQ(dims_x_.d[i + axis_], dims_y_.d[i], + "Broadcast dimension mismatch."); + midd_size_ *= dims_y_.d[i]; + } + + for (int i = axis_ + dims_y_.nbDims; i < dims_x_.nbDims; ++i) { + post_size_ *= dims_x_.d[i]; + } + return 0; +} + +int ElementWisePlugin::enqueue(int batch_size, const void* const* inputs, + void** outputs, void* workspace, + cudaStream_t stream) { + const float* x = reinterpret_cast(inputs[0]); + const float* y = reinterpret_cast(inputs[1]); + float* out = reinterpret_cast(outputs[0]); + + if (type_ == nvinfer1::ElementWiseOperation::kSUM) { + details::ElementWise(details::Add(), x, y, out, batch_size, + prev_size_, midd_size_, post_size_, stream); + } else if (type_ == nvinfer1::ElementWiseOperation::kPROD) { + details::ElementWise(details::Mul(), x, y, out, batch_size, + prev_size_, midd_size_, post_size_, stream); + } else { + PADDLE_THROW("Not implemented."); + } + + return cudaGetLastError() != cudaSuccess; +} + +} // namespace plugin +} // namespace tensorrt +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.h b/paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.h new file mode 100644 index 0000000000000000000000000000000000000000..9c461f7a5c44ebb9d4a755288c69abff55e2dea8 --- /dev/null +++ b/paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.h @@ -0,0 +1,87 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin.h" + +namespace paddle { +namespace inference { +namespace tensorrt { +namespace plugin { + +class ElementWisePlugin : public PluginTensorRT { + public: + ElementWisePlugin(nvinfer1::ElementWiseOperation type, + nvinfer1::Dims const &dims_x, nvinfer1::Dims const &dims_y, + int axis) + : type_(type), + dims_x_(dims_x), + dims_y_(dims_y), + axis_(axis), + prev_size_(1), + midd_size_(1), + post_size_(1) {} + + ElementWisePlugin(void const *serial_data, size_t serial_length) { + deserializeBase(serial_data, serial_length); + DeserializeValue(&serial_data, &serial_length, &axis_); + DeserializeValue(&serial_data, &serial_length, &dims_x_); + DeserializeValue(&serial_data, &serial_length, &dims_y_); + } + + ElementWisePlugin *clone() const override { + // return new ElementWisePlugin(dims_x_, dims_y_, axis_); + return nullptr; + } + + const char *getPluginType() const override { return "elementwise"; } + + nvinfer1::Dims getOutputDimensions(int index, + const nvinfer1::Dims *input_dims, + int num_inputs) override; + + int initialize() override; + + // execute the layer + int enqueue(int batch_size, const void *const *inputs, void **outputs, + void *workspace, cudaStream_t stream); + + protected: + size_t getSerializationSize() override { + return SerializedSize(axis_) + SerializedSize(dims_x_) + + SerializedSize(dims_y_) + getBaseSerializationSize(); + } + + void serialize(void *buffer) override { + serializeBase(buffer); + SerializeValue(&buffer, axis_); + SerializeValue(&buffer, dims_x_); + SerializeValue(&buffer, dims_y_); + } + + nvinfer1::ElementWiseOperation type_; + nvinfer1::Dims dims_x_; + nvinfer1::Dims dims_y_; + int axis_; + int prev_size_; + int midd_size_; + int post_size_; +}; + +} // namespace plugin +} // namespace tensorrt +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.cu index 0f1ca112955afeecbf82b26324b77aa8def2ad9f..e8f4254402a5d8a5e6c5a2384bf9fbe48341956e 100644 --- a/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.cu +++ b/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.cu @@ -20,6 +20,7 @@ namespace paddle { namespace inference { namespace tensorrt { +namespace plugin { static const int CUDA_NUM_THREADS = 1024; static const int CUDA_MAX_NUM_BLOCKS = 65535; @@ -126,6 +127,7 @@ int PReluPlugin::enqueue(int batchSize, const void *const *inputs, return cudaGetLastError() != cudaSuccess; } +} // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.h b/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.h index aa0f865c89be2dc20d3a30314ec02fd0b425b2fe..0db56a310b072e64425f70ac23267ec72353e54b 100644 --- a/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.h +++ b/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.h @@ -21,6 +21,7 @@ namespace paddle { namespace inference { namespace tensorrt { +namespace plugin { class PReluPlugin : public PluginTensorRT { TensorRTEngine::Weight alpha_; @@ -63,6 +64,7 @@ class PReluPlugin : public PluginTensorRT { void *workspace, cudaStream_t stream) override; }; +} // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/plugin/serialize.h b/paddle/fluid/inference/tensorrt/plugin/serialize.h index 50c0b17d78327e22b0aa81fdac6958e80a30dfe8..ce859f16fc87479adf090687121ff06951b5684c 100644 --- a/paddle/fluid/inference/tensorrt/plugin/serialize.h +++ b/paddle/fluid/inference/tensorrt/plugin/serialize.h @@ -14,10 +14,15 @@ #pragma once -#include #include #include #include +#include "paddle/fluid/platform/enforce.h" + +namespace paddle { +namespace inference { +namespace tensorrt { +namespace plugin { template inline void SerializeValue(void** buffer, T const& value); @@ -26,7 +31,7 @@ template inline void DeserializeValue(void const** buffer, size_t* buffer_size, T* value); -namespace { +namespace details { template struct Serializer {}; @@ -36,10 +41,12 @@ struct Serializer::value || std::is_enum::value || std::is_pod::value>::type> { static size_t SerializedSize(T const& value) { return sizeof(T); } + static void Serialize(void** buffer, T const& value) { std::memcpy(*buffer, &value, sizeof(T)); reinterpret_cast(*buffer) += sizeof(T); } + static void Deserialize(void const** buffer, size_t* buffer_size, T* value) { assert(*buffer_size >= sizeof(T)); std::memcpy(value, *buffer, sizeof(T)); @@ -51,10 +58,12 @@ struct Serializer::value || template <> struct Serializer { static size_t SerializedSize(const char* value) { return strlen(value) + 1; } + static void Serialize(void** buffer, const char* value) { - std::strcpy(static_cast(*buffer), value); + std::strcpy(static_cast(*buffer), value); // NOLINT reinterpret_cast(*buffer) += strlen(value) + 1; } + static void Deserialize(void const** buffer, size_t* buffer_size, const char** value) { *value = static_cast(*buffer); @@ -73,39 +82,46 @@ struct Serializer, static size_t SerializedSize(std::vector const& value) { return sizeof(value.size()) + value.size() * sizeof(T); } + static void Serialize(void** buffer, std::vector const& value) { SerializeValue(buffer, value.size()); size_t nbyte = value.size() * sizeof(T); std::memcpy(*buffer, value.data(), nbyte); reinterpret_cast(*buffer) += nbyte; } + static void Deserialize(void const** buffer, size_t* buffer_size, std::vector* value) { size_t size; DeserializeValue(buffer, buffer_size, &size); value->resize(size); size_t nbyte = value->size() * sizeof(T); - assert(*buffer_size >= nbyte); + PADDLE_ENFORCE_GE(*buffer_size, nbyte); std::memcpy(value->data(), *buffer, nbyte); reinterpret_cast(*buffer) += nbyte; *buffer_size -= nbyte; } }; -} // namespace +} // namespace details template inline size_t SerializedSize(T const& value) { - return Serializer::SerializedSize(value); + return details::Serializer::SerializedSize(value); } template inline void SerializeValue(void** buffer, T const& value) { - return Serializer::Serialize(buffer, value); + return details::Serializer::Serialize(buffer, value); } template inline void DeserializeValue(void const** buffer, size_t* buffer_size, T* value) { - return Serializer::Deserialize(buffer, buffer_size, value); + return details::Serializer::Deserialize(buffer, buffer_size, value); } + +} // namespace plugin +} // namespace tensorrt +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.cu index bd6a44dcc14d50cddb879763a93abf4297494ec9..4adea2db1ee80fb20adba3cf4141a6485a1065a0 100644 --- a/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.cu +++ b/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.cu @@ -12,26 +12,26 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include -#include #include "paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h" namespace paddle { namespace inference { namespace tensorrt { +namespace plugin { -nvinfer1::Dims SplitPlugin::getOutputDimensions(int index, - const nvinfer1::Dims* inputDims, - int nbInputs) { - assert(nbInputs == 1); - assert(index < this->getNbOutputs()); - nvinfer1::Dims const& input_dims = inputDims[0]; - nvinfer1::Dims output_dims = input_dims; +nvinfer1::Dims SplitPlugin::getOutputDimensions( + int index, const nvinfer1::Dims* input_dims, int num_inputs) { + PADDLE_ENFORCE_EQ(num_inputs, 1); + PADDLE_ENFORCE_LT(index, this->getNbOutputs()); + + nvinfer1::Dims output_dims = input_dims[0]; output_dims.d[axis_] = output_length_.at(index); return output_dims; } int SplitPlugin::initialize() { + PADDLE_ENFORCE_LE(axis_, nvinfer1::Dims::MAX_DIMS); + std::vector segment_offsets(1, 0); for (int i = 0; i < this->getNbOutputs(); ++i) { segment_offsets.push_back(segment_offsets.back() + output_length_[i]); @@ -76,6 +76,7 @@ int SplitPlugin::enqueue(int batchSize, const void* const* inputs, return cudaGetLastError() != cudaSuccess; } -} // tensorrt -} // inference -} // paddle +} // namespace plugin +} // namespace tensorrt +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h b/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h index 7281e40c331550de472df49c57b1d9a5226842d5..b5b6e69992b057a1478f61457b4ae6f5f1619b4d 100644 --- a/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h +++ b/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h @@ -14,61 +14,58 @@ #pragma once +#include #include "paddle/fluid/inference/tensorrt/plugin/trt_plugin.h" namespace paddle { namespace inference { namespace tensorrt { +namespace plugin { class SplitPlugin : public PluginTensorRT { - int axis_; - std::vector output_length_; - int nx_, ny_, nz_; - std::vector segment_offsets_; + public: + SplitPlugin(int axis, std::vector const &output_lengths) + : axis_(axis), output_length_(output_lengths) {} + + SplitPlugin(void const *serial_data, size_t serial_length) { + deserializeBase(serial_data, serial_length); + DeserializeValue(&serial_data, &serial_length, &axis_); + DeserializeValue(&serial_data, &serial_length, &output_length_); + } + + SplitPlugin *clone() const override { + return new SplitPlugin(axis_, output_length_); + } + + const char *getPluginType() const override { return "split"; } + int getNbOutputs() const override { return output_length_.size(); } + nvinfer1::Dims getOutputDimensions(int index, + const nvinfer1::Dims *input_dims, + int num_inputs) override; + + int initialize() override; + int enqueue(int batchSize, const void *const *inputs, void **outputs, + void *workspace, cudaStream_t stream) override; protected: - virtual size_t getSerializationSize() override { + size_t getSerializationSize() override { return SerializedSize(axis_) + SerializedSize(output_length_) + getBaseSerializationSize(); } - // TRT will call this func when we need to serialize the configuration of - // tensorrt. - // It should not be called by users. - virtual void serialize(void *buffer) override { + void serialize(void *buffer) override { serializeBase(buffer); SerializeValue(&buffer, axis_); SerializeValue(&buffer, output_length_); } - public: - SplitPlugin(int axis, std::vector const &output_lengths) - : axis_(axis), output_length_(output_lengths) { - assert(axis <= nvinfer1::Dims::MAX_DIMS); - } - - // It was used for tensorrt deserialization. - // It should not be called by users. - SplitPlugin(void const *serialData, size_t serialLength) { - deserializeBase(serialData, serialLength); - DeserializeValue(&serialData, &serialLength, &axis_); - DeserializeValue(&serialData, &serialLength, &output_length_); - } - - SplitPlugin *clone() const override { - return new SplitPlugin(axis_, output_length_); - } - - virtual const char *getPluginType() const override { return "split"; } - virtual int getNbOutputs() const override { return output_length_.size(); } - virtual nvinfer1::Dims getOutputDimensions(int index, - const nvinfer1::Dims *inputs, - int nbInputDims) override; - virtual int initialize() override; - virtual int enqueue(int batchSize, const void *const *inputs, void **outputs, - void *workspace, cudaStream_t stream) override; + int axis_; + std::vector output_length_; + int nx_, ny_, nz_; + std::vector segment_offsets_; }; -} // tensorrt -} // inference -} // paddle +} // namespace plugin +} // namespace tensorrt +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/plugin/trt_plugin.cc b/paddle/fluid/inference/tensorrt/plugin/trt_plugin.cc index 08016d84b15bc750738f3183d8d61a5c90862288..b0f4cff3ac184beeed2ebd3a4b7531d570c87075 100644 --- a/paddle/fluid/inference/tensorrt/plugin/trt_plugin.cc +++ b/paddle/fluid/inference/tensorrt/plugin/trt_plugin.cc @@ -17,6 +17,7 @@ namespace paddle { namespace inference { namespace tensorrt { +namespace plugin { void PluginTensorRT::serializeBase(void*& buffer) { SerializeValue(&buffer, input_dims_); @@ -25,12 +26,12 @@ void PluginTensorRT::serializeBase(void*& buffer) { SerializeValue(&buffer, data_format_); } -void PluginTensorRT::deserializeBase(void const*& serialData, - size_t& serialLength) { - DeserializeValue(&serialData, &serialLength, &input_dims_); - DeserializeValue(&serialData, &serialLength, &max_batch_size_); - DeserializeValue(&serialData, &serialLength, &data_type_); - DeserializeValue(&serialData, &serialLength, &data_format_); +void PluginTensorRT::deserializeBase(void const*& serial_data, + size_t& serial_length) { + DeserializeValue(&serial_data, &serial_length, &input_dims_); + DeserializeValue(&serial_data, &serial_length, &max_batch_size_); + DeserializeValue(&serial_data, &serial_length, &data_type_); + DeserializeValue(&serial_data, &serial_length, &data_format_); } size_t PluginTensorRT::getBaseSerializationSize() { @@ -44,18 +45,17 @@ bool PluginTensorRT::supportsFormat(nvinfer1::DataType type, (format == nvinfer1::PluginFormat::kNCHW)); } -void PluginTensorRT::configureWithFormat(const nvinfer1::Dims* inputDims, - int nbInputs, - const nvinfer1::Dims* outputDims, - int nbOutputs, nvinfer1::DataType type, - nvinfer1::PluginFormat format, - int maxBatchSize) { +void PluginTensorRT::configureWithFormat( + const nvinfer1::Dims* input_dims, int num_inputs, + const nvinfer1::Dims* output_dims, int num_outputs, nvinfer1::DataType type, + nvinfer1::PluginFormat format, int max_batch_size) { data_type_ = type; data_format_ = format; - input_dims_.assign(inputDims, inputDims + nbInputs); - max_batch_size_ = maxBatchSize; + input_dims_.assign(input_dims, input_dims + num_inputs); + max_batch_size_ = max_batch_size; } +} // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/plugin/trt_plugin.h b/paddle/fluid/inference/tensorrt/plugin/trt_plugin.h index 4d85e955a49b7dcccae158ea06b76419419797cf..86084829e150f8a39610319a8f2138f2b2fdec68 100644 --- a/paddle/fluid/inference/tensorrt/plugin/trt_plugin.h +++ b/paddle/fluid/inference/tensorrt/plugin/trt_plugin.h @@ -14,23 +14,30 @@ #pragma once -#include +#include #include -#include #include #include -#include "NvInfer.h" #include "paddle/fluid/inference/tensorrt/plugin/serialize.h" +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/profiler.h" + +DECLARE_bool(profile); namespace paddle { namespace inference { namespace tensorrt { +namespace plugin { class PluginTensorRT : public nvinfer1::IPluginExt { public: PluginTensorRT() {} + // It was used for TensorRT deserialization. + // It should not be called by users. PluginTensorRT(const void* serialized_data, size_t length) {} + virtual ~PluginTensorRT() {} + nvinfer1::Dims const& getInputDims(int index) const { return input_dims_.at(index); } @@ -38,43 +45,66 @@ class PluginTensorRT : public nvinfer1::IPluginExt { nvinfer1::DataType getDataType() const { return data_type_; } nvinfer1::PluginFormat getDataFormat() const { return data_format_; } virtual const char* getPluginVersion() const { return "1"; } + + void AddInput(nvinfer1::ITensor* input) { inputs_.push_back(input); } + std::vector& GetInputs() { return inputs_; } + + virtual nvinfer1::IPluginExt* clone() const = 0; + virtual const char* getPluginType() const = 0; + + // Following functions are inherit from nvinfer1::IPluginExt + // Get the number of outputs from the layer + int getNbOutputs() const { return 1; } + // Get the dimension of an output tensor + virtual nvinfer1::Dims getOutputDimensions(int index, + const nvinfer1::Dims* input_dims, + int num_inputs) = 0; + // Find the workspace size required by the layer size_t getWorkspaceSize(int) const override { return 0; } + + // Initialize the layer for execution. + // This is called when the engine is created. + int initialize() override { return 0; } + // Shutdown the layer. This is called when the engine is destroyed void terminate() override {} - virtual ~PluginTensorRT() {} + // Execute the layer + virtual int enqueue(int batch_size, const void* const* inputs, void** outputs, + void* workspace, cudaStream_t stream) = 0; + + // Find the size of the serialization buffer required + virtual size_t getSerializationSize() = 0; + // Serialize the layer config to buffer. + // TensorRT will call this func to serialize the configuration of TensorRT + // engine. It should not be called by users. + virtual void serialize(void* buffer) = 0; + // Check format support. The default is FLOAT32 and NCHW. bool supportsFormat(nvinfer1::DataType type, nvinfer1::PluginFormat format) const override; - void configureWithFormat(const nvinfer1::Dims* inputDims, int nbInputs, - const nvinfer1::Dims* outputDims, int nbOutputs, + // Configure the layer + void configureWithFormat(const nvinfer1::Dims* input_dims, int num_inputs, + const nvinfer1::Dims* output_dims, int num_outputs, nvinfer1::DataType type, nvinfer1::PluginFormat format, - int maxBatchSize) override; - - // *NOTE* The following functions need to be overrided in the subclass. - virtual nvinfer1::IPluginExt* clone() const = 0; - virtual const char* getPluginType() const = 0; - // Initialize the layer for execution. This is called when the engine is - // created. - int initialize() override { return 0; } - // Serialize the layer config to buffer. - virtual void serialize(void* buffer) = 0; - virtual size_t getSerializationSize() = 0; - virtual int enqueue(int batchSize, const void* const* inputs, void** outputs, - void* workspace, cudaStream_t stream) = 0; + int max_batch_size) override; protected: // Deserialize input_dims, max_batch_size, data_type, data_format - void deserializeBase(void const*& serialData, size_t& serialLength); + void deserializeBase(void const*& serial_data, // NOLINT + size_t& serial_length); // NOLINT size_t getBaseSerializationSize(); // Serialize input_dims, max_batch_size, data_type, data_format - void serializeBase(void*& buffer); + void serializeBase(void*& buffer); // NOLINT std::vector input_dims_; size_t max_batch_size_; nvinfer1::DataType data_type_; nvinfer1::PluginFormat data_format_; + + std::vector inputs_; }; +} // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/tests/api/tester_helper.h b/paddle/fluid/inference/tests/api/tester_helper.h index a4046914132cc713a707fc2a4d12087383d77fe5..e66ae2805766754d9d07877c31889dd421daf9f1 100644 --- a/paddle/fluid/inference/tests/api/tester_helper.h +++ b/paddle/fluid/inference/tests/api/tester_helper.h @@ -51,7 +51,7 @@ void PrintConfig(const PaddlePredictor::Config *config, bool use_analysis) { LOG(INFO) << *reinterpret_cast(config); return; } - LOG(INFO) << *config; + LOG(INFO) << *reinterpret_cast(config); } void CompareResult(const std::vector &outputs, diff --git a/paddle/fluid/memory/CMakeLists.txt b/paddle/fluid/memory/CMakeLists.txt index 709fc7e12e1db537ceece30c405c0e8a2582e8ca..e7268077643c3988c59a52bf54873f1e8db4619b 100644 --- a/paddle/fluid/memory/CMakeLists.txt +++ b/paddle/fluid/memory/CMakeLists.txt @@ -1,15 +1,12 @@ add_subdirectory(detail) - -cc_library(malloc SRCS malloc.cc DEPS buddy_allocator place enforce) +add_subdirectory(allocation) +cc_library(malloc SRCS malloc.cc DEPS place enforce allocator_facade) cc_library(memcpy SRCS memcpy.cc DEPS place) cc_library(memory DEPS malloc memcpy) - -cc_test(malloc_test SRCS malloc_test.cc DEPS malloc) - #if (WITH_GPU) # nv_test(pinned_memory_test SRCS pinned_memory_test.cu DEPS place memory) #endif() diff --git a/paddle/fluid/memory/allocation/CMakeLists.txt b/paddle/fluid/memory/allocation/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..4b7b9064dcde9b5209264257d51bbd976ba8eb85 --- /dev/null +++ b/paddle/fluid/memory/allocation/CMakeLists.txt @@ -0,0 +1,64 @@ +cc_library(allocator SRCS allocator.cc DEPS place) +cc_library(cpu_allocator SRCS cpu_allocator.cc DEPS allocator) +cc_library(best_fit_allocator SRCS best_fit_allocator.cc DEPS allocator) +cc_library(locked_allocator SRCS locked_allocator.cc DEPS allocator) +cc_library(buffered_allocator SRCS buffered_allocator.cc DEPS allocator) +cc_library(legacy_allocator SRCS legacy_allocator.cc DEPS allocator buddy_allocator) +cc_test(buffered_allocator_test SRCS buffered_allocator_test.cc DEPS best_fit_allocator locked_allocator buffered_allocator cpu_allocator) + +if (WITH_GPU) + nv_library(cuda_allocator SRCS cuda_allocator.cc DEPS allocator cuda_device_guard) +endif() + +cc_library(retry_allocator SRCS retry_allocator.cc DEPS allocator) + +if (WITH_GPU) + nv_test(best_fit_allocator_test + SRCS best_fit_allocator_test.cc + best_fit_allocator_test.cu + DEPS best_fit_allocator + locked_allocator + cpu_allocator + cuda_allocator + device_context + memcpy) +else() + cc_test(best_fit_allocator_test + SRCS best_fit_allocator_test.cc + DEPS best_fit_allocator + locked_allocator + cpu_allocator) +endif() + +nv_library(pinned_allocator SRCS pinned_allocator.cc DEPS allocator) +if (WITH_GPU) + set(AllocatorFacadeDeps gpu_info cuda_allocator pinned_allocator cuda_device_guard) +else () + set(AllocatorFacadeDeps) +endif() + +cc_library(aligned_allocator SRCS aligned_allocator.cc DEPS allocator) +cc_library(auto_increment_allocator SRCS auto_increment_allocator.cc DEPS allocator) +cc_library(zero_size_allocator SRCS zero_size_allocator.cc DEPS allocator) +cc_library(conditional_allocator SRCS conditional_allocator.cc DEPS allocator) +cc_library(allocator_strategy SRCS allocator_strategy.cc DEPS gflags) +cc_library(allocator_facade SRCS allocator_facade.cc DEPS + ${AllocatorFacadeDeps} + cpu_allocator + locked_allocator + best_fit_allocator + aligned_allocator + auto_increment_allocator + zero_size_allocator + conditional_allocator + retry_allocator + buffered_allocator + allocator_strategy + legacy_allocator + ) + +nv_test(allocation_and_eigen_test SRCS allocation_and_eigen_test.cu DEPS allocator_facade) + +cc_test(retry_allocator_test SRCS retry_allocator_test.cc DEPS retry_allocator best_fit_allocator locked_allocator cpu_allocator) + +cc_test(allocator_facade_test SRCS allocator_facade_test.cc DEPS allocator_facade) diff --git a/paddle/fluid/memory/allocation/aligned_allocator.cc b/paddle/fluid/memory/allocation/aligned_allocator.cc new file mode 100644 index 0000000000000000000000000000000000000000..efae280dbd47a1db476f9c371ba73eac96c30df6 --- /dev/null +++ b/paddle/fluid/memory/allocation/aligned_allocator.cc @@ -0,0 +1,31 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/memory/allocation/aligned_allocator.h" + +namespace paddle { +namespace memory { +namespace allocation { + +ThinAlignedAllocator::ThinAlignedAllocator( + std::shared_ptr underlyning_allocator) + : underlying_allocator_(std::move(underlyning_allocator)) {} + +bool ThinAlignedAllocator::IsAllocThreadSafe() const { + return underlying_allocator_->IsAllocThreadSafe(); +} + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/aligned_allocator.h b/paddle/fluid/memory/allocation/aligned_allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..fc1a8e9247b16374037bfde44449fd552b44c6b4 --- /dev/null +++ b/paddle/fluid/memory/allocation/aligned_allocator.h @@ -0,0 +1,100 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include "paddle/fluid/memory/allocation/allocator.h" + +namespace paddle { +namespace memory { +namespace allocation { + +// The aligned allocation and allocator will wrap a managed allocator, +// and returns the aligned pointer. +// +// NOTE(yy): For speed reason, I just use a template parameter to get +// alignment, however, it can be an private member if necessary. +// +// NOTE(yy): kAlignment must be 2^N. a `static_assert` should be added. +template +class AlignedAllocation : public Allocation { + static_assert(kAlignment > 0 && (kAlignment & (kAlignment - 1)) == 0, + "kAlignment must be 2^N"); + + public: + AlignedAllocation(AllocationPtr&& underlying_allocation, size_t size) + : Allocation(AlignedPtr(underlying_allocation->ptr()), + size + kAlignment - Offset(underlying_allocation->ptr()), + underlying_allocation->place()), + underlying_allocation_(std::move(underlying_allocation)) {} + + private: + static void* AlignedPtr(void* ptr) { + return reinterpret_cast(reinterpret_cast(ptr) + + Offset(ptr)); + } + + // Offset to aligned pointer. + // if ptr is already aligned, returns 0. + static size_t Offset(void* ptr) { + auto ptr_addr = reinterpret_cast(ptr); + intptr_t aligned_addr = (ptr_addr & ~(kAlignment - 1)); + intptr_t diff = aligned_addr - ptr_addr; + if (diff == 0) { + return 0; + } else { + return kAlignment + diff; + } + } + + AllocationPtr underlying_allocation_; +}; + +// Thin aligned allocator is trivial and used to generate a small size binary. +// +// NOTE(yy): This is a trick to make a template class. This class extract the +// common code into a `thin` class. So if there are multiple specification of +// the template class, the binary size will not extended too much. +// +// NOTE(yy): This could be an over design. If it harms readability of code, it +// could be removed later. +class ThinAlignedAllocator : public Allocator { + public: + explicit ThinAlignedAllocator( + std::shared_ptr underlyning_allocator); + + bool IsAllocThreadSafe() const; + + protected: + std::shared_ptr underlying_allocator_; +}; + +// An aligned allocator will allocate `size+kAlignment` allocation and adjust +// the pointer offset. +template +class AlignedAllocator : public ThinAlignedAllocator { + public: + using ThinAlignedAllocator::ThinAlignedAllocator; + + protected: + Allocation* AllocateImpl(size_t size, Allocator::Attr attr) override { + auto raw_allocation = + underlying_allocator_->Allocate(size + kAlignment, attr); + return new AlignedAllocation(std::move(raw_allocation), size); + } +}; + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/allocation_and_eigen_test.cu b/paddle/fluid/memory/allocation/allocation_and_eigen_test.cu new file mode 100644 index 0000000000000000000000000000000000000000..b61649e59d326a64aa806460feffc3a910b1cab8 --- /dev/null +++ b/paddle/fluid/memory/allocation/allocation_and_eigen_test.cu @@ -0,0 +1,48 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "gtest/gtest.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/for_range.h" +#include "unsupported/Eigen/CXX11/Tensor" + +// NOTE(yy): this unittest is not important. It just used for debugging. +// It can be removed later. +struct FillZero { + public: + float* ptr_; + + __device__ void operator()(size_t i) { ptr_[i] = 0.0f; } +}; + +namespace paddle { +TEST(Eigen, main) { + framework::Tensor tensor; + platform::CUDAPlace gpu(0); + float* ptr = tensor.mutable_data({10, 10}, gpu); + auto& dev_ctx = *reinterpret_cast( + platform::DeviceContextPool::Instance().Get(gpu)); + PADDLE_ENFORCE(cudaMemset(ptr, 0, sizeof(float) * 100)); + + platform::ForRange for_range(dev_ctx, 100); + for_range(FillZero{ptr}); + dev_ctx.Wait(); + + auto eigen_vec = framework::EigenVector::Flatten(tensor); + auto& eigen_dev = *dev_ctx.eigen_device(); + eigen_vec.device(eigen_dev) = eigen_vec.constant(0.0f); +} +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/allocation_with_underlying.h b/paddle/fluid/memory/allocation/allocation_with_underlying.h new file mode 100644 index 0000000000000000000000000000000000000000..69f78667d7d33c59245a9890b9a2ce469f629450 --- /dev/null +++ b/paddle/fluid/memory/allocation/allocation_with_underlying.h @@ -0,0 +1,33 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/memory/allocation/allocator.h" + +namespace paddle { +namespace memory { +namespace allocation { + +class AllocationWithUnderlying : public Allocation { + public: + explicit AllocationWithUnderlying(AllocationPtr allocation) + : Allocation(allocation->ptr(), allocation->size(), allocation->place()), + allocation_(std::move(allocation)) {} + AllocationPtr allocation_; +}; + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/allocator.cc b/paddle/fluid/memory/allocation/allocator.cc new file mode 100644 index 0000000000000000000000000000000000000000..8fb8a5fb897a736d7515951ba08c633da9a7706c --- /dev/null +++ b/paddle/fluid/memory/allocation/allocator.cc @@ -0,0 +1,45 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/memory/allocation/allocator.h" + +#include + +namespace paddle { +namespace memory { +namespace allocation { +Allocation::~Allocation() {} + +Allocator::~Allocator() {} + +bool Allocator::IsAllocThreadSafe() const { return false; } + +AllocationPtr Allocator::Allocate(size_t size, Allocator::Attr attr) { + auto ptr = AllocateImpl(size, attr); + ptr->set_allocator(this); + return AllocationPtr(ptr); +} + +void Allocator::Free(Allocation* allocation) { delete allocation; } + +const char* BadAlloc::what() const noexcept { return msg_.c_str(); } + +void AllocationDeleter::operator()(Allocation* allocation) const { + auto* allocator = allocation->allocator(); + allocator->Free(allocation); +} + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/allocator.h b/paddle/fluid/memory/allocation/allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..f2b6f438c382275cab4ecf9aceea1c55e5885dee --- /dev/null +++ b/paddle/fluid/memory/allocation/allocator.h @@ -0,0 +1,145 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include +#include "paddle/fluid/platform/place.h" + +namespace paddle { +namespace memory { +namespace allocation { + +// Exception when `Alloc`/`AllocShared` failed +class BadAlloc : public std::exception { + public: + explicit BadAlloc(std::string msg) : msg_(std::move(msg)) {} + const char* what() const noexcept override; + + private: + std::string msg_; +}; + +class Allocation; +class AllocationDeleter { + public: + void operator()(Allocation* allocation) const; +}; + +class Allocator; +// Allocation is the object holding the actually pointer. Use +// `Allocation::ptr()` will returns the pointer that allocated. +// +// NOTE: this is the base class of Allocation. Each allocator can use its own +// allocation object. +// NOTE: the `Allocation::ptr()` could be nullptr, if the allocation size is 0 +class Allocation { + public: + Allocation(void* ptr, size_t size, platform::Place place) + : allocator_(nullptr), ptr_(ptr), size_(size), place_(place) {} + + Allocation(const Allocation& o) = delete; + Allocation& operator=(const Allocation& o) = delete; + + // Returns the holding pointer. + // NOTE: For performance consideration, it is better not to make this method + // as a virtual method. If we want to implement a `defragmentation` later, + // we might need to make `ptr_` field as a protected field, and add a virtual + // method like `defragmentation` to change `ptr_`. + void* ptr() const { return ptr_; } + + // Returns the size of this memory buffer, i.e., ptr() + size() - 1 is the + // last valid element. + // + // NOTE: Some allocator might alloc more memory than request. The size + // could larger than its request. For example, + // the AlignedAllocator will always allocate memory as size + kAlignment. + // The raw pointer might not aligned, so an offset might be added to raw + // the pointer. The size of this allocation will be + // `size + kAlignemnt - offset`. + size_t size() const { return size_; } + + const platform::Place& place() const { return place_; } + + Allocator* allocator() { return allocator_; } + + void set_allocator(Allocator* allocator) { allocator_ = allocator; } + + virtual ~Allocation(); + + private: + Allocator* allocator_; + void* ptr_; + size_t size_; + platform::Place place_; +}; + +using AllocationPtr = std::unique_ptr; + +// Base interface class of memory Allocator. +// To allocate a memory, allocator needs two parameters: +// 1. size of bytes. +// 2. Attribute of memory. +// NOTE: the attribute of memory might be ignored if the allocator does not +// care it. +class Allocator { + public: + enum Attr { + kDefault = 0, // Default attribute. Uses the fast or stablest allocation + // algorithm. + + kFixedHuge = 1, // The allocation may not be freed until the program + // ends. e.g., `Parameters` and `Momentum`. + + kFluxHuge = 2, // The allocation may create and freed frequently and the + // allocation is considerable huge. Like `activations` + // and gradients. + + kScratchpad = + 3, // The `Scratchpad` memory is allocated and freed very soon, + // usually within an operator or aux memory. + // Like CUDNN workspace, AUX memory in batch norm, etc. + // + // https://en.wikipedia.org/wiki/Scratchpad_memory + + kCrossDevice = + 4, // The memory used cross-device memory copy/communication. + // For example: + // 1. it can use an `pinned` memory for CPU-GPU + // communication. + // 2. it can use an `registered` memory for RDMA + // communication. + + NumOfAttrs = 5 // The number of all attributes. It is used internally. + }; + + virtual ~Allocator(); + + // Allocate an allocation. + AllocationPtr Allocate(size_t size, Allocator::Attr attr = kDefault); + + // True if the `Allocate` is thread safe. + virtual bool IsAllocThreadSafe() const; + + protected: + virtual void Free(Allocation* allocation); + virtual Allocation* AllocateImpl(size_t size, Allocator::Attr attr) = 0; + + private: + friend class AllocationDeleter; +}; + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/allocator_facade.cc b/paddle/fluid/memory/allocation/allocator_facade.cc new file mode 100644 index 0000000000000000000000000000000000000000..e207a853c8f782698b19d7f71caacf92f8df8e41 --- /dev/null +++ b/paddle/fluid/memory/allocation/allocator_facade.cc @@ -0,0 +1,271 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/memory/allocation/allocator.h" +#include +#include +#include +#include +#include +#include "paddle/fluid/memory/allocation/aligned_allocator.h" +#include "paddle/fluid/memory/allocation/allocator_facade.h" +#include "paddle/fluid/memory/allocation/allocator_strategy.h" +#include "paddle/fluid/memory/allocation/auto_increment_allocator.h" +#include "paddle/fluid/memory/allocation/best_fit_allocator.h" +#include "paddle/fluid/memory/allocation/conditional_allocator.h" +#include "paddle/fluid/memory/allocation/cpu_allocator.h" +#include "paddle/fluid/memory/allocation/legacy_allocator.h" +#include "paddle/fluid/memory/allocation/locked_allocator.h" +#include "paddle/fluid/memory/allocation/retry_allocator.h" +#include "paddle/fluid/memory/allocation/zero_size_allocator.h" +#include "paddle/fluid/platform/cpu_info.h" +#include "paddle/fluid/platform/place.h" +#ifdef PADDLE_WITH_CUDA +#include "paddle/fluid/memory/allocation/cuda_allocator.h" +#include "paddle/fluid/memory/allocation/pinned_allocator.h" +#include "paddle/fluid/platform/cuda_device_guard.h" +#include "paddle/fluid/platform/gpu_info.h" +#endif + +DEFINE_int64( + gpu_allocator_retry_time, 0, + "The retry time (milliseconds) when allocator fails " + "to allocate memory. No retry if this value is not greater than 0"); + +namespace paddle { +namespace memory { +namespace allocation { + +// TODO(yy): Dirty code here. This class should be configurable in runtime. +class CPUManagedAllocator : public Allocator { + public: + CPUManagedAllocator() : normal_allocator_(new CPUAllocator()) {} + + bool IsAllocThreadSafe() const override { return true; } + + protected: + Allocation* AllocateImpl(size_t size, Allocator::Attr attr) override { + return normal_allocator_->Allocate(size, attr).release(); + } + + private: + std::shared_ptr normal_allocator_; +}; + +// TODO(yy): Dirty code here. This class should be configurable in runtime. +class ChunkedAllocator : public Allocator { + public: + explicit ChunkedAllocator(std::unique_ptr system_allocator, + size_t max_chunk_size, size_t capacity = 1, + int64_t retry_time = -1) + : max_chunk_size_(max_chunk_size), retry_time_(retry_time) { + raw_allocator_ = std::move(system_allocator); + + if (max_chunk_size_ == 0) { + default_allocator_ = raw_allocator_; + } else { + if (capacity == 1) { + VLOG(10) << "Create BestFitAllocator with chunk_size " + << max_chunk_size_; + default_allocator_ = CreateAllocatorWithChunk(); + } else { + VLOG(10) << "Create AutoIncrementAllocator with chunk_size " + << max_chunk_size_ << " and capacity " << capacity; + default_allocator_ = std::make_shared( + [this] { return std::move(CreateAllocatorWithChunk()); }, capacity); + } + } + + auto* cond_allocator = new ConditionalAllocator(); + cond_allocator + ->AddAllocator( + [this](size_t size, Attr attr) { return size < max_chunk_size_; }, + default_allocator_) + .AddAllocator( + [](size_t size, Attr attr) { + return true; // default case + }, + raw_allocator_); + default_allocator_.reset(cond_allocator); + } + + ~ChunkedAllocator() override { + // Specify destruct order. + default_allocator_.reset(); + chunks_.clear(); + raw_allocator_.reset(); + } + + std::shared_ptr CreateAllocatorWithChunk() { + chunks_.emplace_back(raw_allocator_->Allocate(max_chunk_size_)); + auto* allocation = chunks_.back().get(); + std::unique_ptr allocator(new LockedAllocator( + std::unique_ptr(new BestFitAllocator(allocation)))); + + if (retry_time_ > 0) { + auto* retry_allocator = + new RetryAllocator(std::move(allocator), retry_time_); + allocator.reset(retry_allocator); + } + + return std::make_shared>(std::move(allocator)); + } + + bool IsAllocThreadSafe() const override { return true; } + + protected: + Allocation* AllocateImpl(size_t size, Allocator::Attr attr) override { + return default_allocator_->Allocate(size, attr).release(); + } + + protected: + size_t max_chunk_size_; + int64_t retry_time_; + std::vector chunks_; + std::shared_ptr raw_allocator_; + std::shared_ptr default_allocator_; +}; + +#ifdef PADDLE_WITH_CUDA + +class CUDAChunkedAllocator : public ChunkedAllocator { + public: + explicit CUDAChunkedAllocator(int dev_id) + : ChunkedAllocator(std::unique_ptr( + new CUDAAllocator(platform::CUDAPlace(dev_id))), + GetMaxChunkSize(dev_id), GetCapcity(dev_id), + GetRetryTime()) {} + + private: + static size_t GetMaxChunkSize(int dev_id) { + platform::CUDADeviceGuard guard(dev_id); + return platform::GpuMaxChunkSize(); + } + + static size_t GetCapcity(int dev_id) { + platform::CUDADeviceGuard guard(dev_id); + size_t available, total; + platform::GpuMemoryUsage(&available, &total); + size_t max_chunk_size = platform::GpuMaxChunkSize(); + return max_chunk_size == 0 ? 0 : available / max_chunk_size; + } + + static int64_t GetRetryTime() { return FLAGS_gpu_allocator_retry_time; } +}; + +class CUDAPinnedChunkedAllocator : public ChunkedAllocator { + public: + CUDAPinnedChunkedAllocator() + : ChunkedAllocator(std::unique_ptr(new CPUPinnedAllocator()), + platform::CUDAPinnedMaxChunkSize(), GetCapacity(), + -1) {} // never retry + + private: + static size_t GetCapacity() { + size_t total = platform::CpuTotalPhysicalMemory(); + size_t max_chunk_size = platform::CUDAPinnedMaxChunkSize(); + return max_chunk_size == 0 ? 0 : total / max_chunk_size; + } +}; + +#endif + +class AllocatorFacadePrivate { + public: + std::map> allocators_; + + ~AllocatorFacadePrivate() = default; + + AllocatorFacadePrivate() { + if (GetAllocatorStrategy() == AllocatorStrategy::kLegacy) { + InitLegacyAllocator(); + } else { + InitCPUAllocator(); + InitCUDAAllocator(); + InitCUDAPinnedAllocator(); + WrapZeroSizeAllocator(); + } + } + + private: + void InitLegacyAllocator() { + std::vector places{platform::CPUPlace()}; +#ifdef PADDLE_WITH_CUDA + for (int dev_id = 0; dev_id < platform::GetCUDADeviceCount(); ++dev_id) { + places.emplace_back(platform::CUDAPlace(dev_id)); + } + places.emplace_back(platform::CUDAPinnedPlace()); +#endif + for (auto& p : places) { + allocators_[p] = std::make_shared(p); + } + } + + void InitCPUAllocator() { + allocators_[platform::CPUPlace()] = std::make_shared(); + } + + void InitCUDAAllocator() { +#ifdef PADDLE_WITH_CUDA + int device_count = platform::GetCUDADeviceCount(); + for (int dev_id = 0; dev_id < device_count; ++dev_id) { + allocators_[platform::CUDAPlace(dev_id)] = + std::make_shared(dev_id); + } +#endif + } + + void InitCUDAPinnedAllocator() { +#ifdef PADDLE_WITH_CUDA + allocators_[platform::CUDAPinnedPlace()] = + std::make_shared(); +#endif + } + + void WrapZeroSizeAllocator() { + for (auto& pair : allocators_) { + pair.second = + std::make_shared(pair.second, pair.first); + } + } +}; + +// Pimpl. Make interface clean. +AllocatorFacade::AllocatorFacade() : m_(new AllocatorFacadePrivate()) {} +AllocatorFacade::~AllocatorFacade() { delete m_; } + +AllocatorFacade& AllocatorFacade::Instance() { + static AllocatorFacade instance; + return instance; +} + +std::shared_ptr AllocatorFacade::AllocShared( + const platform::Place& place, size_t size, Allocator::Attr attr) { + return std::shared_ptr(Alloc(place, size, attr).release(), + AllocationDeleter()); +} + +AllocationPtr AllocatorFacade::Alloc(const platform::Place& place, size_t size, + Allocator::Attr attr) { + auto it = m_->allocators_.find(place); + if (it == m_->allocators_.end()) { + throw BadAlloc( + string::Sprintf("No such allocator for the place, %s", place)); + } + return m_->allocators_.at(place)->Allocate(size, attr); +} + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/allocator_facade.h b/paddle/fluid/memory/allocation/allocator_facade.h new file mode 100644 index 0000000000000000000000000000000000000000..16da30bec0d9f524bd076fe76d15c2fcfa7edd3a --- /dev/null +++ b/paddle/fluid/memory/allocation/allocator_facade.h @@ -0,0 +1,57 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include "paddle/fluid/memory/allocation/allocator.h" +#include "paddle/fluid/platform/place.h" + +namespace paddle { +namespace memory { +namespace allocation { + +// Allocator Facade is the interface exposed to other modules. +// All the configuration or dirty code under development should +// be hidden behind this facade. +// +// NOTE(yy): This class is a singleton class. +// NOTE(yy): To create a stable ABI and make compilation faster. Here we use +// a Pimpl trick; +class AllocatorFacadePrivate; +class AllocatorFacade { + public: + ~AllocatorFacade(); + AllocatorFacade(const AllocatorFacade& o) = delete; + const AllocatorFacade& operator=(const AllocatorFacade& o) = delete; + + static AllocatorFacade& Instance(); + + // Allocate a shared allocation. + std::shared_ptr AllocShared( + const platform::Place& place, size_t size, + Allocator::Attr attr = Allocator::kDefault); + + // Allocate a unique allocation. + AllocationPtr Alloc(const platform::Place& place, size_t size, + Allocator::Attr attr = Allocator::kDefault); + + // TODO(yy): Allocate a Copy-On-Write allocation? + private: + AllocatorFacade(); + AllocatorFacadePrivate* m_; +}; + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/allocator_facade_test.cc b/paddle/fluid/memory/allocation/allocator_facade_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..802d79e15de253d4e67e35046bdf1d689258da6d --- /dev/null +++ b/paddle/fluid/memory/allocation/allocator_facade_test.cc @@ -0,0 +1,87 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/memory/allocation/allocator_facade.h" +#include +#include + +#ifdef PADDLE_WITH_CUDA +DECLARE_double(fraction_of_gpu_memory_to_use); +DECLARE_double(fraction_of_cuda_pinned_memory_to_use); +DECLARE_int64(gpu_allocator_retry_time); +#endif + +namespace paddle { +namespace memory { +namespace allocation { + +TEST(allocator, allocator) { +#ifdef PADDLE_WITH_CUDA + FLAGS_fraction_of_gpu_memory_to_use = 0.01; + FLAGS_gpu_allocator_retry_time = 500; + FLAGS_fraction_of_cuda_pinned_memory_to_use = 0.5; +#endif + + auto &instance = AllocatorFacade::Instance(); + platform::Place place; + size_t size = 1024; + + { + place = platform::CPUPlace(); + size = 1024; + auto cpu_allocation = instance.Alloc(place, size); + ASSERT_NE(cpu_allocation, nullptr); + ASSERT_NE(cpu_allocation->ptr(), nullptr); + ASSERT_EQ(cpu_allocation->place(), place); + ASSERT_EQ(cpu_allocation->size(), size); + } + +#ifdef PADDLE_WITH_CUDA + { + place = platform::CUDAPlace(0); + size = 1024; + auto gpu_allocation = instance.Alloc(place, size); + ASSERT_NE(gpu_allocation, nullptr); + ASSERT_NE(gpu_allocation->ptr(), nullptr); + ASSERT_EQ(gpu_allocation->place(), place); + ASSERT_GE(gpu_allocation->size(), size); + } + + { + // Allocate 2GB gpu memory + place = platform::CUDAPlace(0); + size = 2 * static_cast(1 << 30); + auto gpu_allocation = instance.Alloc(place, size); + ASSERT_NE(gpu_allocation, nullptr); + ASSERT_NE(gpu_allocation->ptr(), nullptr); + ASSERT_EQ(gpu_allocation->place(), place); + ASSERT_GE(gpu_allocation->size(), size); + } + + { + place = platform::CUDAPinnedPlace(); + size = (1 << 20); + auto cuda_pinned_allocation = + instance.Alloc(platform::CUDAPinnedPlace(), 1 << 20); + ASSERT_NE(cuda_pinned_allocation, nullptr); + ASSERT_NE(cuda_pinned_allocation->ptr(), nullptr); + ASSERT_EQ(cuda_pinned_allocation->place(), place); + ASSERT_GE(cuda_pinned_allocation->size(), size); + } +#endif +} + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/allocator_strategy.cc b/paddle/fluid/memory/allocation/allocator_strategy.cc new file mode 100644 index 0000000000000000000000000000000000000000..b46b1e9ae206b82f5810b4ba7345ebc60fb84285 --- /dev/null +++ b/paddle/fluid/memory/allocation/allocator_strategy.cc @@ -0,0 +1,41 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/memory/allocation/allocator_strategy.h" +#include "gflags/gflags.h" + +DEFINE_string( + allocator_strategy, "legacy", + "The allocation strategy. Legacy means the original allocator of Fluid." + "New means the experimental allocators of Fluid. in [legacy, new]"); + +namespace paddle { +namespace memory { +namespace allocation { + +static AllocatorStrategy GetStrategyFromFlag() { + return FLAGS_allocator_strategy == "legacy" + ? AllocatorStrategy::kLegacy + : AllocatorStrategy::kNaiveBestFit; +} + +AllocatorStrategy GetAllocatorStrategy() { + static AllocatorStrategy strategy = GetStrategyFromFlag(); + return strategy; +} + +void UseAllocatorStrategyGFlag() {} +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/allocator_strategy.h b/paddle/fluid/memory/allocation/allocator_strategy.h new file mode 100644 index 0000000000000000000000000000000000000000..9adbd879939c562cf84579a92f21d3b82e69a7e5 --- /dev/null +++ b/paddle/fluid/memory/allocation/allocator_strategy.h @@ -0,0 +1,30 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +namespace paddle { +namespace memory { +namespace allocation { + +enum class AllocatorStrategy { kLegacy, kNaiveBestFit }; + +extern AllocatorStrategy GetAllocatorStrategy(); + +// Do nothing, just make sure linker do not prune this file. +extern void UseAllocatorStrategyGFlag(); + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/auto_increment_allocator.cc b/paddle/fluid/memory/allocation/auto_increment_allocator.cc new file mode 100644 index 0000000000000000000000000000000000000000..c4785d2078601d7f9c5eeb7b902c7d1020340214 --- /dev/null +++ b/paddle/fluid/memory/allocation/auto_increment_allocator.cc @@ -0,0 +1,78 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/memory/allocation/auto_increment_allocator.h" + +namespace paddle { +namespace memory { +namespace allocation { +bool AutoIncrementAllocator::IsAllocThreadSafe() const { return true; } + +std::shared_ptr AutoIncrementAllocator::CreateNewAllocator() { + std::lock_guard guard(mtx_); + auto old_size = allocator_num_.load(); + PADDLE_ENFORCE_LT(old_size, underlying_allocators_.size(), + "Allocator number exceeds capacity %d", + underlying_allocators_.size()); + underlying_allocators_[old_size] = creator_(); + prev_success_allocator_ = old_size; + ++allocator_num_; + PADDLE_ENFORCE( + underlying_allocators_[old_size]->IsAllocThreadSafe(), + "the underlying allocator must be thread safe. This is a program " + "bug."); + return underlying_allocators_[old_size]; +} +Allocation *AutoIncrementAllocator::AllocateImpl(size_t size, + Allocator::Attr attr) { + auto cur = prev_success_allocator_.load(); + size_t retry_count = allocator_num_.load(); + size_t allocator_num = retry_count; + while (retry_count-- > 0) { // until there retry count is zero + try { + auto res = underlying_allocators_[cur]->Allocate(size, attr); + prev_success_allocator_ = cur; + return res.release(); + } catch (BadAlloc &) { + if (++cur >= allocator_num) { + cur = 0; + } + } catch (...) { + // if there is another type of allocation, just rethrow it. + throw; + } + } + + // This happens when the first allocator is exhausted and + // there are more than 1 allocation requests + // In this situation, the first allocation request would success + // and the second allocation request would fail if we do not use + // the newly created allocator by the first allocation request. + for (cur = allocator_num; cur < allocator_num_; ++cur) { + try { + auto ret = underlying_allocators_[cur]->Allocate(size, attr); + prev_success_allocator_ = cur; + return ret.release(); + } catch (BadAlloc &) { + } catch (...) { + throw; + } + } + // No suitable allocator + return CreateNewAllocator()->Allocate(size, attr).release(); +} + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/auto_increment_allocator.h b/paddle/fluid/memory/allocation/auto_increment_allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..382588f17a9748b1b0a356c0469c683f6c904778 --- /dev/null +++ b/paddle/fluid/memory/allocation/auto_increment_allocator.h @@ -0,0 +1,79 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include // NOLINT +#include +#include +#include // NOLINT +#include // NOLINT +#include +#include "paddle/fluid/memory/allocation/allocator.h" + +namespace paddle { +namespace memory { +namespace allocation { + +// The AutoIncrementAllocator manages many underlying allocators. If none of +// them can allocate the request memory, a new allocator will be created and +// invoke its `allocate` method. +// +// NOTE(yy): The AutoIncrementAllocator will prefer to allocate memory from +// the latest successful allocator. +// +// NOTE(yy): We may need to release an underlying allocator if it allocate +// nothing. However, it is generally not useful, since it will make performance +// undetermined. +// +// NOTE(yy): This allocator is only locked when creating new underlying +// allocator. The allocation requests from many threads may be dispatched +// to the same underlying allocator. So the underlying allocator must be +// thread safe. +// +// NOTE(zjl): Add capacity parameters to constructor. A high-performance +// thread-safe std::vector with varying size is hard to implement. +// Fortunately, we can get the total GPU memory and each chunk size. +// Therefore, we can get the suitable capacity of AutoIncrementAllocator. +class AutoIncrementAllocator : public Allocator { + public: + // Creator is the method to create ManagedAllocator + using AllocatorCreator = std::function()>; + + explicit AutoIncrementAllocator(AllocatorCreator&& creator, size_t capacity) + : creator_(std::move(creator)), underlying_allocators_(capacity) {} + + bool IsAllocThreadSafe() const override; + + private: + std::shared_ptr CreateNewAllocator(); + + protected: + Allocation* AllocateImpl(size_t size, Allocator::Attr attr) override; + + private: + AllocatorCreator creator_; + + std::vector underlying_allocators_; + std::atomic allocator_num_{0}; + + // Use std::atomic rather than std::mutex, since std::atomic is usually + // lock-free + std::atomic prev_success_allocator_{0}; + + std::mutex mtx_; +}; +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/best_fit_allocator.cc b/paddle/fluid/memory/allocation/best_fit_allocator.cc new file mode 100644 index 0000000000000000000000000000000000000000..6f3e512fb0b68df5e86eba3e50a255c18f75214f --- /dev/null +++ b/paddle/fluid/memory/allocation/best_fit_allocator.cc @@ -0,0 +1,168 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/memory/allocation/best_fit_allocator.h" +#include +#include +#include +#include + +namespace paddle { +namespace memory { +namespace allocation { + +static int HighestBitPos(size_t N) { + if (UNLIKELY(N == 0)) { + return 0; + } else { +#ifdef __GNUCC__ + return sizeof(unsigned int) * 8 - __builtin_clz(N); +#else + return static_cast(std::log2(N) + 1); +#endif + } +} + +BestFitAllocator::BestFitAllocator(Allocation* allocation) + : allocation_(allocation) { + details::Chunk chunk; + chunk.size_ = allocation_->size(); + chunk.offset_ = 0; + chunk.is_free = true; + chunks_.emplace_back(chunk); + free_chunks_[HighestBitPos(chunk.size_)].insert( + {chunk.size_, chunks_.begin()}); +} + +size_t BestFitAllocator::FreeSize() const { + size_t acc = 0; + for (auto& array_item : free_chunks_) { + for (auto& pair : array_item) { + acc += pair.second->size_; + } + } + return acc; +} + +BestFitAllocator::ListIt BestFitAllocator::SplitChunk(size_t request_size, + size_t free_chunk_offset, + MapIt bin_iterator) { + auto to_split_it = bin_iterator->second; + free_chunks_[free_chunk_offset].erase(bin_iterator); + + PADDLE_ENFORCE(to_split_it->is_free); + PADDLE_ENFORCE_GE(to_split_it->size_, request_size); + + auto remaining_size = to_split_it->size_ - request_size; + details::Chunk to_use; + details::Chunk remaining; + to_use.size_ = request_size; + to_use.is_free = false; + remaining.size_ = remaining_size; + remaining.is_free = true; + + // calc offsets + to_use.offset_ = to_split_it->offset_; + remaining.offset_ = to_use.offset_ + to_use.size_; + + // insert to chunk list + auto to_use_it = chunks_.insert(to_split_it, to_use); + if (remaining.size_ != 0) { + auto bit_size = static_cast(HighestBitPos(remaining.size_)); + free_chunks_[bit_size].insert( + {remaining.size_, chunks_.insert(to_split_it, remaining)}); + } + chunks_.erase(to_split_it); + return to_use_it; +} + +void BestFitAllocator::InsertFreeNode(const ListIt& it) { + auto pos = static_cast(HighestBitPos(it->size_)); + auto& free_map = free_chunks_[pos]; + free_map.insert({it->size_, it}); +} +void BestFitAllocator::EraseFreeNode(const ListIt& it) { + size_t pos = static_cast(HighestBitPos(it->size_)); + auto& free_map = free_chunks_[pos]; + auto map_it = free_map.find(it->size_); + while (map_it->second != it && map_it != free_map.end()) { + ++map_it; + } + PADDLE_ENFORCE(map_it != free_map.end()); + free_map.erase(map_it); +} +size_t BestFitAllocator::NumFreeChunks() const { + size_t num = 0; + for (auto& array_item : free_chunks_) { + num += array_item.size(); + } + return num; +} +void BestFitAllocator::Free(Allocation* allocation) { + auto* bf_allocation = dynamic_cast(allocation); + auto chunk_it = bf_allocation->ChunkIterator(); + PADDLE_ENFORCE(!chunk_it->is_free); + chunk_it->is_free = true; + if (chunk_it != chunks_.begin()) { + auto prev_it = chunk_it; + --prev_it; + + if (prev_it->is_free) { + // Merge Left. + EraseFreeNode(prev_it); + prev_it->size_ += chunk_it->size_; + chunks_.erase(chunk_it); + chunk_it = prev_it; + } + } + + auto next_it = chunk_it; + ++next_it; + if (next_it != chunks_.end() && next_it->is_free) { + EraseFreeNode(next_it); + chunk_it->size_ += next_it->size_; + chunks_.erase(next_it); + } + + InsertFreeNode(chunk_it); + delete allocation; +} +Allocation* BestFitAllocator::AllocateImpl(size_t size, Allocator::Attr attr) { + auto highest_set_bit = static_cast(HighestBitPos(size)); + MapIt map_it; + for (; highest_set_bit < free_chunks_.size(); ++highest_set_bit) { + map_it = free_chunks_[highest_set_bit].lower_bound(size); + if (map_it != free_chunks_[highest_set_bit].end()) { + break; + } + } + if (UNLIKELY(highest_set_bit == free_chunks_.size())) { + throw BadAlloc(string::Sprintf( + "Cannot allocate %d, All fragments size is %d", size, FreeSize())); + } + auto chunk_it = SplitChunk(size, highest_set_bit, map_it); + return new BestFitAllocation(this, chunk_it); +} + +BestFitAllocation::BestFitAllocation( + paddle::memory::allocation::BestFitAllocator* allocator, + typename details::ChunkList::iterator chunk_it) + : Allocation(reinterpret_cast( + reinterpret_cast(allocator->BasePtr()) + + chunk_it->offset_), + chunk_it->size_, allocator->Place()), + chunk_it_(chunk_it) {} +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/best_fit_allocator.h b/paddle/fluid/memory/allocation/best_fit_allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..4f10f2b53e8543d4197097f1cae8de765bceeb0f --- /dev/null +++ b/paddle/fluid/memory/allocation/best_fit_allocator.h @@ -0,0 +1,132 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include +#include +#include "paddle/fluid/memory/allocation/allocator.h" + +namespace paddle { +namespace memory { +namespace allocation { +namespace details { +struct Chunk { + bool is_free{true}; + // Offset to the base allocation. + uintptr_t offset_; + size_t size_; +}; + +// Here we use std::list to maintain chunk list. +// NOTE(yy): The traditional implementation of ChunkList is add `prev`/`next` +// pointers in `Chunk`, and split the allocation as `ChunkHeader` and +// `Payload`. Such as +// *-------*---------------*---------------*--------------* +// | Chunk | prev_ pointer | next_ pointer | payload .... | +// *-------*---------------*---------------*--------------* +// This implementation can just return a raw pointer, and we can get the list +// structure by the raw pointer. However, we cannot use the same code on GPU +// since CPU cannot access GPU memory directly. +// +// So we choose to use `std::list` and return an allocation instance, which +// contains the list node iterator, then we can unify CPU/GPU code. +// +// To return an allocation is not a bad idea, since Tensor/Vector should holds +// an allocation instead of raw pointer directly. +using ChunkList = std::list; + +// Here we use a multi-level map of free chunks. +// the map is +// MSB offset --> size --> [ChunkList::iterator] +// +// The time complexities: +// find a free chunk: +// O(logN), +// where N is the number of free nodes with the same MSB offset. +// find the position of a chunk iterator: +// O(logN + K), +// where N is the number of free nodes with the same MSB offset. +// where K is the number of free nodes with the same size. +// insert a free chunk: +// O(logN), +// where N is the number of free nodes with the same MSB offset. +// erase a free chunk: +// O(1) +using FreeChunkBin = + std::array, sizeof(size_t) * 8>; +} // namespace details + +class BestFitAllocator; + +// The BestFitAllocation maintain the List Node iterator. +class BestFitAllocation : public Allocation { + private: + using ListIt = typename details::ChunkList::iterator; + + public: + BestFitAllocation(BestFitAllocator* allocator, ListIt chunk_it); + + const ListIt& ChunkIterator() const { return chunk_it_; } + + private: + typename details::ChunkList::iterator chunk_it_; +}; + +// TODO(yy): Current BestFitAllocator is not thread-safe. To make it thread +// safe, we must wrap a locked_allocator. However, we can implement a thread +// safe allocator by locking each bin and chunks list independently. It will +// make BestFitAllocator faster in multi-thread situation. +// +// This allocator implements a best-fit allocator with merging the free nodes. +// +// To allocate a buffer, it will find the best-fit chunk. If the best-fit chunk +// is larger than request size, the original block will be split into two +// chunks. The first block will be used and the second block will be put into +// free chunks. +// +// To free an allocation, it will set the chunk of allocation to free and merge +// the prev-chunk and the next-chunk when possible. +class BestFitAllocator : public Allocator { + public: + explicit BestFitAllocator(Allocation* allocation); + + void* BasePtr() const { return allocation_->ptr(); } + + const platform::Place& Place() const { return allocation_->place(); } + + size_t NumFreeChunks() const; + + private: + size_t FreeSize() const; + using MapIt = typename details::FreeChunkBin::value_type::iterator; + using ListIt = typename details::ChunkList::iterator; + + ListIt SplitChunk(size_t request_size, size_t free_chunk_offset, + MapIt bin_iterator); + void EraseFreeNode(const ListIt& it); + void InsertFreeNode(const ListIt& it); + + protected: + void Free(Allocation* allocation) override; + Allocation* AllocateImpl(size_t size, Allocator::Attr attr) override; + + private: + Allocation* allocation_; // not owned + details::ChunkList chunks_; + details::FreeChunkBin free_chunks_; +}; +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/best_fit_allocator_test.cc b/paddle/fluid/memory/allocation/best_fit_allocator_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..4122b3d709e095c08b4fb2667103649a03eee64f --- /dev/null +++ b/paddle/fluid/memory/allocation/best_fit_allocator_test.cc @@ -0,0 +1,137 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/memory/allocation/best_fit_allocator.h" +#include // NOLINT +#include +#include "gtest/gtest.h" +#include "paddle/fluid/memory/allocation/cpu_allocator.h" +#include "paddle/fluid/memory/allocation/locked_allocator.h" + +namespace paddle { +namespace memory { +namespace allocation { + +class StubAllocation : public Allocation { + public: + explicit StubAllocation(size_t size) + : Allocation(0, size, platform::CPUPlace()) {} +}; + +TEST(BestFitAllocator, test_allocation) { + StubAllocation stub(4UL * 1024 * 1024 * 1024); + BestFitAllocator allocator(&stub); + { auto allocation = allocator.Allocate(64, allocator.kDefault); } + + { + auto allocation = allocator.Allocate(80, allocator.kDefault); + + { + auto best_fit_allocation = + dynamic_cast(allocation.get()); + ASSERT_NE(best_fit_allocation, nullptr); + ASSERT_FALSE(best_fit_allocation->ChunkIterator()->is_free); + ASSERT_EQ(best_fit_allocation->ChunkIterator()->offset_, 0); + ASSERT_EQ(allocation->size(), 80); + ASSERT_EQ(allocation->ptr(), nullptr); + } + + auto allocation2 = allocator.Allocate(60, allocator.kDefault); + auto allocation3 = allocator.Allocate(90, allocator.kDefault); + allocation2.reset(); + allocation2 = allocator.Allocate(30, allocator.kDefault); + + { + auto best_fit_allocation = + dynamic_cast(allocation2.get()); + ASSERT_EQ(best_fit_allocation->ChunkIterator()->offset_, 80); + } + allocation2.reset(); + allocation2 = allocator.Allocate(60, allocator.kDefault); + + { + auto best_fit_allocation = + dynamic_cast(allocation2.get()); + ASSERT_EQ(best_fit_allocation->ChunkIterator()->offset_, 80); + } + + allocation.reset(); + allocation2.reset(); + + allocation = allocator.Allocate(80 + 60, allocator.kDefault); + { + auto best_fit_allocation = + dynamic_cast(allocation.get()); + ASSERT_EQ(best_fit_allocation->ChunkIterator()->offset_, 0); + } + + allocation.reset(); + + allocation = allocator.Allocate(80, allocator.kDefault); + allocation2 = allocator.Allocate(60, allocator.kDefault); + allocation = nullptr; + allocation2 = nullptr; + allocation3 = nullptr; + + ASSERT_EQ(allocator.NumFreeChunks(), 1U); + } +} + +TEST(BestFitAllocator, test_concurrent_cpu_allocation) { + CPUAllocator allocator; + auto global_allocation = + allocator.Allocate(256UL * 1024 * 1024, allocator.kDefault); + + std::unique_ptr best_fit_allocator( + new BestFitAllocator(global_allocation.get())); + + LockedAllocator locked_allocator(std::move(best_fit_allocator)); + + auto th_main = [&] { + std::random_device dev; + std::default_random_engine engine(dev()); + std::uniform_int_distribution dist(1U, 1024U); + + for (size_t i = 0; i < 128; ++i) { + size_t allocate_size = dist(engine); + + auto allocation = locked_allocator.Allocate( + sizeof(size_t) * allocate_size, locked_allocator.kDefault); + + size_t* data = reinterpret_cast(allocation->ptr()); + + for (size_t j = 0; j < allocate_size; ++j) { + data[j] = j; + } + std::this_thread::yield(); + + for (size_t j = 0; j < allocate_size; ++j) { + ASSERT_EQ(data[j], j); + } + } + }; + { + std::vector threads; + for (size_t i = 0; i < 1024; ++i) { + threads.emplace_back(th_main); + } + for (auto& th : threads) { + th.join(); + } + } +} + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/best_fit_allocator_test.cu b/paddle/fluid/memory/allocation/best_fit_allocator_test.cu new file mode 100644 index 0000000000000000000000000000000000000000..50aecda97a9abb64f81c6e0e1d268e57a3aad3f0 --- /dev/null +++ b/paddle/fluid/memory/allocation/best_fit_allocator_test.cu @@ -0,0 +1,87 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include // NOLINT +#include +#include "gtest/gtest.h" +#include "paddle/fluid/memory/allocation/best_fit_allocator.h" +#include "paddle/fluid/memory/allocation/cuda_allocator.h" +#include "paddle/fluid/memory/allocation/locked_allocator.h" +#include "paddle/fluid/memory/memcpy.h" +#include "paddle/fluid/platform/for_range.h" +namespace paddle { +namespace memory { +namespace allocation { + +struct ForEachFill { + size_t* ptr_; + + explicit ForEachFill(size_t* ptr) : ptr_(ptr) {} + + __device__ void operator()(size_t i) { ptr_[i] = i; } +}; + +TEST(BestFitAllocator, concurrent_cuda) { + CUDAAllocator allocator(platform::CUDAPlace(0)); + // 256 MB + auto cuda_allocation = + allocator.Allocate(256U * 1024 * 1024, allocator.kDefault); + LockedAllocator concurrent_allocator( + std::unique_ptr(new BestFitAllocator(cuda_allocation.get()))); + + auto th_main = [&] { + std::random_device dev; + std::default_random_engine engine(dev()); + std::uniform_int_distribution dist(1U, 1024U); + platform::CUDAPlace gpu(0); + platform::CUDADeviceContext dev_ctx(gpu); + std::array buf; + for (size_t i = 0; i < 128; ++i) { + size_t allocate_size = dist(engine); + + auto allocation = concurrent_allocator.Allocate( + sizeof(size_t) * allocate_size, concurrent_allocator.kDefault); + + size_t* data = reinterpret_cast(allocation->ptr()); + + ForEachFill fill(data); + platform::ForRange for_range(dev_ctx, + allocate_size); + for_range(fill); + + memory::Copy(platform::CPUPlace(), buf.data(), gpu, data, + sizeof(size_t) * allocate_size, dev_ctx.stream()); + + dev_ctx.Wait(); + for (size_t j = 0; j < allocate_size; ++j) { + ASSERT_EQ(buf[j], j); + } + allocation = nullptr; + } + }; + + { + std::vector threads; + for (size_t i = 0; i < 1024; ++i) { + threads.emplace_back(th_main); + } + for (auto& th : threads) { + th.join(); + } + } +} + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/buffered_allocator.cc b/paddle/fluid/memory/allocation/buffered_allocator.cc new file mode 100644 index 0000000000000000000000000000000000000000..fc75abc9dfee6c9df5bc87faa493002cc1fe6298 --- /dev/null +++ b/paddle/fluid/memory/allocation/buffered_allocator.cc @@ -0,0 +1,80 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/memory/allocation/buffered_allocator.h" +#include +#include +#include +#include "paddle/fluid/memory/allocation/allocation_with_underlying.h" + +namespace paddle { +namespace memory { +namespace allocation { + +BufferedAllocator::BufferedAllocator(std::unique_ptr &&allocator) + : underlying_allocator_(std::move(allocator)) { + PADDLE_ENFORCE_NOT_NULL( + underlying_allocator_, + "Underlying allocator of BufferedAllocator must be unmanaged"); + if (underlying_allocator_->IsAllocThreadSafe()) { + mtx_.reset(new std::mutex()); + } +} + +BufferedAllocator::~BufferedAllocator() { FreeCache(-1UL); } + +void BufferedAllocator::FreeCache(size_t size) { + platform::LockGuardPtr guard(mtx_); + if (UNLIKELY(size == 0)) return; + size_t cur = 0; + while (!allocations_.empty()) { // free the largest + auto it = --allocations_.end(); + cur += it->second->size(); + delete it->second.release(); + allocations_.erase(it); + if (cur >= size) return; + } +} + +bool BufferedAllocator::IsAllocThreadSafe() const { + return this->underlying_allocator_->IsAllocThreadSafe(); +} +void BufferedAllocator::Free(Allocation *allocation) { + platform::LockGuardPtr guard(mtx_); + allocations_.emplace(allocation->size(), AllocationPtr(allocation)); +} +Allocation *BufferedAllocator::AllocateImpl(size_t size, Allocator::Attr attr) { + { + platform::LockGuardPtr guard(mtx_); + auto it = allocations_.lower_bound(size); + if (it != allocations_.end() && it->first < size * 2) { + AllocationPtr result(std::move(it->second)); + allocations_.erase(it); + return new AllocationWithUnderlying(std::move(result)); + } + } + + try { + return new AllocationWithUnderlying( + underlying_allocator_->Allocate(size, attr)); + } catch (BadAlloc &) { + FreeCache(size); + return new AllocationWithUnderlying( + underlying_allocator_->Allocate(size, attr)); + } +} + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/buffered_allocator.h b/paddle/fluid/memory/allocation/buffered_allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..d44a3f85beba712b1e735ba14008689bce7d0d64 --- /dev/null +++ b/paddle/fluid/memory/allocation/buffered_allocator.h @@ -0,0 +1,58 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include +#include +#include "paddle/fluid/memory/allocation/allocator.h" +#include "paddle/fluid/platform/lock_guard_ptr.h" + +namespace paddle { +namespace memory { +namespace allocation { + +// NOTE(zjl): BufferedAllocator maintains a memory pool to accelerate +// memory allocation and reuse memory. +// BufferedAllocator provides the same thread-safety level as +// underlying_allocator_ +class BufferedAllocator : public Allocator { + public: + explicit BufferedAllocator(std::unique_ptr &&allocator); + + ~BufferedAllocator(); + + bool IsAllocThreadSafe() const override; + + // only used in unittest + inline void ClearCache() { FreeCache(-1UL); } + + private: + void FreeCache(size_t size); + + protected: + void Free(Allocation *allocation) override; + Allocation *AllocateImpl(size_t size, Allocator::Attr attr) override; + + private: + std::unique_ptr underlying_allocator_; + std::multimap allocations_; + std::unique_ptr mtx_; +}; + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/buffered_allocator_test.cc b/paddle/fluid/memory/allocation/buffered_allocator_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..41ebb9dbeaf36eafe3dff4ae294b84427f660cbf --- /dev/null +++ b/paddle/fluid/memory/allocation/buffered_allocator_test.cc @@ -0,0 +1,144 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/memory/allocation/buffered_allocator.h" +#include +#include "paddle/fluid/memory/allocation/best_fit_allocator.h" +#include "paddle/fluid/memory/allocation/cpu_allocator.h" +#include "paddle/fluid/memory/allocation/locked_allocator.h" + +namespace paddle { +namespace memory { +namespace allocation { + +inline std::unique_ptr GetBufferedAllocator( + Allocation *allocation, bool thread_safe) { + std::unique_ptr allocator(new BestFitAllocator(allocation)); + if (thread_safe) { + allocator.reset(new LockedAllocator(std::move(allocator))); + } + + return std::unique_ptr( + new BufferedAllocator(std::move(allocator))); +} + +TEST(buffered_allocator, thread_safety) { + std::unique_ptr allocator(new CPUAllocator()); + auto chunk = allocator->Allocate(1 << 20, allocator->kDefault); + { + auto buf_allocator = GetBufferedAllocator(chunk.get(), true); + ASSERT_EQ(buf_allocator->IsAllocThreadSafe(), true); + } + + { + auto buf_allocator = GetBufferedAllocator(chunk.get(), false); + ASSERT_EQ(buf_allocator->IsAllocThreadSafe(), false); + } +} + +class StubAllocation : public Allocation { + public: + using Allocation::Allocation; +}; + +class StubAllocator : public Allocator { + public: + void ResetCounter() { + construct_count_ = 0; + destruct_count_ = 0; + } + + size_t GetAllocCount() const { return construct_count_; } + + size_t GetFreeCount() const { return destruct_count_; } + + protected: + void Free(Allocation *allocation) override { + auto *alloc = dynamic_cast(allocation); + PADDLE_ENFORCE_NOT_NULL(alloc); + if (alloc->ptr()) delete[] static_cast(alloc->ptr()); + ++destruct_count_; + delete allocation; + } + Allocation *AllocateImpl(size_t size, Allocator::Attr attr) override { + ++construct_count_; + if (size == 0) { + return new StubAllocation(nullptr, 0, platform::CPUPlace()); + } else { + return new StubAllocation(new uint8_t[size], size, platform::CPUPlace()); + } + } + + private: + size_t construct_count_ = 0; + size_t destruct_count_ = 0; +}; + +constexpr size_t kZero = 0; +constexpr size_t kOne = 1; +constexpr size_t kTwo = 2; + +TEST(buffered_allocator, lazy_free) { + std::unique_ptr stub_allocator(new StubAllocator()); + auto *underlying_allocator = stub_allocator.get(); + std::unique_ptr allocator( + new BufferedAllocator(std::move(stub_allocator))); + + { + underlying_allocator->ResetCounter(); + auto x = allocator->Allocate(1025, allocator->kDefault); + ASSERT_EQ(underlying_allocator->GetAllocCount(), kOne); + ASSERT_EQ(underlying_allocator->GetFreeCount(), kZero); + x = nullptr; + ASSERT_EQ(underlying_allocator->GetFreeCount(), kZero); + } + + { + underlying_allocator->ResetCounter(); + auto x = allocator->Allocate(900, allocator->kDefault); + ASSERT_EQ(underlying_allocator->GetAllocCount(), kZero); + ASSERT_EQ(underlying_allocator->GetFreeCount(), kZero); + auto y = allocator->Allocate(2048, allocator->kDefault); + ASSERT_EQ(underlying_allocator->GetAllocCount(), kOne); + ASSERT_EQ(underlying_allocator->GetFreeCount(), kZero); + x = nullptr; + ASSERT_EQ(underlying_allocator->GetFreeCount(), kZero); + y = nullptr; + ASSERT_EQ(underlying_allocator->GetFreeCount(), kZero); + } + + { + underlying_allocator->ResetCounter(); + allocator->ClearCache(); + ASSERT_EQ(underlying_allocator->GetAllocCount(), kZero); + ASSERT_EQ(underlying_allocator->GetFreeCount(), kTwo); + } +} + +TEST(buffered_allocator, garbage_collection) { + std::unique_ptr cpu_allocator(new CPUAllocator()); + auto chunk = cpu_allocator->Allocate(2048, cpu_allocator->kDefault); + auto allocator = GetBufferedAllocator(chunk.get(), false); + auto x1 = allocator->Allocate(1600, allocator->kDefault); + auto x2 = allocator->Allocate(400, allocator->kDefault); + x1 = nullptr; + x2 = nullptr; + auto x3 = allocator->Allocate(1600, allocator->kDefault); + ASSERT_NE(x3, nullptr); + ASSERT_NE(x3->ptr(), nullptr); +} + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/conditional_allocator.cc b/paddle/fluid/memory/allocation/conditional_allocator.cc new file mode 100644 index 0000000000000000000000000000000000000000..96a818e03e507c6de720344288312dc2af2ae647 --- /dev/null +++ b/paddle/fluid/memory/allocation/conditional_allocator.cc @@ -0,0 +1,48 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/memory/allocation/conditional_allocator.h" + +namespace paddle { +namespace memory { +namespace allocation { + +ConditionalAllocator& ConditionalAllocator::AddAllocator( + std::function func, + std::shared_ptr allocator) { + underlying_allocators_.emplace_back(std::move(func), std::move(allocator)); + return *this; +} + +bool ConditionalAllocator::IsAllocThreadSafe() const { + return std::all_of(underlying_allocators_.begin(), + underlying_allocators_.end(), + [](const AllocatorWithCond& allocatorWithCond) { + return allocatorWithCond.second->IsAllocThreadSafe(); + }); +} + +Allocation* ConditionalAllocator::AllocateImpl(size_t size, + Allocator::Attr attr) { + for (auto& pair : underlying_allocators_) { + if (pair.first(size, attr)) { + return pair.second->Allocate(size, attr).release(); + } + } + throw BadAlloc("No suitable allocator"); +} + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/conditional_allocator.h b/paddle/fluid/memory/allocation/conditional_allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..94cba4432ed4f72c0a75da9b31d48611a8404ad3 --- /dev/null +++ b/paddle/fluid/memory/allocation/conditional_allocator.h @@ -0,0 +1,61 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include +#include +#include "paddle/fluid/memory/allocation/allocator.h" + +namespace paddle { +namespace memory { +namespace allocation { + +// A composite allocator who will dispatch the allocation request by registered +// condition. +// +// For example: +// +// auto* cond_allocator = new ConditionalAllocator(); +// cond_allocator->AddAllocator([](size_t size, Attr attr){ +// // if size > 10 +// return size > 10; +// }, allocator_a).AddAllocator([](size_t size, Attr attr){ +// // elif attr is kDefault +// return attr == kDefault; +// }, allocator_b).AddAllocator([](size_t size, Attr attr){ +// // else +// return true; +// }, allocator_c); +class ConditionalAllocator : public Allocator { + public: + ConditionalAllocator() = default; + + ConditionalAllocator& AddAllocator(std::function func, + std::shared_ptr allocator); + + bool IsAllocThreadSafe() const override; + + protected: + Allocation* AllocateImpl(size_t size, Allocator::Attr attr) override; + + private: + using AllocatorWithCond = + std::pair, std::shared_ptr>; + std::vector underlying_allocators_; +}; + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/cpu_allocator.cc b/paddle/fluid/memory/allocation/cpu_allocator.cc new file mode 100644 index 0000000000000000000000000000000000000000..cc81a6f7b8b1950b07b6fb1571b53d9b5ddb1b9f --- /dev/null +++ b/paddle/fluid/memory/allocation/cpu_allocator.cc @@ -0,0 +1,45 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/memory/allocation/cpu_allocator.h" +#include +#include + +namespace paddle { +namespace memory { +namespace allocation { + +CPUAllocation::CPUAllocation(void *ptr, size_t size) + : Allocation(ptr, size, platform::CPUPlace()) {} + +bool CPUAllocator::IsAllocThreadSafe() const { return true; } + +void CPUAllocator::Free(Allocation *allocation) { + PADDLE_ENFORCE_NOT_NULL(dynamic_cast(allocation)); + free(allocation->ptr()); + delete allocation; +} + +Allocation *CPUAllocator::AllocateImpl(size_t size, Allocator::Attr attr) { + void *ptr; + auto status = posix_memalign(&ptr, kAlignment, size); + if (UNLIKELY(status) != 0) { + throw BadAlloc(string::Sprintf("Cannot allocate cpu memory %d. Errno is %d", + size, status)); + } + return new CPUAllocation(ptr, size); +} +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/cpu_allocator.h b/paddle/fluid/memory/allocation/cpu_allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..9e0044c47ae4ebde9c828e14d3d0e6c0cb1dc8dc --- /dev/null +++ b/paddle/fluid/memory/allocation/cpu_allocator.h @@ -0,0 +1,45 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "paddle/fluid/memory/allocation/allocator.h" + +namespace paddle { +namespace memory { +namespace allocation { +// CPU system allocator and allocation. +// +// NOTE(yy): Should we just use `malloc` here since there is an +// aligned_allocator. +// +// NOTE(yy): It is no need to use `BestFitAllocator` in CPU. We can import +// an open-sourced allocator into Paddle. +class CPUAllocator; +class CPUAllocation : public Allocation { + public: + CPUAllocation(void* ptr, size_t size); +}; + +class CPUAllocator : public Allocator { + public: + constexpr static size_t kAlignment = 64u; + bool IsAllocThreadSafe() const override; + + protected: + void Free(Allocation* allocation) override; + Allocation* AllocateImpl(size_t size, Allocator::Attr attr) override; +}; +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/cuda_allocator.cc b/paddle/fluid/memory/allocation/cuda_allocator.cc new file mode 100644 index 0000000000000000000000000000000000000000..430bf0be98e08787ac4412a8b6e0fcc310ffe2b4 --- /dev/null +++ b/paddle/fluid/memory/allocation/cuda_allocator.cc @@ -0,0 +1,48 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/memory/allocation/cuda_allocator.h" +#include +#include +#include +#include "paddle/fluid/platform/cuda_device_guard.h" +#include "paddle/fluid/platform/gpu_info.h" + +namespace paddle { +namespace memory { +namespace allocation { +bool CUDAAllocator::IsAllocThreadSafe() const { return true; } +void CUDAAllocator::Free(Allocation* allocation) { + platform::CUDADeviceGuard guard(place_.device); + auto* cuda_allocation = dynamic_cast(allocation); + PADDLE_ENFORCE_NOT_NULL(cuda_allocation); + PADDLE_ENFORCE_EQ(boost::get(cuda_allocation->place()), + place_); + PADDLE_ENFORCE(cudaFree(allocation->ptr())); + delete allocation; +} +Allocation* CUDAAllocator::AllocateImpl(size_t size, Allocator::Attr attr) { + platform::CUDADeviceGuard guard(place_.device); + void* ptr; + auto status = cudaMalloc(&ptr, size); + if (UNLIKELY(status != cudaSuccess)) { + throw BadAlloc(string::Sprintf( + "Cannot allocate %d on GPU %d, cuda status %d, %s", size, place_.device, + status, cudaGetErrorString(status))); + } + return new CUDAAllocation(ptr, size, platform::Place(place_)); +} +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/cuda_allocator.h b/paddle/fluid/memory/allocation/cuda_allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..63726f5820b1c81565117c7a9bf798c17c9681f6 --- /dev/null +++ b/paddle/fluid/memory/allocation/cuda_allocator.h @@ -0,0 +1,47 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "paddle/fluid/memory/allocation/allocator.h" +#include "paddle/fluid/platform/place.h" + +namespace paddle { +namespace memory { +namespace allocation { + +// CUDA System allocator and allocation. +// Just a flag type. +class CUDAAllocation : public Allocation { + public: + using Allocation::Allocation; +}; + +class CUDAAllocator : public Allocator { + public: + explicit CUDAAllocator(const platform::CUDAPlace& place) : place_(place) {} + explicit CUDAAllocator(const platform::Place& place) + : place_(boost::get(place)) {} + bool IsAllocThreadSafe() const override; + + protected: + void Free(Allocation* allocation) override; + Allocation* AllocateImpl(size_t size, Allocator::Attr attr) override; + + private: + platform::CUDAPlace place_; +}; + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/legacy_allocator.cc b/paddle/fluid/memory/allocation/legacy_allocator.cc new file mode 100644 index 0000000000000000000000000000000000000000..e66537272340e89fe1075325323909213bbe97b8 --- /dev/null +++ b/paddle/fluid/memory/allocation/legacy_allocator.cc @@ -0,0 +1,307 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/memory/allocation/legacy_allocator.h" +#include +#include "glog/logging.h" +#include "paddle/fluid/memory/detail/buddy_allocator.h" +#include "paddle/fluid/memory/detail/system_allocator.h" +#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/string/printf.h" + +DEFINE_bool(init_allocated_mem, false, + "It is a mistake that the values of the memory allocated by " + "BuddyAllocator are always zeroed in some op's implementation. " + "To find this error in time, we use init_allocated_mem to indicate " + "that initializing the allocated memory with a small value " + "during unit testing."); +DECLARE_double(fraction_of_gpu_memory_to_use); + +namespace paddle { +namespace memory { +namespace legacy { +template +void *Alloc(const Place &place, size_t size); + +template +void Free(const Place &place, void *p); + +template +size_t Used(const Place &place); + +struct Usage : public boost::static_visitor { + size_t operator()(const platform::CPUPlace &cpu) const; + size_t operator()(const platform::CUDAPlace &gpu) const; + size_t operator()(const platform::CUDAPinnedPlace &cuda_pinned) const; +}; + +size_t memory_usage(const platform::Place &p); + +using BuddyAllocator = detail::BuddyAllocator; + +BuddyAllocator *GetCPUBuddyAllocator() { + // We tried thread_local for inference::RNN1 model, but that not works much + // for multi-thread test. + static std::once_flag init_flag; + static detail::BuddyAllocator *a = nullptr; + + std::call_once(init_flag, []() { + a = new detail::BuddyAllocator( + std::unique_ptr(new detail::CPUAllocator), + platform::CpuMinChunkSize(), platform::CpuMaxChunkSize()); + }); + + return a; +} + +// We compared the NaiveAllocator with BuddyAllocator in CPU memory allocation, +// seems they are almost the same overhead. +struct NaiveAllocator { + void *Alloc(size_t size) { return malloc(size); } + + void Free(void *p) { + PADDLE_ENFORCE(p); + free(p); + } + + static NaiveAllocator *Instance() { + static NaiveAllocator x; + return &x; + } + + private: + std::mutex lock_; +}; + +template <> +void *Alloc(const platform::CPUPlace &place, size_t size) { + VLOG(10) << "Allocate " << size << " bytes on " << platform::Place(place); + void *p = GetCPUBuddyAllocator()->Alloc(size); + if (FLAGS_init_allocated_mem) { + memset(p, 0xEF, size); + } + VLOG(100) << " pointer=" << p; + return p; +} + +template <> +void Free(const platform::CPUPlace &place, void *p) { + VLOG(10) << "Free pointer=" << p << " on " << platform::Place(place); + GetCPUBuddyAllocator()->Free(p); +} + +template <> +size_t Used(const platform::CPUPlace &place) { + return GetCPUBuddyAllocator()->Used(); +} + +#ifdef PADDLE_WITH_CUDA +BuddyAllocator *GetGPUBuddyAllocator(int gpu_id) { + static std::once_flag init_flag; + static detail::BuddyAllocator **a_arr = nullptr; + + std::call_once(init_flag, [gpu_id]() { + int gpu_num = platform::GetCUDADeviceCount(); + PADDLE_ENFORCE(gpu_id < gpu_num, "gpu_id:%d should < gpu_num:%d", gpu_id, + gpu_num); + + a_arr = new BuddyAllocator *[gpu_num]; + for (int i = 0; i < gpu_num; i++) { + a_arr[i] = nullptr; + platform::SetDeviceId(i); + a_arr[i] = new BuddyAllocator( + std::unique_ptr(new detail::GPUAllocator(i)), + platform::GpuMinChunkSize(), platform::GpuMaxChunkSize()); + + VLOG(100) << "\n\nNOTE: each GPU device use " + << FLAGS_fraction_of_gpu_memory_to_use * 100 + << "% of GPU memory.\n" + << "You can set GFlags environment variable '" + << "FLAGS_fraction_of_gpu_memory_to_use" + << "' to change the fraction of GPU usage.\n\n"; + } + }); + + platform::SetDeviceId(gpu_id); + return a_arr[gpu_id]; +} +#endif + +template <> +size_t Used(const platform::CUDAPlace &place) { +#ifdef PADDLE_WITH_CUDA + return GetGPUBuddyAllocator(place.device)->Used(); +#else + PADDLE_THROW("'CUDAPlace' is not supported in CPU only device."); +#endif +} + +template <> +void *Alloc(const platform::CUDAPlace &place, + size_t size) { +#ifdef PADDLE_WITH_CUDA + auto *buddy_allocator = GetGPUBuddyAllocator(place.device); + auto *ptr = buddy_allocator->Alloc(size); + if (ptr == nullptr) { + int cur_dev = platform::GetCurrentDeviceId(); + platform::SetDeviceId(place.device); + size_t avail, total; + platform::GpuMemoryUsage(&avail, &total); + LOG(WARNING) << "Cannot allocate " << string::HumanReadableSize(size) + << " in GPU " << place.device << ", available " + << string::HumanReadableSize(avail); + LOG(WARNING) << "total " << total; + LOG(WARNING) << "GpuMinChunkSize " + << string::HumanReadableSize( + buddy_allocator->GetMinChunkSize()); + LOG(WARNING) << "GpuMaxChunkSize " + << string::HumanReadableSize( + buddy_allocator->GetMaxChunkSize()); + LOG(WARNING) << "GPU memory used: " + << string::HumanReadableSize(Used(place)); + platform::SetDeviceId(cur_dev); + } + if (FLAGS_init_allocated_mem) { + cudaMemset(ptr, 0xEF, size); + } + return ptr; +#else + PADDLE_THROW("'CUDAPlace' is not supported in CPU only device."); +#endif +} + +template <> +void Free(const platform::CUDAPlace &place, void *p) { +#ifdef PADDLE_WITH_CUDA + GetGPUBuddyAllocator(place.device)->Free(p); +#else + PADDLE_THROW("'CUDAPlace' is not supported in CPU only device."); +#endif +} + +#ifdef PADDLE_WITH_CUDA +BuddyAllocator *GetCUDAPinnedBuddyAllocator() { + static std::once_flag init_flag; + static BuddyAllocator *ba = nullptr; + + std::call_once(init_flag, []() { + ba = new BuddyAllocator(std::unique_ptr( + new detail::CUDAPinnedAllocator), + platform::CUDAPinnedMinChunkSize(), + platform::CUDAPinnedMaxChunkSize()); + }); + + return ba; +} +#endif + +template <> +size_t Used(const platform::CUDAPinnedPlace &place) { +#ifdef PADDLE_WITH_CUDA + return GetCUDAPinnedBuddyAllocator()->Used(); +#else + PADDLE_THROW("'CUDAPinnedPlace' is not supported in CPU only device."); +#endif +} + +template <> +void *Alloc(const platform::CUDAPinnedPlace &place, + size_t size) { +#ifdef PADDLE_WITH_CUDA + auto *buddy_allocator = GetCUDAPinnedBuddyAllocator(); + void *ptr = buddy_allocator->Alloc(size); + + if (ptr == nullptr) { + LOG(WARNING) << "cudaMallocHost Cannot allocate " << size + << " bytes in CUDAPinnedPlace"; + } + if (FLAGS_init_allocated_mem) { + memset(ptr, 0xEF, size); + } + return ptr; +#else + PADDLE_THROW("'CUDAPinnedPlace' is not supported in CPU only device."); +#endif +} + +template <> +void Free(const platform::CUDAPinnedPlace &place, + void *p) { +#ifdef PADDLE_WITH_CUDA + GetCUDAPinnedBuddyAllocator()->Free(p); +#else + PADDLE_THROW("'CUDAPinnedPlace' is not supported in CPU only device."); +#endif +} + +struct AllocVisitor : public boost::static_visitor { + inline explicit AllocVisitor(size_t size) : size_(size) {} + + template + inline void *operator()(const Place &place) const { + return Alloc(place, size_); + } + + private: + size_t size_; +}; + +struct FreeVisitor : public boost::static_visitor { + inline explicit FreeVisitor(void *ptr) : ptr_(ptr) {} + + template + inline void operator()(const Place &place) const { + Free(place, ptr_); + } + + private: + void *ptr_; +}; + +size_t Usage::operator()(const platform::CPUPlace &cpu) const { + return Used(cpu); +} + +size_t Usage::operator()(const platform::CUDAPlace &gpu) const { +#ifdef PADDLE_WITH_CUDA + return Used(gpu); +#else + PADDLE_THROW("'CUDAPlace' is not supported in CPU only device."); +#endif +} + +size_t Usage::operator()(const platform::CUDAPinnedPlace &cuda_pinned) const { +#ifdef PADDLE_WITH_CUDA + return Used(cuda_pinned); +#else + PADDLE_THROW("'CUDAPinnedPlace' is not supported in CPU only device."); +#endif +} +} // namespace legacy + +namespace allocation { + +Allocation *LegacyAllocator::AllocateImpl(size_t size, Allocator::Attr attr) { + void *ptr = boost::apply_visitor(legacy::AllocVisitor(size), place_); + return new Allocation(ptr, size, place_); +} + +void LegacyAllocator::Free(Allocation *allocation) { + boost::apply_visitor(legacy::FreeVisitor(allocation->ptr()), + allocation->place()); + delete allocation; +} +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/legacy_allocator.h b/paddle/fluid/memory/allocation/legacy_allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..503a7a685cb9d8dbbbbd6c23b5b82c383893e3d8 --- /dev/null +++ b/paddle/fluid/memory/allocation/legacy_allocator.h @@ -0,0 +1,37 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "paddle/fluid/memory/allocation/allocator.h" +#include "paddle/fluid/platform/place.h" +namespace paddle { +namespace memory { +namespace allocation { + +class LegacyAllocatorPrivate; +class LegacyAllocator : public Allocator { + public: + explicit LegacyAllocator(const platform::Place &p) : place_(p) {} + + protected: + Allocation *AllocateImpl(size_t size, Allocator::Attr attr) override; + void Free(Allocation *allocation) override; + + private: + platform::Place place_; +}; + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/locked_allocator.cc b/paddle/fluid/memory/allocation/locked_allocator.cc new file mode 100644 index 0000000000000000000000000000000000000000..835f6527c8a1d83340167bd9079f7cee25ad24cf --- /dev/null +++ b/paddle/fluid/memory/allocation/locked_allocator.cc @@ -0,0 +1,48 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/memory/allocation/locked_allocator.h" +#include // NOLINT +#include "paddle/fluid/memory/allocation/allocation_with_underlying.h" +#include "paddle/fluid/platform/lock_guard_ptr.h" +namespace paddle { +namespace memory { +namespace allocation { + +bool LockedAllocator::IsAllocThreadSafe() const { return true; } + +LockedAllocator::LockedAllocator( + std::unique_ptr &&underlying_allocator) + : underlying_allocator_(std::move(underlying_allocator)) { + PADDLE_ENFORCE_NOT_NULL(underlying_allocator_); + if (!underlying_allocator_->IsAllocThreadSafe()) { + mtx_.reset(new std::mutex()); + } +} +void LockedAllocator::Free(Allocation *allocation) { + { + platform::LockGuardPtr guard(mtx_); + reinterpret_cast(allocation) + ->allocation_.reset(); // Destroy inner allocation + } + delete allocation; +} +Allocation *LockedAllocator::AllocateImpl(size_t size, Allocator::Attr attr) { + platform::LockGuardPtr guard(mtx_); + return new AllocationWithUnderlying( + underlying_allocator_->Allocate(size, attr)); +} +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/locked_allocator.h b/paddle/fluid/memory/allocation/locked_allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..4967b9bb8d3ad101cff4657b0a45b49b76e2deb2 --- /dev/null +++ b/paddle/fluid/memory/allocation/locked_allocator.h @@ -0,0 +1,41 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#pragma once +#include +#include // NOLINT +#include // NOLINT +#include "paddle/fluid/memory/allocation/allocator.h" + +namespace paddle { +namespace memory { +namespace allocation { + +// A allocator to make underlying allocator thread safe. +class LockedAllocator : public Allocator { + public: + explicit LockedAllocator(std::unique_ptr &&underlying_allocator); + bool IsAllocThreadSafe() const override; + + protected: + void Free(Allocation *allocation) override; + Allocation *AllocateImpl(size_t size, Allocator::Attr attr) override; + + private: + std::unique_ptr underlying_allocator_; + std::unique_ptr mtx_; +}; + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/pinned_allocator.cc b/paddle/fluid/memory/allocation/pinned_allocator.cc new file mode 100644 index 0000000000000000000000000000000000000000..6ac3aefdd18d6d9a21dc7ce66511013dfb78bc5b --- /dev/null +++ b/paddle/fluid/memory/allocation/pinned_allocator.cc @@ -0,0 +1,40 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/memory/allocation/pinned_allocator.h" +#include +#include + +namespace paddle { +namespace memory { +namespace allocation { +bool CPUPinnedAllocator::IsAllocThreadSafe() const { return true; } +void CPUPinnedAllocator::Free(Allocation *allocation) { + PADDLE_ENFORCE_NOT_NULL(dynamic_cast(allocation)); + PADDLE_ENFORCE(cudaFreeHost(allocation->ptr())); + delete allocation; +} +Allocation *CPUPinnedAllocator::AllocateImpl(size_t size, + Allocator::Attr attr) { + // PADDLE_ENFORCE_EQ( + // attr, kCrossDevice, + // "CPUPinnedAllocator should be used for Cross-Device Communication"); + + void *ptr; + PADDLE_ENFORCE(cudaMallocHost(&ptr, size)); + return new CPUPinnedAllocation(ptr, size); +} +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/pinned_allocator.h b/paddle/fluid/memory/allocation/pinned_allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..26d12dd91c7fda31802226a84d883b6a6e9abbe4 --- /dev/null +++ b/paddle/fluid/memory/allocation/pinned_allocator.h @@ -0,0 +1,40 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "paddle/fluid/memory/allocation/allocator.h" + +namespace paddle { +namespace memory { +namespace allocation { + +// Allocator uses `cudaMallocHost` +class CPUPinnedAllocation : public Allocation { + public: + CPUPinnedAllocation(void *ptr, size_t size) + : Allocation(ptr, size, platform::CUDAPinnedPlace()) {} +}; + +class CPUPinnedAllocator : public Allocator { + public: + bool IsAllocThreadSafe() const override; + + protected: + void Free(Allocation *allocation) override; + Allocation *AllocateImpl(size_t size, Allocator::Attr attr) override; +}; + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/retry_allocator.cc b/paddle/fluid/memory/allocation/retry_allocator.cc new file mode 100644 index 0000000000000000000000000000000000000000..981705051b449e6a35c2dcce9138dc2efae52920 --- /dev/null +++ b/paddle/fluid/memory/allocation/retry_allocator.cc @@ -0,0 +1,75 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/memory/allocation/retry_allocator.h" +#include "paddle/fluid/memory/allocation/allocation_with_underlying.h" +namespace paddle { +namespace memory { +namespace allocation { + +bool RetryAllocator::IsAllocThreadSafe() const { + return underlying_allocator_->IsAllocThreadSafe(); +} + +void RetryAllocator::Free(Allocation* allocation) { + // Delete underlying allocation first. + reinterpret_cast(allocation)->allocation_.reset(); + { + // notify all waited allocators, they can try to allocate memory after free. + std::lock_guard lock(mutex_); + cv_.notify_all(); + } + delete allocation; +} + +Allocation* RetryAllocator::AllocateImpl(size_t size, Allocator::Attr attr) { + auto alloc_func = [&, this]() { + return new AllocationWithUnderlying( + underlying_allocator_->Allocate(size, attr)); + }; + // In fact, we can unify the code of allocation success and failure + // But it would add lock even when allocation success at the first time + try { + return alloc_func(); + } catch (BadAlloc& bad_alloc) { + { + // We can just write allocation retry inside the predicate function of + // wait_until + // But it needs to acquire the lock when executing predicate function + // For better performance, we use loop here + auto end_time = std::chrono::high_resolution_clock::now() + retry_time_; + auto wait_until = [&, this] { + std::unique_lock lock(mutex_); + return cv_.wait_until(lock, end_time); + }; + while (wait_until() != std::cv_status::timeout) { + try { + return alloc_func(); + } catch (BadAlloc& ex) { + bad_alloc = ex; + } catch (...) { + throw; + } + } + + throw; // rethrow the original exception or throw the internal bad_alloc + } + } catch (...) { + throw; + } +} + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/retry_allocator.h b/paddle/fluid/memory/allocation/retry_allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..5efcac8b108002a2a2da920173d237096de4fffa --- /dev/null +++ b/paddle/fluid/memory/allocation/retry_allocator.h @@ -0,0 +1,66 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include // NOLINT +#include // NOLINT +#include +#include // NOLINT +#include "paddle/fluid/memory/allocation/allocator.h" + +namespace paddle { +namespace memory { +namespace allocation { + +class RetryAllocator; + +class RetryAllocator : public Allocator { + public: + RetryAllocator(std::unique_ptr&& allocator, size_t retry_ms) + : underlying_allocator_(std::move(allocator)), retry_time_(retry_ms) { + EnforceCheck(); + } + + bool IsAllocThreadSafe() const override; + + private: + void EnforceCheck() { + PADDLE_ENFORCE_NOT_NULL( + underlying_allocator_.get(), + "UnderlyingAllocator of RetryAllocator must be UnmanagedAllocator"); + PADDLE_ENFORCE(underlying_allocator_->IsAllocThreadSafe(), + "UnderlyingAllocator of RetryAllocator must be thread-safe"); + } + + protected: + void Free(Allocation* allocation) override; + Allocation* AllocateImpl(size_t size, Allocator::Attr attr) override; + + private: + std::unique_ptr underlying_allocator_; + std::chrono::milliseconds retry_time_; + std::mutex mutex_; + std::condition_variable cv_; + + // For debug, We can add an atomic integer to record how many memory sizes are + // waited to allocate + // std::atomic waited_allocate_size_{0}; + + friend class RetryAllocation; +}; + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/retry_allocator_test.cc b/paddle/fluid/memory/allocation/retry_allocator_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..a0ce2875cb8337a59ec03730e5cf66d2fc622001 --- /dev/null +++ b/paddle/fluid/memory/allocation/retry_allocator_test.cc @@ -0,0 +1,98 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/memory/allocation/retry_allocator.h" +#include +#include // NOLINT +#include // NOLINT +#include // NOLINT +#include // NOLINT +#include +#include "gtest/gtest.h" +#include "paddle/fluid/memory/allocation/best_fit_allocator.h" +#include "paddle/fluid/memory/allocation/cpu_allocator.h" +#include "paddle/fluid/memory/allocation/locked_allocator.h" + +namespace paddle { +namespace memory { +namespace allocation { + +TEST(RetryAllocator, RetryAllocator) { + CPUAllocator cpu_allocator; + + size_t size = (1 << 20); + auto cpu_allocation = cpu_allocator.Allocate(size, cpu_allocator.kDefault); + + std::unique_ptr best_fit_allocator( + new BestFitAllocator(cpu_allocation.get())); + std::unique_ptr locked_allocator( + new LockedAllocator(std::move(best_fit_allocator))); + + size_t thread_num = 32; + size_t sleep_time = 40; + size_t extra_time = 2; + + // Reserve to perform more tests in the future + std::vector> allocators; + { + std::unique_ptr best_fit_allocator( + new BestFitAllocator(cpu_allocation.get())); + std::unique_ptr locked_allocator( + new LockedAllocator(std::move(best_fit_allocator))); + allocators.push_back(std::make_shared( + std::move(locked_allocator), + (thread_num - 1) * (sleep_time + extra_time))); + } + + for (auto &allocator : allocators) { + std::vector threads(thread_num); + std::vector addresses(threads.size(), nullptr); + + std::mutex mutex; + std::condition_variable cv; + bool flag = false; + + for (size_t i = 0; i < threads.size(); ++i) { + threads[i] = std::thread([&, i]() { + { + std::unique_lock lock(mutex); + cv.wait(lock, [&] { return flag; }); + } + + auto ret = allocator->Allocate(size - 1); + addresses[i] = ret->ptr(); + std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time)); + }); + } + + { + std::lock_guard lock(mutex); + flag = true; + cv.notify_all(); + } + + for (auto &th : threads) { + th.join(); + } + + void *val = cpu_allocation->ptr(); + bool is_all_equal = std::all_of(addresses.begin(), addresses.end(), + [val](void *p) { return p == val; }); + ASSERT_TRUE(is_all_equal); + } +} + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/zero_size_allocator.cc b/paddle/fluid/memory/allocation/zero_size_allocator.cc new file mode 100644 index 0000000000000000000000000000000000000000..cb2df1a029815478bbc9d3b09425f3ef145c5fb3 --- /dev/null +++ b/paddle/fluid/memory/allocation/zero_size_allocator.cc @@ -0,0 +1,34 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/memory/allocation/zero_size_allocator.h" + +namespace paddle { +namespace memory { +namespace allocation { + +bool ZeroSizeAllocator::IsAllocThreadSafe() const { + return underlying_allocator_->IsAllocThreadSafe(); +} + +Allocation *ZeroSizeAllocator::AllocateImpl(size_t size, Allocator::Attr attr) { + if (size == 0) { + return new ZeroSizeAllocation(place_); + } else { + return underlying_allocator_->Allocate(size, attr).release(); + } +} +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/allocation/zero_size_allocator.h b/paddle/fluid/memory/allocation/zero_size_allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..6b80245a34e7a6834aa75a90218845cc92036881 --- /dev/null +++ b/paddle/fluid/memory/allocation/zero_size_allocator.h @@ -0,0 +1,50 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include "paddle/fluid/memory/allocation/allocator.h" + +namespace paddle { +namespace memory { +namespace allocation { + +// The allocator handles the request's size is zero. Allocator will always +// return an allocation even the request size is zero. However, the +// allocation.ptr() is nullptr +class ZeroSizeAllocation : public Allocation { + public: + explicit ZeroSizeAllocation(const platform::Place& p) + : Allocation(nullptr, 0, p) {} +}; + +class ZeroSizeAllocator : public Allocator { + public: + ZeroSizeAllocator(std::shared_ptr underlying_allocator, + const platform::Place& p) + : underlying_allocator_(std::move(underlying_allocator)), place_(p) {} + + bool IsAllocThreadSafe() const override; + + protected: + Allocation* AllocateImpl(size_t size, Allocator::Attr attr) override; + + private: + std::shared_ptr underlying_allocator_; + const platform::Place& place_; +}; + +} // namespace allocation +} // namespace memory +} // namespace paddle diff --git a/paddle/fluid/memory/detail/system_allocator.cc b/paddle/fluid/memory/detail/system_allocator.cc index 1b96798d23cec34a1863f56c1e4027ce32b2eec5..2019d1a14f6dd5ed09c251f26c6ca352faa594ae 100644 --- a/paddle/fluid/memory/detail/system_allocator.cc +++ b/paddle/fluid/memory/detail/system_allocator.cc @@ -30,12 +30,7 @@ limitations under the License. */ #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/gpu_info.h" -// If use_pinned_memory is true, CPUAllocator calls mlock, which -// returns pinned and locked memory as staging areas for data exchange -// between host and device. Allocates too much would reduce the amount -// of memory available to the system for paging. So, by default, we -// should set false to use_pinned_memory. -DEFINE_bool(use_pinned_memory, true, "If set, allocate cpu pinned memory."); +DECLARE_bool(use_pinned_memory); DECLARE_double(fraction_of_gpu_memory_to_use); namespace paddle { namespace memory { diff --git a/paddle/fluid/memory/malloc.cc b/paddle/fluid/memory/malloc.cc index 3400b5274679d8e859a008dcf47ac7122ace6b2d..e414ad657a9447142d6e3a42fc7efc86f01e9c9f 100644 --- a/paddle/fluid/memory/malloc.cc +++ b/paddle/fluid/memory/malloc.cc @@ -12,221 +12,22 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include "paddle/fluid/memory/malloc.h" #include #include - -#include "paddle/fluid/memory/malloc.h" - -#include "glog/logging.h" - -#include "paddle/fluid/memory/detail/buddy_allocator.h" -#include "paddle/fluid/memory/detail/system_allocator.h" -#include "paddle/fluid/platform/gpu_info.h" -#include "paddle/fluid/string/printf.h" - -DEFINE_bool(init_allocated_mem, false, - "It is a mistake that the values of the memory allocated by " - "BuddyAllocator are always zeroed in some op's implementation. " - "To find this error in time, we use init_allocated_mem to indicate " - "that initializing the allocated memory with a small value " - "during unit testing."); -DECLARE_double(fraction_of_gpu_memory_to_use); - +#include "paddle/fluid/memory/allocation/allocator_facade.h" +#include "paddle/fluid/memory/allocation/allocator_strategy.h" +#include "paddle/fluid/platform/place.h" namespace paddle { namespace memory { - -using BuddyAllocator = detail::BuddyAllocator; - -BuddyAllocator* GetCPUBuddyAllocator() { - // We tried thread_local for inference::RNN1 model, but that not works much - // for multi-thread test. - static std::once_flag init_flag; - static detail::BuddyAllocator* a = nullptr; - - std::call_once(init_flag, []() { - a = new detail::BuddyAllocator( - std::unique_ptr(new detail::CPUAllocator), - platform::CpuMinChunkSize(), platform::CpuMaxChunkSize()); - }); - - return a; -} - -// We compared the NaiveAllocator with BuddyAllocator in CPU memory allocation, -// seems they are almost the same overhead. -struct NaiveAllocator { - void* Alloc(size_t size) { return malloc(size); } - - void Free(void* p) { - PADDLE_ENFORCE(p); - free(p); - } - - static NaiveAllocator* Instance() { - static NaiveAllocator x; - return &x; - } - - private: - std::mutex lock_; -}; - -template <> -void* Alloc(platform::CPUPlace place, size_t size) { - VLOG(100) << "Allocate " << size << " bytes on " << platform::Place(place); - void* p = GetCPUBuddyAllocator()->Alloc(size); - if (FLAGS_init_allocated_mem) { - memset(p, 0xEF, size); - } - VLOG(100) << " pointer=" << p; - return p; -} - -template <> -void Free(platform::CPUPlace place, void* p) { - VLOG(100) << "Free pointer=" << p << " on " << platform::Place(place); - GetCPUBuddyAllocator()->Free(p); -} - -template <> -size_t Used(platform::CPUPlace place) { - return GetCPUBuddyAllocator()->Used(); -} - -#ifdef PADDLE_WITH_CUDA - -BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) { - static std::once_flag init_flag; - static detail::BuddyAllocator** a_arr = nullptr; - - std::call_once(init_flag, [gpu_id]() { - int gpu_num = platform::GetCUDADeviceCount(); - PADDLE_ENFORCE(gpu_id < gpu_num, "gpu_id:%d should < gpu_num:%d", gpu_id, - gpu_num); - - a_arr = new BuddyAllocator*[gpu_num]; - for (int i = 0; i < gpu_num; i++) { - a_arr[i] = nullptr; - platform::SetDeviceId(i); - a_arr[i] = new BuddyAllocator( - std::unique_ptr(new detail::GPUAllocator(i)), - platform::GpuMinChunkSize(), platform::GpuMaxChunkSize()); - - VLOG(100) << "\n\nNOTE: each GPU device use " - << FLAGS_fraction_of_gpu_memory_to_use * 100 - << "% of GPU memory.\n" - << "You can set GFlags environment variable '" - << "FLAGS_fraction_of_gpu_memory_to_use" - << "' to change the fraction of GPU usage.\n\n"; - } - }); - - platform::SetDeviceId(gpu_id); - return a_arr[gpu_id]; -} - -template <> -size_t Used(platform::CUDAPlace place) { - return GetGPUBuddyAllocator(place.device)->Used(); -} - -template <> -void* Alloc(platform::CUDAPlace place, size_t size) { - auto* buddy_allocator = GetGPUBuddyAllocator(place.device); - auto* ptr = buddy_allocator->Alloc(size); - if (ptr == nullptr) { - int cur_dev = platform::GetCurrentDeviceId(); - platform::SetDeviceId(place.device); - size_t avail, total; - platform::GpuMemoryUsage(&avail, &total); - LOG(WARNING) << "Cannot allocate " << string::HumanReadableSize(size) - << " in GPU " << place.device << ", available " - << string::HumanReadableSize(avail); - LOG(WARNING) << "total " << total; - LOG(WARNING) << "GpuMinChunkSize " - << string::HumanReadableSize( - buddy_allocator->GetMinChunkSize()); - LOG(WARNING) << "GpuMaxChunkSize " - << string::HumanReadableSize( - buddy_allocator->GetMaxChunkSize()); - LOG(WARNING) << "GPU memory used: " - << string::HumanReadableSize(Used(place)); - platform::SetDeviceId(cur_dev); - } - if (FLAGS_init_allocated_mem) { - cudaMemset(ptr, 0xEF, size); - } - return ptr; -} - -template <> -void Free(platform::CUDAPlace place, void* p) { - GetGPUBuddyAllocator(place.device)->Free(p); -} - -BuddyAllocator* GetCUDAPinnedBuddyAllocator() { - static std::once_flag init_flag; - static BuddyAllocator* ba = nullptr; - - std::call_once(init_flag, []() { - ba = new BuddyAllocator(std::unique_ptr( - new detail::CUDAPinnedAllocator), - platform::CUDAPinnedMinChunkSize(), - platform::CUDAPinnedMaxChunkSize()); - }); - - return ba; -} - -template <> -size_t Used(platform::CUDAPinnedPlace place) { - return GetCUDAPinnedBuddyAllocator()->Used(); -} - -template <> -void* Alloc(platform::CUDAPinnedPlace place, - size_t size) { - auto* buddy_allocator = GetCUDAPinnedBuddyAllocator(); - void* ptr = buddy_allocator->Alloc(size); - - if (ptr == nullptr) { - LOG(WARNING) << "cudaMallocHost Cannot allocate " << size - << " bytes in CUDAPinnedPlace"; - } - if (FLAGS_init_allocated_mem) { - memset(ptr, 0xEF, size); - } - return ptr; -} - -template <> -void Free(platform::CUDAPinnedPlace place, void* p) { - GetCUDAPinnedBuddyAllocator()->Free(p); -} -#endif - -size_t Usage::operator()(const platform::CPUPlace& cpu) const { - return Used(cpu); -} - -size_t Usage::operator()(const platform::CUDAPlace& gpu) const { -#ifdef PADDLE_WITH_CUDA - return Used(gpu); -#else - PADDLE_THROW("'CUDAPlace' is not supported in CPU only device."); -#endif -} - -size_t Usage::operator()(const platform::CUDAPinnedPlace& cuda_pinned) const { -#ifdef PADDLE_WITH_CUDA - return Used(cuda_pinned); -#else - PADDLE_THROW("'CUDAPinnedPlace' is not supported in CPU only device."); -#endif +std::shared_ptr AllocShared(const platform::Place& place, + size_t size, Allocator::Attr attr) { + return allocation::AllocatorFacade::Instance().AllocShared(place, size, attr); } -size_t memory_usage(const platform::Place& p) { - return boost::apply_visitor(Usage(), p); +AllocationPtr Alloc(const platform::Place& place, size_t size, + Allocator::Attr attr) { + return allocation::AllocatorFacade::Instance().Alloc(place, size, attr); } } // namespace memory diff --git a/paddle/fluid/memory/malloc.h b/paddle/fluid/memory/malloc.h index 3e6bfddd69cb16edf323d040ea5369cd551f299e..916538b2a659d7d9503fdc337a4ba84fa21f77f9 100644 --- a/paddle/fluid/memory/malloc.h +++ b/paddle/fluid/memory/malloc.h @@ -14,91 +14,21 @@ limitations under the License. */ #pragma once +#include +#include "paddle/fluid/memory/allocation/allocator.h" #include "paddle/fluid/platform/place.h" - namespace paddle { namespace memory { +using allocation::Allocation; +using allocation::Allocator; +using allocation::AllocationPtr; -/** - * \brief Allocate memory block in one place. - * - * \param[in] place Allocation place (CPU or GPU). - * \param[in] size Allocation size. - * - * \return Allocated memory block address. - * - * \note If return nullptr, it indicates memory allocation failed - * because insufficient memory in current system. When Alloc - * function is invoked, you must check the returned memory - * address is valid or not. - */ -template -void* Alloc(Place place, size_t size); - -/** - * \brief Free memory block in one place. - * - * \param[in] place Allocation place (CPU or GPU). - * \param[in] ptr Memory block address to free. - * - */ -template -void Free(Place place, void* ptr); - -/** - * \brief Total size of used memory in one place. - * - * \param[in] place Allocation place (CPU or GPU). - * - */ -template -size_t Used(Place place); - -struct Usage : public boost::static_visitor { - size_t operator()(const platform::CPUPlace& cpu) const; - size_t operator()(const platform::CUDAPlace& gpu) const; - size_t operator()(const platform::CUDAPinnedPlace& cuda_pinned) const; -}; - -size_t memory_usage(const platform::Place& p); - -/** - * \brief Free memory block in one place. - * - * \note In some cases, custom deleter is used to - * deallocate the memory automatically for - * std::unique_ptr in tensor.h. - * - */ -template -class PODDeleter { - static_assert(std::is_pod::value, "T must be POD"); - - public: - explicit PODDeleter(Place place) : place_(place) {} - void operator()(T* ptr) { Free(place_, static_cast(ptr)); } - - private: - Place place_; -}; - -/** - * \brief Free memory block in one place does not meet POD - * - * \note In some cases, custom deleter is used to - * deallocate the memory automatically for - * std::unique_ptr in tensor.h. - * - */ -template -class PlainDeleter { - public: - explicit PlainDeleter(Place place) : place_(place) {} - void operator()(T* ptr) { Free(place_, reinterpret_cast(ptr)); } +extern std::shared_ptr AllocShared( + const platform::Place& place, size_t size, + Allocator::Attr attr = Allocator::kDefault); - private: - Place place_; -}; +extern AllocationPtr Alloc(const platform::Place& place, size_t size, + Allocator::Attr attr = Allocator::kDefault); } // namespace memory } // namespace paddle diff --git a/paddle/fluid/memory/malloc_test.cc b/paddle/fluid/memory/malloc_test.cc deleted file mode 100644 index d39466ef60c3750600dea726a6570397423d42f6..0000000000000000000000000000000000000000 --- a/paddle/fluid/memory/malloc_test.cc +++ /dev/null @@ -1,198 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/fluid/memory/malloc.h" - -#include - -#include "gtest/gtest.h" -#include "paddle/fluid/memory/detail/memory_block.h" -#include "paddle/fluid/platform/cpu_info.h" -#include "paddle/fluid/platform/gpu_info.h" -#include "paddle/fluid/platform/place.h" - -inline bool is_aligned(void const *p) { - return 0 == (reinterpret_cast(p) & 0x3); -} - -size_t align(size_t size, paddle::platform::CPUPlace place) { - size += sizeof(paddle::memory::detail::MemoryBlock::Desc); - size_t alignment = paddle::platform::CpuMinChunkSize(); - size_t remaining = size % alignment; - return remaining == 0 ? size : size + (alignment - remaining); -} - -TEST(BuddyAllocator, CPUAllocation) { - void *p = nullptr; - - EXPECT_EQ(p, nullptr); - - paddle::platform::CPUPlace cpu; - p = paddle::memory::Alloc(cpu, 4096); - - EXPECT_NE(p, nullptr); - - paddle::platform::Place place = cpu; - EXPECT_EQ(paddle::memory::Used(cpu), paddle::memory::memory_usage(place)); - - paddle::memory::Free(cpu, p); -} - -TEST(BuddyAllocator, CPUMultAlloc) { - paddle::platform::CPUPlace cpu; - - std::unordered_map ps; - - size_t total_size = paddle::memory::Used(cpu); - EXPECT_EQ(total_size, 0UL); - - for (auto size : - {0, 128, 256, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304}) { - ps[paddle::memory::Alloc(cpu, size)] = size; - - // Buddy Allocator doesn't manage too large memory chunk - if (paddle::memory::Used(cpu) == total_size) continue; - - size_t aligned_size = align(size, cpu); - total_size += aligned_size; - EXPECT_EQ(total_size, paddle::memory::Used(cpu)); - } - - for (auto p : ps) { - EXPECT_EQ(is_aligned(p.first), true); - paddle::memory::Free(cpu, p.first); - - // Buddy Allocator doesn't manage too large memory chunk - if (paddle::memory::Used(cpu) == total_size) continue; - - size_t aligned_size = align(p.second, cpu); - total_size -= aligned_size; - EXPECT_EQ(total_size, paddle::memory::Used(cpu)); - } -} - -#ifdef PADDLE_WITH_CUDA - -size_t align(size_t size, paddle::platform::CUDAPlace place) { - size += sizeof(paddle::memory::detail::MemoryBlock::Desc); - size_t alignment = paddle::platform::GpuMinChunkSize(); - size_t remaining = size % alignment; - return remaining == 0 ? size : size + (alignment - remaining); -} - -TEST(BuddyAllocator, GPUAllocation) { - void *p = nullptr; - - EXPECT_EQ(p, nullptr); - - paddle::platform::CUDAPlace gpu(0); - p = paddle::memory::Alloc(gpu, 4096); - - EXPECT_NE(p, nullptr); - - paddle::platform::Place place = gpu; - EXPECT_EQ(paddle::memory::Used(gpu), paddle::memory::memory_usage(place)); - - paddle::memory::Free(gpu, p); -} - -TEST(BuddyAllocator, GPUMultAlloc) { - paddle::platform::CUDAPlace gpu; - - std::unordered_map ps; - - size_t total_size = paddle::memory::Used(gpu); - EXPECT_EQ(total_size, 0UL); - - for (auto size : - {0, 128, 256, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304}) { - ps[paddle::memory::Alloc(gpu, size)] = size; - - // Buddy Allocator doesn't manage too large memory chunk - if (paddle::memory::Used(gpu) == total_size) continue; - - size_t aligned_size = align(size, gpu); - total_size += aligned_size; - EXPECT_EQ(total_size, paddle::memory::Used(gpu)); - } - - for (auto p : ps) { - EXPECT_EQ(is_aligned(p.first), true); - paddle::memory::Free(gpu, p.first); - - // Buddy Allocator doesn't manage too large memory chunk - if (paddle::memory::Used(gpu) == total_size) continue; - - size_t aligned_size = align(p.second, gpu); - total_size -= aligned_size; - EXPECT_EQ(total_size, paddle::memory::Used(gpu)); - } -} - -size_t align(size_t size, paddle::platform::CUDAPinnedPlace place) { - size += sizeof(paddle::memory::detail::MemoryBlock::Desc); - size_t alignment = paddle::platform::CUDAPinnedMinChunkSize(); - size_t remaining = size % alignment; - return remaining == 0 ? size : size + (alignment - remaining); -} - -TEST(BuddyAllocator, CUDAPinnedAllocator) { - void *p = nullptr; - - EXPECT_EQ(p, nullptr); - - paddle::platform::CUDAPinnedPlace cpu; - p = paddle::memory::Alloc(cpu, 4096); - - EXPECT_NE(p, nullptr); - - paddle::platform::Place place = cpu; - EXPECT_EQ(paddle::memory::Used(cpu), paddle::memory::memory_usage(place)); - - paddle::memory::Free(cpu, p); -} - -TEST(BuddyAllocator, CUDAPinnedMultAllocator) { - paddle::platform::CUDAPinnedPlace cpu; - - std::unordered_map ps; - - size_t total_size = paddle::memory::Used(cpu); - EXPECT_EQ(total_size, 0UL); - - for (auto size : - {0, 128, 256, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304}) { - ps[paddle::memory::Alloc(cpu, size)] = size; - - // Buddy Allocator doesn't manage too large memory chunk - if (paddle::memory::Used(cpu) == total_size) continue; - - size_t aligned_size = align(size, cpu); - total_size += aligned_size; - EXPECT_EQ(total_size, paddle::memory::Used(cpu)); - } - - for (auto p : ps) { - EXPECT_EQ(is_aligned(p.first), true); - paddle::memory::Free(cpu, p.first); - - // Buddy Allocator doesn't manage too large memory chunk - if (paddle::memory::Used(cpu) == total_size) continue; - - size_t aligned_size = align(p.second, cpu); - total_size -= aligned_size; - EXPECT_EQ(total_size, paddle::memory::Used(cpu)); - } -} -#endif diff --git a/paddle/fluid/memory/memcpy.cc b/paddle/fluid/memory/memcpy.cc index a177d4985fd0e2cca983b6873af89c60f526b811..2a6f70a01e303aa1b608248cbeb8dcfa24837a0c 100644 --- a/paddle/fluid/memory/memcpy.cc +++ b/paddle/fluid/memory/memcpy.cc @@ -27,6 +27,8 @@ void Copy(platform::CPUPlace, void* dst, } #ifdef PADDLE_WITH_CUDA +static constexpr size_t kMaxGpuAsyncCopyBytes = 64 * 1024; // 64K + template <> void Copy( platform::CPUPlace dst_place, void* dst, platform::CUDAPlace src_place, @@ -36,6 +38,10 @@ void Copy( platform::GpuMemcpyAsync(dst, src, num, cudaMemcpyDeviceToHost, stream); } else { platform::GpuMemcpySync(dst, src, num, cudaMemcpyDeviceToHost); + // FIXME(zjl): do we really need it? + if (num <= kMaxGpuAsyncCopyBytes) { + cudaStreamSynchronize(0); + } } } @@ -48,6 +54,10 @@ void Copy( platform::GpuMemcpyAsync(dst, src, num, cudaMemcpyHostToDevice, stream); } else { platform::GpuMemcpySync(dst, src, num, cudaMemcpyHostToDevice); + // FIXME(zjl): do we really need it? + if (num <= kMaxGpuAsyncCopyBytes) { + cudaStreamSynchronize(0); + } } } diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index 0117a24c1b308e528e19b9e948cc1decd2b5eab5..975c3bfc3362413b9af0edf1a3e5b4b64635132d 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -1,387 +1,83 @@ -file(GLOB GENERAL_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*_op.cc") -string(REPLACE "_mkldnn" "" GENERAL_OPS "${GENERAL_OPS}") -string(REPLACE ".cc" "" GENERAL_OPS "${GENERAL_OPS}") -list(REMOVE_DUPLICATES GENERAL_OPS) -set(DEPS_OPS "") -set(pybind_file ${PADDLE_BINARY_DIR}/paddle/fluid/pybind/pybind.h) -file(WRITE ${pybind_file} "// Generated by the paddle/fluid/operator/CMakeLists.txt. DO NOT EDIT!\n\n") - -set(PART_CUDA_KERNEL_FILES) -function(op_library TARGET) - # op_library is a function to create op library. The interface is same as - # cc_library. But it handle split GPU/CPU code and link some common library - # for ops. - set(cc_srcs) - set(cu_srcs) - set(hip_cu_srcs) - set(miopen_hip_cc_srcs) - set(cu_cc_srcs) - set(cudnn_cu_cc_srcs) - set(CUDNN_FILE) - set(mkldnn_cc_srcs) - set(MKLDNN_FILE) - set(op_common_deps operator op_registry math_function) - set(options "") - set(oneValueArgs "") - set(multiValueArgs SRCS DEPS) - set(pybind_flag 0) - cmake_parse_arguments(op_library "${options}" "${oneValueArgs}" - "${multiValueArgs}" ${ARGN}) - - list(LENGTH op_library_SRCS op_library_SRCS_len) - if (${op_library_SRCS_len} EQUAL 0) - if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.cc) - list(APPEND cc_srcs ${TARGET}.cc) - endif() - if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.cu.cc) - list(APPEND cu_cc_srcs ${TARGET}.cu.cc) - endif() - if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.cu) - list(APPEND cu_srcs ${TARGET}.cu) - endif() - if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.part.cu) - set(PART_CUDA_KERNEL_FILES ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.part.cu - ${PART_CUDA_KERNEL_FILES} PARENT_SCOPE) - list(APPEND cu_srcs ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.part.cu) - endif() - - if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${TARGET}.hip.cu) - list(APPEND hip_cu_srcs ${TARGET}.hip.cu) - endif() - string(REPLACE "_op" "_cudnn_op" CUDNN_FILE "${TARGET}") - if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${CUDNN_FILE}.cu.cc) - list(APPEND cudnn_cu_cc_srcs ${CUDNN_FILE}.cu.cc) - endif() - if(WITH_AMD_GPU) - string(REPLACE "_op" "_miopen_op" MIOPEN_FILE "${TARGET}") - if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${MIOPEN_FILE}.hip.cc) - list(APPEND miopen_hip_cc_srcs ${MIOPEN_FILE}.hip.cc) - endif() - endif() - if(WITH_MKLDNN) - string(REPLACE "_op" "_mkldnn_op" MKLDNN_FILE "${TARGET}") - if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${MKLDNN_FILE}.cc) - list(APPEND mkldnn_cc_srcs ${MKLDNN_FILE}.cc) - endif() - endif() - else() - foreach(src ${op_library_SRCS}) - if (${src} MATCHES ".*\\.hip.cu$") - list(APPEND hip_cu_srcs ${src}) - elseif (${src} MATCHES ".*\\.cu$") - list(APPEND cu_srcs ${src}) - elseif(${src} MATCHES ".*_cudnn_op.cu.cc$") - list(APPEND cudnn_cu_cc_srcs ${src}) - elseif(WITH_AMD_GPU AND ${src} MATCHES ".*_miopen_op.hip.cc$") - list(APPEND miopen_hip_cc_srcs ${src}) - elseif(WITH_MKLDNN AND ${src} MATCHES ".*_mkldnn_op.cc$") - list(APPEND mkldnn_cc_srcs ${src}) - elseif(${src} MATCHES ".*\\.cu.cc$") - list(APPEND cu_cc_srcs ${src}) - elseif(${src} MATCHES ".*\\.cc$") - list(APPEND cc_srcs ${src}) - else() - message(FATAL_ERROR "${TARGET} Source file ${src} should only be .cc or .cu") - endif() - endforeach() - endif() - - list(LENGTH cc_srcs cc_srcs_len) - if (${cc_srcs_len} EQUAL 0) - message(FATAL_ERROR "The op library ${TARGET} should contains at least one .cc file") - endif() - if (WIN32) - # remove windows unsupported op, because windows has no nccl, no warpctc such ops. - foreach(windows_unsupport_op "nccl_op" "gen_nccl_id_op" "warpctc_op" "hierarchical_sigmoid_op" - "crf_decoding_op" "select_op" "lstmp_op" "gru_op" "fusion_gru_op" "lstm_op" "fusion_lstm_op" "cumsum_op" - "fusion_seqconv_eltadd_relu_op" "channel_send_op" "channel_create_op" "channel_close_op" "channel_recv_op" - "fusion_seqexpand_concat_fc_op" "attention_lstm_op" "fused_embedding_fc_lstm_op" "fc_op") - if ("${TARGET}" STREQUAL "${windows_unsupport_op}") - return() - endif() - endforeach() - endif(WIN32) - set(OP_LIBRARY ${TARGET} ${OP_LIBRARY} PARENT_SCOPE) +include(operators) - list(LENGTH op_library_DEPS op_library_DEPS_len) - if (${op_library_DEPS_len} GREATER 0) - set(DEPS_OPS ${TARGET} ${DEPS_OPS} PARENT_SCOPE) - endif() - if (WITH_GPU) - nv_library(${TARGET} SRCS ${cc_srcs} ${cu_cc_srcs} ${cudnn_cu_cc_srcs} ${mkldnn_cc_srcs} ${cu_srcs} DEPS ${op_library_DEPS} - ${op_common_deps}) - elseif (WITH_AMD_GPU) - hip_library(${TARGET} SRCS ${cc_srcs} ${hip_cu_srcs} ${miopen_hip_cc_srcs} ${mkldnn_cc_srcs} DEPS ${op_library_DEPS} - ${op_common_deps}) - else() - cc_library(${TARGET} SRCS ${cc_srcs} ${mkldnn_cc_srcs} DEPS ${op_library_DEPS} - ${op_common_deps}) - endif() - - # Define operators that don't need pybind here. - foreach(manual_pybind_op "compare_op" "logical_op" "nccl_op" -"tensor_array_read_write_op" "tensorrt_engine_op") - if ("${TARGET}" STREQUAL "${manual_pybind_op}") - set(pybind_flag 1) - endif() - endforeach() - - # The registration of USE_OP, please refer to paddle/fluid/framework/op_registry.h. - # Note that it's enough to just adding one operator to pybind in a *_op.cc file. - # And for detail pybind information, please see generated paddle/pybind/pybind.h. - file(READ ${TARGET}.cc TARGET_CONTENT) - string(REGEX MATCH "REGISTER_OPERATOR\\(.*REGISTER_OPERATOR\\(" multi_register "${TARGET_CONTENT}") - string(REGEX MATCH "REGISTER_OPERATOR\\([a-z0-9_]*," one_register "${multi_register}") - if (one_register STREQUAL "") - string(REPLACE "_op" "" TARGET "${TARGET}") - else () - string(REPLACE "REGISTER_OPERATOR(" "" TARGET "${one_register}") - string(REPLACE "," "" TARGET "${TARGET}") - endif() - - # pybind USE_NO_KERNEL_OP - # HACK: if REGISTER_OP_CPU_KERNEL presents the operator must have kernel - string(REGEX MATCH "REGISTER_OP_CPU_KERNEL" regex_result "${TARGET_CONTENT}") - string(REPLACE "_op" "" TARGET "${TARGET}") - if (${pybind_flag} EQUAL 0 AND regex_result STREQUAL "") - file(APPEND ${pybind_file} "USE_NO_KERNEL_OP(${TARGET});\n") - set(pybind_flag 1) - endif() - - # pybind USE_CPU_ONLY_OP - list(LENGTH cu_srcs cu_srcs_len) - list(LENGTH cu_cc_srcs cu_cc_srcs_len) - list(LENGTH mkldnn_cc_srcs mkldnn_cc_srcs_len) - list(LENGTH hip_cu_srcs hip_cu_srcs_len) - list(LENGTH miopen_hip_cc_srcs miopen_hip_cc_srcs_len) - if (${pybind_flag} EQUAL 0 AND ${mkldnn_cc_srcs_len} EQUAL 0 AND ${cu_srcs_len} EQUAL 0 AND ${cu_cc_srcs_len} EQUAL 0 AND - ${hip_cu_srcs_len} EQUAL 0 AND ${miopen_hip_cc_srcs_len} EQUAL 0) - file(APPEND ${pybind_file} "USE_CPU_ONLY_OP(${TARGET});\n") - set(pybind_flag 1) - endif() - - # pybind USE_OP_DEVICE_KERNEL for CUDNN - list(LENGTH cudnn_cu_cc_srcs cudnn_cu_cc_srcs_len) - if (WITH_GPU AND ${cudnn_cu_cc_srcs_len} GREATER 0) - file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(${TARGET}, CUDNN);\n") - endif() - - # pybind USE_OP_DEVICE_KERNEL for MIOPEN - if (WITH_AMD_GPU AND ${miopen_hip_cc_srcs_len} GREATER 0) - file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(${TARGET}, MIOPEN);\n") - endif() - - # pybind USE_OP_DEVICE_KERNEL for MKLDNN - if (WITH_MKLDNN AND ${mkldnn_cc_srcs_len} GREATER 0) - # Append first implemented MKLDNN activation operator - if (${MKLDNN_FILE} STREQUAL "activation_mkldnn_op") - file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(relu, MKLDNN);\n") - else() - file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(${TARGET}, MKLDNN);\n") - endif() - endif() - - # pybind USE_OP - if (${pybind_flag} EQUAL 0) - # NOTE(*): activation use macro to regist the kernels, set use_op manually. - if(${TARGET} STREQUAL "activation") - file(APPEND ${pybind_file} "USE_OP(relu);\n") - elseif(${TARGET} STREQUAL "fake_dequantize") - file(APPEND ${pybind_file} "USE_OP(fake_dequantize_max_abs);\n") - elseif(${TARGET} STREQUAL "fake_quantize") - file(APPEND ${pybind_file} "USE_OP(fake_quantize_abs_max);\n") - elseif(${TARGET} STREQUAL "tensorrt_engine_op") - message(STATUS "Pybind skips [tensorrt_engine_op], for this OP is only used in inference") - elseif(${TARGET} STREQUAL "fc") - # HACK: fc only have mkldnn and cpu, which would mismatch the cpu only condition - file(APPEND ${pybind_file} "USE_CPU_ONLY_OP(${TARGET});\n") - else() - file(APPEND ${pybind_file} "USE_OP(${TARGET});\n") - endif() - endif() -endfunction() +# clean cache and pybind_file content first when rebuild +unset(GLOB_OP_LIB CACHE) +unset(OP_LIBRARY CACHE) +set(pybind_file ${PADDLE_BINARY_DIR}/paddle/fluid/pybind/pybind.h CACHE INTERNAL "pybind.h file") +file(WRITE ${pybind_file} "// Generated by the paddle/fluid/operator/CMakeLists.txt. DO NOT EDIT!\n\n") add_subdirectory(math) -if (NOT WIN32) -add_subdirectory(nccl) -if(WITH_GPU) - op_library(nccl_op DEPS nccl_common) - file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(ncclAllReduce);\n") -else() - set(DEPS_OPS ${DEPS_OPS} nccl_op) -endif() -endif() # NOT WIN32 +add_subdirectory(controlflow) +add_subdirectory(csp) +add_subdirectory(detection) +add_subdirectory(elementwise) +add_subdirectory(fused) +add_subdirectory(metrics) +add_subdirectory(optimizers) +add_subdirectory(reduce_ops) +add_subdirectory(sequence_ops) -set(DISTRIBUTE_DEPS "") if(WITH_DISTRIBUTE) add_subdirectory(distributed) - set(DISTRIBUTE_DEPS "") - if(WITH_GRPC) - set(DISTRIBUTE_DEPS sendrecvop_grpc grpc++_unsecure grpc_unsecure gpr cares zlib protobuf node) - else() - set(DISTRIBUTE_DEPS sendrecvop_brpc brpc leveldb snappystream snappy protobuf ssl crypto zlib node) - if(WITH_BRPC_RDMA) - find_library(IBVERBS_LIBRARY NAMES ibverbs) - ADD_LIBRARY(ibverbs SHARED IMPORTED GLOBAL) - SET_PROPERTY(TARGET ibverbs PROPERTY IMPORTED_LOCATION ${IBVERBS_LIBRARY}) - - - find_library(RDMACM_LIBRARY NAMES rdmacm) - ADD_LIBRARY(rdmacm SHARED IMPORTED GLOBAL) - SET_PROPERTY(TARGET rdmacm PROPERTY IMPORTED_LOCATION ${RDMACM_LIBRARY}) - - set(DISTRIBUTE_DEPS ${DISTRIBUTE_DEPS} ibverbs rdmacm) - endif() - endif() - - set(DISTRIBUTE_COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor") - foreach(dist_op "prefetch_op" "checkpoint_notify_op" "listen_and_serv_op" "send_op" "recv_op" "send_barrier_op" "fetch_barrier_op") - op_library(${dist_op} DEPS ${DISTRIBUTE_DEPS}) - set_source_files_properties(${dist_op}.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) - endforeach() + add_subdirectory(distributed_ops) +endif() - #set_source_files_properties(send_recv_op_test.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) - #cc_test(test_send_recv SRCS send_recv_op_test.cc DEPS prefetch_op send_op - # listen_and_serv_op sum_op executor SERIAL) - if(WITH_GPU AND NOT WIN32) - set_source_files_properties(test_send_nccl_id.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) - cc_test(test_send_nccl_id SRCS test_send_nccl_id.cc DEPS listen_and_serv_op ${DISTRIBUTE_DEPS} executor SERIAL) - if(WITH_GRPC) - op_library(gen_nccl_id_op DEPS nccl_common sendrecvop_grpc) - else() - op_library(gen_nccl_id_op DEPS nccl_common sendrecvop_brpc) - endif() - set_source_files_properties(gen_nccl_id_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) - else() - set(DEPS_OPS ${DEPS_OPS} gen_nccl_id_op) - endif() # WITH_GPU AND NOT WIN32 -else() - set(DEPS_OPS ${DEPS_OPS} checkpoint_notify_op prefetch_op recv_op listen_and_serv_op send_op send_barrier_op fetch_barrier_op gen_nccl_id_op) +if (NOT WIN32) + add_subdirectory(reader) endif() -op_library(cross_entropy_op DEPS cross_entropy) -if(WITH_GPU) - op_library(softmax_with_cross_entropy_op DEPS cross_entropy softmax cub) - op_library(sequence_softmax_op DEPS cub) -else() - op_library(softmax_with_cross_entropy_op DEPS cross_entropy softmax) +if (NOT WIN32) + add_subdirectory(nccl) endif() -op_library(softmax_op DEPS softmax) if (WITH_GPU AND TENSORRT_FOUND) - op_library(tensorrt_engine_op DEPS tensorrt_engine tensorrt_converter) - file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(tensorrt_engine);\n") - nv_test(test_tensorrt_engine_op SRCS tensorrt_engine_op_test.cc - DEPS tensorrt_engine_op - analysis) -else() - set(DEPS_OPS ${DEPS_OPS} tensorrt_engine_op) + add_subdirectory(tensorrt) endif() -op_library(hash_op DEPS xxhash) -op_library(clip_by_norm_op DEPS selected_rows_functor selected_rows) -op_library(sum_op DEPS selected_rows_functor) -op_library(sgd_op DEPS selected_rows_functor) -op_library(print_op DEPS lod_tensor) -op_library(adagrad_op DEPS selected_rows_functor) -op_library(maxout_op DEPS maxouting) -op_library(unpool_op DEPS unpooling) -op_library(pool_op DEPS pooling) -op_library(pool_with_index_op DEPS pooling) -op_library(lod_rank_table_op DEPS lod_rank_table) -op_library(lod_tensor_to_array_op DEPS lod_rank_table_op) -op_library(array_to_lod_tensor_op DEPS lod_rank_table_op) -op_library(max_sequence_len_op DEPS lod_rank_table) -op_library(sequence_conv_op DEPS context_project) -op_library(sequence_pool_op DEPS sequence_pooling) -if (NOT WIN32) - op_library(lstm_op DEPS sequence2batch lstm_compute) - op_library(hierarchical_sigmoid_op DEPS matrix_bit_code) - op_library(lstmp_op DEPS sequence2batch lstm_compute) - op_library(gru_op DEPS sequence2batch gru_compute) -endif(NOT WIN32) -op_library(recurrent_op DEPS executor) -op_library(cos_sim_op DEPS cos_sim_functor) -op_library(parallel_do_op DEPS executor) -op_library(unsqueeze_op DEPS reshape_op) -op_library(squeeze_op DEPS reshape_op) -op_library(flatten_op DEPS reshape_op) -op_library(sequence_pad_op DEPS sequence_padding) -op_library(unstack_op DEPS stack_op) -op_library(fake_quantize_op DEPS memory) -op_library(nce_op DEPS sampler) -if (NOT WIN32) -op_library(crf_decoding_op DEPS jit_kernel) -op_library(fusion_lstm_op DEPS jit_kernel) -endif(NOT WIN32) -if (WITH_GPU) - op_library(conv_op DEPS vol2col depthwise_conv im2col) - op_library(layer_norm_op DEPS cub) - op_library(reduce_mean_op DEPS cub) - op_library(affine_channel_op DEPS cub) -else() - op_library(conv_op DEPS vol2col im2col) -endif() -op_library(conv_transpose_op DEPS vol2col im2col) -# FIXME(typhoonzero): save/load depends lodtensor serialization functions -op_library(save_op DEPS lod_tensor) -op_library(load_op DEPS lod_tensor) -op_library(save_combine_op DEPS lod_tensor) -op_library(load_combine_op DEPS lod_tensor) -op_library(concat_op DEPS concat_and_split) -op_library(tensor_array_to_tensor_op DEPS concat_op) +register_operators(EXCLUDES warpctc_op conv_fusion_op) -set(DEPS_OPS ${DEPS_OPS} warpctc_op) +# warpctc_cudnn need cudnn 7 above if (WITH_GPU) if (${CUDNN_MAJOR_VERSION} VERSION_LESS 7) op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale SRCS warpctc_op.cc warpctc_op.cu.cc) + else() + op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale) endif() + op_library(conv_fusion_op) + file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(conv2d_fusion);\n") +else() + op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale) endif() -op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale) - -list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS}) - -foreach(src ${GENERAL_OPS}) - op_library(${src}) -endforeach() - -file(APPEND ${pybind_file} "USE_OP(less_than);\nUSE_OP(logical_and);\nUSE_NO_KERNEL_OP(read_from_array);\n") +set(COMMON_OP_DEPS "") +set(COMMON_OP_DEPS ${COMMON_OP_DEPS} xxhash selected_rows_functor selected_rows lod_tensor maxouting unpooling pooling lod_rank_table context_project sequence_pooling executor dynload_warpctc sequence_padding sequence_scale cos_sim_functor memory jit_kernel concat_and_split cross_entropy softmax vol2col im2col sampler) if (NOT WIN32) -add_subdirectory(reader) -endif(NOT WIN32) -foreach(src ${READER_LIBRARY}) - set(OP_LIBRARY ${src} ${OP_LIBRARY}) -endforeach() + set(COMMON_OP_DEPS ${COMMON_OP_DEPS} sequence2batch lstm_compute matrix_bit_code gru_compute activation_functions) +endif() +if (WITH_GPU) + set(COMMON_OP_DEPS ${COMMON_OP_DEPS} depthwise_conv cub) +endif() -add_subdirectory(detection) -foreach(src ${DETECTION_LIBRARY}) - set(OP_LIBRARY ${src} ${OP_LIBRARY}) -endforeach() +# FIXME(typhoonzero): operator deps may not needed. +# op_library(lod_tensor_to_array_op DEPS lod_rank_table_op) +# op_library(array_to_lod_tensor_op DEPS lod_rank_table_op) +# op_library(unsqueeze_op DEPS reshape_op) +# op_library(squeeze_op DEPS reshape_op) +# op_library(flatten_op DEPS reshape_op) +# op_library(unstack_op DEPS stack_op) +# op_library(tensor_array_to_tensor_op DEPS concat_op) -set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library") -set(GLOB_DISTRIBUTE_DEPS ${DISTRIBUTE_DEPS} CACHE INTERNAL "distributed dependency") +set(OPERATOR_DEPS ${OPERATOR_DEPS} ${COMMON_OP_DEPS}) +set(GLOB_OPERATOR_DEPS ${OPERATOR_DEPS} CACHE INTERNAL "Global Op dependencies") cc_test(gather_test SRCS gather_test.cc DEPS tensor) -cc_test(scatter_test SRCS scatter_test.cc DEPS tensor) +cc_test(scatter_test SRCS scatter_test.cc DEPS tensor math_function) cc_test(beam_search_decode_op_test SRCS beam_search_decode_op_test.cc DEPS lod_tensor) cc_test(beam_search_op_test SRCS beam_search_op_test.cc DEPS lod_tensor beam_search_op) cc_test(strided_memcpy_test SRCS strided_memcpy_test.cc DEPS tensor memory) cc_test(save_load_op_test SRCS save_load_op_test.cc DEPS save_op load_op) cc_test(save_load_combine_op_test SRCS save_load_combine_op_test.cc DEPS save_combine_op load_combine_op) -if(NOT WIN32) - nv_test(nccl_op_test SRCS nccl_op_test.cu.cc DEPS nccl_op gpu_info device_context) -endif() nv_test(dropout_op_test SRCS dropout_op_test.cc DEPS dropout_op tensor) -if(WITH_GPU) - foreach(CUDA_KERNEL_FILE ${PART_CUDA_KERNEL_FILES}) - file(READ ${CUDA_KERNEL_FILE} TARGET_CONTENT) - string(REGEX MATCH "REGISTER_OP_CUDA_KERNEL\\(\\n?([^,]+),.*" MATCHED ${TARGET_CONTENT}) - if (MATCHED) - string(STRIP ${CMAKE_MATCH_1} MATCHED) - file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(${MATCHED}, CUDA);\n") - endif() - endforeach() -endif() +set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library") diff --git a/paddle/fluid/operators/beam_search_op_test.cc b/paddle/fluid/operators/beam_search_op_test.cc index c4f4b478fbfc87e4178155132781214575c1e6b0..501807e7f3e04ae75386bfa00797d244cd9eac9c 100644 --- a/paddle/fluid/operators/beam_search_op_test.cc +++ b/paddle/fluid/operators/beam_search_op_test.cc @@ -54,7 +54,8 @@ void CreateInput(LoDTensor* ids, LoDTensor* scores) { } } -TEST(beam_search_op, run) { +// It seems that beam_search_op has bugs. +TEST(DISABLED_beam_search_op, run) { CPUPlace place; LoDTensor ids, scores; CreateInput(&ids, &scores); diff --git a/paddle/fluid/operators/controlflow/CMakeLists.txt b/paddle/fluid/operators/controlflow/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..b1c2ee22951a3881b4ce5b82f9ff7eb01fde6e9e --- /dev/null +++ b/paddle/fluid/operators/controlflow/CMakeLists.txt @@ -0,0 +1,4 @@ +include(operators) +register_operators() + +file(APPEND ${pybind_file} "USE_OP(less_than);\nUSE_OP(logical_and);\nUSE_NO_KERNEL_OP(read_from_array);\n") diff --git a/paddle/fluid/operators/compare_op.cc b/paddle/fluid/operators/controlflow/compare_op.cc similarity index 98% rename from paddle/fluid/operators/compare_op.cc rename to paddle/fluid/operators/controlflow/compare_op.cc index f40b1ba338d429c248103eeb930ac7e1bb690218..488ca7fe95f5119c59b861011993a379d08008ba 100644 --- a/paddle/fluid/operators/compare_op.cc +++ b/paddle/fluid/operators/controlflow/compare_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/compare_op.h" +#include "paddle/fluid/operators/controlflow/compare_op.h" #include #include "paddle/fluid/framework/op_registry.h" diff --git a/paddle/fluid/operators/compare_op.cu b/paddle/fluid/operators/controlflow/compare_op.cu similarity index 94% rename from paddle/fluid/operators/compare_op.cu rename to paddle/fluid/operators/controlflow/compare_op.cu index 1bf85c64fb5b4d79c62118959fd72b13ed1c63ed..b1f306358359764b919f9e570cf44f9733a7d178 100644 --- a/paddle/fluid/operators/compare_op.cu +++ b/paddle/fluid/operators/controlflow/compare_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/compare_op.h" +#include "paddle/fluid/operators/controlflow/compare_op.h" REGISTER_COMPARE_KERNEL(less_than, CUDA, paddle::operators::LessThanFunctor); REGISTER_COMPARE_KERNEL(less_equal, CUDA, paddle::operators::LessEqualFunctor); diff --git a/paddle/fluid/operators/compare_op.h b/paddle/fluid/operators/controlflow/compare_op.h similarity index 97% rename from paddle/fluid/operators/compare_op.h rename to paddle/fluid/operators/controlflow/compare_op.h index 1cbabdaf6767815c1fedba0eabec9b5de678e047..b7529e4ae632d31524846d9d5aa4b1883f4509a1 100644 --- a/paddle/fluid/operators/compare_op.h +++ b/paddle/fluid/operators/controlflow/compare_op.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/operators/elementwise_op_function.h" +#include "paddle/fluid/operators/elementwise/elementwise_op_function.h" #include "paddle/fluid/platform/transform.h" namespace paddle { diff --git a/paddle/fluid/operators/conditional_block_op.cc b/paddle/fluid/operators/controlflow/conditional_block_op.cc similarity index 100% rename from paddle/fluid/operators/conditional_block_op.cc rename to paddle/fluid/operators/controlflow/conditional_block_op.cc diff --git a/paddle/fluid/operators/feed_op.cc b/paddle/fluid/operators/controlflow/feed_op.cc similarity index 100% rename from paddle/fluid/operators/feed_op.cc rename to paddle/fluid/operators/controlflow/feed_op.cc diff --git a/paddle/fluid/operators/fetch_op.cc b/paddle/fluid/operators/controlflow/fetch_op.cc similarity index 100% rename from paddle/fluid/operators/fetch_op.cc rename to paddle/fluid/operators/controlflow/fetch_op.cc diff --git a/paddle/fluid/operators/get_places_op.cc b/paddle/fluid/operators/controlflow/get_places_op.cc similarity index 100% rename from paddle/fluid/operators/get_places_op.cc rename to paddle/fluid/operators/controlflow/get_places_op.cc diff --git a/paddle/fluid/operators/logical_op.cc b/paddle/fluid/operators/controlflow/logical_op.cc similarity index 99% rename from paddle/fluid/operators/logical_op.cc rename to paddle/fluid/operators/controlflow/logical_op.cc index 26970db8d2af62bb06fce4eb1a1f21fd41617bd1..6446cab5ec5f889dccaef90484476e55c4852dee 100644 --- a/paddle/fluid/operators/logical_op.cc +++ b/paddle/fluid/operators/controlflow/logical_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/logical_op.h" +#include "paddle/fluid/operators/controlflow/logical_op.h" #include #include "paddle/fluid/framework/op_registry.h" diff --git a/paddle/fluid/operators/logical_op.cu b/paddle/fluid/operators/controlflow/logical_op.cu similarity index 94% rename from paddle/fluid/operators/logical_op.cu rename to paddle/fluid/operators/controlflow/logical_op.cu index 7ffe4dfc268b1ad3894dd54cb13c2f424818aa05..7ca54b488bfbb260c422941b82145f092a150be7 100644 --- a/paddle/fluid/operators/logical_op.cu +++ b/paddle/fluid/operators/controlflow/logical_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/logical_op.h" +#include "paddle/fluid/operators/controlflow/logical_op.h" REGISTER_BINARY_LOGICAL_KERNEL(logical_and, CUDA, paddle::operators::LogicalAndFunctor); diff --git a/paddle/fluid/operators/logical_op.h b/paddle/fluid/operators/controlflow/logical_op.h similarity index 100% rename from paddle/fluid/operators/logical_op.h rename to paddle/fluid/operators/controlflow/logical_op.h diff --git a/paddle/fluid/operators/parallel_do_op.cc b/paddle/fluid/operators/controlflow/parallel_do_op.cc similarity index 100% rename from paddle/fluid/operators/parallel_do_op.cc rename to paddle/fluid/operators/controlflow/parallel_do_op.cc diff --git a/paddle/fluid/operators/tensor_array_read_write_op.cc b/paddle/fluid/operators/controlflow/tensor_array_read_write_op.cc similarity index 100% rename from paddle/fluid/operators/tensor_array_read_write_op.cc rename to paddle/fluid/operators/controlflow/tensor_array_read_write_op.cc diff --git a/paddle/fluid/operators/while_op.cc b/paddle/fluid/operators/controlflow/while_op.cc similarity index 100% rename from paddle/fluid/operators/while_op.cc rename to paddle/fluid/operators/controlflow/while_op.cc diff --git a/paddle/fluid/operators/conv_cudnn_op.cu.cc b/paddle/fluid/operators/conv_cudnn_op.cu.cc index 3a4086274d8a4bf6725df9f3195cec2446ceae6c..42c2b3a24c116f92f4dd6ad0966dcb963ec702d6 100644 --- a/paddle/fluid/operators/conv_cudnn_op.cu.cc +++ b/paddle/fluid/operators/conv_cudnn_op.cu.cc @@ -43,26 +43,6 @@ using DataLayout = platform::DataLayout; template using ScalingParamType = typename platform::CudnnDataType::ScalingParamType; -static constexpr char kCUDNNFwdAlgoCache[] = "kCUDNNFwdAlgoCache"; -static constexpr char kCUDNNBwdDataAlgoCache[] = "kCUDNNBwdDataAlgoCache"; -static constexpr char kCUDNNBwdFilterAlgoCache[] = "kCUDNNBwdFilterAlgoCache"; - -static constexpr size_t kCONV_CUDNN_WORKSPACE_LIMIT_BYTES = - static_cast(1024) * 1024 * 1024; - -#if CUDNN_VERSION_MIN(6, 0, 5) -static constexpr size_t kNUM_CUDNN_FWD_ALGS = CUDNN_CONVOLUTION_FWD_ALGO_COUNT; -static constexpr size_t kNUM_CUDNN_BWD_FILTER_ALGS = - CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT; -static constexpr size_t kNUM_CUDNN_BWD_DATA_ALGS = - CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT; -#else -// cuDNN v5 has no CUDNN_CONVOLUTION_FWD_ALGO_COUNT etc. -static constexpr size_t kNUM_CUDNN_FWD_ALGS = 7; -static constexpr size_t kNUM_CUDNN_BWD_FILTER_ALGS = 4; -static constexpr size_t kNUM_CUDNN_BWD_DATA_ALGS = 5; -#endif - template class CUDNNConvOpKernel : public framework::OpKernel { public: diff --git a/paddle/fluid/operators/conv_cudnn_op_cache.h b/paddle/fluid/operators/conv_cudnn_op_cache.h index 4b534321f746d5620005743eb8d45b71259156dd..92d394eb3c5aeb84605179cb2b5106f56a13f88e 100644 --- a/paddle/fluid/operators/conv_cudnn_op_cache.h +++ b/paddle/fluid/operators/conv_cudnn_op_cache.h @@ -17,10 +17,31 @@ limitations under the License. */ #include #include #include +#include "paddle/fluid/platform/cudnn_helper.h" namespace paddle { namespace operators { +static constexpr char kCUDNNFwdAlgoCache[] = "kCUDNNFwdAlgoCache"; +static constexpr char kCUDNNBwdDataAlgoCache[] = "kCUDNNBwdDataAlgoCache"; +static constexpr char kCUDNNBwdFilterAlgoCache[] = "kCUDNNBwdFilterAlgoCache"; + +static constexpr size_t kCONV_CUDNN_WORKSPACE_LIMIT_BYTES = + static_cast(1024) * 1024 * 1024; + +#if CUDNN_VERSION_MIN(6, 0, 5) +static constexpr size_t kNUM_CUDNN_FWD_ALGS = CUDNN_CONVOLUTION_FWD_ALGO_COUNT; +static constexpr size_t kNUM_CUDNN_BWD_FILTER_ALGS = + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT; +static constexpr size_t kNUM_CUDNN_BWD_DATA_ALGS = + CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT; +#else +// cuDNN v5 has no CUDNN_CONVOLUTION_FWD_ALGO_COUNT etc. +static constexpr size_t kNUM_CUDNN_FWD_ALGS = 7; +static constexpr size_t kNUM_CUDNN_BWD_FILTER_ALGS = 4; +static constexpr size_t kNUM_CUDNN_BWD_DATA_ALGS = 5; +#endif + template class AlgorithmsCache { public: diff --git a/paddle/fluid/operators/conv_fusion_op.cc b/paddle/fluid/operators/conv_fusion_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..9bdedb10e0b1bc2d45c084bbc070875117675b75 --- /dev/null +++ b/paddle/fluid/operators/conv_fusion_op.cc @@ -0,0 +1,48 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include +#include "paddle/fluid/operators/conv_op.h" +#ifdef PADDLE_WITH_CUDA +#include "paddle/fluid/platform/cudnn_helper.h" +#endif + +namespace paddle { +namespace operators { + +// This fused conv follows the equation: +// y = act ( alpha1 * conv(x) + alpha2 * z + bias ). +// here, y is Output, +// x is Input, +// z is ResidualData, +// bias is Bias +class Conv2DFusionOpMaker : public Conv2DOpMaker { + protected: + void Apply() override { + AddAttr( + "activation", + "The activation type can be 'identity', 'sigmoid', 'relu', 'relu6' " + "'relux' , 'tanh', 'band_pass'") + .SetDefault("relu"); + } +}; +// TODO(qingqing): add gradient operator for conv2d_fusion + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(conv2d_fusion, ops::ConvOp, ops::Conv2DFusionOpMaker, + ops::ConvOpInferVarType, paddle::framework::EmptyGradOpMaker); diff --git a/paddle/fluid/operators/conv_fusion_op.cu.cc b/paddle/fluid/operators/conv_fusion_op.cu.cc new file mode 100644 index 0000000000000000000000000000000000000000..bd1041ce0836014dc73fabd4a3896243a943bd38 --- /dev/null +++ b/paddle/fluid/operators/conv_fusion_op.cu.cc @@ -0,0 +1,187 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/conv_cudnn_op_cache.h" +#include "paddle/fluid/platform/cudnn_helper.h" + +DECLARE_uint64(conv_workspace_size_limit); +DECLARE_bool(cudnn_exhaustive_search); + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +using ScopedTensorDescriptor = platform::ScopedTensorDescriptor; +using ScopedFilterDescriptor = platform::ScopedFilterDescriptor; +using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor; +using ScopedActivationDescriptor = platform::ScopedActivationDescriptor; +using DataLayout = platform::DataLayout; +template +using ScalingParamType = typename platform::CudnnDataType::ScalingParamType; + +template +class CUDNNConvFusionOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto& dev_ctx = ctx.template device_context(); + auto* input = ctx.Input("Input"); + auto* filter = ctx.Input("Filter"); + auto* bias = ctx.Input("Bias"); + PADDLE_ENFORCE(bias, "The bias should not be null."); + auto* residual = ctx.Input("ResidualData"); + auto* output = ctx.Output("Output"); + + std::vector strides = ctx.Attr>("strides"); + std::vector paddings = ctx.Attr>("paddings"); + std::vector dilations = ctx.Attr>("dilations"); + const std::string activation = ctx.Attr("activation"); + int groups = ctx.Attr("groups"); + int64_t user_workspace_size = + static_cast(ctx.Attr("workspace_size_MB")); + bool exhaustive_search = + FLAGS_cudnn_exhaustive_search || ctx.Attr("exhaustive_search"); + + const T* input_data = input->data(); + const T* filter_data = filter->data(); + const T* bias_data = bias->data(); + T* output_data = output->mutable_data(ctx.GetPlace()); + const T* residual_data = residual ? residual->data() : output_data; + + // ------------------- cudnn descriptors --------------------- + ScopedTensorDescriptor input_desc; + ScopedTensorDescriptor output_desc; + ScopedFilterDescriptor filter_desc; + ScopedTensorDescriptor bias_desc; + ScopedConvolutionDescriptor conv_desc; + ScopedActivationDescriptor act_desc; + DataLayout layout = DataLayout::kNCHW; + if (input->dims().size() == 5) { + layout = DataLayout::kNCDHW; + } + + cudnnConvolutionDescriptor_t cudnn_conv_desc = + conv_desc.descriptor(paddings, strides, dilations); + CUDNN_ENFORCE(platform::dynload::cudnnSetConvolutionGroupCount( + cudnn_conv_desc, groups)); + + cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor( + layout, framework::vectorize2int(input->dims())); + cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor( + layout, framework::vectorize2int(output->dims())); + cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor( + layout, framework::vectorize2int(filter->dims())); + // Now only support NCHW + std::vector bias_dim = {1, static_cast(output->dims()[1]), 1, 1}; + cudnnTensorDescriptor_t cudnn_bias_desc = + bias_desc.descriptor(layout, bias_dim); + cudnnActivationDescriptor_t cudnn_act_desc = + act_desc.descriptor(activation); + + // ------------------- cudnn conv workspace --------------------- + size_t workspace_size_in_bytes; // final workspace to allocate. + size_t workspace_size_limit = kCONV_CUDNN_WORKSPACE_LIMIT_BYTES; + if (FLAGS_conv_workspace_size_limit > 0 || user_workspace_size > 0) { + int64_t max_user_size = + std::max(static_cast(FLAGS_conv_workspace_size_limit), + user_workspace_size); + workspace_size_limit = max_user_size * 1024 * 1024; + } + + // ------------------- cudnn conv algorithm --------------------- + cudnnConvolutionFwdAlgo_t algo; + auto handle = dev_ctx.cudnn_handle(); + auto workspace_handle = dev_ctx.cudnn_workspace_handle(); + + CUDNN_ENFORCE(platform::dynload::cudnnSetConvolutionMathType( + cudnn_conv_desc, CUDNN_DEFAULT_MATH)); + + auto x_dims = framework::vectorize(input->dims()); + auto f_dims = framework::vectorize(filter->dims()); + if (activation == "identity") { + // Only the CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM algo is + // enabled with CUDNN_ACTIVATION_IDENTITY in cuDNN lib. + algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM; + } else if (!exhaustive_search) { + CUDNN_ENFORCE(platform::dynload::cudnnGetConvolutionForwardAlgorithm( + handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc, + cudnn_output_desc, CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT, + workspace_size_limit, &algo)); + VLOG(3) << "cuDNN forward algo " << algo; + } else { + AlgorithmsCache* algo_cache = nullptr; + if (ctx.scope().FindVar(kCUDNNFwdAlgoCache)) { + algo_cache = + ctx.scope() + .FindVar(kCUDNNFwdAlgoCache) + ->GetMutable>(); + } else { + algo_cache = + const_cast(ctx.scope()) + .Var(kCUDNNFwdAlgoCache) + ->GetMutable>(); + } + algo = algo_cache->GetAlgorithm( + x_dims, f_dims, strides, paddings, dilations, 0, [&]() { + int returned_algo_count; + std::array + fwd_perf_stat; + auto cudnn_find_func = [&](void* cudnn_workspace) { + CUDNN_ENFORCE( + platform::dynload::cudnnFindConvolutionForwardAlgorithmEx( + handle, cudnn_input_desc, input_data, cudnn_filter_desc, + filter_data, cudnn_conv_desc, cudnn_output_desc, + output_data, kNUM_CUDNN_FWD_ALGS, &returned_algo_count, + fwd_perf_stat.data(), cudnn_workspace, + workspace_size_limit)); + }; + workspace_handle.RunFunc(cudnn_find_func, workspace_size_limit); + VLOG(3) << "Perf result: (algo: stat, time, memory)"; + for (int i = 0; i < returned_algo_count; ++i) { + const auto& stat = fwd_perf_stat[i]; + VLOG(3) << stat.algo << ": " << stat.status << " " << stat.time + << " " << stat.memory; + } + return fwd_perf_stat[0].algo; + }); + VLOG(3) << "choose algo " << algo; + } + + CUDNN_ENFORCE(platform::dynload::cudnnGetConvolutionForwardWorkspaceSize( + handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc, + cudnn_output_desc, algo, &workspace_size_in_bytes)); + PADDLE_ENFORCE_LE(workspace_size_in_bytes, workspace_size_limit, + "workspace_size to be allocated exceeds the limit"); + + // ------------------- cudnn conv+bias+act forward -------------------- + ScalingParamType alpha1 = 1.0f; + ScalingParamType alpha2 = residual ? 1.0f : 0.0f; + auto cudnn_func = [&](void* cudnn_workspace) { + CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBiasActivationForward( + handle, &alpha1, cudnn_input_desc, input_data, cudnn_filter_desc, + filter_data, cudnn_conv_desc, algo, cudnn_workspace, + workspace_size_in_bytes, &alpha2, cudnn_output_desc, residual_data, + cudnn_bias_desc, bias_data, cudnn_act_desc, cudnn_output_desc, + output_data)); + }; + workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL(conv2d_fusion, ops::CUDNNConvFusionOpKernel, + ops::CUDNNConvFusionOpKernel); diff --git a/paddle/fluid/operators/conv_mkldnn_op.cc b/paddle/fluid/operators/conv_mkldnn_op.cc index c3c7c90f150a472e0f19626d71bc1c25643d0ca6..9e2e2cf818000d9181447a0aa6b4ac4878781f35 100644 --- a/paddle/fluid/operators/conv_mkldnn_op.cc +++ b/paddle/fluid/operators/conv_mkldnn_op.cc @@ -12,11 +12,11 @@ See the License for the specific language governing permissions and limitations under the License. */ +#include "paddle/fluid/framework/data_layout_transform.h" +#include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/operators/conv_op.h" #include "paddle/fluid/platform/mkldnn_helper.h" -#include "paddle/fluid/framework/data_layout_transform.h" - namespace paddle { namespace operators { @@ -428,8 +428,9 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { "same dimension sizes"); if (residual_param->format() != handler.GetDstFormat()) { - auto output_data = - output->mutable_data(ctx.GetPlace(), handler.GetDstMemorySize()); + auto output_data = output->mutable_data( + ctx.GetPlace(), ::paddle::memory::Allocator::kDefault, + handler.GetDstMemorySize()); auto residual_data_tz = paddle::framework::vectorize2int(residual_param->dims()); auto residual_data_type = @@ -449,8 +450,9 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { handler.AcquireDstMemoryFromPrimitive(to_void_cast(output_data)); } } else { - auto output_data = - output->mutable_data(ctx.GetPlace(), handler.GetDstMemorySize()); + auto output_data = output->mutable_data( + ctx.GetPlace(), paddle::memory::Allocator::kDefault, + handler.GetDstMemorySize()); dst_memory_p = handler.AcquireDstMemoryFromPrimitive(to_void_cast(output_data)); } @@ -692,7 +694,8 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel { user_diff_dst_memory_p, pipeline); const size_t size = handler.GetDiffWeightsMemorySize(); - filter_grad_data = filter_grad->mutable_data(ctx.GetPlace(), size); + filter_grad_data = filter_grad->mutable_data( + ctx.GetPlace(), paddle::memory::Allocator::kDefault, size); auto diff_weights_memory_p = handler.AcquireDiffWeightsMemoryFromWeightsPrimitive( @@ -717,7 +720,8 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel { pipeline); const size_t size = handler.GetDiffSourceMemorySize(); - input_grad_data = input_grad->mutable_data(ctx.GetPlace(), size); + input_grad_data = input_grad->mutable_data( + ctx.GetPlace(), paddle::memory::Allocator::kDefault, size); auto diff_src_memory_p = handler.AcquireDiffSrcMemoryFromDataPrimitive( reinterpret_cast(input_grad_data)); diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc index 1ac4bef615a546ec97aefd83653c649f187caba0..342525be49e28f1785e25d4daad38c3c81b4774f 100644 --- a/paddle/fluid/operators/conv_op.cc +++ b/paddle/fluid/operators/conv_op.cc @@ -225,17 +225,9 @@ $$ W_{out}= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]}+ 1 $$ )DOC"); + Apply(); } -class ConvOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput { - protected: - std::unordered_map GetInputOutputWithSameType() - const override { - return std::unordered_map{ - {"Input", /*->*/ "Output"}}; - } -}; - void Conv3DOpMaker::Make() { AddInput( "Input", @@ -334,6 +326,7 @@ Example: W_{out}= \frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{ strides[2]}+ 1 $$ )DOC"); + Apply(); } void ConvOpGrad::InferShape(framework::InferShapeContext* ctx) const { diff --git a/paddle/fluid/operators/conv_op.h b/paddle/fluid/operators/conv_op.h index ef76106f17218a03d24ebc0eca43dbb0ae935093..e69814001e4da5d10e51ee57c1dbe291338b8b49 100644 --- a/paddle/fluid/operators/conv_op.h +++ b/paddle/fluid/operators/conv_op.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" @@ -60,12 +61,27 @@ inline bool IsExpand(const std::vector& filter_dim, // operator implementations can reuse the code. class Conv2DOpMaker : public framework::OpProtoAndCheckerMaker { public: - void Make() override; + void Make() final; + + protected: + virtual void Apply() {} }; class Conv3DOpMaker : public framework::OpProtoAndCheckerMaker { public: - void Make() override; + void Make() final; + + protected: + virtual void Apply() {} +}; + +class ConvOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput { + protected: + std::unordered_map GetInputOutputWithSameType() + const override { + return std::unordered_map{ + {"Input", /*->*/ "Output"}}; + } }; class ConvOp : public framework::OperatorWithKernel { diff --git a/paddle/fluid/operators/csp/CMakeLists.txt b/paddle/fluid/operators/csp/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..5d468316e8eacb73c4a4ce81c784880bb5e46c2d --- /dev/null +++ b/paddle/fluid/operators/csp/CMakeLists.txt @@ -0,0 +1,2 @@ +include(operators) +register_operators() diff --git a/paddle/fluid/operators/go_op.cc b/paddle/fluid/operators/csp/go_op.cc similarity index 100% rename from paddle/fluid/operators/go_op.cc rename to paddle/fluid/operators/csp/go_op.cc diff --git a/paddle/fluid/operators/detection/CMakeLists.txt b/paddle/fluid/operators/detection/CMakeLists.txt index e5c3f0eeb385e1a15fdbb12a989603996420efe3..58f6f48467310ffb2429ad440f58fcd823edf079 100644 --- a/paddle/fluid/operators/detection/CMakeLists.txt +++ b/paddle/fluid/operators/detection/CMakeLists.txt @@ -40,4 +40,8 @@ endif() detection_library(roi_perspective_transform_op SRCS roi_perspective_transform_op.cc roi_perspective_transform_op.cu) #Export local libraries to parent -set(DETECTION_LIBRARY ${LOCAL_DETECTION_LIBS} PARENT_SCOPE) +# set(DETECTION_LIBRARY ${LOCAL_DETECTION_LIBS} PARENT_SCOPE) + +foreach(src ${LOCAL_DETECTION_LIBS}) + set(OP_LIBRARY ${src} ${OP_LIBRARY} CACHE INTERNAL "op libs") +endforeach() diff --git a/paddle/fluid/operators/detection/box_coder_op.cc b/paddle/fluid/operators/detection/box_coder_op.cc index d0f95f727fdbc82777147e3e8ada6ad4f7a35e60..06fbb9815c52ea69e3aa9e893512e039853b9514 100644 --- a/paddle/fluid/operators/detection/box_coder_op.cc +++ b/paddle/fluid/operators/detection/box_coder_op.cc @@ -30,27 +30,30 @@ class BoxCoderOp : public framework::OperatorWithKernel { auto prior_box_dims = ctx->GetInputDim("PriorBox"); auto target_box_dims = ctx->GetInputDim("TargetBox"); - PADDLE_ENFORCE_EQ(prior_box_dims.size(), 2, - "The rank of Input of PriorBoxVar must be 2"); - PADDLE_ENFORCE_EQ(prior_box_dims[1], 4, "The shape of PriorBox is [N, 4]"); - if (ctx->HasInput("PriorBoxVar")) { - auto prior_box_var_dims = ctx->GetInputDim("PriorBoxVar"); - PADDLE_ENFORCE_EQ(prior_box_dims, prior_box_var_dims); + if (ctx->IsRuntime()) { + PADDLE_ENFORCE_EQ(prior_box_dims.size(), 2, + "The rank of Input of PriorBoxVar must be 2"); + PADDLE_ENFORCE_EQ(prior_box_dims[1], 4, + "The shape of PriorBox is [N, 4]"); + if (ctx->HasInput("PriorBoxVar")) { + auto prior_box_var_dims = ctx->GetInputDim("PriorBoxVar"); + PADDLE_ENFORCE_EQ(prior_box_dims, prior_box_var_dims); + } + + auto code_type = + GetBoxCodeType(ctx->Attrs().Get("code_type")); + if (code_type == BoxCodeType::kEncodeCenterSize) { + PADDLE_ENFORCE_EQ(target_box_dims.size(), 2, + "The rank of Input of TargetBox must be 2"); + PADDLE_ENFORCE_EQ(target_box_dims[1], 4, + "The shape of TargetBox is [M, 4]"); + } else if (code_type == BoxCodeType::kDecodeCenterSize) { + PADDLE_ENFORCE_EQ(target_box_dims.size(), 3, + "The rank of Input of TargetBox must be 3"); + PADDLE_ENFORCE_EQ(target_box_dims[1], prior_box_dims[0]); + PADDLE_ENFORCE_EQ(target_box_dims[2], prior_box_dims[1]); + } } - - auto code_type = GetBoxCodeType(ctx->Attrs().Get("code_type")); - if (code_type == BoxCodeType::kEncodeCenterSize) { - PADDLE_ENFORCE_EQ(target_box_dims.size(), 2, - "The rank of Input of TargetBox must be 2"); - PADDLE_ENFORCE_EQ(target_box_dims[1], 4, - "The shape of TargetBox is [M, 4]"); - } else if (code_type == BoxCodeType::kDecodeCenterSize) { - PADDLE_ENFORCE_EQ(target_box_dims.size(), 3, - "The rank of Input of TargetBox must be 3"); - PADDLE_ENFORCE_EQ(target_box_dims[1], prior_box_dims[0]); - PADDLE_ENFORCE_EQ(target_box_dims[2], prior_box_dims[1]); - } - ctx->SetOutputDim( "OutputBox", framework::make_ddim({target_box_dims[0], prior_box_dims[0], 4})); diff --git a/paddle/fluid/operators/detection/generate_proposals_op.cu b/paddle/fluid/operators/detection/generate_proposals_op.cu index 91213b3c4d9db54469ec151ff1dd8e56c3118fea..a0b99377109aef4776fadd68101d011a9191b1cc 100644 --- a/paddle/fluid/operators/detection/generate_proposals_op.cu +++ b/paddle/fluid/operators/detection/generate_proposals_op.cu @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include #include #include #include @@ -67,17 +68,15 @@ static void SortDescending(const platform::CUDADeviceContext &ctx, size_t temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairsDescending( nullptr, temp_storage_bytes, keys_in, keys_out, idx_in, idx_out, num); - // Allocate temporary storage auto place = boost::get(ctx.GetPlace()); - void *d_temp_storage = memory::Alloc(place, temp_storage_bytes); + auto d_temp_storage = + memory::Alloc(place, temp_storage_bytes, memory::Allocator::kScratchpad); // Run sorting operation cub::DeviceRadixSort::SortPairsDescending( - d_temp_storage, temp_storage_bytes, keys_in, keys_out, idx_in, idx_out, - num); - - memory::Free(place, d_temp_storage); + d_temp_storage->ptr(), temp_storage_bytes, keys_in, keys_out, idx_in, + idx_out, num); } template diff --git a/paddle/fluid/operators/detection/multiclass_nms_op.cc b/paddle/fluid/operators/detection/multiclass_nms_op.cc index 9e78b28a6011bb7bd299ca3438eb407f600d7000..f0f8851be0ec2b532c570dc82b8ed5c290981aab 100644 --- a/paddle/fluid/operators/detection/multiclass_nms_op.cc +++ b/paddle/fluid/operators/detection/multiclass_nms_op.cc @@ -36,24 +36,26 @@ class MultiClassNMSOp : public framework::OperatorWithKernel { auto box_dims = ctx->GetInputDim("BBoxes"); auto score_dims = ctx->GetInputDim("Scores"); - PADDLE_ENFORCE_EQ(box_dims.size(), 3, - "The rank of Input(BBoxes) must be 3."); - PADDLE_ENFORCE_EQ(score_dims.size(), 3, - "The rank of Input(Scores) must be 3."); - PADDLE_ENFORCE(box_dims[2] == 4 || box_dims[2] == 8 || box_dims[2] == 16 || - box_dims[2] == 24 || box_dims[2] == 32, - "The 2nd dimension of Input(BBoxes) must be 4 or 8, " - "represents the layout of coordinate " - "[xmin, ymin, xmax, ymax] or " - "4 points: [x1, y1, x2, y2, x3, y3, x4, y4] or " - "8 points: [xi, yi] i= 1,2,...,8 or " - "12 points: [xi, yi] i= 1,2,...,12 or " - "16 points: [xi, yi] i= 1,2,...,16"); - PADDLE_ENFORCE_EQ(box_dims[1], score_dims[2], - "The 1st dimensiong of Input(BBoxes) must be equal to " - "3rd dimension of Input(Scores), which represents the " - "predicted bboxes."); - + if (ctx->IsRuntime()) { + PADDLE_ENFORCE_EQ(box_dims.size(), 3, + "The rank of Input(BBoxes) must be 3."); + PADDLE_ENFORCE_EQ(score_dims.size(), 3, + "The rank of Input(Scores) must be 3."); + PADDLE_ENFORCE(box_dims[2] == 4 || box_dims[2] == 8 || + box_dims[2] == 16 || box_dims[2] == 24 || + box_dims[2] == 32, + "The 2nd dimension of Input(BBoxes) must be 4 or 8, " + "represents the layout of coordinate " + "[xmin, ymin, xmax, ymax] or " + "4 points: [x1, y1, x2, y2, x3, y3, x4, y4] or " + "8 points: [xi, yi] i= 1,2,...,8 or " + "12 points: [xi, yi] i= 1,2,...,12 or " + "16 points: [xi, yi] i= 1,2,...,16"); + PADDLE_ENFORCE_EQ(box_dims[1], score_dims[2], + "The 1st dimensiong of Input(BBoxes) must be equal to " + "3rd dimension of Input(Scores), which represents the " + "predicted bboxes."); + } // Here the box_dims[0] is not the real dimension of output. // It will be rewritten in the computing kernel. ctx->SetOutputDim("Out", {box_dims[1], box_dims[2] + 2}); diff --git a/paddle/fluid/operators/distributed/grpc_serde.cc b/paddle/fluid/operators/distributed/grpc_serde.cc index b201c4a5763148165f517c719227d6317ecbe350..f27b70a5a3dd2927b51a95af7bd1b84a6e232f86 100644 --- a/paddle/fluid/operators/distributed/grpc_serde.cc +++ b/paddle/fluid/operators/distributed/grpc_serde.cc @@ -32,17 +32,20 @@ namespace paddle { namespace operators { namespace distributed { +static void SerializeDestroyCallback(void* payload) { + if (payload != nullptr) { + auto* shared_payload = reinterpret_cast(payload); + delete shared_payload; + } +} + void SerializeToByteBuffer(const std::string& name, framework::Variable* var, const platform::DeviceContext& ctx, ::grpc::ByteBuffer* msg, const std::string& out_name, const int trainer_id) { platform::RecordRPCEvent record_event("serial", &ctx); - // Default DestroyCallback does nothing, When using GPU - // the CPU buffer need to be freed. - DestroyCallback destroy_callback = [](void* backing) {}; VarMsg request; - void* payload = nullptr; - size_t payload_size; + TensorPayload* payload = nullptr; request.set_varname(name); request.set_trainer_id(trainer_id); @@ -62,10 +65,10 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, } if (var->IsType()) { request.set_type(::sendrecv::LOD_TENSOR); - GetTensorPayload(var, ctx, &request, &payload, &payload_size); + payload = new TensorPayload(GetTensorPayload(var, ctx, &request)); } else if (var->IsType()) { request.set_type(::sendrecv::SELECTED_ROWS); - GetSelectedRowsPayload(var, ctx, &request, &payload, &payload_size); + payload = new TensorPayload(GetSelectedRowsPayload(var, ctx, &request)); #ifdef PADDLE_WITH_CUDA } else if (var->IsType()) { request.set_type(::sendrecv::NCCL_ID); @@ -75,17 +78,6 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, typeid(var->Type()).name()); } - if (platform::is_gpu_place(ctx.GetPlace())) { -#ifdef PADDLE_WITH_CUDA - // GPU data is copied to CPU buffer when sending, - // free the buffer when possible. - destroy_callback = [](void* backing) { - platform::CUDAPinnedPlace cuda_pinned; - memory::Free(cuda_pinned, backing); - }; -#endif - } - std::string header; request.AppendToString(&header); auto buffer = std::unique_ptr(new char[1024]); @@ -109,16 +101,18 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, return; } #endif + PADDLE_ENFORCE_NOT_NULL(payload); - e.WriteVarlengthBeginning(VarMsg::kSerializedFieldNumber, payload_size); + e.WriteVarlengthBeginning(VarMsg::kSerializedFieldNumber, + payload->memory_size()); // steal reference of tensor data ::grpc::Slice slices[4]; // metadata, tensor, rows meta, rows int num_slices = 2; // only SelectedRows have rows buffer slices[0] = ::grpc::Slice(e.size()); memcpy(const_cast(slices[0].begin()), e.data(), e.size()); slices[1] = ::grpc::Slice( - grpc_slice_new_with_user_data(payload, payload_size, destroy_callback, - static_cast(payload)), + grpc_slice_new_with_user_data(payload->ptr(), payload->memory_size(), + SerializeDestroyCallback, payload), ::grpc::Slice::STEAL_REF); if (var->IsType()) { diff --git a/paddle/fluid/operators/distributed/sendrecvop_utils.cc b/paddle/fluid/operators/distributed/sendrecvop_utils.cc index 6a3f8fd544bc5d669b725765a863b42ec069a7b6..374fa680e3681d2e4b1d7513a9522810a15fe485 100644 --- a/paddle/fluid/operators/distributed/sendrecvop_utils.cc +++ b/paddle/fluid/operators/distributed/sendrecvop_utils.cc @@ -28,16 +28,34 @@ namespace distributed { using VarMsg = sendrecv::VariableMessage; +static TensorPayload GetCommunicationAllocationFromTensor( + const platform::DeviceContext& ctx, const framework::Tensor& tensor) { + if (is_gpu_place(ctx.GetPlace())) { #ifdef PADDLE_WITH_CUDA -void* GetVarPayLoad(const std::string varname, int64_t size) { - platform::CUDAPinnedPlace cuda_pinned; - return memory::Alloc(cuda_pinned, size); -} -#endif + PADDLE_ENFORCE(is_gpu_place(tensor.place())); + auto& gpu_dev_ctx = + reinterpret_cast(ctx); + auto copy_size = tensor.numel() * framework::SizeOfType(tensor.type()); + platform::CUDAPinnedPlace cuda_pinned; + auto result = memory::AllocShared( + cuda_pinned, copy_size, memory::allocation::Allocator::kCrossDevice); -void GetTensorPayload(framework::Variable* var, - const platform::DeviceContext& ctx, VarMsg* request, - void** payload, size_t* payload_size) { + memory::Copy(cuda_pinned, result->ptr(), + boost::get(tensor.place()), + tensor.data(), copy_size, gpu_dev_ctx.stream()); + + ctx.Wait(); + return TensorPayload(result); +#else + PADDLE_THROW("This situation should not be happened"); +#endif + } else { + return TensorPayload(tensor); + } +} +TensorPayload GetTensorPayload(framework::Variable* var, + const platform::DeviceContext& ctx, + VarMsg* request) { auto tensor = var->Get(); // FIXME(wuyi): data types in send_recv.proto is copied from // framework.proto @@ -56,31 +74,12 @@ void GetTensorPayload(framework::Variable* var, } } } - if (platform::is_gpu_place(ctx.GetPlace())) { -#ifdef PADDLE_WITH_CUDA - PADDLE_ENFORCE(platform::is_gpu_place(tensor.place())); - // platform::CUDAPinnedPlace cuda_pinned; - auto& gpu_dev_ctx = static_cast(ctx); - auto copy_size = tensor.numel() * framework::SizeOfType(tensor.type()); - *payload = GetVarPayLoad(request->varname(), copy_size); - - platform::CUDAPinnedPlace cuda_pinned; - memory::Copy(cuda_pinned, *payload, - boost::get(tensor.place()), - reinterpret_cast(tensor.data()), copy_size, - gpu_dev_ctx.stream()); - - ctx.Wait(); -#endif - } else { - *payload = tensor.data(); - } - *payload_size = tensor.numel() * framework::SizeOfType(tensor.type()); + return GetCommunicationAllocationFromTensor(ctx, tensor); } -void GetSelectedRowsPayload(framework::Variable* var, - const platform::DeviceContext& ctx, VarMsg* request, - void** payload, size_t* payload_size) { +TensorPayload GetSelectedRowsPayload(framework::Variable* var, + const platform::DeviceContext& ctx, + VarMsg* request) { auto* slr = var->GetMutable(); request->set_data_type( static_cast(framework::ToDataType(slr->value().type()))); @@ -92,25 +91,20 @@ void GetSelectedRowsPayload(framework::Variable* var, } auto* tensor = slr->mutable_value(); - if (platform::is_gpu_place(ctx.GetPlace())) { -#ifdef PADDLE_WITH_CUDA - auto& gpu_dev_ctx = static_cast(ctx); - auto copy_size = tensor->numel() * framework::SizeOfType(tensor->type()); - *payload = GetVarPayLoad(request->varname(), copy_size); - - platform::CUDAPinnedPlace cuda_pinned; - memory::Copy(cuda_pinned, *payload, - boost::get(tensor->place()), - reinterpret_cast(tensor->data()), copy_size, - gpu_dev_ctx.stream()); - ctx.Wait(); -#endif - } else { - *payload = slr->mutable_value()->data(); - } - *payload_size = tensor->numel() * framework::SizeOfType(tensor->type()); + return GetCommunicationAllocationFromTensor(ctx, *tensor); } +TensorPayload::TensorPayload(std::shared_ptr allocation) + : allocation_(allocation), offset_(0), memory_size_(allocation->size()) {} +TensorPayload::TensorPayload(const framework::Tensor& tensor) + : allocation_(tensor.Holder()), + offset_(tensor.offset()), + memory_size_(tensor.numel() * framework::SizeOfType(tensor.type())) {} +void* TensorPayload::ptr() const { + return reinterpret_cast( + reinterpret_cast(allocation_->ptr()) + offset_); +} +size_t TensorPayload::memory_size() const { return memory_size_; } } // namespace distributed } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/distributed/sendrecvop_utils.h b/paddle/fluid/operators/distributed/sendrecvop_utils.h index 4d08d3c77afa3c1f2b4d7602f7199558bb5a79c0..480fc59c4281edbfa5f08e07a86c5f1257adb4be 100644 --- a/paddle/fluid/operators/distributed/sendrecvop_utils.h +++ b/paddle/fluid/operators/distributed/sendrecvop_utils.h @@ -33,13 +33,30 @@ namespace distributed { using VarMsg = sendrecv::VariableMessage; -void GetTensorPayload(framework::Variable* var, - const platform::DeviceContext& ctx, VarMsg* request, - void** payload, size_t* payload_size); +class TensorPayload final { + public: + explicit TensorPayload(const framework::Tensor& tensor); + explicit TensorPayload(std::shared_ptr allocation); -void GetSelectedRowsPayload(framework::Variable* var, - const platform::DeviceContext& ctx, VarMsg* request, - void** payload, size_t* payload_size); + TensorPayload(const TensorPayload& o) = default; + TensorPayload& operator=(const TensorPayload& o) = default; + + void* ptr() const; + size_t memory_size() const; + + private: + std::shared_ptr allocation_; + size_t offset_; + size_t memory_size_; +}; + +TensorPayload GetTensorPayload(framework::Variable* var, + const platform::DeviceContext& ctx, + VarMsg* request); + +TensorPayload GetSelectedRowsPayload(framework::Variable* var, + const platform::DeviceContext& ctx, + VarMsg* request); inline std::type_index ToTypeIndex(sendrecv::VariableMessage::Type type) { switch (type) { diff --git a/paddle/fluid/operators/distributed/variable_response.cc b/paddle/fluid/operators/distributed/variable_response.cc index d1572ce01aa17273988955c27bdea5b2f40c27ea..f831793e9b2aeedb6a073013494a86fcd3246b38 100644 --- a/paddle/fluid/operators/distributed/variable_response.cc +++ b/paddle/fluid/operators/distributed/variable_response.cc @@ -115,11 +115,11 @@ bool VariableResponse::CopyLodTensorData( void* tensor_data = tensor->mutable_data(ctx.GetPlace(), ToTypeIndex(meta_.data_type())); - if (!ReadRaw(input, ctx, tensor->place(), tensor_data, length)) { - return false; - } - return true; + VLOG(6) << "Tensor.memory_size = " << tensor->memory_size() + << ", Buffer Size = " << length; + PADDLE_ENFORCE_EQ(tensor->memory_size(), length); + return ReadRaw(input, ctx, tensor->place(), tensor_data, length); } inline framework::DDim GetDims( diff --git a/paddle/fluid/operators/distributed_ops/CMakeLists.txt b/paddle/fluid/operators/distributed_ops/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..28bb90af5675b2fe14813675ad001c0cf1d71e12 --- /dev/null +++ b/paddle/fluid/operators/distributed_ops/CMakeLists.txt @@ -0,0 +1,40 @@ +include(operators) + +set(DISTRIBUTE_DEPS "") +if(WITH_GRPC) + set(DISTRIBUTE_DEPS sendrecvop_grpc grpc++_unsecure grpc_unsecure gpr cares zlib protobuf node) +else() + set(DISTRIBUTE_DEPS sendrecvop_brpc brpc leveldb snappystream snappy protobuf ssl crypto zlib node) + if(WITH_BRPC_RDMA) + find_library(IBVERBS_LIBRARY NAMES ibverbs) + ADD_LIBRARY(ibverbs SHARED IMPORTED GLOBAL) + SET_PROPERTY(TARGET ibverbs PROPERTY IMPORTED_LOCATION ${IBVERBS_LIBRARY}) + + + find_library(RDMACM_LIBRARY NAMES rdmacm) + ADD_LIBRARY(rdmacm SHARED IMPORTED GLOBAL) + SET_PROPERTY(TARGET rdmacm PROPERTY IMPORTED_LOCATION ${RDMACM_LIBRARY}) + + set(DISTRIBUTE_DEPS ${DISTRIBUTE_DEPS} ibverbs rdmacm) + endif() +endif() + +set(DISTRIBUTE_COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor") + + +file(GLOB OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*_op.cc") +list(REMOVE_DUPLICATES OPS) + +foreach(src ${OPS}) + set_source_files_properties(${src} PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) +endforeach() + +register_operators(EXCLUDES gen_nccl_id_op DEPS ${DISTRIBUTE_DEPS}) + +if(WITH_GPU AND NOT WIN32) + set(DISTRIBUTE_DEPS ${DISTRIBUTE_DEPS} nccl_common) + op_library(gen_nccl_id_op ${DISTRIBUTE_DEPS} nccl_common) +endif() + +set(OPERATOR_DEPS ${OPERATOR_DEPS} ${DISTRIBUTE_DEPS} PARENT_SCOPE) +set(GLOB_DISTRIBUTE_DEPS ${DISTRIBUTE_DEPS} CACHE INTERNAL "distributed dependency") diff --git a/paddle/fluid/operators/checkpoint_notify_op.cc b/paddle/fluid/operators/distributed_ops/checkpoint_notify_op.cc similarity index 98% rename from paddle/fluid/operators/checkpoint_notify_op.cc rename to paddle/fluid/operators/distributed_ops/checkpoint_notify_op.cc index defa287bdb913e406aa7e2a383cefc3cca8c4d94..ed4dced51356515d5910e2962c9ee91a1997dbf0 100644 --- a/paddle/fluid/operators/checkpoint_notify_op.cc +++ b/paddle/fluid/operators/distributed_ops/checkpoint_notify_op.cc @@ -19,7 +19,7 @@ limitations under the License. */ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/detail/macros.h" -#include "paddle/fluid/operators/send_recv_util.h" +#include "paddle/fluid/operators/distributed_ops/send_recv_util.h" #include "paddle/fluid/string/printf.h" namespace paddle { diff --git a/paddle/fluid/operators/fake_init_op.cc b/paddle/fluid/operators/distributed_ops/fake_init_op.cc similarity index 100% rename from paddle/fluid/operators/fake_init_op.cc rename to paddle/fluid/operators/distributed_ops/fake_init_op.cc diff --git a/paddle/fluid/operators/fetch_barrier_op.cc b/paddle/fluid/operators/distributed_ops/fetch_barrier_op.cc similarity index 100% rename from paddle/fluid/operators/fetch_barrier_op.cc rename to paddle/fluid/operators/distributed_ops/fetch_barrier_op.cc diff --git a/paddle/fluid/operators/gen_nccl_id_op.cc b/paddle/fluid/operators/distributed_ops/gen_nccl_id_op.cc similarity index 100% rename from paddle/fluid/operators/gen_nccl_id_op.cc rename to paddle/fluid/operators/distributed_ops/gen_nccl_id_op.cc diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/distributed_ops/listen_and_serv_op.cc similarity index 99% rename from paddle/fluid/operators/listen_and_serv_op.cc rename to paddle/fluid/operators/distributed_ops/listen_and_serv_op.cc index e3d09e2d14817fe0f2ccda18ed90c9436b399ae3..9f0c7db0e1133f6d73e73a9d162a945ba4c17dc6 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/distributed_ops/listen_and_serv_op.cc @@ -25,7 +25,7 @@ limitations under the License. */ #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/distributed/request_handler_impl.h" -#include "paddle/fluid/operators/listen_and_serv_op.h" +#include "paddle/fluid/operators/distributed_ops/listen_and_serv_op.h" DEFINE_int32(rpc_send_thread_num, 5, "number of threads for rpc send"); DEFINE_int32(rpc_get_thread_num, 5, "number of threads for rpc get"); diff --git a/paddle/fluid/operators/listen_and_serv_op.h b/paddle/fluid/operators/distributed_ops/listen_and_serv_op.h similarity index 100% rename from paddle/fluid/operators/listen_and_serv_op.h rename to paddle/fluid/operators/distributed_ops/listen_and_serv_op.h diff --git a/paddle/fluid/operators/merge_ids_op.cc b/paddle/fluid/operators/distributed_ops/merge_ids_op.cc similarity index 98% rename from paddle/fluid/operators/merge_ids_op.cc rename to paddle/fluid/operators/distributed_ops/merge_ids_op.cc index 6e0e13698097ade36449f2e8ff6ab981a1b24311..252a63cb605f65e8572281a05e884fb8b020a820 100644 --- a/paddle/fluid/operators/merge_ids_op.cc +++ b/paddle/fluid/operators/distributed_ops/merge_ids_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/merge_ids_op.h" +#include "paddle/fluid/operators/distributed_ops/merge_ids_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/merge_ids_op.h b/paddle/fluid/operators/distributed_ops/merge_ids_op.h similarity index 100% rename from paddle/fluid/operators/merge_ids_op.h rename to paddle/fluid/operators/distributed_ops/merge_ids_op.h diff --git a/paddle/fluid/operators/prefetch_op.cc b/paddle/fluid/operators/distributed_ops/prefetch_op.cc similarity index 98% rename from paddle/fluid/operators/prefetch_op.cc rename to paddle/fluid/operators/distributed_ops/prefetch_op.cc index 55853d25460bf6e3d07c829d686e71cc9367118c..faa67a28d86235625a87b8bd7b87685e09c75f0b 100644 --- a/paddle/fluid/operators/prefetch_op.cc +++ b/paddle/fluid/operators/distributed_ops/prefetch_op.cc @@ -19,7 +19,7 @@ limitations under the License. */ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/detail/macros.h" -#include "paddle/fluid/operators/send_recv_util.h" +#include "paddle/fluid/operators/distributed_ops/send_recv_util.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/recv_op.cc b/paddle/fluid/operators/distributed_ops/recv_op.cc similarity index 100% rename from paddle/fluid/operators/recv_op.cc rename to paddle/fluid/operators/distributed_ops/recv_op.cc diff --git a/paddle/fluid/operators/ref_by_trainer_id_op.cc b/paddle/fluid/operators/distributed_ops/ref_by_trainer_id_op.cc similarity index 97% rename from paddle/fluid/operators/ref_by_trainer_id_op.cc rename to paddle/fluid/operators/distributed_ops/ref_by_trainer_id_op.cc index 6cb651af6dc3d8e301365968787c199acc4c60ee..98b0af7688b928f21573247b327bee1d22a73f17 100644 --- a/paddle/fluid/operators/ref_by_trainer_id_op.cc +++ b/paddle/fluid/operators/distributed_ops/ref_by_trainer_id_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/ref_by_trainer_id_op.h" +#include "paddle/fluid/operators/distributed_ops/ref_by_trainer_id_op.h" #include namespace paddle { diff --git a/paddle/fluid/operators/ref_by_trainer_id_op.cu.cc b/paddle/fluid/operators/distributed_ops/ref_by_trainer_id_op.cu.cc similarity index 94% rename from paddle/fluid/operators/ref_by_trainer_id_op.cu.cc rename to paddle/fluid/operators/distributed_ops/ref_by_trainer_id_op.cu.cc index b98e2b5c9c7341f2a424fb4b32ff1e8bc45a056c..168cd51355de56c2e2a83ba73d7eb14f6ba6e533 100644 --- a/paddle/fluid/operators/ref_by_trainer_id_op.cu.cc +++ b/paddle/fluid/operators/distributed_ops/ref_by_trainer_id_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/ref_by_trainer_id_op.h" +#include "paddle/fluid/operators/distributed_ops/ref_by_trainer_id_op.h" REGISTER_OP_CUDA_KERNEL( ref_by_trainer_id, diff --git a/paddle/fluid/operators/ref_by_trainer_id_op.h b/paddle/fluid/operators/distributed_ops/ref_by_trainer_id_op.h similarity index 100% rename from paddle/fluid/operators/ref_by_trainer_id_op.h rename to paddle/fluid/operators/distributed_ops/ref_by_trainer_id_op.h diff --git a/paddle/fluid/operators/send_barrier_op.cc b/paddle/fluid/operators/distributed_ops/send_barrier_op.cc similarity index 100% rename from paddle/fluid/operators/send_barrier_op.cc rename to paddle/fluid/operators/distributed_ops/send_barrier_op.cc diff --git a/paddle/fluid/operators/send_op.cc b/paddle/fluid/operators/distributed_ops/send_op.cc similarity index 98% rename from paddle/fluid/operators/send_op.cc rename to paddle/fluid/operators/distributed_ops/send_op.cc index 0ad43d56d3cd7500290dc1e386a2dbaf4453a191..be53a1a32b59d7c0235382f5db18d2203b4a035a 100644 --- a/paddle/fluid/operators/send_op.cc +++ b/paddle/fluid/operators/distributed_ops/send_op.cc @@ -20,7 +20,7 @@ limitations under the License. */ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/detail/macros.h" -#include "paddle/fluid/operators/send_recv_util.h" +#include "paddle/fluid/operators/distributed_ops/send_recv_util.h" #include "paddle/fluid/platform/profiler.h" namespace paddle { diff --git a/paddle/fluid/operators/send_recv_op_test.cc b/paddle/fluid/operators/distributed_ops/send_recv_op_test.cc similarity index 99% rename from paddle/fluid/operators/send_recv_op_test.cc rename to paddle/fluid/operators/distributed_ops/send_recv_op_test.cc index d79b16e3cca714d44c88834082cea9367480da9a..bf798a8251fcb4148db486f26d32525b59299c81 100644 --- a/paddle/fluid/operators/send_recv_op_test.cc +++ b/paddle/fluid/operators/distributed_ops/send_recv_op_test.cc @@ -20,7 +20,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/program_desc.h" -#include "paddle/fluid/operators/listen_and_serv_op.h" +#include "paddle/fluid/operators/distributed_ops/listen_and_serv_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/string/printf.h" diff --git a/paddle/fluid/operators/send_recv_util.h b/paddle/fluid/operators/distributed_ops/send_recv_util.h similarity index 100% rename from paddle/fluid/operators/send_recv_util.h rename to paddle/fluid/operators/distributed_ops/send_recv_util.h diff --git a/paddle/fluid/operators/split_byref_op.cc b/paddle/fluid/operators/distributed_ops/split_byref_op.cc similarity index 98% rename from paddle/fluid/operators/split_byref_op.cc rename to paddle/fluid/operators/distributed_ops/split_byref_op.cc index bc998e1abbd7131a7497288cc9d66315a6fedc85..d65e7ffe5a492fe5df038bb6bd469e09de6f95ca 100644 --- a/paddle/fluid/operators/split_byref_op.cc +++ b/paddle/fluid/operators/distributed_ops/split_byref_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/split_byref_op.h" +#include "paddle/fluid/operators/distributed_ops/split_byref_op.h" #include "paddle/fluid/operators/split_op.h" namespace paddle { diff --git a/paddle/fluid/operators/split_byref_op.cu.cc b/paddle/fluid/operators/distributed_ops/split_byref_op.cu.cc similarity index 91% rename from paddle/fluid/operators/split_byref_op.cu.cc rename to paddle/fluid/operators/distributed_ops/split_byref_op.cu.cc index 5ee6186f3541b7dcb845ce0c6d28081685925da0..056659c3ea61f6233a6dda56ca1e272e72770d4a 100644 --- a/paddle/fluid/operators/split_byref_op.cu.cc +++ b/paddle/fluid/operators/distributed_ops/split_byref_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/split_byref_op.h" +#include "paddle/fluid/operators/distributed_ops/split_byref_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( split_byref, diff --git a/paddle/fluid/operators/split_byref_op.h b/paddle/fluid/operators/distributed_ops/split_byref_op.h similarity index 100% rename from paddle/fluid/operators/split_byref_op.h rename to paddle/fluid/operators/distributed_ops/split_byref_op.h diff --git a/paddle/fluid/operators/split_ids_op.cc b/paddle/fluid/operators/distributed_ops/split_ids_op.cc similarity index 98% rename from paddle/fluid/operators/split_ids_op.cc rename to paddle/fluid/operators/distributed_ops/split_ids_op.cc index 01d432e13068f7b718d08dc15d8cc99a7fbb0afe..f61d387fbef636298c412c227bf7a56a04f69c63 100644 --- a/paddle/fluid/operators/split_ids_op.cc +++ b/paddle/fluid/operators/distributed_ops/split_ids_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/split_ids_op.h" +#include "paddle/fluid/operators/distributed_ops/split_ids_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/split_ids_op.h b/paddle/fluid/operators/distributed_ops/split_ids_op.h similarity index 100% rename from paddle/fluid/operators/split_ids_op.h rename to paddle/fluid/operators/distributed_ops/split_ids_op.h diff --git a/paddle/fluid/operators/test_send_nccl_id.cc b/paddle/fluid/operators/distributed_ops/test_send_nccl_id.cc similarity index 96% rename from paddle/fluid/operators/test_send_nccl_id.cc rename to paddle/fluid/operators/distributed_ops/test_send_nccl_id.cc index b5426e17aac19dc07ee56545fac8472d9ef0d93c..a73cb08eca272b044501d48e7b8c5b7dc8553a50 100644 --- a/paddle/fluid/operators/test_send_nccl_id.cc +++ b/paddle/fluid/operators/distributed_ops/test_send_nccl_id.cc @@ -22,14 +22,14 @@ limitations under the License. */ #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/operators/detail/macros.h" #include "paddle/fluid/operators/distributed/request_handler_impl.h" -#include "paddle/fluid/operators/listen_and_serv_op.h" +#include "paddle/fluid/operators/distributed_ops/listen_and_serv_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/platform/nccl_helper.h" #include "paddle/fluid/string/printf.h" #ifdef PADDLE_WITH_GRPC -#include "paddle/fluid/operators/send_recv_util.h" +#include "paddle/fluid/operators/distributed_ops/send_recv_util.h" #endif USE_NO_KERNEL_OP(listen_and_serv); diff --git a/paddle/fluid/operators/elementwise/CMakeLists.txt b/paddle/fluid/operators/elementwise/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..5d468316e8eacb73c4a4ce81c784880bb5e46c2d --- /dev/null +++ b/paddle/fluid/operators/elementwise/CMakeLists.txt @@ -0,0 +1,2 @@ +include(operators) +register_operators() diff --git a/paddle/fluid/operators/elementwise_add_mkldnn_op.cc b/paddle/fluid/operators/elementwise/elementwise_add_mkldnn_op.cc similarity index 97% rename from paddle/fluid/operators/elementwise_add_mkldnn_op.cc rename to paddle/fluid/operators/elementwise/elementwise_add_mkldnn_op.cc index 9ad82aec8182d6ba06b67391d71317a3d0df1833..6a6741d8fc54d22addca91b75dfabf5950c1a35a 100644 --- a/paddle/fluid/operators/elementwise_add_mkldnn_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_add_mkldnn_op.cc @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/memory/memcpy.h" -#include "paddle/fluid/operators/elementwise_add_op.h" -#include "paddle/fluid/operators/elementwise_op_function.h" +#include "paddle/fluid/operators/elementwise/elementwise_add_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_op_function.h" #include "paddle/fluid/platform/mkldnn_helper.h" diff --git a/paddle/fluid/operators/elementwise_add_op.cc b/paddle/fluid/operators/elementwise/elementwise_add_op.cc similarity index 92% rename from paddle/fluid/operators/elementwise_add_op.cc rename to paddle/fluid/operators/elementwise/elementwise_add_op.cc index 3c97ac995c649ecd0d196a584240e1e7ac04f08e..7e789cd8d9143164c2346b067855eb904e00075f 100644 --- a/paddle/fluid/operators/elementwise_add_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_add_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/elementwise_add_op.h" -#include "paddle/fluid/operators/elementwise_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_add_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_op.h" namespace ops = paddle::operators; REGISTER_ELEMWISE_GRAD_MAKER(elementwise_add, Add); REGISTER_ELEMWISE_EXPLICIT_OP(elementwise_add, "Add", "Out = X + Y", "Out", diff --git a/paddle/fluid/operators/elementwise_add_op.cu b/paddle/fluid/operators/elementwise/elementwise_add_op.cu similarity index 95% rename from paddle/fluid/operators/elementwise_add_op.cu rename to paddle/fluid/operators/elementwise/elementwise_add_op.cu index f9f5c66d34fa1d73db00173e493f9953b8579518..2fb7eeb4b9e3119a6eea3e69a2a6002a80f6c0f3 100644 --- a/paddle/fluid/operators/elementwise_add_op.cu +++ b/paddle/fluid/operators/elementwise/elementwise_add_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/fluid/operators/elementwise_add_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_add_op.h" #include "paddle/fluid/platform/float16.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/elementwise_add_op.h b/paddle/fluid/operators/elementwise/elementwise_add_op.h similarity index 97% rename from paddle/fluid/operators/elementwise_add_op.h rename to paddle/fluid/operators/elementwise/elementwise_add_op.h index 9edbdbefe76600dc4bf937d95e70d11450206cd4..69f640ab6649df673f07ac0cef81bf80d16eb98d 100644 --- a/paddle/fluid/operators/elementwise_add_op.h +++ b/paddle/fluid/operators/elementwise/elementwise_add_op.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include "paddle/fluid/framework/eigen.h" -#include "paddle/fluid/operators/elementwise_op.h" -#include "paddle/fluid/operators/elementwise_op_function.h" +#include "paddle/fluid/operators/elementwise/elementwise_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_op_function.h" #include "paddle/fluid/operators/math/blas.h" namespace paddle { diff --git a/paddle/fluid/operators/elementwise_div_op.cc b/paddle/fluid/operators/elementwise/elementwise_div_op.cc similarity index 91% rename from paddle/fluid/operators/elementwise_div_op.cc rename to paddle/fluid/operators/elementwise/elementwise_div_op.cc index 84c8a65e5f859d276ae6d5f1a3f25c9d713a7a61..85612ba47448a7b0d712e9314e3980019c96e9c3 100644 --- a/paddle/fluid/operators/elementwise_div_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_div_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/elementwise_div_op.h" -#include "paddle/fluid/operators/elementwise_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_div_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_op.h" namespace ops = paddle::operators; REGISTER_ELEMWISE_OP(elementwise_div, "Div", "Out = X / Y"); diff --git a/paddle/fluid/operators/elementwise_div_op.cu b/paddle/fluid/operators/elementwise/elementwise_div_op.cu similarity index 95% rename from paddle/fluid/operators/elementwise_div_op.cu rename to paddle/fluid/operators/elementwise/elementwise_div_op.cu index 588d1f7420241ba1697e5141e4e4a2870f2dc87c..c5a1a7e08d89f3ef205af4c37246f8fa288189f3 100644 --- a/paddle/fluid/operators/elementwise_div_op.cu +++ b/paddle/fluid/operators/elementwise/elementwise_div_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/fluid/operators/elementwise_div_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_div_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/elementwise_div_op.h b/paddle/fluid/operators/elementwise/elementwise_div_op.h similarity index 94% rename from paddle/fluid/operators/elementwise_div_op.h rename to paddle/fluid/operators/elementwise/elementwise_div_op.h index cdb1264d298ef48d6b3da39d63ff1d09e1561aa4..8a07339077aeaa4403ffd1e1e30e0d58a9cc30e7 100644 --- a/paddle/fluid/operators/elementwise_div_op.h +++ b/paddle/fluid/operators/elementwise/elementwise_div_op.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/fluid/operators/elementwise_op.h" -#include "paddle/fluid/operators/elementwise_op_function.h" +#include "paddle/fluid/operators/elementwise/elementwise_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_op_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_max_op.cc b/paddle/fluid/operators/elementwise/elementwise_max_op.cc similarity index 91% rename from paddle/fluid/operators/elementwise_max_op.cc rename to paddle/fluid/operators/elementwise/elementwise_max_op.cc index 411671335a19ae2283ca9db8b8f6bcbb6a6b630a..ea0dcd736e5700fb0f341938ac3e3e3b178f29c1 100644 --- a/paddle/fluid/operators/elementwise_max_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_max_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/elementwise_max_op.h" -#include "paddle/fluid/operators/elementwise_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_max_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_op.h" namespace ops = paddle::operators; REGISTER_ELEMWISE_OP(elementwise_max, "Max", "Out = max(X, Y)"); REGISTER_OP_CPU_KERNEL( diff --git a/paddle/fluid/operators/elementwise_max_op.cu b/paddle/fluid/operators/elementwise/elementwise_max_op.cu similarity index 95% rename from paddle/fluid/operators/elementwise_max_op.cu rename to paddle/fluid/operators/elementwise/elementwise_max_op.cu index 32c99835d66d8b11b72af162230aa383c7e4a57c..a90dcd3ecf0da114110db5946e111a8b3a925e42 100644 --- a/paddle/fluid/operators/elementwise_max_op.cu +++ b/paddle/fluid/operators/elementwise/elementwise_max_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/fluid/operators/elementwise_max_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_max_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/elementwise_max_op.h b/paddle/fluid/operators/elementwise/elementwise_max_op.h similarity index 94% rename from paddle/fluid/operators/elementwise_max_op.h rename to paddle/fluid/operators/elementwise/elementwise_max_op.h index 367489dd563f7d8bdf430517cadf49d4ef2a0105..3ee0c32e0d5d5df02d5d157416918fb4fb3aca92 100644 --- a/paddle/fluid/operators/elementwise_max_op.h +++ b/paddle/fluid/operators/elementwise/elementwise_max_op.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/fluid/operators/elementwise_op.h" -#include "paddle/fluid/operators/elementwise_op_function.h" +#include "paddle/fluid/operators/elementwise/elementwise_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_op_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_min_op.cc b/paddle/fluid/operators/elementwise/elementwise_min_op.cc similarity index 91% rename from paddle/fluid/operators/elementwise_min_op.cc rename to paddle/fluid/operators/elementwise/elementwise_min_op.cc index 816192083d2275b26e6dd9afc76f2c021a01cf73..b263b9addd40cfd329d2cc8588c278df2cb008e9 100644 --- a/paddle/fluid/operators/elementwise_min_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_min_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/elementwise_min_op.h" -#include "paddle/fluid/operators/elementwise_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_min_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_op.h" namespace ops = paddle::operators; REGISTER_ELEMWISE_OP(elementwise_min, "Min", "Out = min(X, Y)"); REGISTER_OP_CPU_KERNEL( diff --git a/paddle/fluid/operators/elementwise_min_op.cu b/paddle/fluid/operators/elementwise/elementwise_min_op.cu similarity index 95% rename from paddle/fluid/operators/elementwise_min_op.cu rename to paddle/fluid/operators/elementwise/elementwise_min_op.cu index a237c9c503ec998fd74fec50a1d7949279bb38f0..ab77709c28c15a925bd3deac07c43e12b12cb781 100644 --- a/paddle/fluid/operators/elementwise_min_op.cu +++ b/paddle/fluid/operators/elementwise/elementwise_min_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/fluid/operators/elementwise_min_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_min_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/elementwise_min_op.h b/paddle/fluid/operators/elementwise/elementwise_min_op.h similarity index 94% rename from paddle/fluid/operators/elementwise_min_op.h rename to paddle/fluid/operators/elementwise/elementwise_min_op.h index 1bd0a6279766c8eba92d1e3a76191c59410286b2..d04e372faaa4e6296e982afe6155cdde2fec4f81 100644 --- a/paddle/fluid/operators/elementwise_min_op.h +++ b/paddle/fluid/operators/elementwise/elementwise_min_op.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/fluid/operators/elementwise_op.h" -#include "paddle/fluid/operators/elementwise_op_function.h" +#include "paddle/fluid/operators/elementwise/elementwise_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_op_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_mul_op.cc b/paddle/fluid/operators/elementwise/elementwise_mul_op.cc similarity index 95% rename from paddle/fluid/operators/elementwise_mul_op.cc rename to paddle/fluid/operators/elementwise/elementwise_mul_op.cc index 86a8459a79135d1fbcba6886172acc5a2abdb88b..d5e3300ac954aebf34a9c65fbca8de8fa2685932 100644 --- a/paddle/fluid/operators/elementwise_mul_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_mul_op.cc @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/elementwise_mul_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_mul_op.h" #include -#include "paddle/fluid/operators/elementwise_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_mul_op.cu b/paddle/fluid/operators/elementwise/elementwise_mul_op.cu similarity index 95% rename from paddle/fluid/operators/elementwise_mul_op.cu rename to paddle/fluid/operators/elementwise/elementwise_mul_op.cu index 2fb1b4bee689c059625e3dbd59f80c541ace83a0..4d16bc38e1d8e4cbbe3afbe08f233e14329e0f2e 100644 --- a/paddle/fluid/operators/elementwise_mul_op.cu +++ b/paddle/fluid/operators/elementwise/elementwise_mul_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/fluid/operators/elementwise_mul_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_mul_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/elementwise_mul_op.h b/paddle/fluid/operators/elementwise/elementwise_mul_op.h similarity index 96% rename from paddle/fluid/operators/elementwise_mul_op.h rename to paddle/fluid/operators/elementwise/elementwise_mul_op.h index 29e4ab7db1377b6aa80e94a26ab3cb8669f9154a..dc25bc57103286ce183a4649964fd96c62169b7f 100644 --- a/paddle/fluid/operators/elementwise_mul_op.h +++ b/paddle/fluid/operators/elementwise/elementwise_mul_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/fluid/operators/elementwise_op.h" -#include "paddle/fluid/operators/elementwise_op_function.h" +#include "paddle/fluid/operators/elementwise/elementwise_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_op_function.h" #include "paddle/fluid/operators/math/blas.h" namespace paddle { diff --git a/paddle/fluid/operators/elementwise_op.h b/paddle/fluid/operators/elementwise/elementwise_op.h similarity index 100% rename from paddle/fluid/operators/elementwise_op.h rename to paddle/fluid/operators/elementwise/elementwise_op.h diff --git a/paddle/fluid/operators/elementwise_op_function.h b/paddle/fluid/operators/elementwise/elementwise_op_function.h similarity index 100% rename from paddle/fluid/operators/elementwise_op_function.h rename to paddle/fluid/operators/elementwise/elementwise_op_function.h diff --git a/paddle/fluid/operators/elementwise_pow_op.cc b/paddle/fluid/operators/elementwise/elementwise_pow_op.cc similarity index 90% rename from paddle/fluid/operators/elementwise_pow_op.cc rename to paddle/fluid/operators/elementwise/elementwise_pow_op.cc index 5fd6bde9ba0930e29f2161f1ff23ff9f5e7dc85d..6335e67a8a48c8702f0cb14ce947275d47e01d17 100644 --- a/paddle/fluid/operators/elementwise_pow_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_pow_op.cc @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/elementwise_pow_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_pow_op.h" #include -#include "paddle/fluid/operators/elementwise_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_pow_op.cu b/paddle/fluid/operators/elementwise/elementwise_pow_op.cu similarity index 92% rename from paddle/fluid/operators/elementwise_pow_op.cu rename to paddle/fluid/operators/elementwise/elementwise_pow_op.cu index 1f19ebd470973137b465381e498ab07a36323c14..6ee0779f23bc2c734aa1d439abb12f366227e686 100644 --- a/paddle/fluid/operators/elementwise_pow_op.cu +++ b/paddle/fluid/operators/elementwise/elementwise_pow_op.cu @@ -10,7 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/fluid/operators/elementwise_pow_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_pow_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/elementwise_pow_op.h b/paddle/fluid/operators/elementwise/elementwise_pow_op.h similarity index 95% rename from paddle/fluid/operators/elementwise_pow_op.h rename to paddle/fluid/operators/elementwise/elementwise_pow_op.h index 8c1c5f9f98018d8d4368a9333e2004141615775d..dc584b4c32fc3063da0c6de50577d28afcb63b83 100644 --- a/paddle/fluid/operators/elementwise_pow_op.h +++ b/paddle/fluid/operators/elementwise/elementwise_pow_op.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include -#include "paddle/fluid/operators/elementwise_op_function.h" +#include "paddle/fluid/operators/elementwise/elementwise_op_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_sub_op.cc b/paddle/fluid/operators/elementwise/elementwise_sub_op.cc similarity index 92% rename from paddle/fluid/operators/elementwise_sub_op.cc rename to paddle/fluid/operators/elementwise/elementwise_sub_op.cc index b7224261e6a7ca82dff92a25f5fe8818c08e676d..efc66374c812cbd07adef6ac25c9616b880ec383 100644 --- a/paddle/fluid/operators/elementwise_sub_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_sub_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/elementwise_sub_op.h" -#include "paddle/fluid/operators/elementwise_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_sub_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_op.h" namespace ops = paddle::operators; REGISTER_ELEMWISE_GRAD_MAKER(elementwise_sub, Sub); REGISTER_ELEMWISE_EXPLICIT_OP(elementwise_sub, "Sub", "Out = X - Y", "Out", diff --git a/paddle/fluid/operators/elementwise_sub_op.cu b/paddle/fluid/operators/elementwise/elementwise_sub_op.cu similarity index 95% rename from paddle/fluid/operators/elementwise_sub_op.cu rename to paddle/fluid/operators/elementwise/elementwise_sub_op.cu index 8709f686f9af1bf4dacbc2dfc3e2d5dcc1c59b9a..8d9bf7c4d81d49d83b5d1cf0369be5c9957242b4 100644 --- a/paddle/fluid/operators/elementwise_sub_op.cu +++ b/paddle/fluid/operators/elementwise/elementwise_sub_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/fluid/operators/elementwise_sub_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_sub_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/elementwise_sub_op.h b/paddle/fluid/operators/elementwise/elementwise_sub_op.h similarity index 94% rename from paddle/fluid/operators/elementwise_sub_op.h rename to paddle/fluid/operators/elementwise/elementwise_sub_op.h index 7204c43464e0b81126148b86f64a36b0e299368b..770323fe5a8fe7c1051b418b2541ab4c669635b4 100644 --- a/paddle/fluid/operators/elementwise_sub_op.h +++ b/paddle/fluid/operators/elementwise/elementwise_sub_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/fluid/operators/elementwise_op.h" -#include "paddle/fluid/operators/elementwise_op_function.h" +#include "paddle/fluid/operators/elementwise/elementwise_op.h" +#include "paddle/fluid/operators/elementwise/elementwise_op_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/fused/CMakeLists.txt b/paddle/fluid/operators/fused/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..5d468316e8eacb73c4a4ce81c784880bb5e46c2d --- /dev/null +++ b/paddle/fluid/operators/fused/CMakeLists.txt @@ -0,0 +1,2 @@ +include(operators) +register_operators() diff --git a/paddle/fluid/operators/fused_elemwise_activation_op.cc b/paddle/fluid/operators/fused/fused_elemwise_activation_op.cc similarity index 99% rename from paddle/fluid/operators/fused_elemwise_activation_op.cc rename to paddle/fluid/operators/fused/fused_elemwise_activation_op.cc index d88ef15949da3809bffe41e4bf303d1fee568675..3771aac0dfd98a52dcd8b789e5a6114e977e22f8 100644 --- a/paddle/fluid/operators/fused_elemwise_activation_op.cc +++ b/paddle/fluid/operators/fused/fused_elemwise_activation_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/fused_elemwise_activation_op.h" +#include "paddle/fluid/operators/fused/fused_elemwise_activation_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/fused_elemwise_activation_op.cu b/paddle/fluid/operators/fused/fused_elemwise_activation_op.cu similarity index 94% rename from paddle/fluid/operators/fused_elemwise_activation_op.cu rename to paddle/fluid/operators/fused/fused_elemwise_activation_op.cu index e1d2b16b4b5e3a480777f834c2cbeb6d00a755e4..e10693bae1859307c9cf266965d4ce20e6de1bf9 100644 --- a/paddle/fluid/operators/fused_elemwise_activation_op.cu +++ b/paddle/fluid/operators/fused/fused_elemwise_activation_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/fused_elemwise_activation_op.h" +#include "paddle/fluid/operators/fused/fused_elemwise_activation_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/fused_elemwise_activation_op.h b/paddle/fluid/operators/fused/fused_elemwise_activation_op.h similarity index 99% rename from paddle/fluid/operators/fused_elemwise_activation_op.h rename to paddle/fluid/operators/fused/fused_elemwise_activation_op.h index 5ae9aea959c268985c17643f2f47199c852c2bcb..01dc2dbfd61cc88f72174233382aa49f61c9b60f 100644 --- a/paddle/fluid/operators/fused_elemwise_activation_op.h +++ b/paddle/fluid/operators/fused/fused_elemwise_activation_op.h @@ -19,7 +19,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_desc.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/detail/safe_ref.h" -#include "paddle/fluid/operators/elementwise_op_function.h" +#include "paddle/fluid/operators/elementwise/elementwise_op_function.h" #include "paddle/fluid/operators/math/compound_functors.h" #include "paddle/fluid/operators/math/functors.h" diff --git a/paddle/fluid/operators/fused_embedding_fc_lstm_op.cc b/paddle/fluid/operators/fused/fused_embedding_fc_lstm_op.cc similarity index 99% rename from paddle/fluid/operators/fused_embedding_fc_lstm_op.cc rename to paddle/fluid/operators/fused/fused_embedding_fc_lstm_op.cc index fdc9cb4888b3468b85abfa0c693ed8ac5b0d450b..6d463538d232e1a38f845e7abc3786568ca3bb21 100644 --- a/paddle/fluid/operators/fused_embedding_fc_lstm_op.cc +++ b/paddle/fluid/operators/fused/fused_embedding_fc_lstm_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/fused_embedding_fc_lstm_op.h" +#include "paddle/fluid/operators/fused/fused_embedding_fc_lstm_op.h" #include #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/operators/math/cpu_vec.h" diff --git a/paddle/fluid/operators/fused_embedding_fc_lstm_op.h b/paddle/fluid/operators/fused/fused_embedding_fc_lstm_op.h similarity index 100% rename from paddle/fluid/operators/fused_embedding_fc_lstm_op.h rename to paddle/fluid/operators/fused/fused_embedding_fc_lstm_op.h diff --git a/paddle/fluid/operators/fusion_gru_op.cc b/paddle/fluid/operators/fused/fusion_gru_op.cc similarity index 99% rename from paddle/fluid/operators/fusion_gru_op.cc rename to paddle/fluid/operators/fused/fusion_gru_op.cc index 120b2ab440156f6020fd6005dd64a48e9a6918ec..7e34d1019c9e6577b50ff8c2fa3d767124b5ff3b 100644 --- a/paddle/fluid/operators/fusion_gru_op.cc +++ b/paddle/fluid/operators/fused/fusion_gru_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/fusion_gru_op.h" +#include "paddle/fluid/operators/fused/fusion_gru_op.h" #include // for memcpy #include #include "paddle/fluid/operators/math/blas.h" diff --git a/paddle/fluid/operators/fusion_gru_op.h b/paddle/fluid/operators/fused/fusion_gru_op.h similarity index 100% rename from paddle/fluid/operators/fusion_gru_op.h rename to paddle/fluid/operators/fused/fusion_gru_op.h diff --git a/paddle/fluid/operators/fusion_lstm_op.cc b/paddle/fluid/operators/fused/fusion_lstm_op.cc similarity index 99% rename from paddle/fluid/operators/fusion_lstm_op.cc rename to paddle/fluid/operators/fused/fusion_lstm_op.cc index 067e6a3e7cccc1f15ebdd984f3a2441339a989ab..0959539068eef5b550a8e3997d3f11ea67ae0707 100644 --- a/paddle/fluid/operators/fusion_lstm_op.cc +++ b/paddle/fluid/operators/fused/fusion_lstm_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/fusion_lstm_op.h" +#include "paddle/fluid/operators/fused/fusion_lstm_op.h" #include #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/operators/math/fc_compute.h" diff --git a/paddle/fluid/operators/fusion_lstm_op.h b/paddle/fluid/operators/fused/fusion_lstm_op.h similarity index 100% rename from paddle/fluid/operators/fusion_lstm_op.h rename to paddle/fluid/operators/fused/fusion_lstm_op.h diff --git a/paddle/fluid/operators/fusion_seqconv_eltadd_relu_op.cc b/paddle/fluid/operators/fused/fusion_seqconv_eltadd_relu_op.cc similarity index 99% rename from paddle/fluid/operators/fusion_seqconv_eltadd_relu_op.cc rename to paddle/fluid/operators/fused/fusion_seqconv_eltadd_relu_op.cc index b0910dc19edb246d9acfe3bdb15071c64cbdaba7..40bba09f3ef71021b7daff83b9d63005f7580395 100644 --- a/paddle/fluid/operators/fusion_seqconv_eltadd_relu_op.cc +++ b/paddle/fluid/operators/fused/fusion_seqconv_eltadd_relu_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/fusion_seqconv_eltadd_relu_op.h" +#include "paddle/fluid/operators/fused/fusion_seqconv_eltadd_relu_op.h" #include // for min, max #include #include "paddle/fluid/operators/math/blas.h" diff --git a/paddle/fluid/operators/fusion_seqconv_eltadd_relu_op.h b/paddle/fluid/operators/fused/fusion_seqconv_eltadd_relu_op.h similarity index 100% rename from paddle/fluid/operators/fusion_seqconv_eltadd_relu_op.h rename to paddle/fluid/operators/fused/fusion_seqconv_eltadd_relu_op.h diff --git a/paddle/fluid/operators/fusion_seqexpand_concat_fc_op.cc b/paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.cc similarity index 99% rename from paddle/fluid/operators/fusion_seqexpand_concat_fc_op.cc rename to paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.cc index 8d2f055d53a0c5bbef624ff3b01b01724d0b3a21..288b56fc2485138b20c5b53af3e950f1c1886ba5 100644 --- a/paddle/fluid/operators/fusion_seqexpand_concat_fc_op.cc +++ b/paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/fusion_seqexpand_concat_fc_op.h" +#include "paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.h" #include #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/operators/math/cpu_vec.h" diff --git a/paddle/fluid/operators/fusion_seqexpand_concat_fc_op.h b/paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.h similarity index 100% rename from paddle/fluid/operators/fusion_seqexpand_concat_fc_op.h rename to paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.h diff --git a/paddle/fluid/operators/layer_norm_op.h b/paddle/fluid/operators/layer_norm_op.h index 2e54bb497dec11eaeda03a1aa6acfd4cc261dbfe..78d20ddf5fd63b81fd5e7fba656d825897a67a11 100644 --- a/paddle/fluid/operators/layer_norm_op.h +++ b/paddle/fluid/operators/layer_norm_op.h @@ -15,8 +15,12 @@ limitations under the License. */ #pragma once #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/operators/elementwise_op_function.h" +#include "paddle/fluid/operators/elementwise/elementwise_op_function.h" #include "paddle/fluid/operators/math/blas.h" +#if !defined(PADDLE_WITH_CUDA) && !defined(_WIN32) && !defined(__APPLE__) && \ + !defined(__OSX__) +#include "paddle/fluid/operators/math/jit_kernel.h" +#endif #include "paddle/fluid/operators/math/math_function.h" namespace paddle { @@ -191,6 +195,8 @@ class LayerNormKernel : public framework::OpKernel { out.ShareDataWith(*y); out.Resize(matrix_shape); +#if defined(PADDLE_WITH_CUDA) || defined(_WIN32) || defined(__APPLE__) || \ + defined(__OSX__) auto& dev_ctx = ctx.template device_context(); RowwiseMean2D row_mean(left, right, ctx.device_context()); @@ -217,6 +223,19 @@ class LayerNormKernel : public framework::OpKernel { ElementwiseComputeEx, DeviceContext, T>( ctx, &out, bias, /*axis*/ 1, AddFunctor(), &out); } +#else + PADDLE_ENFORCE_EQ(mean->numel(), left); + PADDLE_ENFORCE_EQ(var->numel(), left); + PADDLE_ENFORCE_EQ(scale->numel(), right); + PADDLE_ENFORCE_EQ(bias->numel(), right); + + const auto& ker = math::jitkernel::KernelPool::Instance() + .template Get>( + static_cast(right)); + ker->Compute(x.data(), out.data(), mean->data(), var->data(), + scale->data(), bias->data(), static_cast(left), + static_cast(epsilon)); +#endif } }; diff --git a/paddle/fluid/operators/math/CMakeLists.txt b/paddle/fluid/operators/math/CMakeLists.txt index 4cd014cbadb888a2afe118785336e673e2b5eafb..83ee9f6c51c64c6b000b20d73d41036b8590da5c 100644 --- a/paddle/fluid/operators/math/CMakeLists.txt +++ b/paddle/fluid/operators/math/CMakeLists.txt @@ -72,12 +72,12 @@ cc_test(sequence_padding_test SRCS sequence_padding_test.cc DEPS sequence_paddin cc_test(sequence_pooling_test SRCS sequence_pooling_test.cc DEPS sequence_pooling) if(WITH_GPU) nv_test(math_function_gpu_test SRCS math_function_test.cu DEPS math_function) - nv_test(selected_rows_functor_gpu_test SRCS selected_rows_functor_test.cu DEPS selected_rows_functor math_function) + nv_test(selected_rows_functor_gpu_test SRCS selected_rows_functor_test.cu.cc DEPS selected_rows_functor math_function) endif() cc_test(concat_test SRCS concat_test.cc DEPS concat_and_split) cc_test(cpu_vec_test SRCS cpu_vec_test.cc DEPS blas cpu_info) if (NOT WIN32) - set(JIT_KERNEL_SRCS jit_kernel.cc jit_kernel_blas.cc jit_kernel_exp.cc jit_kernel_rnn.cc jit_kernel_crf_decode.cc) + set(JIT_KERNEL_SRCS jit_kernel.cc jit_kernel_blas.cc jit_kernel_exp.cc jit_kernel_rnn.cc jit_kernel_crf_decode.cc jit_kernel_layer_norm.cc) set(JIT_KERNEL_DEPS cpu_info cblas gflags enforce) if(WITH_XBYAK) list(APPEND JIT_KERNEL_SRCS jit_gen.cc jit_code.cc) diff --git a/paddle/fluid/operators/math/cpu_vec.h b/paddle/fluid/operators/math/cpu_vec.h index 0aed253c80fc28560716cbcfa70f74ef9c84f9b6..7d81aee596934308763002d440f52400f45b5f20 100644 --- a/paddle/fluid/operators/math/cpu_vec.h +++ b/paddle/fluid/operators/math/cpu_vec.h @@ -33,11 +33,11 @@ namespace math { #define SIGMOID_THRESHOLD_MIN -40.0 #define SIGMOID_THRESHOLD_MAX 13.0 -#define AVX_FLOAT_BLOCK 8 +#define YMM_FLOAT_BLOCK 8 #define AVX_DOUBLE_BLOCK 4 -#define AVX2_FLOAT_BLOCK 8 +#define YMM_FLOAT_BLOCK 8 #define AVX2_DOUBLE_BLOCK 4 -#define AVX512_FLOAT_BLOCK 16 +#define ZMM_FLOAT_BLOCK 16 #define AVX512_DOUBLE_BLOCK 8 template @@ -88,7 +88,7 @@ template <> inline void vec_scal(const int n, const float a, const float* x, float* y) { #ifdef __AVX__ - constexpr int block = AVX_FLOAT_BLOCK; + constexpr int block = YMM_FLOAT_BLOCK; if (n < block) { vec_scal(n, a, x, y); return; @@ -142,7 +142,7 @@ template <> inline void vec_bias_sub(const int n, const float a, const float* x, float* y) { #ifdef __AVX__ - constexpr int block = AVX_FLOAT_BLOCK; + constexpr int block = YMM_FLOAT_BLOCK; if (n < block) { vec_bias_sub(n, a, x, y); return; @@ -200,7 +200,7 @@ inline void vec_cross(const int n, const float* x, const float* y, const float* z, float* out) { #ifdef __AVX__ - constexpr int block = AVX_FLOAT_BLOCK; + constexpr int block = YMM_FLOAT_BLOCK; if (n < block) { vec_cross(n, x, y, z, out); return; @@ -257,7 +257,7 @@ template <> inline void vec_add_bias(const int n, const float a, const float* x, float* y) { #ifdef __AVX__ - constexpr int block = AVX_FLOAT_BLOCK; + constexpr int block = YMM_FLOAT_BLOCK; if (n < block) { vec_add_bias(n, a, x, y); return; @@ -326,7 +326,7 @@ template <> inline void vec_sigmoid(const int n, const float* x, float* y) { #ifdef __AVX__ - constexpr int block = AVX_FLOAT_BLOCK; + constexpr int block = YMM_FLOAT_BLOCK; if (n < block) { vec_sigmoid(n, x, y); return; @@ -415,7 +415,7 @@ template <> inline void vec_relu(const int n, const float* x, float* y) { #ifdef __AVX__ - constexpr int block = AVX_FLOAT_BLOCK; + constexpr int block = YMM_FLOAT_BLOCK; if (n < block * 4) { vec_relu(n, x, y); return; diff --git a/paddle/fluid/operators/math/jit_code.cc b/paddle/fluid/operators/math/jit_code.cc index e46f60f764ab9f1c292db339a5b38b976de5a11a..e484e9a3c705c5638fa94010a4513ae1566a8be3 100644 --- a/paddle/fluid/operators/math/jit_code.cc +++ b/paddle/fluid/operators/math/jit_code.cc @@ -13,8 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/jit_code.h" -#include "paddle/fluid/operators/math/jit_kernel.h" -#include "paddle/fluid/platform/cpu_info.h" +#include "paddle/fluid/operators/math/jit_kernel.h" // TODO(TJ): remove me namespace paddle { namespace operators { @@ -41,7 +40,7 @@ void VXXJitCode::generate() { } else if (scalar_index_ == 2) { vbroadcastss(ymm_src2, ptr[param2]); } - for (int i = 0; i < num_ / AVX_FLOAT_BLOCK; ++i) { + for (int i = 0; i < num_ / YMM_FLOAT_BLOCK; ++i) { if (scalar_index_ != 1) { vmovups(ymm_src1, ptr[param1 + offset]); } @@ -57,100 +56,160 @@ void VXXJitCode::generate() { vmaxps(ymm_dst, ymm_zero, ymm_dst); } vmovups(ptr[param3 + offset], ymm_dst); - offset += sizeof(float) * AVX_FLOAT_BLOCK; + offset += sizeof(float) * YMM_FLOAT_BLOCK; } - int rest = num_ % AVX_FLOAT_BLOCK; - if (rest >= 4) { - if (scalar_index_ != 1) { - vmovups(xmm_src1, ptr[param1 + offset]); - } - if (scalar_index_ != 2) { - vmovups(xmm_src2, ptr[param2 + offset]); - } - if (type_ == operand_type::mul) { - vmulps(xmm_dst, xmm_src1, xmm_src2); - } else if (type_ == operand_type::add) { - vaddps(xmm_dst, xmm_src1, xmm_src2); - } - if (with_relu_) { - vmaxps(xmm_dst, xmm_zero, xmm_dst); - } - vmovups(ptr[param3 + offset], xmm_dst); - offset += sizeof(float) * 4; - rest -= 4; - } - if (rest >= 2) { - if (scalar_index_ != 1) { - vmovups(xmm_src1, ptr[param1 + offset]); - } - if (scalar_index_ != 2) { - vmovups(xmm_src2, ptr[param2 + offset]); - } - if (type_ == operand_type::mul) { - vmulps(xmm_dst, xmm_src1, xmm_src2); - } else if (type_ == operand_type::add) { - vaddps(xmm_dst, xmm_src1, xmm_src2); + int rest = num_ % YMM_FLOAT_BLOCK; + while (rest > 0) { + int block = XMM_FLOAT_BLOCK; + if (rest >= 4) { + block = 4; + if (scalar_index_ != 1) { + vmovups(xmm_src1, ptr[param1 + offset]); + } + if (scalar_index_ != 2) { + vmovups(xmm_src2, ptr[param2 + offset]); + } + } else if (rest >= 2) { + block = 2; + if (scalar_index_ != 1) { + vmovq(xmm_src1, ptr[param1 + offset]); + } + if (scalar_index_ != 2) { + vmovq(xmm_src2, ptr[param2 + offset]); + } + } else { + block = 1; + if (scalar_index_ != 1) { + vmovss(xmm_src1, ptr[param1 + offset]); + } + if (scalar_index_ != 2) { + vmovss(xmm_src2, ptr[param2 + offset]); + } + } + switch (type_) { + case operand_type::mul: + vmulps(xmm_dst, xmm_src1, xmm_src2); + break; + case operand_type::add: + vaddps(xmm_dst, xmm_src1, xmm_src2); + break; + default: + break; } if (with_relu_) { vmaxps(xmm_dst, xmm_zero, xmm_dst); } - vmovq(ptr[param3 + offset], xmm_dst); - offset += sizeof(float) * 2; - rest -= 2; - } - if (rest > 0) { - if (scalar_index_ != 1) { - vmovups(xmm_src1, ptr[param1 + offset]); - } - if (scalar_index_ != 2) { - vmovups(xmm_src2, ptr[param2 + offset]); - } - if (type_ == operand_type::mul) { - vmulss(xmm_dst, xmm_src1, xmm_src2); - } else if (type_ == operand_type::add) { - vaddss(xmm_dst, xmm_src1, xmm_src2); - } - if (with_relu_) { - vmaxps(xmm_dst, xmm_zero, xmm_dst); + if (rest >= 4) { + vmovups(ptr[param3 + offset], xmm_dst); + } else if (rest >= 2) { + vmovq(ptr[param3 + offset], xmm_dst); + } else { + vmovss(ptr[param3 + offset], xmm_dst); } - vmovss(ptr[param3 + offset], xmm_dst); + offset += sizeof(float) * block; + rest -= block; } ret(); } -bool ReluJitCode::init(int d) { return MayIUse(avx); } +const float exp_float_consts[] ALIGN32 = {REPEAT_8TIMES(1.f), + REPEAT_8TIMES(2.f), + REPEAT_8TIMES(0.5f), + REPEAT_8TIMES(EXP_HIG), + REPEAT_8TIMES(EXP_LOW), + REPEAT_8TIMES(CEPHES_LOG2EF), + REPEAT_8TIMES(CEPHES_EXP_C1), + REPEAT_8TIMES(CEPHES_EXP_C2), + REPEAT_8TIMES(CEPHES_EXP_P0), + REPEAT_8TIMES(CEPHES_EXP_P1), + REPEAT_8TIMES(CEPHES_EXP_P2), + REPEAT_8TIMES(CEPHES_EXP_P3), + REPEAT_8TIMES(CEPHES_EXP_P4), + REPEAT_8TIMES(CEPHES_EXP_P5), + REPEAT_8TIMES(EXP_MAX_INPUT), + REPEAT_8TIMES(SIGMOID_THRESHOLD_MAX), + REPEAT_8TIMES(SIGMOID_THRESHOLD_MIN)}; -void ReluJitCode::generate() { +const int exp_int_0x7f[] ALIGN32 = {REPEAT_8TIMES(0x7f)}; +int g_tmp_mem[16] ALIGN32 = {0}; + +bool VActJitCode::init(int d, operand_type type) { + // TODO(TJ): implement avx512, avx_exp is slower than mkl when d >= 256 + return MayIUse(avx); +} + +void VActJitCode::generate() { + xmm_t xmm_zero = xmm_t(2); + ymm_t ymm_zero = ymm_t(2); + if (type_ == operand_type::relu) { + vxorps(ymm_zero, ymm_zero, ymm_zero); + } int offset = 0; - vxorps(ymm_zero, ymm_zero, ymm_zero); - for (int i = 0; i < num_ / AVX_FLOAT_BLOCK; ++i) { + for (int i = 0; i < num_ / YMM_FLOAT_BLOCK; ++i) { vmovups(ymm_src, ptr[param1 + offset]); - vmaxps(ymm_dst, ymm_zero, ymm_src); + switch (type_) { + case operand_type::relu: + relu_jmm(ymm_dst, ymm_src, ymm_zero); + break; + case operand_type::exp: + exp_jmm(ymm_dst, ymm_src, 2, 3, 4, 5); + break; + case operand_type::sigmoid: + sigmoid_jmm(ymm_dst, ymm_src, 2, 3, 4, 5); + break; + case operand_type::tanh: + tanh_jmm(ymm_dst, ymm_src, 2, 3, 4, 5); + break; + case operand_type::identity: + break; + default: + break; + } vmovups(ptr[param2 + offset], ymm_dst); - offset += sizeof(float) * AVX_FLOAT_BLOCK; - } - int rest = num_ % AVX_FLOAT_BLOCK; - if (rest >= 4) { - vmovups(xmm_src, ptr[param1 + offset]); - vmaxps(xmm_dst, xmm_zero, xmm_src); - vmovups(ptr[param2 + offset], xmm_dst); - offset += sizeof(float) * 4; - rest -= 4; + offset += sizeof(float) * YMM_FLOAT_BLOCK; } - if (rest >= 2) { - vmovups(xmm_src, ptr[param1 + offset]); - vmaxps(xmm_dst, xmm_zero, xmm_src); - vmovq(ptr[param2 + offset], xmm_dst); - offset += sizeof(float) * 2; - rest -= 2; - } - if (rest > 0) { - vmovups(xmm_src, ptr[param1 + offset]); - vmaxps(xmm_dst, xmm_zero, xmm_src); - vmovss(ptr[param2 + offset], xmm_dst); + int rest = num_ % YMM_FLOAT_BLOCK; + while (rest > 0) { + int block = XMM_FLOAT_BLOCK; + if (rest >= 4) { + block = 4; + vmovups(xmm_src, ptr[param1 + offset]); + } else if (rest >= 2) { + block = 2; + vmovq(xmm_src, ptr[param1 + offset]); + } else { + block = 1; + vmovss(xmm_src, ptr[param1 + offset]); + } + switch (type_) { + case operand_type::relu: + relu_jmm(xmm_dst, xmm_src, xmm_zero); + break; + case operand_type::exp: + exp_jmm(xmm_dst, xmm_src, 2, 3, 4, 5); + break; + case operand_type::sigmoid: + sigmoid_jmm(xmm_dst, xmm_src, 2, 3, 4, 5); + break; + case operand_type::tanh: + tanh_jmm(xmm_dst, xmm_src, 2, 3, 4, 5); + break; + default: + break; + } + if (rest >= 4) { + vmovups(ptr[param2 + offset], xmm_dst); + } else if (rest >= 2) { + vmovq(ptr[param2 + offset], xmm_dst); + } else { + vmovss(ptr[param2 + offset], xmm_dst); + } + offset += sizeof(float) * block; + rest -= block; } ret(); } + } // namespace gen } // namespace jitkernel } // namespace math diff --git a/paddle/fluid/operators/math/jit_code.h b/paddle/fluid/operators/math/jit_code.h index 3c242870a24c5bb29d34d4b99406c5df8cec6763..65f83ff4846601d1575daa994772cd869d526f56 100644 --- a/paddle/fluid/operators/math/jit_code.h +++ b/paddle/fluid/operators/math/jit_code.h @@ -16,6 +16,8 @@ limitations under the License. */ #include #include "paddle/fluid/operators/math/jit_gen.h" +#include "paddle/fluid/platform/cpu_info.h" + namespace paddle { namespace operators { namespace math { @@ -29,7 +31,61 @@ using ymm_t = const Xbyak::Ymm; using zmm_t = const Xbyak::Zmm; using Label = Xbyak::Label; -typedef enum { mul = 0, add } operand_type; +typedef enum { + mul = 0, + add, + sub, + relu, + exp, + sigmoid, + tanh, + identity +} operand_type; + +extern const float exp_float_consts[]; +extern const int exp_int_0x7f[]; +extern int g_tmp_mem[]; + +// TODO(TJ): move these to some proper place +#define SIGMOID_THRESHOLD_MIN -40.0 +#define SIGMOID_THRESHOLD_MAX 13.0 +#define EXP_MAX_INPUT 40.0 +#define XMM_FLOAT_BLOCK 4 +#define YMM_FLOAT_BLOCK 8 +#define ZMM_FLOAT_BLOCK 16 + +#define ALIGN32 __attribute__((aligned(32))) +#define EXP_HIG 88.3762626647949f +#define EXP_LOW -88.3762626647949f +#define CEPHES_LOG2EF 1.44269504088896341 +#define CEPHES_EXP_C1 0.693359375 +#define CEPHES_EXP_C2 -2.12194440e-4 +#define CEPHES_EXP_P0 1.9875691500E-4 +#define CEPHES_EXP_P1 1.3981999507E-3 +#define CEPHES_EXP_P2 8.3334519073E-3 +#define CEPHES_EXP_P3 4.1665795894E-2 +#define CEPHES_EXP_P4 1.6666665459E-1 +#define CEPHES_EXP_P5 5.0000001201E-1 + +#define REPEAT_8TIMES(val) val, val, val, val, val, val, val, val + +#define OFFSET_EXP_ONE 0 * YMM_FLOAT_BLOCK * sizeof(float) +#define OFFSET_EXP_TWO 1 * YMM_FLOAT_BLOCK * sizeof(float) +#define OFFSET_EXP_0P5 2 * YMM_FLOAT_BLOCK * sizeof(float) +#define OFFSET_EXP_HIG 3 * YMM_FLOAT_BLOCK * sizeof(float) +#define OFFSET_EXP_LOW 4 * YMM_FLOAT_BLOCK * sizeof(float) +#define OFFSET_EXP_LOG2EF 5 * YMM_FLOAT_BLOCK * sizeof(float) +#define OFFSET_EXP_C1 6 * YMM_FLOAT_BLOCK * sizeof(float) +#define OFFSET_EXP_C2 7 * YMM_FLOAT_BLOCK * sizeof(float) +#define OFFSET_EXP_P0 8 * YMM_FLOAT_BLOCK * sizeof(float) +#define OFFSET_EXP_P1 9 * YMM_FLOAT_BLOCK * sizeof(float) +#define OFFSET_EXP_P2 10 * YMM_FLOAT_BLOCK * sizeof(float) +#define OFFSET_EXP_P3 11 * YMM_FLOAT_BLOCK * sizeof(float) +#define OFFSET_EXP_P4 12 * YMM_FLOAT_BLOCK * sizeof(float) +#define OFFSET_EXP_P5 13 * YMM_FLOAT_BLOCK * sizeof(float) +#define OFFSET_EXP_MAX_INPUT 14 * YMM_FLOAT_BLOCK * sizeof(float) +#define OFFSET_SIGMOID_MAX 15 * YMM_FLOAT_BLOCK * sizeof(float) +#define OFFSET_SIGMOID_MIN 16 * YMM_FLOAT_BLOCK * sizeof(float) // function: vec = Operand(vec(or scalar), vec(or scalar)) (maybe with relu) class VXXJitCode : public JitCode { @@ -85,26 +141,184 @@ class VXXJitCode : public JitCode { ymm_t ymm_zero = ymm_t(3); }; -class ReluJitCode : public JitCode { +class VActJitCode : public JitCode { public: - DECLARE_JIT_CODE(ReluJitCode); - explicit ReluJitCode(int d, size_t code_size = 256 * 1024, + const char* name() const override { + std::string base = "VActJitCode"; + switch (type_) { + case operand_type::relu: + base += "_Relu"; + break; + case operand_type::exp: + base += "_Exp"; + break; + case operand_type::sigmoid: + base += "_Sigmoid"; + break; + case operand_type::tanh: + base += "_Tanh"; + break; + case operand_type::identity: + base += "_Identity"; + break; + default: + break; + } + return base.c_str(); + } + + explicit VActJitCode(int d, operand_type type, size_t code_size = 256 * 1024, void* code_ptr = nullptr) - : JitCode(code_size, code_ptr), num_(d) {} - static bool init(int d); + : JitCode(code_size, code_ptr), num_(d), type_(type) {} + static bool init(int d, operand_type type); void generate() override; - private: + protected: + // compute relu with ymm, xmm + template + void relu_jmm(JMM& dst, JMM& src, JMM& zero) { // NOLINT + vmaxps(dst, src, zero); + } + + // compute exp with ymm, xmm + template + void exp_jmm(JMM& dst, JMM& src, int fx_idx = 2, int fy_idx = 3, // NOLINT + int mask_idx = 4, int tmp_idx = 5) { + using namespace platform::jit; // NOLINT + assert(src.getIdx() != dst.getIdx()); // TODO(TJ): use enfore + // check all idx can not equal + JMM jmm_fx = JMM(fx_idx); + JMM jmm_fy = JMM(fy_idx); + JMM jmm_mask = JMM(mask_idx); + JMM jmm_tmp = JMM(tmp_idx); + reg64_t reg_ptr_global = rax; + push(reg_ptr_global); + mov(reg_ptr_global, reinterpret_cast(exp_float_consts)); + vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_HIG]); + vminps(src, src, jmm_tmp); + vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_LOW]); + vmaxps(src, src, jmm_tmp); + // express exp(x) as exp(g + n*log(2)) + vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_LOG2EF]); + vmulps(jmm_fx, src, jmm_tmp); + vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_0P5]); + vaddps(jmm_fx, jmm_fx, jmm_tmp); + vroundps(jmm_fy, jmm_fx, 0x01); + // if greater, substract 1 + vcmpgtps(jmm_mask, jmm_fy, jmm_fx); + vmovaps(jmm_tmp, ptr[reg_ptr_global]); + vandps(jmm_mask, jmm_mask, jmm_tmp); + vsubps(jmm_fx, jmm_fy, jmm_mask); + vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_C1]); + vmulps(jmm_fy, jmm_fx, jmm_tmp); + vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_C2]); + JMM ymm_z = JMM(jmm_mask.getIdx()); + vmulps(ymm_z, jmm_fx, jmm_tmp); + vsubps(src, src, jmm_fy); + vsubps(src, src, ymm_z); + vmulps(ymm_z, src, src); + vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_P0]); + vmulps(dst, src, jmm_tmp); + for (size_t i = OFFSET_EXP_P1; i < OFFSET_EXP_P5; + i += (YMM_FLOAT_BLOCK * sizeof(float))) { + vmovaps(jmm_tmp, ptr[reg_ptr_global + i]); // P1~P4 + vaddps(dst, dst, jmm_tmp); + vmulps(dst, dst, src); + } + vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_P5]); + vaddps(dst, dst, jmm_tmp); + vmulps(dst, dst, ymm_z); + vaddps(dst, dst, src); + vmovaps(jmm_tmp, ptr[reg_ptr_global]); + vaddps(dst, dst, jmm_tmp); + // build 2^n + JMM ymm_int = jmm_fx; + vcvttps2dq(ymm_int, jmm_fx); + mov(reg_ptr_global, reinterpret_cast(exp_int_0x7f)); + vmovdqa(jmm_tmp, ptr[reg_ptr_global]); + if (MayIUse(avx2) || std::is_same::value) { + vpaddd(ymm_int, ymm_int, jmm_tmp); + vpslld(ymm_int, ymm_int, 23); + } else if (MayIUse(avx)) { + xmm_t xtmp1 = xmm_t(ymm_int.getIdx()); + xmm_t xtmp2 = xmm_t(jmm_tmp.getIdx()); + reg64_t reg_ptr_tmp = reg_ptr_global; + mov(reg_ptr_tmp, reinterpret_cast(g_tmp_mem)); + vmovdqa(ptr[reg_ptr_tmp], ymm_int); + vmovdqa(ptr[reg_ptr_tmp + YMM_FLOAT_BLOCK * sizeof(float)], jmm_tmp); + vpaddd(xtmp1, xtmp1, xtmp2); + vpslld(xtmp1, xtmp1, 23); + vmovdqa(ptr[reg_ptr_tmp], xtmp1); + // next 128bits + vmovdqa(xtmp1, ptr[reg_ptr_tmp + XMM_FLOAT_BLOCK * sizeof(float)]); + vmovdqa(xtmp2, ptr[reg_ptr_tmp + + (YMM_FLOAT_BLOCK + XMM_FLOAT_BLOCK) * sizeof(float)]); + vpaddd(xtmp1, xtmp1, xtmp2); + vpslld(xtmp1, xtmp1, 23); + vmovdqa(ptr[reg_ptr_tmp + XMM_FLOAT_BLOCK * sizeof(float)], xtmp1); + // load out + vmovdqa(ymm_int, ptr[reg_ptr_tmp]); + } + vmulps(dst, dst, ymm_int); + pop(reg_ptr_global); + } + + // compute sigmoid with ymm, xmm + template + void sigmoid_jmm(JMM& dst, JMM& src, int fx_idx = 2, // NOLINT + int fy_idx = 3, int mask_idx = 4, int tmp_idx = 5) { + // y = 1 / (1 + e^-x) + JMM jmm_tmp = JMM(tmp_idx); + reg64_t reg_ptr_global = rax; + push(reg_ptr_global); + mov(reg_ptr_global, reinterpret_cast(exp_float_consts)); + vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_SIGMOID_MAX]); + vminps(src, src, jmm_tmp); + vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_SIGMOID_MIN]); + vmaxps(src, src, jmm_tmp); + vxorps(jmm_tmp, jmm_tmp, jmm_tmp); + vsubps(src, jmm_tmp, src); + exp_jmm(dst, src, fx_idx, fy_idx, mask_idx, tmp_idx); + vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_ONE]); + vaddps(dst, dst, jmm_tmp); + vdivps(dst, jmm_tmp, dst); + pop(reg_ptr_global); + } + + // compute tanh with ymm, xmm + template + void tanh_jmm(JMM& dst, JMM& src, int fx_idx = 2, int fy_idx = 3, // NOLINT + int mask_idx = 4, int tmp_idx = 5) { + // y = 2 / (1 + e^(-2x)) - 1 + JMM jmm_tmp = JMM(tmp_idx); + JMM jmm_zero = JMM(mask_idx); + reg64_t reg_ptr_global = rax; + push(reg_ptr_global); + mov(reg_ptr_global, reinterpret_cast(exp_float_consts)); + vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_TWO]); + vxorps(jmm_zero, jmm_zero, jmm_zero); + vsubps(jmm_tmp, jmm_zero, jmm_tmp); + vmulps(src, src, jmm_tmp); + exp_jmm(dst, src, fx_idx, fy_idx, mask_idx, tmp_idx); + vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_ONE]); + vaddps(dst, dst, jmm_tmp); + vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_TWO]); + vdivps(dst, jmm_tmp, dst); + vmovaps(jmm_tmp, ptr[reg_ptr_global + OFFSET_EXP_ONE]); + vsubps(dst, dst, jmm_tmp); + pop(reg_ptr_global); + } + + protected: int num_; + operand_type type_; reg64_t param1{abi_param1}; reg64_t param2{abi_param2}; - xmm_t xmm_zero = xmm_t(0); - xmm_t xmm_src = xmm_t(1); - xmm_t xmm_dst = xmm_t(1); + xmm_t xmm_src = xmm_t(0); + ymm_t ymm_src = ymm_t(0); - ymm_t ymm_zero = ymm_t(0); - ymm_t ymm_src = ymm_t(1); + xmm_t xmm_dst = xmm_t(1); ymm_t ymm_dst = ymm_t(1); }; diff --git a/paddle/fluid/operators/math/jit_kernel.h b/paddle/fluid/operators/math/jit_kernel.h index cd3a45e66773c89e45e80ab77ebd925abd6cbe53..7e163c1349e73d8fe5e436b98c9a8f67e6439506 100644 --- a/paddle/fluid/operators/math/jit_kernel.h +++ b/paddle/fluid/operators/math/jit_kernel.h @@ -26,12 +26,13 @@ namespace operators { namespace math { namespace jitkernel { +// TODO(TJ): move these to some proper place #define SIGMOID_THRESHOLD_MIN -40.0 #define SIGMOID_THRESHOLD_MAX 13.0 #define EXP_MAX_INPUT 40.0 -#define AVX_FLOAT_BLOCK 8 -#define AVX2_FLOAT_BLOCK 8 -#define AVX512_FLOAT_BLOCK 16 +#define XMM_FLOAT_BLOCK 4 +#define YMM_FLOAT_BLOCK 8 +#define ZMM_FLOAT_BLOCK 16 typedef enum { kLT8, kEQ8, kGT8LT16, kEQ16, kGT16 } jit_block; @@ -97,39 +98,23 @@ class VAddBiasKernel : public Kernel { template class VActKernel : public Kernel { public: - virtual void ComputeDeprecated(const T *x, T *y) const = 0; + void (*Compute)(const T *, T *, int); }; template -class VReluKernel : public VActKernel { - public: - virtual void ComputeDeprecated(const T *x, T *y) const = 0; - void (*Compute)(const T *, T *, int); -}; +class VReluKernel : public VActKernel {}; template -class VIdentityKernel : public VActKernel { - public: - virtual void ComputeDeprecated(const T *x, T *y) const = 0; -}; +class VIdentityKernel : public VActKernel {}; template -class VExpKernel : public VActKernel { - public: - virtual void ComputeDeprecated(const T *x, T *y) const = 0; -}; +class VExpKernel : public VActKernel {}; template -class VSigmoidKernel : public VActKernel { - public: - virtual void ComputeDeprecated(const T *x, T *y) const = 0; -}; +class VSigmoidKernel : public VActKernel {}; template -class VTanhKernel : public VActKernel { - public: - virtual void ComputeDeprecated(const T *x, T *y) const = 0; -}; +class VTanhKernel : public VActKernel {}; template class LSTMKernel : public Kernel { @@ -161,6 +146,14 @@ class CRFDecodeKernel : public Kernel { int *track) const = 0; }; +template +class LayerNormKernel : public Kernel { + public: + virtual void Compute(T *x, T *out, T *mean, T *var, const T *scale, + const T *bias, int height, + const float epsilon) const = 0; +}; + } // namespace jitkernel } // namespace math } // namespace operators diff --git a/paddle/fluid/operators/math/jit_kernel_blas.cc b/paddle/fluid/operators/math/jit_kernel_blas.cc index cf46a210afbd4903dc3841f27765c390f721c763..36a50f20434f313e93bfa3dd2c9d46963024caf7 100644 --- a/paddle/fluid/operators/math/jit_kernel_blas.cc +++ b/paddle/fluid/operators/math/jit_kernel_blas.cc @@ -25,10 +25,6 @@ limitations under the License. */ #include "paddle/fluid/platform/dynload/mklml.h" #endif -#ifdef __AVX__ -#include -#endif - namespace paddle { namespace operators { namespace math { @@ -128,23 +124,16 @@ void VScalMKL(const double* a, const double* x, double* y, int n) { #endif -#define DECLARE_STATIC_FUNC \ - static inline std::string name(int d) { \ - PADDLE_THROW("DType should be either float or double"); \ - } \ - static inline bool useJIT(int d) { return false; } \ - static inline bool useMKL(int d) { return false; } - /* VMUL JitKernel */ template class VMulKernelImpl : public VMulKernel { public: - DECLARE_STATIC_FUNC; + JITKERNEL_DECLARE_STATIC_FUNC; explicit VMulKernelImpl(int d) : VMulKernel() { #ifdef PADDLE_WITH_XBYAK if (useJIT(d)) { // roughly estimate the size of code - size_t sz = 96 + d / AVX_FLOAT_BLOCK * 4 * 8; + size_t sz = 96 + d / YMM_FLOAT_BLOCK * 4 * 8; jitcode_.reset(new gen::VXXJitCode(d, gen::operand_type::mul, 0, false, sz > 4096 ? sz : 4096)); this->Compute = @@ -191,11 +180,11 @@ bool VMulKernelImpl::useMKL(int d) { template class VAddKernelImpl : public VAddKernel { public: - DECLARE_STATIC_FUNC; + JITKERNEL_DECLARE_STATIC_FUNC; explicit VAddKernelImpl(int d) : VAddKernel() { #ifdef PADDLE_WITH_XBYAK if (useJIT(d)) { - size_t sz = 96 + d / AVX_FLOAT_BLOCK * 4 * 8; + size_t sz = 96 + d / YMM_FLOAT_BLOCK * 4 * 8; jitcode_.reset(new gen::VXXJitCode(d, gen::operand_type::add, 0, false, sz > 4096 ? sz : 4096)); this->Compute = @@ -241,11 +230,11 @@ bool VAddKernelImpl::useMKL(int d) { template class VAddReluKernelImpl : public VAddReluKernel { public: - DECLARE_STATIC_FUNC; + JITKERNEL_DECLARE_STATIC_FUNC; explicit VAddReluKernelImpl(int d) : VAddReluKernel() { #ifdef PADDLE_WITH_XBYAK if (useJIT(d)) { - size_t sz = 96 + d / AVX_FLOAT_BLOCK * 4 * 8; + size_t sz = 96 + d / YMM_FLOAT_BLOCK * 4 * 8; jitcode_.reset(new gen::VXXJitCode(d, gen::operand_type::add, 0, true, sz > 4096 ? sz : 4096)); this->Compute = @@ -273,11 +262,11 @@ bool VAddReluKernelImpl::useJIT(int d) { template class VScalKernelImpl : public VScalKernel { public: - DECLARE_STATIC_FUNC; + JITKERNEL_DECLARE_STATIC_FUNC; explicit VScalKernelImpl(int d) : VScalKernel() { #ifdef PADDLE_WITH_XBYAK if (useJIT(d)) { - size_t sz = 96 + d / AVX_FLOAT_BLOCK * 4 * 8; + size_t sz = 96 + d / YMM_FLOAT_BLOCK * 4 * 8; jitcode_.reset(new gen::VXXJitCode(d, gen::operand_type::mul, 1, false, sz > 4096 ? sz : 4096)); this->Compute = @@ -322,11 +311,11 @@ bool VScalKernelImpl::useMKL(int d) { template class VAddBiasKernelImpl : public VAddBiasKernel { public: - DECLARE_STATIC_FUNC; + JITKERNEL_DECLARE_STATIC_FUNC; explicit VAddBiasKernelImpl(int d) : VAddBiasKernel() { #ifdef PADDLE_WITH_XBYAK if (useJIT(d)) { - size_t sz = 96 + d / AVX_FLOAT_BLOCK * 4 * 8; + size_t sz = 96 + d / YMM_FLOAT_BLOCK * 4 * 8; jitcode_.reset(new gen::VXXJitCode(d, gen::operand_type::add, 1, false, sz > 4096 ? sz : 4096)); this->Compute = @@ -355,15 +344,15 @@ bool VAddBiasKernelImpl::useJIT(int d) { template class VReluKernelImpl : public VReluKernel { public: - DECLARE_STATIC_FUNC; + JITKERNEL_DECLARE_STATIC_FUNC; explicit VReluKernelImpl(int d) : VReluKernel() { - this->num_ = d; // TODO(TJ): remove me when ComputeDeprecated done #ifdef PADDLE_WITH_XBYAK if (useJIT(d)) { - size_t sz = 96 /*init*/ + - d / AVX_FLOAT_BLOCK * 4 /* instructions*/ * - 8 /*everage byte for each instruction*/; - jitcode_.reset(new gen::ReluJitCode(d, sz > 4096 ? sz : 4096)); + size_t sz = 96 /* init size */ + + d / YMM_FLOAT_BLOCK * 4 /* instructions */ * + 8 /* average bytes for each instruction */; + jitcode_.reset(new gen::VActJitCode(d, gen::operand_type::relu, + sz > 4096 ? sz : 4096)); this->Compute = jitcode_->getCode(); return; } @@ -371,24 +360,32 @@ class VReluKernelImpl : public VReluKernel { this->Compute = VReluRefer; } - void ComputeDeprecated(const T* x, T* y) const override { - VReluRefer(x, y, this->num_); - } #ifdef PADDLE_WITH_XBYAK private: - std::unique_ptr jitcode_{nullptr}; + std::unique_ptr jitcode_{nullptr}; #endif }; #ifdef PADDLE_WITH_XBYAK template <> bool VReluKernelImpl::useJIT(int d) { - return gen::ReluJitCode::init(d); + return gen::VActJitCode::init(d, gen::operand_type::relu); } #endif -#undef DECLARE_STATIC_FUNC +template +inline void VIdentityRefer(const T* x, T* y, int n) {} + +/* An empty JitKernel */ +template +class VIdentityKernelImpl : public VIdentityKernel { + public: + JITKERNEL_DECLARE_STATIC_FUNC; + explicit VIdentityKernelImpl(int d) : VIdentityKernel() { + this->Compute = VIdentityRefer; + } +}; REGISTER_JITKERNEL(vmul, VMulKernel); REGISTER_JITKERNEL(vadd, VAddKernel); @@ -396,16 +393,7 @@ REGISTER_JITKERNEL(vaddrelu, VAddReluKernel); REGISTER_JITKERNEL(vscal, VScalKernel); REGISTER_JITKERNEL(vaddbias, VAddBiasKernel); REGISTER_JITKERNEL(vrelu, VReluKernel); - -/* An empty JitKernel */ -template -class VIdentityKernelImpl : public VIdentityKernel { - public: - explicit VIdentityKernelImpl(int d) : VIdentityKernel() { this->num_ = d; } - void ComputeDeprecated(const T* x, T* y) const override {} -}; - -REGISTER_JITKERNEL_DEPRECATED(videntity, VIdentityKernel); +REGISTER_JITKERNEL(videntity, VIdentityKernel); } // namespace jitkernel } // namespace math diff --git a/paddle/fluid/operators/math/jit_kernel_crf_decode.cc b/paddle/fluid/operators/math/jit_kernel_crf_decode.cc index a4861c347e44ad86a066861d3375b556302a84bc..4d26b81948238f18b097f535534fcfe9049b93c3 100644 --- a/paddle/fluid/operators/math/jit_kernel_crf_decode.cc +++ b/paddle/fluid/operators/math/jit_kernel_crf_decode.cc @@ -105,14 +105,14 @@ class CRFDecodeKernelImpl : public CRFDecodeKernel { int tag_num) \ : CRFDecodeKernel() { \ this->num_ = tag_num; \ - this->end_ = this->num_ / AVX_FLOAT_BLOCK; \ - this->rest_ = this->num_ % AVX_FLOAT_BLOCK; \ + this->end_ = this->num_ / YMM_FLOAT_BLOCK; \ + this->rest_ = this->num_ % YMM_FLOAT_BLOCK; \ } \ template <> \ void CRFDecodeKernelImpl::Compute( \ const int seq_len, const float* x, const float* w, float* alpha, \ int* track) const { \ - INIT_ALPHA(AVX_FLOAT_BLOCK) \ + INIT_ALPHA(YMM_FLOAT_BLOCK) \ /* Use the column-major strategy to get the location of maximum score.*/ \ int seq_offset = 0; \ constexpr int state_trans_base_idx = 2; \ @@ -150,7 +150,7 @@ class CRFDecodeKernelImpl : public CRFDecodeKernel { max_score = _mm256_max_ps(max_score, score_v); \ trans_offset += this->num_; \ } \ - UPDATE_ALPHA(AVX_FLOAT_BLOCK) \ + UPDATE_ALPHA(YMM_FLOAT_BLOCK) \ } \ seq_offset += this->num_; \ } \ @@ -161,14 +161,14 @@ class CRFDecodeKernelImpl : public CRFDecodeKernel { CRFDecodeKernelImpl::CRFDecodeKernelImpl(int tag_num) \ : CRFDecodeKernel() { \ this->num_ = tag_num; \ - this->end_ = this->num_ / AVX2_FLOAT_BLOCK; \ - this->rest_ = this->num_ % AVX2_FLOAT_BLOCK; \ + this->end_ = this->num_ / YMM_FLOAT_BLOCK; \ + this->rest_ = this->num_ % YMM_FLOAT_BLOCK; \ } \ template <> \ void CRFDecodeKernelImpl::Compute( \ const int seq_len, const float* x, const float* w, float* alpha, \ int* track) const { \ - INIT_ALPHA(AVX2_FLOAT_BLOCK) \ + INIT_ALPHA(YMM_FLOAT_BLOCK) \ /* Use the column-major strategy to get the location of maximum score.*/ \ int seq_offset = 0; \ constexpr int state_trans_base_idx = 2; \ @@ -196,7 +196,7 @@ class CRFDecodeKernelImpl : public CRFDecodeKernel { max_score = _mm256_max_ps(max_score, score_v); \ trans_offset += this->num_; \ } \ - UPDATE_ALPHA(AVX2_FLOAT_BLOCK) \ + UPDATE_ALPHA(YMM_FLOAT_BLOCK) \ } \ seq_offset += this->num_; \ } \ @@ -208,14 +208,14 @@ class CRFDecodeKernelImpl : public CRFDecodeKernel { int tag_num) \ : CRFDecodeKernel() { \ this->num_ = tag_num; \ - this->end_ = this->num_ / AVX512_FLOAT_BLOCK; \ - this->rest_ = this->num_ % AVX512_FLOAT_BLOCK; \ + this->end_ = this->num_ / ZMM_FLOAT_BLOCK; \ + this->rest_ = this->num_ % ZMM_FLOAT_BLOCK; \ } \ template <> \ void CRFDecodeKernelImpl::Compute( \ const int seq_len, const float* x, const float* w, float* alpha, \ int* track) const { \ - INIT_ALPHA(AVX512_FLOAT_BLOCK) \ + INIT_ALPHA(ZMM_FLOAT_BLOCK) \ /* Use the column-major strategy to get the location of maximum score.*/ \ int seq_offset = 0; \ constexpr int state_trans_base_idx = 2; \ @@ -250,7 +250,7 @@ class CRFDecodeKernelImpl : public CRFDecodeKernel { this->num_ + j_offset), \ max_j); \ /* Calculate the offset of next step*/ \ - j_offset += AVX512_FLOAT_BLOCK; \ + j_offset += ZMM_FLOAT_BLOCK; \ if (j == this->end_ - 1) { \ if (this->rest_ > 0) { \ j_offset += last_offset; \ diff --git a/paddle/fluid/operators/math/jit_kernel_exp.cc b/paddle/fluid/operators/math/jit_kernel_exp.cc index 2ac9e1092362f60ea3d89da0c971a365b45f39ea..f26815300de31c47a7ea341307b0051dee99e63b 100644 --- a/paddle/fluid/operators/math/jit_kernel_exp.cc +++ b/paddle/fluid/operators/math/jit_kernel_exp.cc @@ -16,6 +16,11 @@ limitations under the License. */ #include // for exp #include #include "paddle/fluid/operators/math/jit_kernel_macro.h" + +#ifdef PADDLE_WITH_XBYAK +#include "paddle/fluid/operators/math/jit_code.h" +#endif + #ifdef PADDLE_WITH_MKLML #include "paddle/fluid/platform/dynload/mklml.h" #endif @@ -30,38 +35,238 @@ namespace math { namespace jitkernel { namespace jit = platform::jit; +// TODO(TJ): move refer codes to one file +// Refer code only focus on correctness +template +void VExpRefer(const T* x, T* y, int n) { + for (int i = 0; i < n; ++i) { + y[i] = std::exp(x[i]); + } +} + +template +void VSigmoidRefer(const T* x, T* y, int n) { + // y = 1 / (1 + e^-x) + const T min = SIGMOID_THRESHOLD_MIN; + const T max = SIGMOID_THRESHOLD_MAX; + for (int i = 0; i < n; ++i) { + T tmp = (x[i] < min) ? min : ((x[i] > max) ? max : x[i]); + y[i] = static_cast(1) / (static_cast(1) + std::exp(-tmp)); + } +} + +template +void VTanhRefer(const T* x, T* y, int n) { + // y = 2 * sigmoid(2x) - 1 + for (int i = 0; i < n; ++i) { + y[i] = static_cast(2) * x[i]; + } + VSigmoidRefer(y, y, n); + for (int i = 0; i < n; ++i) { + y[i] = static_cast(2) * y[i] - static_cast(1); + } +} + +#ifdef PADDLE_WITH_MKLML +// try to use MKL to speedup +template +void VExpMKL(const T* x, T* y, int n); + +template <> +void VExpMKL(const float* x, float* y, int n) { + platform::dynload::vsExp(n, x, y); +} + +template <> +void VExpMKL(const double* x, double* y, int n) { + platform::dynload::vdExp(n, x, y); +} + +template +void VSigmoidMKL(const T* x, T* y, int n) { + const T min = SIGMOID_THRESHOLD_MIN; + const T max = SIGMOID_THRESHOLD_MAX; + for (int i = 0; i < n; ++i) { + y[i] = (x[i] < min) ? min : ((x[i] > max) ? max : x[i]); + y[i] = static_cast(0) - y[i]; + } + VExpMKL(y, y, n); + for (int i = 0; i < n; ++i) { + y[i] = static_cast(1) / (static_cast(1) + y[i]); + } +} + +template +void VTanhMKL(const T* x, T* y, int n) { + for (int i = 0; i < n; ++i) { + y[i] = static_cast(2) * x[i]; + } + VSigmoidMKL(y, y, n); + for (int i = 0; i < n; ++i) { + y[i] = static_cast(2) * y[i] - static_cast(1); + } +} +#endif + /* VExp JitKernel */ -template +template class VExpKernelImpl : public VExpKernel { public: - explicit VExpKernelImpl(int d) : VExpKernel() { this->num_ = d; } - void ComputeDeprecated(const T* x, T* y) const override { - for (int i = 0; i < this->num_; ++i) { - y[i] = std::exp(x[i]); + JITKERNEL_DECLARE_STATIC_FUNC; + explicit VExpKernelImpl(int d) : VExpKernel() { +#ifdef PADDLE_WITH_XBYAK + if (useJIT(d)) { + size_t sz = 96 + d / YMM_FLOAT_BLOCK * 70 * 8; + jitcode_.reset(new gen::VActJitCode(d, gen::operand_type::exp, + sz > 4096 ? sz : 4096)); + this->Compute = jitcode_->getCode(); + return; + } +#endif +#ifdef PADDLE_WITH_MKLML + if (useMKL(d)) { + this->Compute = VExpMKL; + return; } +#endif + this->Compute = VExpRefer; } + +#ifdef PADDLE_WITH_XBYAK + + private: + std::unique_ptr jitcode_{nullptr}; +#endif }; +#ifdef PADDLE_WITH_XBYAK +template <> +bool VExpKernelImpl::useJIT(int d) { + return gen::VActJitCode::init(d, gen::operand_type::exp); +} +#endif + #ifdef PADDLE_WITH_MKLML -#define MKL_FLOAT(isa, block) \ - template <> \ - void VExpKernelImpl::ComputeDeprecated(const float* x, \ - float* y) const { \ - platform::dynload::vsExp(this->num_, x, y); \ +template <> +bool VExpKernelImpl::useMKL(int d) { + return d > 512; +} + +template <> +bool VExpKernelImpl::useMKL(int d) { + return true; +} + +#endif + +/* VSigmoid JitKernel */ +template +class VSigmoidKernelImpl : public VSigmoidKernel { + public: + JITKERNEL_DECLARE_STATIC_FUNC; + explicit VSigmoidKernelImpl(int d) : VSigmoidKernel() { +#ifdef PADDLE_WITH_XBYAK + if (useJIT(d)) { + size_t sz = 96 + d / YMM_FLOAT_BLOCK * 82 * 8; + jitcode_.reset(new gen::VActJitCode(d, gen::operand_type::sigmoid, + sz > 4096 ? sz : 4096)); + this->Compute = jitcode_->getCode(); + return; + } +#endif + +#ifdef PADDLE_WITH_MKLML + // strictly it's a better impl with MKL, then is refer + if (useMKL(d)) { + this->Compute = VSigmoidMKL; + return; + } +#endif + this->Compute = VSigmoidRefer; } -#define MKL_DOUBLE(isa, block) \ - template <> \ - void VExpKernelImpl::ComputeDeprecated( \ - const double* x, double* y) const { \ - platform::dynload::vdExp(this->num_, x, y); \ +#ifdef PADDLE_WITH_XBYAK + + private: + std::unique_ptr jitcode_{nullptr}; +#endif +}; + +#ifdef PADDLE_WITH_XBYAK +template <> +bool VSigmoidKernelImpl::useJIT(int d) { + return gen::VActJitCode::init(d, gen::operand_type::sigmoid); +} +#endif + +#ifdef PADDLE_WITH_MKLML +template <> +bool VSigmoidKernelImpl::useMKL(int d) { + return d > 512; +} + +template <> +bool VSigmoidKernelImpl::useMKL(int d) { + return true; +} +#endif + +/* VTanh JitKernel */ +template +class VTanhKernelImpl : public VTanhKernel { + public: + JITKERNEL_DECLARE_STATIC_FUNC; + explicit VTanhKernelImpl(int d) : VTanhKernel() { +#ifdef PADDLE_WITH_XBYAK + if (useJIT(d)) { + size_t sz = 96 + d / YMM_FLOAT_BLOCK * 84 * 8; + jitcode_.reset(new gen::VActJitCode(d, gen::operand_type::tanh, + sz > 4096 ? sz : 4096)); + this->Compute = jitcode_->getCode(); + return; + } +#endif + +#ifdef PADDLE_WITH_MKLML + // strictly it's a better impl with MKL, then is refer + if (useMKL(d)) { + this->Compute = VTanhMKL; + return; + } +#endif + this->Compute = VTanhRefer; } -FOR_EACH_ISA(MKL_FLOAT, kLT8); -FOR_EACH_ISA(MKL_FLOAT, kGT8LT16); -FOR_EACH_ISA(MKL_FLOAT, kGT16); -FOR_EACH_ISA_BLOCK(MKL_DOUBLE); + +#ifdef PADDLE_WITH_XBYAK + + private: + std::unique_ptr jitcode_{nullptr}; +#endif +}; + +#ifdef PADDLE_WITH_XBYAK +template <> +bool VTanhKernelImpl::useJIT(int d) { + return gen::VActJitCode::init(d, gen::operand_type::tanh); +} +#endif + +#ifdef PADDLE_WITH_MKLML +template <> +bool VTanhKernelImpl::useMKL(int d) { + return d > 512; +} + +template <> +bool VTanhKernelImpl::useMKL(int d) { + return true; +} #endif +REGISTER_JITKERNEL(vexp, VExpKernel); +REGISTER_JITKERNEL(vsigmoid, VSigmoidKernel); +REGISTER_JITKERNEL(vtanh, VTanhKernel); + namespace detail { #ifdef __AVX__ @@ -210,334 +415,6 @@ __m256 ExpAVX2(__m256 x) { #endif } // namespace detail - -#define INTRI8_FLOAT(isa, expisa) \ - template <> \ - void VExpKernelImpl::ComputeDeprecated(const float* x, \ - float* y) const { \ - __m256 tmp = _mm256_loadu_ps(x); \ - _mm256_storeu_ps(y, expisa(tmp)); \ - } - -#define INTRI16_FLOAT(isa, expisa) \ - template <> \ - void VExpKernelImpl::ComputeDeprecated(const float* x, \ - float* y) const { \ - __m256 tmp0 = _mm256_loadu_ps(x); \ - __m256 tmp1 = _mm256_loadu_ps(x + 8); \ - tmp0 = expisa(tmp0); \ - tmp1 = expisa(tmp1); \ - _mm256_storeu_ps(y, tmp0); \ - _mm256_storeu_ps(y + 8, tmp1); \ - } - -#ifdef __AVX__ -INTRI8_FLOAT(jit::avx, detail::ExpAVX); -INTRI16_FLOAT(jit::avx, detail::ExpAVX); -#endif -#ifdef __AVX2__ -INTRI8_FLOAT(jit::avx2, detail::ExpAVX2); -INTRI16_FLOAT(jit::avx2, detail::ExpAVX2); -#endif -#ifdef __AVX512F__ -INTRI8_FLOAT(jit::avx512f, detail::ExpAVX2); -INTRI16_FLOAT(jit::avx512f, detail::ExpAVX2); -#endif -// TODO(TJ): eq16 test and complete avx512 - -#undef INTRI8_FLOAT -#undef INTRI16_FLOAT -#undef MKL_FLOAT -#undef MKL_DOUBLE - -REGISTER_JITKERNEL_DEPRECATED(vexp, VExpKernel); - -/* VSigmoid JitKernel */ -template -class VSigmoidKernelImpl : public VSigmoidKernel { - public: - explicit VSigmoidKernelImpl(int d) : VSigmoidKernel() { - this->num_ = d; - vexp_ = KernelPool::Instance().template Get>(d); - } - void ComputeDeprecated(const T* x, T* y) const override { - const T min = SIGMOID_THRESHOLD_MIN; - const T max = SIGMOID_THRESHOLD_MAX; - for (int i = 0; i < this->num_; ++i) { - y[i] = (x[i] < min) ? min : ((x[i] > max) ? max : x[i]); - y[i] = static_cast(0) - y[i]; - } - vexp_->ComputeDeprecated(y, y); - for (int i = 0; i < this->num_; ++i) { - y[i] = static_cast(1) / (static_cast(1) + y[i]); - } - } - - private: - std::shared_ptr> vexp_; -}; - -#define INTRI_SIGMOID(tmp, min, max, expisa) \ - tmp = _mm256_max_ps(tmp, min); \ - tmp = _mm256_min_ps(tmp, max); \ - tmp = _mm256_sub_ps(_mm256_set1_ps(0.0f), tmp); \ - tmp = expisa(tmp); \ - tmp = _mm256_add_ps(_mm256_set1_ps(1.0f), tmp); \ - tmp = _mm256_div_ps(_mm256_set1_ps(1.0f), tmp) - -#define INTRI8_FLOAT(isa, expisa) \ - template <> \ - void VSigmoidKernelImpl::ComputeDeprecated( \ - const float* x, float* y) const { \ - /* TODO(TJ): try to use static const*/ \ - __m256 max = _mm256_set1_ps(SIGMOID_THRESHOLD_MAX); \ - __m256 min = _mm256_set1_ps(SIGMOID_THRESHOLD_MIN); \ - __m256 tmp = _mm256_loadu_ps(x); \ - INTRI_SIGMOID(tmp, min, max, expisa); \ - _mm256_storeu_ps(y, tmp); \ - } - -#define INTRI16_FLOAT(isa, expisa) \ - template <> \ - void VSigmoidKernelImpl::ComputeDeprecated( \ - const float* x, float* y) const { \ - __m256 max = _mm256_set1_ps(SIGMOID_THRESHOLD_MAX); \ - __m256 min = _mm256_set1_ps(SIGMOID_THRESHOLD_MIN); \ - __m256 tmp0 = _mm256_loadu_ps(x); \ - __m256 tmp1 = _mm256_loadu_ps(x + 8); \ - INTRI_SIGMOID(tmp0, min, max, expisa); \ - INTRI_SIGMOID(tmp1, min, max, expisa); \ - _mm256_storeu_ps(y, tmp0); \ - _mm256_storeu_ps(y + 8, tmp1); \ - } - -#define INTRI_GT8LT16_FLOAT(isa, expisa) \ - template <> \ - VSigmoidKernelImpl::VSigmoidKernelImpl(int d) \ - : VSigmoidKernel() { \ - this->num_ = d; \ - this->end_ = AVX_FLOAT_BLOCK; \ - this->rest_ = d - this->end_; \ - vexp_ = \ - KernelPool::Instance().template Get>(this->rest_); \ - } \ - template <> \ - void VSigmoidKernelImpl::ComputeDeprecated( \ - const float* x, float* y) const { \ - __m256 max = _mm256_set1_ps(SIGMOID_THRESHOLD_MAX); \ - __m256 min = _mm256_set1_ps(SIGMOID_THRESHOLD_MIN); \ - __m256 tmp = _mm256_loadu_ps(x); \ - INTRI_SIGMOID(tmp, min, max, expisa); \ - _mm256_storeu_ps(y, tmp); \ - const float min_ = SIGMOID_THRESHOLD_MIN; \ - const float max_ = SIGMOID_THRESHOLD_MAX; \ - for (int i = this->end_; i < this->num_; ++i) { \ - y[i] = (x[i] < min_) ? min_ : ((x[i] > max_) ? max_ : x[i]); \ - y[i] = 0.f - y[i]; \ - } \ - vexp_->ComputeDeprecated(y + this->end_, y + this->end_); \ - for (int i = this->end_; i < this->num_; ++i) { \ - y[i] = 1.f / (1.f + y[i]); \ - } \ - } - -#define INTRI_GT16_FLOAT(isa, expisa) \ - template <> \ - VSigmoidKernelImpl::VSigmoidKernelImpl(int d) \ - : VSigmoidKernel() { \ - this->num_ = d; \ - this->rest_ = d % AVX_FLOAT_BLOCK; \ - this->end_ = d - this->rest_; \ - vexp_ = \ - KernelPool::Instance().template Get>(this->rest_); \ - } \ - template <> \ - void VSigmoidKernelImpl::ComputeDeprecated( \ - const float* x, float* y) const { \ - __m256 max = _mm256_set1_ps(SIGMOID_THRESHOLD_MAX); \ - __m256 min = _mm256_set1_ps(SIGMOID_THRESHOLD_MIN); \ - for (int i = 0; i < this->end_; i += AVX_FLOAT_BLOCK) { \ - __m256 tmp = _mm256_loadu_ps(x + i); \ - INTRI_SIGMOID(tmp, min, max, expisa); \ - _mm256_storeu_ps(y + i, tmp); \ - } \ - const float min_ = SIGMOID_THRESHOLD_MIN; \ - const float max_ = SIGMOID_THRESHOLD_MAX; \ - for (int i = this->end_; i < this->num_; ++i) { \ - y[i] = (x[i] < min_) ? min_ : ((x[i] > max_) ? max_ : x[i]); \ - y[i] = 0.f - y[i]; \ - } \ - vexp_->ComputeDeprecated(y + this->end_, y + this->end_); \ - for (int i = this->end_; i < this->num_; ++i) { \ - y[i] = 1.f / (1.f + y[i]); \ - } \ - } - -#ifdef __AVX__ -INTRI8_FLOAT(jit::avx, detail::ExpAVX); -INTRI16_FLOAT(jit::avx, detail::ExpAVX); -INTRI_GT8LT16_FLOAT(jit::avx, detail::ExpAVX); -INTRI_GT16_FLOAT(jit::avx, detail::ExpAVX); -#endif -#ifdef __AVX2__ -INTRI8_FLOAT(jit::avx2, detail::ExpAVX2); -INTRI16_FLOAT(jit::avx2, detail::ExpAVX2); -// maybe use avx at gt8lt16 and gt16 -#endif -#ifdef __AVX512F__ -INTRI8_FLOAT(jit::avx512f, detail::ExpAVX2); -INTRI16_FLOAT(jit::avx512f, detail::ExpAVX2); -// maybe use avx2 at gt8lt16 and gt16 -#endif - -#undef INTRI8_FLOAT -#undef INTRI16_FLOAT -#undef INTRI_GT8LT16_FLOAT -#undef INTRI_GT16_FLOAT -#undef INTRI_VSIGMOID - -REGISTER_JITKERNEL_DEPRECATED(vsigmoid, VSigmoidKernel); - -/* VTanh JitKernel */ -template -class VTanhKernelImpl : public VTanhKernel { - public: - explicit VTanhKernelImpl(int d) : VTanhKernel() { - this->num_ = d; - vscal_ = KernelPool::Instance().template Get>(d); - vsigmoid_ = KernelPool::Instance().template Get>(d); - vaddbias_ = KernelPool::Instance().template Get>(d); - } - void ComputeDeprecated(const T* x, T* y) const override { - const T a = static_cast(2), b = static_cast(-1); - vscal_->Compute(&a, x, y, this->num_); - vsigmoid_->ComputeDeprecated(y, y); - vscal_->Compute(&a, y, y, this->num_); - vaddbias_->Compute(&b, y, y, this->num_); - } - - private: - std::shared_ptr> vscal_; - std::shared_ptr> vsigmoid_; - std::shared_ptr> vaddbias_; -}; - -#define INTRI_VTANH(tmp, expisa) \ - tmp = _mm256_mul_ps(_mm256_set1_ps(-2.0f), tmp); \ - tmp = _mm256_min_ps(tmp, _mm256_set1_ps(EXP_MAX_INPUT)); \ - tmp = expisa(tmp); \ - tmp = _mm256_add_ps(_mm256_set1_ps(1.0f), tmp); \ - tmp = _mm256_div_ps(_mm256_set1_ps(2.0f), tmp); \ - tmp = _mm256_sub_ps(tmp, _mm256_set1_ps(1.0f)) - -#define INTRI8_FLOAT(isa, expisa) \ - template <> \ - void VTanhKernelImpl::ComputeDeprecated(const float* x, \ - float* y) const { \ - __m256 tmp = _mm256_loadu_ps(x); \ - INTRI_VTANH(tmp, expisa); \ - _mm256_storeu_ps(y, tmp); \ - } - -#define INTRI16_FLOAT(isa, expisa) \ - template <> \ - void VTanhKernelImpl::ComputeDeprecated(const float* x, \ - float* y) const { \ - __m256 tmp0 = _mm256_loadu_ps(x); \ - __m256 tmp1 = _mm256_loadu_ps(x + 8); \ - INTRI_VTANH(tmp0, expisa); \ - INTRI_VTANH(tmp1, expisa); \ - _mm256_storeu_ps(y, tmp0); \ - _mm256_storeu_ps(y + 8, tmp1); \ - } - -#define INTRI_GT8LT16_FLOAT(isa, expisa) \ - template <> \ - VTanhKernelImpl::VTanhKernelImpl(int d) \ - : VTanhKernel() { \ - this->num_ = d; \ - this->end_ = AVX_FLOAT_BLOCK; \ - this->rest_ = d - this->end_; \ - vscal_ = \ - KernelPool::Instance().template Get>(this->rest_); \ - vsigmoid_ = KernelPool::Instance().template Get>( \ - this->rest_); \ - vaddbias_ = KernelPool::Instance().template Get>( \ - this->rest_); \ - } \ - template <> \ - void VTanhKernelImpl::ComputeDeprecated( \ - const float* x, float* y) const { \ - __m256 tmp = _mm256_loadu_ps(x); \ - INTRI_VTANH(tmp, expisa); \ - _mm256_storeu_ps(y, tmp); \ - x += AVX_FLOAT_BLOCK; \ - y += AVX_FLOAT_BLOCK; \ - const float a = 2.f, b = -1.f; \ - vscal_->Compute(&a, x, y, this->num_); \ - vsigmoid_->ComputeDeprecated(y, y); \ - vscal_->Compute(&a, y, y, this->num_); \ - vaddbias_->Compute(&b, y, y, this->num_); \ - } - -#define INTRI_GT16_FLOAT(isa, expisa) \ - template <> \ - VTanhKernelImpl::VTanhKernelImpl(int d) \ - : VTanhKernel() { \ - this->num_ = d; \ - this->rest_ = d % AVX_FLOAT_BLOCK; \ - this->end_ = d - this->rest_; \ - vscal_ = \ - KernelPool::Instance().template Get>(this->rest_); \ - vsigmoid_ = KernelPool::Instance().template Get>( \ - this->rest_); \ - vaddbias_ = KernelPool::Instance().template Get>( \ - this->rest_); \ - } \ - template <> \ - void VTanhKernelImpl::ComputeDeprecated(const float* x, \ - float* y) const { \ - for (int i = 0; i < this->end_; i += AVX_FLOAT_BLOCK) { \ - __m256 tmp = _mm256_loadu_ps(x + i); \ - INTRI_VTANH(tmp, expisa); \ - _mm256_storeu_ps(y + i, tmp); \ - } \ - x += this->end_; \ - y += this->end_; \ - const float a = 2.f, b = -1.f; \ - vscal_->Compute(&a, x, y, this->num_); \ - vsigmoid_->ComputeDeprecated(y, y); \ - vscal_->Compute(&a, y, y, this->num_); \ - vaddbias_->Compute(&b, y, y, this->num_); \ - } - -#ifdef __AVX__ -INTRI8_FLOAT(jit::avx, detail::ExpAVX); -INTRI16_FLOAT(jit::avx, detail::ExpAVX); -INTRI_GT8LT16_FLOAT(jit::avx, detail::ExpAVX); -INTRI_GT16_FLOAT(jit::avx, detail::ExpAVX); -#endif -#ifdef __AVX2__ -INTRI8_FLOAT(jit::avx2, detail::ExpAVX2); -INTRI16_FLOAT(jit::avx2, detail::ExpAVX2); -// maybe use avx at gt8lt16 and gt16 -#endif -#ifdef __AVX512F__ -INTRI8_FLOAT(jit::avx512f, detail::ExpAVX2); -INTRI16_FLOAT(jit::avx512f, detail::ExpAVX2); -// maybe use avx at gt8lt16 and gt16 -#endif - -#undef INTRI8_FLOAT -#undef INTRI16_FLOAT -#undef INTRI_GT8LT16_FLOAT -#undef INTRI_GT16_FLOAT -#undef INTRI_VTANH - -REGISTER_JITKERNEL_DEPRECATED(vtanh, VTanhKernel); - -#undef JITKERNEL_NEW_ACT_IMPL - } // namespace jitkernel } // namespace math } // namespace operators diff --git a/paddle/fluid/operators/math/jit_kernel_layer_norm.cc b/paddle/fluid/operators/math/jit_kernel_layer_norm.cc new file mode 100644 index 0000000000000000000000000000000000000000..49904e6e8c7cd346bcbfb67c3a7574118b36e058 --- /dev/null +++ b/paddle/fluid/operators/math/jit_kernel_layer_norm.cc @@ -0,0 +1,241 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at +http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ +#include "paddle/fluid/operators/math/jit_kernel.h" +#include +#include +#include +#include "paddle/fluid/operators/math/jit_kernel_macro.h" +#ifdef __AVX__ +#include +#endif + +namespace paddle { +namespace operators { +namespace math { +namespace jitkernel { + +namespace jit = platform::jit; + +/* Layer Norm JitKernel */ +template +class LayerNormKernelImpl : public LayerNormKernel { + public: + explicit LayerNormKernelImpl(int right) : LayerNormKernel() { + this->num_ = right; + } + + void Compute(T* x, T* out, T* mean, T* var, const T* scale, const T* bias, + int height, const float epsilon) const override { + // get mean + for (int i = 0; i < height; i++) { + T sum = 0.0; + int offset = i * this->num_; + for (int j = 0; j < this->num_; j++) { + sum += x[offset + j]; + } + mean[i] = sum / this->num_; + } + + // get variance + for (int i = 0; i < height; i++) { + T sum = 0.0; + int offset = i * this->num_; + for (int j = 0; j < this->num_; j++) { + sum += (x[offset + j] - mean[i]) * (x[offset + j] - mean[i]); + } + var[i] = sum / this->num_; + } + + for (int i = 0; i < height; i++) { + int offset = i * this->num_; + T sqrt_var = sqrt(var[i] + (T)epsilon); + for (int j = 0; j < this->num_; j++) { + out[offset + j] = (x[offset + j] - mean[i]) / sqrt_var; + } + } + if (scale) { + for (int i = 0; i < height; i++) { + int offset = i * this->num_; + for (int j = 0; j < this->num_; j++) { + out[offset + j] *= scale[j]; + } + } + } + + if (bias) { + for (int i = 0; i < height; i++) { + int offset = i * this->num_; + for (int j = 0; j < this->num_; j++) { + out[offset + j] += bias[j]; + } + } + } + } +}; + +#define INTRIAVX_FLOAT(isa, block) \ + template <> \ + LayerNormKernelImpl::LayerNormKernelImpl(int right) \ + : LayerNormKernel() { \ + this->num_ = right; \ + this->rest_ = this->num_ % YMM_FLOAT_BLOCK; \ + this->end_ = this->num_ - this->rest_; \ + } \ + template <> \ + void LayerNormKernelImpl::Compute( \ + float* x, float* out, float* mean, float* var, const float* scale, \ + const float* bias, int height, const float epsilon) const { \ + __m256 sum; \ + __m256 mean_vec, var_vec; \ + __m128 hi, lo; \ + __m256 tmp; \ + size_t offset; \ + size_t j; \ + __m256 reverse_num_vec = \ + _mm256_div_ps(_mm256_set1_ps(1.0), _mm256_set1_ps(this->num_)); \ + __m256 epsilon_vec = _mm256_set1_ps(epsilon); \ + int rest_mask = \ + ((-1) & (~((~0U) >> (sizeof(int) * 8 - (YMM_FLOAT_BLOCK - rest_))))) & \ + 0x0ff; \ + __m256i mask_vec = _mm256_set_epi32( \ + rest_mask & 0x80 ? 0xffffffff : 0, rest_mask & 0x40 ? 0xffffffff : 0, \ + rest_mask & 0x20 ? 0xffffffff : 0, rest_mask & 0x10 ? 0xffffffff : 0, \ + rest_mask & 0x8 ? 0xffffffff : 0, rest_mask & 0x4 ? 0xffffffff : 0, \ + rest_mask & 0x2 ? 0xffffffff : 0, rest_mask & 0x1 ? 0xffffffff : 0); \ + \ + for (int i = 0; i < height; ++i) { \ + offset = i * this->num_; \ + \ + /* get mean */ \ + sum = _mm256_setzero_ps(); \ + for (j = offset; j < end_ + offset; j += block) { \ + sum = _mm256_add_ps(sum, _mm256_loadu_ps((const float*)x + j)); \ + } \ + if (rest_ != 0) { \ + j = offset + this->num_ - block; \ + tmp = _mm256_loadu_ps((const float*)x + j); \ + tmp = _mm256_blendv_ps(_mm256_setzero_ps(), tmp, (__m256)mask_vec); \ + sum = _mm256_add_ps(sum, tmp); \ + } \ + hi = _mm256_extractf128_ps(sum, 1); \ + lo = _mm256_extractf128_ps(sum, 0); \ + sum = _mm256_add_ps( \ + sum, _mm256_insertf128_ps( \ + _mm256_insertf128_ps(_mm256_setzero_ps(), hi, 0), lo, 1)); \ + sum = _mm256_hadd_ps(sum, sum); \ + sum = _mm256_hadd_ps(sum, sum); \ + mean_vec = _mm256_mul_ps(sum, reverse_num_vec); \ + mean[i] = *reinterpret_cast(&mean_vec); \ + \ + /* get variance */ \ + sum = _mm256_setzero_ps(); \ + for (j = offset; j < end_ + offset; j += block) { \ + tmp = _mm256_sub_ps(_mm256_loadu_ps((const float*)x + j), mean_vec); \ + tmp = _mm256_mul_ps(tmp, tmp); \ + sum = _mm256_add_ps(sum, tmp); \ + } \ + if (rest_ != 0) { \ + j = offset + this->num_ - block; \ + tmp = _mm256_sub_ps(_mm256_loadu_ps((const float*)x + j), mean_vec); \ + tmp = _mm256_mul_ps(tmp, tmp); \ + tmp = _mm256_blendv_ps(_mm256_setzero_ps(), tmp, (__m256)mask_vec); \ + sum = _mm256_add_ps(sum, tmp); \ + } \ + hi = _mm256_extractf128_ps(sum, 1); \ + lo = _mm256_extractf128_ps(sum, 0); \ + sum = _mm256_add_ps( \ + sum, _mm256_insertf128_ps( \ + _mm256_insertf128_ps(_mm256_setzero_ps(), hi, 0), lo, 1)); \ + sum = _mm256_hadd_ps(sum, sum); \ + sum = _mm256_hadd_ps(sum, sum); \ + var_vec = _mm256_mul_ps(sum, reverse_num_vec); \ + var[i] = *reinterpret_cast(&var_vec); \ + \ + /* get x_norm and calculate output*/ \ + for (j = offset; j < end_ + offset; j += block) { \ + tmp = _mm256_sub_ps(_mm256_loadu_ps((const float*)x + j), mean_vec); \ + tmp = _mm256_div_ps( \ + tmp, _mm256_sqrt_ps(_mm256_add_ps(var_vec, epsilon_vec))); \ + _mm256_storeu_ps(reinterpret_cast(out) + j, tmp); \ + } \ + if (rest_ != 0) { \ + j = offset + num_ - block; \ + tmp = _mm256_sub_ps(_mm256_loadu_ps((const float*)x + j), mean_vec); \ + tmp = _mm256_div_ps( \ + tmp, _mm256_sqrt_ps(_mm256_add_ps(var_vec, epsilon_vec))); \ + _mm256_storeu_ps(reinterpret_cast(out) + j, tmp); \ + } \ + \ + if (scale) { \ + if (rest_ != 0) { \ + j = offset + this->num_ - block; \ + tmp = _mm256_loadu_ps((const float*)out + j); \ + } \ + for (j = offset; j < end_ + offset; j += block) { \ + _mm256_storeu_ps( \ + reinterpret_cast(out) + j, \ + _mm256_mul_ps( \ + _mm256_loadu_ps((const float*)out + j), \ + _mm256_loadu_ps((const float*)scale + j - offset))); \ + } \ + if (rest_ != 0) { \ + j = offset + this->num_ - block; \ + _mm256_storeu_ps( \ + reinterpret_cast(out) + j, \ + _mm256_mul_ps( \ + tmp, _mm256_loadu_ps((const float*)scale + j - offset))); \ + } \ + } \ + \ + if (bias) { \ + if (rest_ != 0) { \ + j = offset + this->num_ - block; \ + tmp = _mm256_loadu_ps((const float*)out + j); \ + } \ + for (j = offset; j < end_ + offset; j += block) { \ + _mm256_storeu_ps( \ + reinterpret_cast(out) + j, \ + _mm256_add_ps( \ + _mm256_loadu_ps((const float*)out + j), \ + _mm256_loadu_ps((const float*)bias + j - offset))); \ + } \ + if (rest_ != 0) { \ + j = offset + this->num_ - block; \ + _mm256_storeu_ps( \ + reinterpret_cast(out) + j, \ + _mm256_add_ps( \ + tmp, _mm256_loadu_ps((const float*)bias + j - offset))); \ + } \ + } \ + } \ + } + +#ifdef __AVX__ +INTRIAVX_FLOAT(jit::avx, kEQ8); +INTRIAVX_FLOAT(jit::avx, kGT8LT16); +INTRIAVX_FLOAT(jit::avx, kEQ16); +INTRIAVX_FLOAT(jit::avx, kGT16); +#endif +#ifdef __AVX2__ +INTRIAVX_FLOAT(jit::avx2, kEQ8); +INTRIAVX_FLOAT(jit::avx2, kGT8LT16); +INTRIAVX_FLOAT(jit::avx2, kEQ16); +INTRIAVX_FLOAT(jit::avx2, kGT16); +#endif + +#undef INTRIAVX_FLOAT + +REGISTER_JITKERNEL_DEPRECATED(layer_norm, LayerNormKernel); + +} // namespace jitkernel +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/math/jit_kernel_macro.h b/paddle/fluid/operators/math/jit_kernel_macro.h index a8169ea48ae3eee5a8cba291be4496c4c6074221..8acf60cfbfd3d47ad52862241b7635aba6982ebf 100644 --- a/paddle/fluid/operators/math/jit_kernel_macro.h +++ b/paddle/fluid/operators/math/jit_kernel_macro.h @@ -15,12 +15,20 @@ limitations under the License. */ #pragma once #include #include "paddle/fluid/platform/cpu_info.h" +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace operators { namespace math { namespace jitkernel { +#define JITKERNEL_DECLARE_STATIC_FUNC \ + static inline std::string name(int d) { \ + PADDLE_THROW("DType should be either float or double"); \ + } \ + static inline bool useJIT(int d) { return false; } \ + static inline bool useMKL(int d) { return false; } + #define JITKERNEL_DEFINE_NAME(ker_key, ker_class) \ template <> \ std::string ker_class##Impl::name(int d) { \ @@ -86,17 +94,17 @@ namespace jitkernel { namespace jit = platform::jit; // TODO(TJ): below defines are deprecated, would be remove recently -#define SEARCH_BLOCK(macro_, ker, dtype, isa) \ - if (d < AVX_FLOAT_BLOCK) { \ - macro_(ker, dtype, isa, kLT8); \ - } else if (d == AVX_FLOAT_BLOCK) { \ - macro_(ker, dtype, isa, kEQ8); \ - } else if (d > AVX_FLOAT_BLOCK && d < AVX512_FLOAT_BLOCK) { \ - macro_(ker, dtype, isa, kGT8LT16); \ - } else if (d == AVX512_FLOAT_BLOCK) { \ - macro_(ker, dtype, isa, kEQ16); \ - } else { \ - macro_(ker, dtype, isa, kGT16); \ +#define SEARCH_BLOCK(macro_, ker, dtype, isa) \ + if (d < YMM_FLOAT_BLOCK) { \ + macro_(ker, dtype, isa, kLT8); \ + } else if (d == YMM_FLOAT_BLOCK) { \ + macro_(ker, dtype, isa, kEQ8); \ + } else if (d > YMM_FLOAT_BLOCK && d < ZMM_FLOAT_BLOCK) { \ + macro_(ker, dtype, isa, kGT8LT16); \ + } else if (d == ZMM_FLOAT_BLOCK) { \ + macro_(ker, dtype, isa, kEQ16); \ + } else { \ + macro_(ker, dtype, isa, kGT16); \ } #define SEARCH_ISA_BLOCK(macro_, ker, dtype) \ diff --git a/paddle/fluid/operators/math/jit_kernel_rnn.cc b/paddle/fluid/operators/math/jit_kernel_rnn.cc index 926221f0a75c461e275a72f16b4339ae28a8e988..e79b0400ab75d1488a26450bd8cde4a0979fc995 100644 --- a/paddle/fluid/operators/math/jit_kernel_rnn.cc +++ b/paddle/fluid/operators/math/jit_kernel_rnn.cc @@ -175,26 +175,26 @@ class LSTMKernelImpl : public LSTMKernel { void ComputeCtHt(T* gates, const T* ct_1, T* ct, T* ht, const T* wp_data, T* checked) const override { // gates: W_ch, W_ih, W_fh, W_oh - act_gate_d3_->ComputeDeprecated(gates + d_, gates + d_); + act_gate_d3_->Compute(gates + d_, gates + d_, d3_); /* C_t = C_t-1 * fgated + cand_gated * igated */ - act_cand_d_->ComputeDeprecated(gates, gates); + act_cand_d_->Compute(gates, gates, d_); vmul_d_->Compute(gates, gates + d_, gates + d_, d_); vmul_d_->Compute(ct_1, gates + d2_, gates + d2_, d_); vadd_d_->Compute(gates + d_, gates + d2_, ct, d_); /* H_t = act_cell(C_t) * ogated */ - act_cell_d_->ComputeDeprecated(ct, gates + d2_); + act_cell_d_->Compute(ct, gates + d2_, d_); vmul_d_->Compute(gates + d2_, gates + d3_, ht, d_); } void ComputeC1H1(T* gates, T* ct, T* ht, const T* wp_data) const override { /* C_t = igated * cgated*/ - act_gate_d_->ComputeDeprecated(gates + d_, gates + d_); - act_cand_d_->ComputeDeprecated(gates, gates); + act_gate_d_->Compute(gates + d_, gates + d_, d_); + act_cand_d_->Compute(gates, gates, d_); vmul_d_->Compute(gates, gates + d_, ct, d_); /* H_t = act_cell(C_t) * ogated */ - act_gate_d_->ComputeDeprecated(gates + d3_, gates + d3_); - act_cell_d_->ComputeDeprecated(ct, gates + d2_); + act_gate_d_->Compute(gates + d3_, gates + d3_, d_); + act_cell_d_->Compute(ct, gates + d2_, d_); vmul_d_->Compute(gates + d2_, gates + d3_, ht, d_); } @@ -292,32 +292,32 @@ class PeepholeKernelImpl : public LSTMKernel { vmul_d_->Compute(wp_data, ct_1, checked, d_); vmul_d_->Compute(wp_data + d_, ct_1, checked + d_, d_); vadd_d2_->Compute(checked, gates + d_, gates + d_, d2_); - act_gate_d2_->ComputeDeprecated(gates + d_, gates + d_); + act_gate_d2_->Compute(gates + d_, gates + d_, d2_); /* C_t = C_t-1 * fgated + cand_gated * igated*/ - act_cand_d_->ComputeDeprecated(gates, gates); + act_cand_d_->Compute(gates, gates, d_); vmul_d_->Compute(gates, gates + d_, gates + d_, d_); vmul_d_->Compute(ct_1, gates + d2_, gates + d2_, d_); vadd_d_->Compute(gates + d_, gates + d2_, ct, d_); /* get ogated*/ vmul_d_->Compute(wp_data + d2_, ct, gates + d_, d_); vadd_d_->Compute(gates + d_, gates + d3_, gates + d3_, d_); - act_gate_d_->ComputeDeprecated(gates + d3_, gates + d3_); + act_gate_d_->Compute(gates + d3_, gates + d3_, d_); /* H_t = act_cell(C_t) * ogated */ - act_cell_d_->ComputeDeprecated(ct, gates + d2_); + act_cell_d_->Compute(ct, gates + d2_, d_); vmul_d_->Compute(gates + d2_, gates + d3_, ht, d_); } void ComputeC1H1(T* gates, T* ct, T* ht, const T* wp_data) const override { /* C_t = igated * cgated*/ - act_gate_d_->ComputeDeprecated(gates + d_, gates + d_); - act_cand_d_->ComputeDeprecated(gates, gates); + act_gate_d_->Compute(gates + d_, gates + d_, d_); + act_cand_d_->Compute(gates, gates, d_); vmul_d_->Compute(gates, gates + d_, ct, d_); /* get outgated, put W_oc * C_t on igated */ vmul_d_->Compute(wp_data + d2_, ct, gates + d_, d_); vadd_d_->Compute(gates + d_, gates + d3_, gates + d3_, d_); /* H_t = act_cell(C_t) * ogated */ - act_gate_d_->ComputeDeprecated(gates + d3_, gates + d3_); - act_cell_d_->ComputeDeprecated(ct, gates + d2_); + act_gate_d_->Compute(gates + d3_, gates + d3_, d_); + act_cell_d_->Compute(ct, gates + d2_, d_); vmul_d_->Compute(gates + d2_, gates + d3_, ht, d_); } @@ -376,20 +376,20 @@ class GRUKernelImpl : public GRUKernel { } void ComputeH1(T* gates, T* ht) const override { - act_gate_d_->ComputeDeprecated(gates, gates); - act_state_d_->ComputeDeprecated(gates + d2_, gates + d2_); + act_gate_d_->Compute(gates, gates, d_); + act_state_d_->Compute(gates + d2_, gates + d2_, d_); vmul_d_->Compute(gates, gates + d2_, ht, d_); } void ComputeHtPart1(T* gates, const T* ht_1, T* ht) const override { // W: {W_update, W_reset; W_state} - act_gate_d2_->ComputeDeprecated(gates, gates); + act_gate_d2_->Compute(gates, gates, d2_); vmul_d_->Compute(ht_1, gates + d_, ht, d_); } void ComputeHtPart2(T* gates, const T* ht_1, T* ht) const override { T* y = gates + d2_; - act_state_d_->ComputeDeprecated(y, y); + act_state_d_->Compute(y, y, d_); // out = zt*ht~ + (1-zt)*ht_1 for (int i = 0; i < d_; ++i) { ht[i] = gates[i] * y[i] + (static_cast(1) - gates[i]) * ht_1[i]; diff --git a/paddle/fluid/operators/math/jit_kernel_test.cc b/paddle/fluid/operators/math/jit_kernel_test.cc index 5e1f91ffae03796be2817d0461900c2512938c77..b6c62a26348cdc20582cf7465f93026402051587 100644 --- a/paddle/fluid/operators/math/jit_kernel_test.cc +++ b/paddle/fluid/operators/math/jit_kernel_test.cc @@ -33,6 +33,9 @@ limitations under the License. */ constexpr int repeat = 20000; +// TODO(TJ): benchmark and test should be seperated, +// benchmark should verify more sizes + inline double GetCurrentUS() { struct timeval time; gettimeofday(&time, NULL); @@ -66,7 +69,7 @@ void vrelu_intri8(const int n, const float* x, float* y) { TEST(JitKernel, vrelu) { namespace jit = paddle::operators::math::jitkernel; - for (int d : {7, 8, 15, 16, 30, 256, 512}) { + for (int d : {3, 7, 8, 15, 16, 30, 256, 512}) { std::vector x(d); std::vector zref(d), ztgt(d); RandomVec(d, x.data(), -10.f, 1.f); @@ -156,7 +159,7 @@ void vexp_mkl(const int n, const float* x, float* y) { TEST(JitKernel, vexp) { namespace jit = paddle::operators::math::jitkernel; - for (int d : {7, 8, 15, 16, 30, 128, 256}) { + for (int d : {1, 3, 4, 6, 7, 8, 12, 15, 16, 20, 30, 128, 256}) { std::vector x(d); std::vector zref(d), ztgt(d); RandomVec(d, x.data(), -2.f, 2.f); @@ -181,7 +184,8 @@ TEST(JitKernel, vexp) { auto ttgts = GetCurrentUS(); for (int i = 0; i < repeat; ++i) { - ker->ComputeDeprecated(x_data, ztgt_data); + // ker->Compute(x_data, ztgt_data); + ker->Compute(x_data, ztgt_data, d); } auto ttgte = GetCurrentUS(); @@ -222,7 +226,7 @@ void vsigmoid_better( y[i] = (x[i] < min) ? min : ((x[i] > max) ? max : x[i]); y[i] = 0.f - y[i]; } - vexp->ComputeDeprecated(y, y); + vexp->Compute(y, y, n); for (int i = 0; i < n; ++i) { y[i] = 1.f / (1.f + y[i]); } @@ -230,7 +234,7 @@ void vsigmoid_better( TEST(JitKernel, vsigmoid) { namespace jit = paddle::operators::math::jitkernel; - for (int d : {7, 8, 15, 16, 30, 32, 64, 100, 128, 256}) { + for (int d : {1, 3, 4, 6, 7, 8, 15, 16, 30, 32, 64, 100, 128, 256}) { std::vector x(d); std::vector zref(d), ztgt(d); RandomVec(d, x.data(), -2.f, 2.f); @@ -253,7 +257,7 @@ TEST(JitKernel, vsigmoid) { auto trefe = GetCurrentUS(); auto ttgts = GetCurrentUS(); for (int i = 0; i < repeat; ++i) { - ker->ComputeDeprecated(x_data, ztgt_data); + ker->Compute(x_data, ztgt_data, d); } auto ttgte = GetCurrentUS(); @@ -287,14 +291,14 @@ void vtanh_better( const int n, const float* x, float* y) { const float a = 2.f, b = -1.f; vscal->Compute(&a, x, y, n); - vsigmoid->ComputeDeprecated(y, y); + vsigmoid->Compute(y, y, n); vscal->Compute(&a, y, y, n); vaddbias->Compute(&b, y, y, n); } TEST(JitKernel, vtanh) { namespace jit = paddle::operators::math::jitkernel; - for (int d : {7, 8, 15, 16, 30, 32, 64, 100, 128, 256}) { + for (int d : {1, 2, 3, 4, 5, 6, 7, 8, 15, 16, 30, 32, 64, 100, 128, 256}) { std::vector x(d); std::vector zref(d), ztgt(d); RandomVec(d, x.data(), -2.f, 2.f); @@ -321,7 +325,7 @@ TEST(JitKernel, vtanh) { auto trefe = GetCurrentUS(); auto ttgts = GetCurrentUS(); for (int i = 0; i < repeat; ++i) { - ker->ComputeDeprecated(x_data, ztgt_data); + ker->Compute(x_data, ztgt_data, d); } auto ttgte = GetCurrentUS(); @@ -344,8 +348,8 @@ void lstm_ctht_ref( const std::shared_ptr< const paddle::operators::math::jitkernel::VExpKernel>& vexp_1, const int d, float* gates, const float* ct_1, float* ct, float* ht) { - vsigmoid_3d->ComputeDeprecated(gates + d, gates + d); - vtanh_d->ComputeDeprecated(gates, gates); + vsigmoid_3d->Compute(gates + d, gates + d, 3 * d); + vtanh_d->Compute(gates, gates, d); const float *i = gates + d, *f = gates + d * 2, *o = gates + d * 3; const float min = SIGMOID_THRESHOLD_MIN; const float max = SIGMOID_THRESHOLD_MAX; @@ -355,7 +359,7 @@ void lstm_ctht_ref( // H_t = act_cell(C_t) * ogated float tmp = ct[k] * 2; tmp = 0.f - ((tmp < min) ? min : ((tmp > max) ? max : tmp)); - vexp_1->ComputeDeprecated(&tmp, &tmp); + vexp_1->Compute(&tmp, &tmp, 1); tmp = 2.f / (1.f + tmp) - 1.f; ht[k] = tmp * o[k]; } @@ -373,19 +377,19 @@ void lstm_ctht_better( const paddle::operators::math::jitkernel::VAddKernel>& vadd_d, const int d, float* gates, const float* ct_1, float* ct, float* ht) { int d2 = d * 2; - vsigmoid_3d->ComputeDeprecated(gates + d, gates + d); - vtanh_d->ComputeDeprecated(gates, gates); + vsigmoid_3d->Compute(gates + d, gates + d, 3 * d); + vtanh_d->Compute(gates, gates, d); vmul_d->Compute(gates, gates + d, gates + d, d); vmul_d->Compute(ct_1, gates + d2, gates + d2, d); vadd_d->Compute(gates + d, gates + d2, ct, d); /* H_t = act_cell(C_t) * ogated */ - vtanh_d->ComputeDeprecated(ct, gates + d2); + vtanh_d->Compute(ct, gates + d2, d); vmul_d->Compute(gates + d2, gates + d * 3, ht, d); } TEST(JitKernel, lstm) { namespace jit = paddle::operators::math::jitkernel; - for (int d : {7, 8, 15, 16, 30, 32, 64, 100}) { + for (int d : {1, 2, 3, 4, 5, 6, 7, 8, 15, 16, 30, 32, 64, 100}) { int d4 = d * 4; int d3 = d * 3; std::vector x(d4), xref(d4); @@ -736,7 +740,7 @@ void vaddrelu_better( const paddle::operators::math::jitkernel::VReluKernel>& vrelu, const float* x, const float* y, float* z, int d) { vadd->Compute(x, y, z, d); - vrelu->ComputeDeprecated(z, z); + vrelu->Compute(z, z, d); } TEST(JitKernel, vaddrelu) { @@ -758,7 +762,7 @@ TEST(JitKernel, vaddrelu) { float* zref_data = zref.data(); auto trefs = GetCurrentUS(); for (int i = 0; i < repeat; ++i) { - vadd_ref(d, x_data, y_data, zref_data); + vaddrelu_ref(d, x_data, y_data, zref_data); } auto trefe = GetCurrentUS(); auto tmkls = GetCurrentUS(); diff --git a/paddle/fluid/operators/math/selected_rows_functor_test.cu b/paddle/fluid/operators/math/selected_rows_functor_test.cu.cc similarity index 99% rename from paddle/fluid/operators/math/selected_rows_functor_test.cu rename to paddle/fluid/operators/math/selected_rows_functor_test.cu.cc index 17af3e3999ca688c584f636f4c00386f886f9bbf..73d83fa2e43f14445c969648cd469b0e32d644c7 100644 --- a/paddle/fluid/operators/math/selected_rows_functor_test.cu +++ b/paddle/fluid/operators/math/selected_rows_functor_test.cu.cc @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include "paddle/fluid/operators/math/selected_rows_functor.h" #include #include "gtest/gtest.h" #include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/operators/math/selected_rows_functor.h" TEST(selected_rows_functor, gpu_add) { paddle::platform::CUDAPlace gpu_place(0); @@ -38,6 +38,7 @@ TEST(selected_rows_functor, gpu_add) { {static_cast(rows1.size()), row_numel}), gpu_place); functor(ctx, in1_value, 1.0); + PADDLE_ENFORCE(cudaDeviceSynchronize()); std::vector rows2{0, 5, 7, 9}; std::unique_ptr selected_rows2{ diff --git a/paddle/fluid/operators/math/softmax.h b/paddle/fluid/operators/math/softmax.h index bf698dc2f753f0002557af07ad7ea976c85edada..089458e957dfaac1cbc3bf1bc2b4be4877e702c9 100644 --- a/paddle/fluid/operators/math/softmax.h +++ b/paddle/fluid/operators/math/softmax.h @@ -19,7 +19,8 @@ namespace paddle { namespace operators { namespace math { -template +template class SoftmaxFunctor { public: void operator()(const DeviceContext& context, const framework::Tensor* X, diff --git a/paddle/fluid/operators/math/softmax_impl.h b/paddle/fluid/operators/math/softmax_impl.h index 7cf98f27251db3cfe5e8e295ed21056f6e5a2963..0f3e5b20086378da8ef1138a5f5c005b724f7fa2 100644 --- a/paddle/fluid/operators/math/softmax_impl.h +++ b/paddle/fluid/operators/math/softmax_impl.h @@ -16,6 +16,7 @@ limitations under the License. */ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/operators/math/blas.h" namespace paddle { namespace operators { namespace math { @@ -32,8 +33,8 @@ struct ValueClip { } }; -template -void SoftmaxFunctor::operator()( +template +void SoftmaxFunctor::operator()( const DeviceContext& context, const framework::Tensor* X, framework::Tensor* Y) { auto logits = EigenMatrix::From(*X); @@ -65,36 +66,46 @@ void SoftmaxFunctor::operator()( .broadcast(one_by_class)); } -template -class SoftmaxFunctor { +template +using enable_if_CPU = typename std::enable_if< + std::is_same::value>::type; + +template +class SoftmaxFunctor> { void operator()(const DeviceContext& context, const framework::Tensor* X, framework::Tensor* Y) { - auto logits = EigenMatrix::From(*X); - auto softmax = EigenMatrix::From(*Y); - + auto in_dims = X->dims(); + auto out_dims = Y->dims(); + const float* in_data = X->data(); + float* out_data = Y->data(); const int kBatchDim = 0; const int kClassDim = 1; - - const int batch_size = logits.dimension(kBatchDim); - const int num_classes = logits.dimension(kClassDim); - - Eigen::DSizes along_class(kClassDim); - Eigen::DSizes batch_by_one(batch_size, 1); - Eigen::DSizes one_by_class(1, num_classes); - - auto shifted_logits = (logits - - logits.maximum(along_class) - .eval() - .reshape(batch_by_one) - .broadcast(one_by_class)); - - softmax.device(*context.eigen_device()) = shifted_logits.exp(); - softmax.device(*context.eigen_device()) = (softmax * - softmax.sum(along_class) - .inverse() - .eval() - .reshape(batch_by_one) - .broadcast(one_by_class)); + // 2D data. Batch x C + const int batch_size = in_dims[kBatchDim]; + const int num_classes = in_dims[kClassDim]; + std::vector entities(batch_size); + auto blas = math::GetBlas(context); + for (int n = 0; n < batch_size; ++n) { + entities[n] = in_data[n * num_classes]; + for (int c = 1; c < num_classes; ++c) { + entities[n] = in_data[n * num_classes + c] > entities[n] + ? in_data[n * num_classes + c] + : entities[n]; + } + for (int c = 0; c < num_classes; ++c) { + out_data[n * num_classes + c] = + in_data[n * num_classes + c] - entities[n]; + } + } + + blas.VEXP(num_classes * batch_size, out_data, out_data); + for (int n = 0; n < batch_size; ++n) { + entities[n] = out_data[n * num_classes]; + for (int c = 1; c < num_classes; ++c) { + entities[n] += out_data[n * num_classes + c]; + } + blas.SCAL(num_classes, 1.0f / entities[n], &out_data[n * num_classes]); + } } }; diff --git a/paddle/fluid/operators/metrics/CMakeLists.txt b/paddle/fluid/operators/metrics/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..5d468316e8eacb73c4a4ce81c784880bb5e46c2d --- /dev/null +++ b/paddle/fluid/operators/metrics/CMakeLists.txt @@ -0,0 +1,2 @@ +include(operators) +register_operators() diff --git a/paddle/fluid/operators/accuracy_op.cc b/paddle/fluid/operators/metrics/accuracy_op.cc similarity index 98% rename from paddle/fluid/operators/accuracy_op.cc rename to paddle/fluid/operators/metrics/accuracy_op.cc index 42fcace17926641b5caf677eb3c8ba5222e37190..95aa76bc6947c9c39e56d39031c5184dc262acd0 100644 --- a/paddle/fluid/operators/accuracy_op.cc +++ b/paddle/fluid/operators/metrics/accuracy_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/accuracy_op.h" +#include "paddle/fluid/operators/metrics/accuracy_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/accuracy_op.cu b/paddle/fluid/operators/metrics/accuracy_op.cu similarity index 98% rename from paddle/fluid/operators/accuracy_op.cu rename to paddle/fluid/operators/metrics/accuracy_op.cu index 23b48c6fdf427348879de07c671c65327d6436d7..b255d2a7c413b4f965f6b874d342dcb93c7b5e66 100644 --- a/paddle/fluid/operators/accuracy_op.cu +++ b/paddle/fluid/operators/metrics/accuracy_op.cu @@ -14,7 +14,7 @@ limitations under the License. */ #include #include -#include "paddle/fluid/operators/accuracy_op.h" +#include "paddle/fluid/operators/metrics/accuracy_op.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/gpu_info.h" diff --git a/paddle/fluid/operators/accuracy_op.h b/paddle/fluid/operators/metrics/accuracy_op.h similarity index 100% rename from paddle/fluid/operators/accuracy_op.h rename to paddle/fluid/operators/metrics/accuracy_op.h diff --git a/paddle/fluid/operators/auc_op.cc b/paddle/fluid/operators/metrics/auc_op.cc similarity index 98% rename from paddle/fluid/operators/auc_op.cc rename to paddle/fluid/operators/metrics/auc_op.cc index cb98bc514083ad113fdebfbac043a9516fd9435a..335d4fded4a9543dabf984f7ed9c342b46dd04f0 100644 --- a/paddle/fluid/operators/auc_op.cc +++ b/paddle/fluid/operators/metrics/auc_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/auc_op.h" +#include "paddle/fluid/operators/metrics/auc_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/auc_op.h b/paddle/fluid/operators/metrics/auc_op.h similarity index 100% rename from paddle/fluid/operators/auc_op.h rename to paddle/fluid/operators/metrics/auc_op.h diff --git a/paddle/fluid/operators/precision_recall_op.cc b/paddle/fluid/operators/metrics/precision_recall_op.cc similarity index 99% rename from paddle/fluid/operators/precision_recall_op.cc rename to paddle/fluid/operators/metrics/precision_recall_op.cc index e7ce16f33fb5052ffb41fc05bd1538e2f0dc35be..0d733c47dd2fcaad776d8d4e6467ecd1872bce05 100644 --- a/paddle/fluid/operators/precision_recall_op.cc +++ b/paddle/fluid/operators/metrics/precision_recall_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/precision_recall_op.h" +#include "paddle/fluid/operators/metrics/precision_recall_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/precision_recall_op.h b/paddle/fluid/operators/metrics/precision_recall_op.h similarity index 100% rename from paddle/fluid/operators/precision_recall_op.h rename to paddle/fluid/operators/metrics/precision_recall_op.h diff --git a/paddle/fluid/operators/nccl/CMakeLists.txt b/paddle/fluid/operators/nccl/CMakeLists.txt index cdcba8035762d8f442eb8b8ed52a4e3e99ac31b6..9b26e19cc7ed05038e05308f9277b200a885dc10 100644 --- a/paddle/fluid/operators/nccl/CMakeLists.txt +++ b/paddle/fluid/operators/nccl/CMakeLists.txt @@ -1,3 +1,13 @@ if(WITH_GPU AND NOT WIN32) nv_library(nccl_common SRCS nccl_gpu_common.cc DEPS device_context operator ) endif() + +if(WITH_GPU) + op_library(nccl_op DEPS nccl_common) + file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(ncclAllReduce);\n") + set(OPERATOR_DEPS ${OPERATOR_DEPS} nccl_common PARENT_SCOPE) +endif() + +if(NOT WIN32) + nv_test(nccl_op_test SRCS nccl_op_test.cu.cc DEPS nccl_op gpu_info device_context) +endif() diff --git a/paddle/fluid/operators/nccl_op.cc b/paddle/fluid/operators/nccl/nccl_op.cc similarity index 100% rename from paddle/fluid/operators/nccl_op.cc rename to paddle/fluid/operators/nccl/nccl_op.cc diff --git a/paddle/fluid/operators/nccl_op.cu.cc b/paddle/fluid/operators/nccl/nccl_op.cu.cc similarity index 100% rename from paddle/fluid/operators/nccl_op.cu.cc rename to paddle/fluid/operators/nccl/nccl_op.cu.cc diff --git a/paddle/fluid/operators/nccl_op_test.cu.cc b/paddle/fluid/operators/nccl/nccl_op_test.cu.cc similarity index 100% rename from paddle/fluid/operators/nccl_op_test.cu.cc rename to paddle/fluid/operators/nccl/nccl_op_test.cu.cc diff --git a/paddle/fluid/operators/optimizers/CMakeLists.txt b/paddle/fluid/operators/optimizers/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..5d468316e8eacb73c4a4ce81c784880bb5e46c2d --- /dev/null +++ b/paddle/fluid/operators/optimizers/CMakeLists.txt @@ -0,0 +1,2 @@ +include(operators) +register_operators() diff --git a/paddle/fluid/operators/adadelta_op.cc b/paddle/fluid/operators/optimizers/adadelta_op.cc similarity index 98% rename from paddle/fluid/operators/adadelta_op.cc rename to paddle/fluid/operators/optimizers/adadelta_op.cc index 89a7a49e0fa8427826f5d91274912a68f2316b61..9039d02b673b3403c840492c088179b30e23da9c 100644 --- a/paddle/fluid/operators/adadelta_op.cc +++ b/paddle/fluid/operators/optimizers/adadelta_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/adadelta_op.h" +#include "paddle/fluid/operators/optimizers/adadelta_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/adadelta_op.cu b/paddle/fluid/operators/optimizers/adadelta_op.cu similarity index 93% rename from paddle/fluid/operators/adadelta_op.cu rename to paddle/fluid/operators/optimizers/adadelta_op.cu index fc10c6657476e7f87b2f703a1d0cb88eeebc35cf..3fbfee5df05770a1206ab3170d3baffdd20bc77b 100644 --- a/paddle/fluid/operators/adadelta_op.cu +++ b/paddle/fluid/operators/optimizers/adadelta_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/fluid/operators/adadelta_op.h" +#include "paddle/fluid/operators/optimizers/adadelta_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/adadelta_op.h b/paddle/fluid/operators/optimizers/adadelta_op.h similarity index 100% rename from paddle/fluid/operators/adadelta_op.h rename to paddle/fluid/operators/optimizers/adadelta_op.h diff --git a/paddle/fluid/operators/adagrad_op.cc b/paddle/fluid/operators/optimizers/adagrad_op.cc similarity index 99% rename from paddle/fluid/operators/adagrad_op.cc rename to paddle/fluid/operators/optimizers/adagrad_op.cc index c88297ff544ddb0e5a97452a8ad2e8f9f77825ba..e8d5a9e2c875570a198629bd745c9d58036746cb 100644 --- a/paddle/fluid/operators/adagrad_op.cc +++ b/paddle/fluid/operators/optimizers/adagrad_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/adagrad_op.h" +#include "paddle/fluid/operators/optimizers/adagrad_op.h" #include #include diff --git a/paddle/fluid/operators/adagrad_op.cu b/paddle/fluid/operators/optimizers/adagrad_op.cu similarity index 98% rename from paddle/fluid/operators/adagrad_op.cu rename to paddle/fluid/operators/optimizers/adagrad_op.cu index b99b33343d36fbb7f6b1a2928e142ca615b238b3..4efe56855a4bdca41d24f02c29a618a8d4232887 100644 --- a/paddle/fluid/operators/adagrad_op.cu +++ b/paddle/fluid/operators/optimizers/adagrad_op.cu @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/fluid/operators/adagrad_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" +#include "paddle/fluid/operators/optimizers/adagrad_op.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { diff --git a/paddle/fluid/operators/adagrad_op.h b/paddle/fluid/operators/optimizers/adagrad_op.h similarity index 100% rename from paddle/fluid/operators/adagrad_op.h rename to paddle/fluid/operators/optimizers/adagrad_op.h diff --git a/paddle/fluid/operators/adam_op.cc b/paddle/fluid/operators/optimizers/adam_op.cc similarity index 99% rename from paddle/fluid/operators/adam_op.cc rename to paddle/fluid/operators/optimizers/adam_op.cc index f3717af630017eba18aa265f3dbb496e18280a57..5710cda39acce53e35dfceec675fcd4979a84e31 100644 --- a/paddle/fluid/operators/adam_op.cc +++ b/paddle/fluid/operators/optimizers/adam_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/adam_op.h" +#include "paddle/fluid/operators/optimizers/adam_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/adam_op.cu b/paddle/fluid/operators/optimizers/adam_op.cu similarity index 93% rename from paddle/fluid/operators/adam_op.cu rename to paddle/fluid/operators/optimizers/adam_op.cu index 77f1991002e6007e8b8dff4746739a90e836145d..e8090ebacfe85153aba9e275c9cd1c55fd7af15e 100644 --- a/paddle/fluid/operators/adam_op.cu +++ b/paddle/fluid/operators/optimizers/adam_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/fluid/operators/adam_op.h" +#include "paddle/fluid/operators/optimizers/adam_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/adam_op.h b/paddle/fluid/operators/optimizers/adam_op.h similarity index 100% rename from paddle/fluid/operators/adam_op.h rename to paddle/fluid/operators/optimizers/adam_op.h diff --git a/paddle/fluid/operators/adamax_op.cc b/paddle/fluid/operators/optimizers/adamax_op.cc similarity index 99% rename from paddle/fluid/operators/adamax_op.cc rename to paddle/fluid/operators/optimizers/adamax_op.cc index d4aa4d338a2379adf985ba7f89b528bc402eda06..4b244a76dc0ebee65b7c95db2d2754ebae03bbac 100644 --- a/paddle/fluid/operators/adamax_op.cc +++ b/paddle/fluid/operators/optimizers/adamax_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/adamax_op.h" +#include "paddle/fluid/operators/optimizers/adamax_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/adamax_op.cu b/paddle/fluid/operators/optimizers/adamax_op.cu similarity index 93% rename from paddle/fluid/operators/adamax_op.cu rename to paddle/fluid/operators/optimizers/adamax_op.cu index 05cafd7a8eef79588d1d5724084586cb9b51d3d4..e54adcb142fe0d50dad23fe5df14bd6f28220d8a 100644 --- a/paddle/fluid/operators/adamax_op.cu +++ b/paddle/fluid/operators/optimizers/adamax_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/fluid/operators/adamax_op.h" +#include "paddle/fluid/operators/optimizers/adamax_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/adamax_op.h b/paddle/fluid/operators/optimizers/adamax_op.h similarity index 100% rename from paddle/fluid/operators/adamax_op.h rename to paddle/fluid/operators/optimizers/adamax_op.h diff --git a/paddle/fluid/operators/decayed_adagrad_op.cc b/paddle/fluid/operators/optimizers/decayed_adagrad_op.cc similarity index 98% rename from paddle/fluid/operators/decayed_adagrad_op.cc rename to paddle/fluid/operators/optimizers/decayed_adagrad_op.cc index d73ae9e2721b388212cb6efa354eb4b480df9cad..80278441c07203b03dbcff157193ea5976eefbf1 100644 --- a/paddle/fluid/operators/decayed_adagrad_op.cc +++ b/paddle/fluid/operators/optimizers/decayed_adagrad_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/decayed_adagrad_op.h" +#include "paddle/fluid/operators/optimizers/decayed_adagrad_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/decayed_adagrad_op.cu b/paddle/fluid/operators/optimizers/decayed_adagrad_op.cu similarity index 92% rename from paddle/fluid/operators/decayed_adagrad_op.cu rename to paddle/fluid/operators/optimizers/decayed_adagrad_op.cu index 7da16acf05eefc21cbe3dd0540dcbf69022431de..84d65e39329659f82099011f9ec60468d5db6328 100644 --- a/paddle/fluid/operators/decayed_adagrad_op.cu +++ b/paddle/fluid/operators/optimizers/decayed_adagrad_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/fluid/operators/decayed_adagrad_op.h" +#include "paddle/fluid/operators/optimizers/decayed_adagrad_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/decayed_adagrad_op.h b/paddle/fluid/operators/optimizers/decayed_adagrad_op.h similarity index 100% rename from paddle/fluid/operators/decayed_adagrad_op.h rename to paddle/fluid/operators/optimizers/decayed_adagrad_op.h diff --git a/paddle/fluid/operators/ftrl_op.cc b/paddle/fluid/operators/optimizers/ftrl_op.cc similarity index 99% rename from paddle/fluid/operators/ftrl_op.cc rename to paddle/fluid/operators/optimizers/ftrl_op.cc index b77e12d6508eb07ae137b313ca91eac951afbcbe..1c9e91d9b610669def6d6d52e4753714745d1c0f 100644 --- a/paddle/fluid/operators/ftrl_op.cc +++ b/paddle/fluid/operators/optimizers/ftrl_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/ftrl_op.h" +#include "paddle/fluid/operators/optimizers/ftrl_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/ftrl_op.cu b/paddle/fluid/operators/optimizers/ftrl_op.cu similarity index 93% rename from paddle/fluid/operators/ftrl_op.cu rename to paddle/fluid/operators/optimizers/ftrl_op.cu index e7371c80da1d1cbb39247b50d8c6537ee8e948f8..f836b75df93861a0fd670f2a0e786e6a797a4661 100644 --- a/paddle/fluid/operators/ftrl_op.cu +++ b/paddle/fluid/operators/optimizers/ftrl_op.cu @@ -12,7 +12,7 @@ CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/fluid/operators/ftrl_op.h" +#include "paddle/fluid/operators/optimizers/ftrl_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/ftrl_op.h b/paddle/fluid/operators/optimizers/ftrl_op.h similarity index 100% rename from paddle/fluid/operators/ftrl_op.h rename to paddle/fluid/operators/optimizers/ftrl_op.h diff --git a/paddle/fluid/operators/lars_momentum_op.cc b/paddle/fluid/operators/optimizers/lars_momentum_op.cc similarity index 96% rename from paddle/fluid/operators/lars_momentum_op.cc rename to paddle/fluid/operators/optimizers/lars_momentum_op.cc index a8dda93902448fa1bd21b719ffd9c9b500caf755..574a03680b66962ac2d6ba249d0fc491a36794cd 100644 --- a/paddle/fluid/operators/lars_momentum_op.cc +++ b/paddle/fluid/operators/optimizers/lars_momentum_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/lars_momentum_op.h" -#include "paddle/fluid/operators/momentum_op.h" +#include "paddle/fluid/operators/optimizers/lars_momentum_op.h" +#include "paddle/fluid/operators/optimizers/momentum_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lars_momentum_op.cu b/paddle/fluid/operators/optimizers/lars_momentum_op.cu similarity index 98% rename from paddle/fluid/operators/lars_momentum_op.cu rename to paddle/fluid/operators/optimizers/lars_momentum_op.cu index eb346851a2f690fa05422c84ddcb08307539048f..a277d6ff2bea917addac8c6ea4b24b63dcbc8dba 100644 --- a/paddle/fluid/operators/lars_momentum_op.cu +++ b/paddle/fluid/operators/optimizers/lars_momentum_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/operators/lars_momentum_op.h" +#include "paddle/fluid/operators/optimizers/lars_momentum_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lars_momentum_op.h b/paddle/fluid/operators/optimizers/lars_momentum_op.h similarity index 100% rename from paddle/fluid/operators/lars_momentum_op.h rename to paddle/fluid/operators/optimizers/lars_momentum_op.h diff --git a/paddle/fluid/operators/momentum_op.cc b/paddle/fluid/operators/optimizers/momentum_op.cc similarity index 98% rename from paddle/fluid/operators/momentum_op.cc rename to paddle/fluid/operators/optimizers/momentum_op.cc index 7f0b51580aa2591ac7338ad7c29ee4756d909925..cde238c076b6991eb52dac328c3e30a045420c92 100644 --- a/paddle/fluid/operators/momentum_op.cc +++ b/paddle/fluid/operators/optimizers/momentum_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/momentum_op.h" +#include "paddle/fluid/operators/optimizers/momentum_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/momentum_op.cu b/paddle/fluid/operators/optimizers/momentum_op.cu similarity index 93% rename from paddle/fluid/operators/momentum_op.cu rename to paddle/fluid/operators/optimizers/momentum_op.cu index b68fec34d43f0dee834f1045f192d5c6089d9356..8ce739de8dfd74cb43f9521bf39e3127a8a21925 100644 --- a/paddle/fluid/operators/momentum_op.cu +++ b/paddle/fluid/operators/optimizers/momentum_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/operators/momentum_op.h" +#include "paddle/fluid/operators/optimizers/momentum_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/momentum_op.h b/paddle/fluid/operators/optimizers/momentum_op.h similarity index 100% rename from paddle/fluid/operators/momentum_op.h rename to paddle/fluid/operators/optimizers/momentum_op.h diff --git a/paddle/fluid/operators/proximal_adagrad_op.cc b/paddle/fluid/operators/optimizers/proximal_adagrad_op.cc similarity index 98% rename from paddle/fluid/operators/proximal_adagrad_op.cc rename to paddle/fluid/operators/optimizers/proximal_adagrad_op.cc index 8d8075d76111928ec9855eb0b70fe6dbd90a979b..7b07b3b7071cb39e4e81cb4612372eec96efe489 100644 --- a/paddle/fluid/operators/proximal_adagrad_op.cc +++ b/paddle/fluid/operators/optimizers/proximal_adagrad_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/proximal_adagrad_op.h" +#include "paddle/fluid/operators/optimizers/proximal_adagrad_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/proximal_adagrad_op.cu b/paddle/fluid/operators/optimizers/proximal_adagrad_op.cu similarity index 92% rename from paddle/fluid/operators/proximal_adagrad_op.cu rename to paddle/fluid/operators/optimizers/proximal_adagrad_op.cu index 7e0226c62bfd5d4804cc70c00391237deec33ebb..d1c1f747b70c3ceb806da06e6786a70b62a32995 100644 --- a/paddle/fluid/operators/proximal_adagrad_op.cu +++ b/paddle/fluid/operators/optimizers/proximal_adagrad_op.cu @@ -12,7 +12,7 @@ CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/fluid/operators/proximal_adagrad_op.h" +#include "paddle/fluid/operators/optimizers/proximal_adagrad_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/proximal_adagrad_op.h b/paddle/fluid/operators/optimizers/proximal_adagrad_op.h similarity index 100% rename from paddle/fluid/operators/proximal_adagrad_op.h rename to paddle/fluid/operators/optimizers/proximal_adagrad_op.h diff --git a/paddle/fluid/operators/proximal_gd_op.cc b/paddle/fluid/operators/optimizers/proximal_gd_op.cc similarity index 98% rename from paddle/fluid/operators/proximal_gd_op.cc rename to paddle/fluid/operators/optimizers/proximal_gd_op.cc index baf9cbcba2ed89f62afc9816e0ab9e0f112e6008..dcef4f7be249e04306732213a7c6209d32602048 100644 --- a/paddle/fluid/operators/proximal_gd_op.cc +++ b/paddle/fluid/operators/optimizers/proximal_gd_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/proximal_gd_op.h" +#include "paddle/fluid/operators/optimizers/proximal_gd_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/proximal_gd_op.cu b/paddle/fluid/operators/optimizers/proximal_gd_op.cu similarity index 92% rename from paddle/fluid/operators/proximal_gd_op.cu rename to paddle/fluid/operators/optimizers/proximal_gd_op.cu index 32ee9ab74cd58fd6f48b6c34e108f31315adaf71..7aa0e1015008eba0c1cf63ba1278dc2b8049b20b 100644 --- a/paddle/fluid/operators/proximal_gd_op.cu +++ b/paddle/fluid/operators/optimizers/proximal_gd_op.cu @@ -12,7 +12,7 @@ CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/fluid/operators/proximal_gd_op.h" +#include "paddle/fluid/operators/optimizers/proximal_gd_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/proximal_gd_op.h b/paddle/fluid/operators/optimizers/proximal_gd_op.h similarity index 100% rename from paddle/fluid/operators/proximal_gd_op.h rename to paddle/fluid/operators/optimizers/proximal_gd_op.h diff --git a/paddle/fluid/operators/rmsprop_op.cc b/paddle/fluid/operators/optimizers/rmsprop_op.cc similarity index 99% rename from paddle/fluid/operators/rmsprop_op.cc rename to paddle/fluid/operators/optimizers/rmsprop_op.cc index f06f87e61d3a4d1fc8b864b9dd84e697fb12a006..99d1156ee6d5fc88161e25bfa581a265707e6f92 100644 --- a/paddle/fluid/operators/rmsprop_op.cc +++ b/paddle/fluid/operators/optimizers/rmsprop_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/rmsprop_op.h" +#include "paddle/fluid/operators/optimizers/rmsprop_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/rmsprop_op.cu b/paddle/fluid/operators/optimizers/rmsprop_op.cu similarity index 92% rename from paddle/fluid/operators/rmsprop_op.cu rename to paddle/fluid/operators/optimizers/rmsprop_op.cu index cdc473769598be5aac87a14613d9acdd5c1a1204..69e35a309e04f61068d9ff1b6d9f1450d2524253 100644 --- a/paddle/fluid/operators/rmsprop_op.cu +++ b/paddle/fluid/operators/optimizers/rmsprop_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/fluid/operators/rmsprop_op.h" +#include "paddle/fluid/operators/optimizers/rmsprop_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/rmsprop_op.h b/paddle/fluid/operators/optimizers/rmsprop_op.h similarity index 100% rename from paddle/fluid/operators/rmsprop_op.h rename to paddle/fluid/operators/optimizers/rmsprop_op.h diff --git a/paddle/fluid/operators/sgd_op.cc b/paddle/fluid/operators/optimizers/sgd_op.cc similarity index 98% rename from paddle/fluid/operators/sgd_op.cc rename to paddle/fluid/operators/optimizers/sgd_op.cc index ea62acd08c5009556abf05c91726111870d1a462..690381a67f89d18fe81c3b856b7ddce25d496ed0 100644 --- a/paddle/fluid/operators/sgd_op.cc +++ b/paddle/fluid/operators/optimizers/sgd_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/sgd_op.h" +#include "paddle/fluid/operators/optimizers/sgd_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sgd_op.cu b/paddle/fluid/operators/optimizers/sgd_op.cu similarity index 98% rename from paddle/fluid/operators/sgd_op.cu rename to paddle/fluid/operators/optimizers/sgd_op.cu index d3f4eba3b24ec1ac0328ef270256cdf3abe499db..a9d303d55d8f681fe3a014db36ede5ef6b2742bd 100644 --- a/paddle/fluid/operators/sgd_op.cu +++ b/paddle/fluid/operators/optimizers/sgd_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/fluid/operators/sgd_op.h" +#include "paddle/fluid/operators/optimizers/sgd_op.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { diff --git a/paddle/fluid/operators/sgd_op.h b/paddle/fluid/operators/optimizers/sgd_op.h similarity index 100% rename from paddle/fluid/operators/sgd_op.h rename to paddle/fluid/operators/optimizers/sgd_op.h diff --git a/paddle/fluid/operators/prelu_op.h b/paddle/fluid/operators/prelu_op.h index 12f1525594ecf0887618616ffe563bd2bda32496..594f1cb3abe49c61ad7c490ebcd100a5c9ea6fb9 100644 --- a/paddle/fluid/operators/prelu_op.h +++ b/paddle/fluid/operators/prelu_op.h @@ -32,7 +32,7 @@ class PReluKernel : public framework::OpKernel { T* o_ptr = out->mutable_data(context.GetPlace()); const T* alpha_ptr = alpha->data(); - std::string mode = context.Attr("mode"); + auto& mode = context.Attr("mode"); int numel = x->numel(); auto dim = x->dims(); @@ -99,6 +99,8 @@ class PReluGradKernel : public framework::OpKernel { index = 0; if (dalpha) { T* dalpha_ptr = dalpha->mutable_data(context.GetPlace()); + memset(dalpha_ptr, 0, sizeof(T) * dalpha->numel()); + if (mode == "channel") { for (i = 0; i < numel; i++) { temp = numel / (dim[0] * dim[1]); diff --git a/paddle/fluid/operators/reader/CMakeLists.txt b/paddle/fluid/operators/reader/CMakeLists.txt index 728197377df04df8c993a48bc282431473fe9959..6c919ee1782ebce6d56f7530daa9b748dfb26c47 100644 --- a/paddle/fluid/operators/reader/CMakeLists.txt +++ b/paddle/fluid/operators/reader/CMakeLists.txt @@ -1,3 +1,5 @@ +include(operators) + cc_library(reader_op_registry SRCS reader_op_registry.cc DEPS operator op_registry reader) set(LOCAL_READER_LIBS) @@ -28,4 +30,10 @@ reader_library(create_py_reader_op SRCS create_py_reader_op.cc) cc_test(reader_blocking_queue_test SRCS reader_blocking_queue_test.cc) # Export local libraries to parent -set(READER_LIBRARY ${LOCAL_READER_LIBS} PARENT_SCOPE) +# set(READER_LIBRARY ${LOCAL_READER_LIBS} PARENT_SCOPE) + +op_library(read_op) + +foreach(src ${LOCAL_READER_LIBS}) + set(OP_LIBRARY ${src} ${OP_LIBRARY} CACHE INTERNAL "op libs") +endforeach() diff --git a/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc b/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc index a08a9dbd0da46e73082cdd24c019e8d210d8bcc4..d7a048257f92c1c58c34decf1a93ff95f5f736c7 100644 --- a/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc +++ b/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc @@ -13,6 +13,7 @@ // limitations under the License. #include "paddle/fluid/operators/reader/reader_op_registry.h" +#include "paddle/fluid/platform/lock_guard_ptr.h" #include "paddle/fluid/recordio/scanner.h" namespace paddle { @@ -33,11 +34,7 @@ class RecordIOFileReader : public framework::FileReader { protected: void ReadNextImpl(std::vector* out) override { - std::unique_ptr> guard; - if (ThreadSafe) { - guard.reset(new std::lock_guard(*mutex_)); - } - + platform::LockGuardPtr guard(mutex_); bool ok = framework::ReadFromRecordIO(&scanner_, dev_ctx_, out); if (!ok) { out->clear(); diff --git a/paddle/fluid/operators/read_op.cc b/paddle/fluid/operators/reader/read_op.cc similarity index 100% rename from paddle/fluid/operators/read_op.cc rename to paddle/fluid/operators/reader/read_op.cc diff --git a/paddle/fluid/operators/reduce_ops/CMakeLists.txt b/paddle/fluid/operators/reduce_ops/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..5fe4d15ae2c6254a50318813c852b6c314880aba --- /dev/null +++ b/paddle/fluid/operators/reduce_ops/CMakeLists.txt @@ -0,0 +1,20 @@ +include(operators) +register_operators() + +if(WITH_GPU) + file(GLOB OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*.part.cu") + string(REPLACE ".part.cu" "" OPS "${OPS}") + + foreach(src ${OPS}) + if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${src}.part.cu) + set(CUDA_KERNEL_FILE ${CMAKE_CURRENT_SOURCE_DIR}/${src}.part.cu) + file(READ ${CUDA_KERNEL_FILE} TARGET_CONTENT) + string(REGEX MATCH "REGISTER_OP_CUDA_KERNEL\\(\\n?([^,]+),.*" MATCHED ${TARGET_CONTENT}) + if (MATCHED) + string(STRIP ${CMAKE_MATCH_1} MATCHED) + file(APPEND ${pybind_file} "USE_OP_DEVICE_KERNEL(${MATCHED}, CUDA);\n") + endif() + + endif() + endforeach() +endif() diff --git a/paddle/fluid/operators/cub_reduce.h b/paddle/fluid/operators/reduce_ops/cub_reduce.h similarity index 100% rename from paddle/fluid/operators/cub_reduce.h rename to paddle/fluid/operators/reduce_ops/cub_reduce.h diff --git a/paddle/fluid/operators/reduce_max_op.cc b/paddle/fluid/operators/reduce_ops/reduce_max_op.cc similarity index 96% rename from paddle/fluid/operators/reduce_max_op.cc rename to paddle/fluid/operators/reduce_ops/reduce_max_op.cc index 95d3768e1fdf6947659c7b3a1c9d57fad741472a..cb438b4a8057267015c8b3c15dd8468fca5a4b44 100644 --- a/paddle/fluid/operators/reduce_max_op.cc +++ b/paddle/fluid/operators/reduce_ops/reduce_max_op.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/operators/reduce_min_max_op.h" +#include "paddle/fluid/operators/reduce_ops/reduce_min_max_op.h" REGISTER_REDUCE_OP(reduce_max); REGISTER_OP_CPU_KERNEL( diff --git a/paddle/fluid/operators/reduce_max_op.cu b/paddle/fluid/operators/reduce_ops/reduce_max_op.cu similarity index 95% rename from paddle/fluid/operators/reduce_max_op.cu rename to paddle/fluid/operators/reduce_ops/reduce_max_op.cu index b21da178f3eeaafa41bde5f64cc4abcf7944b032..832112ede833a06e053dcff5139e82f054b127c4 100644 --- a/paddle/fluid/operators/reduce_max_op.cu +++ b/paddle/fluid/operators/reduce_ops/reduce_max_op.cu @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/operators/reduce_min_max_op.h" +#include "paddle/fluid/operators/reduce_ops/reduce_min_max_op.h" REGISTER_OP_CUDA_KERNEL(reduce_max, ops::ReduceKernel -#include "paddle/fluid/operators/cub_reduce.h" -#include "paddle/fluid/operators/reduce_mean_op.h" +#include "paddle/fluid/operators/reduce_ops/cub_reduce.h" +#include "paddle/fluid/operators/reduce_ops/reduce_mean_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/reduce_mean_op.h b/paddle/fluid/operators/reduce_ops/reduce_mean_op.h similarity index 95% rename from paddle/fluid/operators/reduce_mean_op.h rename to paddle/fluid/operators/reduce_ops/reduce_mean_op.h index 1359679c4767d2032bf3e3a90849ad2a2ef3e829..240c43bc6d0af266e3500c14f894fe30abab728e 100644 --- a/paddle/fluid/operators/reduce_mean_op.h +++ b/paddle/fluid/operators/reduce_ops/reduce_mean_op.h @@ -14,7 +14,7 @@ #pragma once -#include "paddle/fluid/operators/reduce_op.h" +#include "paddle/fluid/operators/reduce_ops/reduce_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/reduce_mean_op.part.cu b/paddle/fluid/operators/reduce_ops/reduce_mean_op.part.cu similarity index 95% rename from paddle/fluid/operators/reduce_mean_op.part.cu rename to paddle/fluid/operators/reduce_ops/reduce_mean_op.part.cu index 4b663bcdca7c20f8802d962a362f429d8eafe9af..9324ec1e1db6f40e463b415e5d2bdc5cfe664ef4 100644 --- a/paddle/fluid/operators/reduce_mean_op.part.cu +++ b/paddle/fluid/operators/reduce_ops/reduce_mean_op.part.cu @@ -13,7 +13,7 @@ // limitations under the License. // .part used to speed up nvcc compile -#include "paddle/fluid/operators/reduce_mean_op.h" +#include "paddle/fluid/operators/reduce_ops/reduce_mean_op.h" REGISTER_OP_CUDA_KERNEL( reduce_mean_grad, ops::ReduceGradKernel #include -#include "paddle/fluid/operators/reduce_op_function.h" +#include "paddle/fluid/operators/reduce_ops/reduce_op_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/reduce_op_function.h b/paddle/fluid/operators/reduce_ops/reduce_op_function.h similarity index 100% rename from paddle/fluid/operators/reduce_op_function.h rename to paddle/fluid/operators/reduce_ops/reduce_op_function.h diff --git a/paddle/fluid/operators/reduce_prod_op.cc b/paddle/fluid/operators/reduce_ops/reduce_prod_op.cc similarity index 96% rename from paddle/fluid/operators/reduce_prod_op.cc rename to paddle/fluid/operators/reduce_ops/reduce_prod_op.cc index 713728b99757a6f3bb128f665d5576ac64eef8ec..88935107df187da731e5b77bb6c24cd692d2994f 100644 --- a/paddle/fluid/operators/reduce_prod_op.cc +++ b/paddle/fluid/operators/reduce_ops/reduce_prod_op.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/operators/reduce_prod_op.h" +#include "paddle/fluid/operators/reduce_ops/reduce_prod_op.h" REGISTER_REDUCE_OP(reduce_prod); REGISTER_OP_CPU_KERNEL(reduce_prod, diff --git a/paddle/fluid/operators/reduce_prod_op.cu b/paddle/fluid/operators/reduce_ops/reduce_prod_op.cu similarity index 95% rename from paddle/fluid/operators/reduce_prod_op.cu rename to paddle/fluid/operators/reduce_ops/reduce_prod_op.cu index d8692afb96e4d5d3206210060684dd12fb4d79a7..4434937f75397d8d5340a94abbd41efa7e7a8d4b 100644 --- a/paddle/fluid/operators/reduce_prod_op.cu +++ b/paddle/fluid/operators/reduce_ops/reduce_prod_op.cu @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/operators/reduce_prod_op.h" +#include "paddle/fluid/operators/reduce_ops/reduce_prod_op.h" REGISTER_OP_CUDA_KERNEL(reduce_prod, ops::ReduceKernel -#include "paddle/fluid/operators/reduce_op.h" +#include "paddle/fluid/operators/reduce_ops/reduce_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/reduce_sum_op.part.cu b/paddle/fluid/operators/reduce_ops/reduce_sum_op.part.cu similarity index 90% rename from paddle/fluid/operators/reduce_sum_op.part.cu rename to paddle/fluid/operators/reduce_ops/reduce_sum_op.part.cu index 525633f62a95b2d0d677fcbebe551b75cb2a180d..eb3295731b047391a244bfb598c9d802bca1fc0c 100644 --- a/paddle/fluid/operators/reduce_sum_op.part.cu +++ b/paddle/fluid/operators/reduce_ops/reduce_sum_op.part.cu @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/operators/cub_reduce.h" -#include "paddle/fluid/operators/reduce_sum_op.h" +#include "paddle/fluid/operators/reduce_ops/cub_reduce.h" +#include "paddle/fluid/operators/reduce_ops/reduce_sum_op.h" REGISTER_OP_CUDA_KERNEL( reduce_sum_grad, ops::ReduceGradKernelmutable_data(paddle::framework::make_ddim({1, 4}), - paddle::platform::CPUPlace()); - p_index = index->mutable_data(paddle::framework::make_ddim({1}), - paddle::platform::CPUPlace()); - - for (size_t i = 0; i < 4; ++i) p_src[i] = static_cast(i); + paddle::framework::Tensor src; + paddle::framework::Tensor index; + paddle::framework::Tensor output; + + auto* p_src = src.mutable_data(paddle::framework::make_ddim({1, 4}), + paddle::platform::CPUPlace()); + auto* p_index = index.mutable_data(paddle::framework::make_ddim({1}), + paddle::platform::CPUPlace()); + + for (size_t i = 0; i < 4; ++i) { + p_src[i] = static_cast(i); + } p_index[0] = 1; - float* p_output = output->mutable_data( + auto* p_output = output.mutable_data( paddle::framework::make_ddim({4, 4}), paddle::platform::CPUPlace()); + for (int64_t i = 0; i < output.numel(); ++i) { + p_output[i] = 0; + } + auto* cpu_place = new paddle::platform::CPUPlace(); paddle::platform::CPUDeviceContext ctx(*cpu_place); - paddle::operators::ScatterAssign(ctx, *src, *index, output); + paddle::operators::ScatterAssign(ctx, src, index, &output); for (size_t i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], 0.0f); - for (size_t i = 0; i < 4; ++i) EXPECT_EQ(output->data()[i], 0.0f); + for (size_t i = 0; i < 4; ++i) EXPECT_EQ(output.data()[i], 0.0f); for (size_t i = 4; i < 8; ++i) { EXPECT_EQ(p_output[i], static_cast(i - 4)); } for (size_t i = 4; i < 8; ++i) - EXPECT_EQ(output->data()[i], static_cast(i - 4)); + EXPECT_EQ(output.data()[i], static_cast(i - 4)); for (size_t i = 8; i < 16; ++i) EXPECT_EQ(p_output[i], 0.0f); - for (size_t i = 8; i < 16; ++i) EXPECT_EQ(output->data()[i], 0.0f); - - delete src; - delete index; - delete output; + for (size_t i = 8; i < 16; ++i) EXPECT_EQ(output.data()[i], 0.0f); } diff --git a/paddle/fluid/operators/sequence_ops/CMakeLists.txt b/paddle/fluid/operators/sequence_ops/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..5d468316e8eacb73c4a4ce81c784880bb5e46c2d --- /dev/null +++ b/paddle/fluid/operators/sequence_ops/CMakeLists.txt @@ -0,0 +1,2 @@ +include(operators) +register_operators() diff --git a/paddle/fluid/operators/sequence_concat_op.cc b/paddle/fluid/operators/sequence_ops/sequence_concat_op.cc similarity index 98% rename from paddle/fluid/operators/sequence_concat_op.cc rename to paddle/fluid/operators/sequence_ops/sequence_concat_op.cc index 3234b60861da3d0c6a8434eb11fd0488a95e171f..37f1b9dda50ba4b62d7cf75765125e0ad663d9d8 100644 --- a/paddle/fluid/operators/sequence_concat_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_concat_op.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/operators/sequence_concat_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_concat_op.h" #include namespace paddle { diff --git a/paddle/fluid/operators/sequence_concat_op.cu.cc b/paddle/fluid/operators/sequence_ops/sequence_concat_op.cu.cc similarity index 94% rename from paddle/fluid/operators/sequence_concat_op.cu.cc rename to paddle/fluid/operators/sequence_ops/sequence_concat_op.cu.cc index eb6535235df80a9267b22403ae1f35c6cefb7fe7..7b8043bc4538b486bb73e005769e1585e5c4817e 100644 --- a/paddle/fluid/operators/sequence_concat_op.cu.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_concat_op.cu.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/operators/sequence_concat_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_concat_op.h" template using Kernel = diff --git a/paddle/fluid/operators/sequence_concat_op.h b/paddle/fluid/operators/sequence_ops/sequence_concat_op.h similarity index 100% rename from paddle/fluid/operators/sequence_concat_op.h rename to paddle/fluid/operators/sequence_ops/sequence_concat_op.h diff --git a/paddle/fluid/operators/sequence_conv_op.cc b/paddle/fluid/operators/sequence_ops/sequence_conv_op.cc similarity index 99% rename from paddle/fluid/operators/sequence_conv_op.cc rename to paddle/fluid/operators/sequence_ops/sequence_conv_op.cc index 95a21a5d3ee6d8037431083edc25d1cddf05dedb..65cd9edbc7125f605d6fb437a2e056054eb9a6d7 100644 --- a/paddle/fluid/operators/sequence_conv_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_conv_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/sequence_conv_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_conv_op.h" #include diff --git a/paddle/fluid/operators/sequence_conv_op.cu.cc b/paddle/fluid/operators/sequence_ops/sequence_conv_op.cu.cc similarity index 93% rename from paddle/fluid/operators/sequence_conv_op.cu.cc rename to paddle/fluid/operators/sequence_ops/sequence_conv_op.cu.cc index de482b7f10bafc4ac6f3838670e2da9a86374c26..600981b5e96c279329a67b608a8dd94dee7d88ef 100644 --- a/paddle/fluid/operators/sequence_conv_op.cu.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_conv_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/sequence_conv_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_conv_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/sequence_conv_op.h b/paddle/fluid/operators/sequence_ops/sequence_conv_op.h similarity index 100% rename from paddle/fluid/operators/sequence_conv_op.h rename to paddle/fluid/operators/sequence_ops/sequence_conv_op.h diff --git a/paddle/fluid/operators/sequence_enumerate_op.cc b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc similarity index 97% rename from paddle/fluid/operators/sequence_enumerate_op.cc rename to paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc index 58e48c228bb34814700fd0f7a3d62ef4b1a435dd..1eebadc2c980ddf1cbaaefef1568dd401d0c77ed 100644 --- a/paddle/fluid/operators/sequence_enumerate_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/operators/sequence_enumerate_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_enumerate_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_enumerate_op.cu b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cu similarity index 97% rename from paddle/fluid/operators/sequence_enumerate_op.cu rename to paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cu index bdc9a615aa9a1ecd99c1f6995361f8c5ff0aa383..28821e7129c1601f1214b0b56696fbf526a2123f 100644 --- a/paddle/fluid/operators/sequence_enumerate_op.cu +++ b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cu @@ -14,7 +14,7 @@ #include #include -#include "paddle/fluid/operators/sequence_enumerate_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_enumerate_op.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { diff --git a/paddle/fluid/operators/sequence_enumerate_op.h b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.h similarity index 100% rename from paddle/fluid/operators/sequence_enumerate_op.h rename to paddle/fluid/operators/sequence_ops/sequence_enumerate_op.h diff --git a/paddle/fluid/operators/sequence_erase_op.cc b/paddle/fluid/operators/sequence_ops/sequence_erase_op.cc similarity index 97% rename from paddle/fluid/operators/sequence_erase_op.cc rename to paddle/fluid/operators/sequence_ops/sequence_erase_op.cc index 816ba123a6cbf84ec9b321d5d7cfef7fab9749b1..ddda80ee0824e261b0d737f86e03866d5fdfd77a 100644 --- a/paddle/fluid/operators/sequence_erase_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_erase_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/sequence_erase_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_erase_op.h" #include namespace paddle { diff --git a/paddle/fluid/operators/sequence_erase_op.cu b/paddle/fluid/operators/sequence_ops/sequence_erase_op.cu similarity index 98% rename from paddle/fluid/operators/sequence_erase_op.cu rename to paddle/fluid/operators/sequence_ops/sequence_erase_op.cu index 3a58e47f1132cd1ac85584b2470e8c6cddcfb28a..619c40dbd10ad6b538f2d4e3567966b222fc5e2d 100644 --- a/paddle/fluid/operators/sequence_erase_op.cu +++ b/paddle/fluid/operators/sequence_ops/sequence_erase_op.cu @@ -14,7 +14,7 @@ limitations under the License. */ #include #include -#include "paddle/fluid/operators/sequence_erase_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_erase_op.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { diff --git a/paddle/fluid/operators/sequence_erase_op.h b/paddle/fluid/operators/sequence_ops/sequence_erase_op.h similarity index 100% rename from paddle/fluid/operators/sequence_erase_op.h rename to paddle/fluid/operators/sequence_ops/sequence_erase_op.h diff --git a/paddle/fluid/operators/sequence_expand_as_op.cc b/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cc similarity index 98% rename from paddle/fluid/operators/sequence_expand_as_op.cc rename to paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cc index 33c1e1c973c80ba3943924331380d35b225ac800..3b79d0c71975bb740b4085ce80f7d95b65f600c1 100644 --- a/paddle/fluid/operators/sequence_expand_as_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/sequence_expand_as_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_expand_as_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_expand_as_op.cu b/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cu similarity index 98% rename from paddle/fluid/operators/sequence_expand_as_op.cu rename to paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cu index 7357f5ae6e732f28307af65d1f1b6b3cbed1f640..998bf82ab1ddcd815491de95a3f7cf987036ee65 100644 --- a/paddle/fluid/operators/sequence_expand_as_op.cu +++ b/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/fluid/operators/sequence_expand_as_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_expand_as_op.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { diff --git a/paddle/fluid/operators/sequence_expand_as_op.h b/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.h similarity index 100% rename from paddle/fluid/operators/sequence_expand_as_op.h rename to paddle/fluid/operators/sequence_ops/sequence_expand_as_op.h diff --git a/paddle/fluid/operators/sequence_expand_op.cc b/paddle/fluid/operators/sequence_ops/sequence_expand_op.cc similarity index 99% rename from paddle/fluid/operators/sequence_expand_op.cc rename to paddle/fluid/operators/sequence_ops/sequence_expand_op.cc index 944c7f85e5f43679e1875fcce813382be2ba5526..c07e6962e673ceb274ef31cbf492f378ae696137 100644 --- a/paddle/fluid/operators/sequence_expand_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_expand_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/sequence_expand_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_expand_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_expand_op.cu b/paddle/fluid/operators/sequence_ops/sequence_expand_op.cu similarity index 98% rename from paddle/fluid/operators/sequence_expand_op.cu rename to paddle/fluid/operators/sequence_ops/sequence_expand_op.cu index 550677b22694085059e914678a5361d914b455bc..afc08c7b3f6596efd3b6e0b74c17aa3c9268c47d 100644 --- a/paddle/fluid/operators/sequence_expand_op.cu +++ b/paddle/fluid/operators/sequence_ops/sequence_expand_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/fluid/operators/sequence_expand_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_expand_op.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { diff --git a/paddle/fluid/operators/sequence_expand_op.h b/paddle/fluid/operators/sequence_ops/sequence_expand_op.h similarity index 100% rename from paddle/fluid/operators/sequence_expand_op.h rename to paddle/fluid/operators/sequence_ops/sequence_expand_op.h diff --git a/paddle/fluid/operators/sequence_mask_op.cc b/paddle/fluid/operators/sequence_ops/sequence_mask_op.cc similarity index 95% rename from paddle/fluid/operators/sequence_mask_op.cc rename to paddle/fluid/operators/sequence_ops/sequence_mask_op.cc index 798211f481659eb71248f7a6210e6522273d387f..7fc506aab4d3c6861282b68b09fdcb5fd8055f77 100644 --- a/paddle/fluid/operators/sequence_mask_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_mask_op.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/operators/sequence_mask_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_mask_op.h" REGISTER_OPERATOR(sequence_mask, paddle::operators::SequenceMaskOp, paddle::operators::SequenceMaskOpMaker, diff --git a/paddle/fluid/operators/sequence_mask_op.cu b/paddle/fluid/operators/sequence_ops/sequence_mask_op.cu similarity index 94% rename from paddle/fluid/operators/sequence_mask_op.cu rename to paddle/fluid/operators/sequence_ops/sequence_mask_op.cu index 2ad23774579533b62b9189c1564ad7c7db5c298a..e963ce610e2c147d66087a1df59f67a04d899ccc 100644 --- a/paddle/fluid/operators/sequence_mask_op.cu +++ b/paddle/fluid/operators/sequence_ops/sequence_mask_op.cu @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/operators/sequence_mask_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_mask_op.h" REGISTER_OP_CUDA_KERNEL( sequence_mask, diff --git a/paddle/fluid/operators/sequence_mask_op.h b/paddle/fluid/operators/sequence_ops/sequence_mask_op.h similarity index 100% rename from paddle/fluid/operators/sequence_mask_op.h rename to paddle/fluid/operators/sequence_ops/sequence_mask_op.h diff --git a/paddle/fluid/operators/sequence_pad_op.cc b/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc similarity index 99% rename from paddle/fluid/operators/sequence_pad_op.cc rename to paddle/fluid/operators/sequence_ops/sequence_pad_op.cc index 4583b26256ba2e084bf7477c54d468df860d9b43..23c7bf7cea830bb0ccf5e81f99130043c2d5f80b 100644 --- a/paddle/fluid/operators/sequence_pad_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/sequence_pad_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_pad_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_pad_op.cu b/paddle/fluid/operators/sequence_ops/sequence_pad_op.cu similarity index 95% rename from paddle/fluid/operators/sequence_pad_op.cu rename to paddle/fluid/operators/sequence_ops/sequence_pad_op.cu index ff8f81a2f0ec4a72befc3be2a5fc48c3a586c824..7fc64a530ef5442ae927faac96ad92a4126febcd 100644 --- a/paddle/fluid/operators/sequence_pad_op.cu +++ b/paddle/fluid/operators/sequence_ops/sequence_pad_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/sequence_pad_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_pad_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/sequence_pad_op.h b/paddle/fluid/operators/sequence_ops/sequence_pad_op.h similarity index 100% rename from paddle/fluid/operators/sequence_pad_op.h rename to paddle/fluid/operators/sequence_ops/sequence_pad_op.h diff --git a/paddle/fluid/operators/sequence_pool_op.cc b/paddle/fluid/operators/sequence_ops/sequence_pool_op.cc similarity index 98% rename from paddle/fluid/operators/sequence_pool_op.cc rename to paddle/fluid/operators/sequence_ops/sequence_pool_op.cc index 7e80b8db5e90730e2df420466a33362620e15730..44b09bf7c2c776cdc455a8706cb2b2251f3be509 100644 --- a/paddle/fluid/operators/sequence_pool_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_pool_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/sequence_pool_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_pool_op.h" #include namespace paddle { diff --git a/paddle/fluid/operators/sequence_pool_op.cu b/paddle/fluid/operators/sequence_ops/sequence_pool_op.cu similarity index 93% rename from paddle/fluid/operators/sequence_pool_op.cu rename to paddle/fluid/operators/sequence_ops/sequence_pool_op.cu index 2bf0697af3c74ee922a832fecaa2cd2399a06849..63cd47a38a0ff6413c430c6be6284c5f4bfc2595 100644 --- a/paddle/fluid/operators/sequence_pool_op.cu +++ b/paddle/fluid/operators/sequence_ops/sequence_pool_op.cu @@ -14,7 +14,7 @@ limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/fluid/operators/sequence_pool_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_pool_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/sequence_pool_op.h b/paddle/fluid/operators/sequence_ops/sequence_pool_op.h similarity index 100% rename from paddle/fluid/operators/sequence_pool_op.h rename to paddle/fluid/operators/sequence_ops/sequence_pool_op.h diff --git a/paddle/fluid/operators/sequence_reshape_op.cc b/paddle/fluid/operators/sequence_ops/sequence_reshape_op.cc similarity index 98% rename from paddle/fluid/operators/sequence_reshape_op.cc rename to paddle/fluid/operators/sequence_ops/sequence_reshape_op.cc index 31d28d723498892f287246ba228df757d5b9f6c8..5421f35662b3b0a6a61748ac0b6b5f718d213b73 100644 --- a/paddle/fluid/operators/sequence_reshape_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_reshape_op.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/operators/sequence_reshape_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_reshape_op.h" #include "paddle/fluid/framework/ddim.h" namespace paddle { diff --git a/paddle/fluid/operators/sequence_reshape_op.cu b/paddle/fluid/operators/sequence_ops/sequence_reshape_op.cu similarity index 95% rename from paddle/fluid/operators/sequence_reshape_op.cu rename to paddle/fluid/operators/sequence_ops/sequence_reshape_op.cu index 232e031c0b022497d9e5141750dbf8fccffc7615..38bc599165d5f84f67e2fe08bf96ebef4b03d8a4 100644 --- a/paddle/fluid/operators/sequence_reshape_op.cu +++ b/paddle/fluid/operators/sequence_ops/sequence_reshape_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/sequence_reshape_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_reshape_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/sequence_reshape_op.h b/paddle/fluid/operators/sequence_ops/sequence_reshape_op.h similarity index 100% rename from paddle/fluid/operators/sequence_reshape_op.h rename to paddle/fluid/operators/sequence_ops/sequence_reshape_op.h diff --git a/paddle/fluid/operators/sequence_reverse_op.cc b/paddle/fluid/operators/sequence_ops/sequence_reverse_op.cc similarity index 94% rename from paddle/fluid/operators/sequence_reverse_op.cc rename to paddle/fluid/operators/sequence_ops/sequence_reverse_op.cc index 1428cca1a6bf6150594f9cb72dbf00cd0eff7df5..dfbbf5f156983189ac1ab82fbff51d7eb4844f9a 100644 --- a/paddle/fluid/operators/sequence_reverse_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_reverse_op.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/operators/sequence_reverse_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_reverse_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/sequence_reverse_op.cu b/paddle/fluid/operators/sequence_ops/sequence_reverse_op.cu similarity index 94% rename from paddle/fluid/operators/sequence_reverse_op.cu rename to paddle/fluid/operators/sequence_ops/sequence_reverse_op.cu index ce65f4799e8661adca60d212eaa9c3f0f92c4c29..0a59ed7f9fee07bc3b12909973535f31ef049a4a 100644 --- a/paddle/fluid/operators/sequence_reverse_op.cu +++ b/paddle/fluid/operators/sequence_ops/sequence_reverse_op.cu @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/operators/sequence_reverse_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_reverse_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/sequence_reverse_op.h b/paddle/fluid/operators/sequence_ops/sequence_reverse_op.h similarity index 100% rename from paddle/fluid/operators/sequence_reverse_op.h rename to paddle/fluid/operators/sequence_ops/sequence_reverse_op.h diff --git a/paddle/fluid/operators/sequence_scatter_op.cc b/paddle/fluid/operators/sequence_ops/sequence_scatter_op.cc similarity index 98% rename from paddle/fluid/operators/sequence_scatter_op.cc rename to paddle/fluid/operators/sequence_ops/sequence_scatter_op.cc index adb81bffccb50069b3a2e5f391f3fdfde231b2be..c49d1ccb18427a1ec3c45f326b57bce32c60e1e2 100644 --- a/paddle/fluid/operators/sequence_scatter_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_scatter_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/sequence_scatter_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_scatter_op.h" #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/gather.h" diff --git a/paddle/fluid/operators/sequence_scatter_op.h b/paddle/fluid/operators/sequence_ops/sequence_scatter_op.h similarity index 100% rename from paddle/fluid/operators/sequence_scatter_op.h rename to paddle/fluid/operators/sequence_ops/sequence_scatter_op.h diff --git a/paddle/fluid/operators/sequence_slice_op.cc b/paddle/fluid/operators/sequence_ops/sequence_slice_op.cc similarity index 98% rename from paddle/fluid/operators/sequence_slice_op.cc rename to paddle/fluid/operators/sequence_ops/sequence_slice_op.cc index df9243dc04c584d70dfa6ca78d5fac8423796466..6f84023e26dbf1280d9622946ab20184fb835be1 100644 --- a/paddle/fluid/operators/sequence_slice_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_slice_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/sequence_slice_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_slice_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_slice_op.cu b/paddle/fluid/operators/sequence_ops/sequence_slice_op.cu similarity index 92% rename from paddle/fluid/operators/sequence_slice_op.cu rename to paddle/fluid/operators/sequence_ops/sequence_slice_op.cu index 059e802df0ebdba68f758decfb8b54a362996335..1e4a1b8323dbaacdf3f74c33e7aa4484d9be2478 100644 --- a/paddle/fluid/operators/sequence_slice_op.cu +++ b/paddle/fluid/operators/sequence_ops/sequence_slice_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/sequence_slice_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_slice_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/sequence_slice_op.h b/paddle/fluid/operators/sequence_ops/sequence_slice_op.h similarity index 100% rename from paddle/fluid/operators/sequence_slice_op.h rename to paddle/fluid/operators/sequence_ops/sequence_slice_op.h diff --git a/paddle/fluid/operators/sequence_softmax_cudnn_op.cu.cc b/paddle/fluid/operators/sequence_ops/sequence_softmax_cudnn_op.cu.cc similarity index 100% rename from paddle/fluid/operators/sequence_softmax_cudnn_op.cu.cc rename to paddle/fluid/operators/sequence_ops/sequence_softmax_cudnn_op.cu.cc diff --git a/paddle/fluid/operators/sequence_softmax_op.cc b/paddle/fluid/operators/sequence_ops/sequence_softmax_op.cc similarity index 98% rename from paddle/fluid/operators/sequence_softmax_op.cc rename to paddle/fluid/operators/sequence_ops/sequence_softmax_op.cc index ada3e0c8dbba38729c2b9c8b02335327835f2ef4..644a5bebc18886a2ac9210576f1c2251ad5ad0be 100644 --- a/paddle/fluid/operators/sequence_softmax_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_softmax_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/sequence_softmax_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_softmax_op.h" #include namespace paddle { diff --git a/paddle/fluid/operators/sequence_softmax_op.cu b/paddle/fluid/operators/sequence_ops/sequence_softmax_op.cu similarity index 98% rename from paddle/fluid/operators/sequence_softmax_op.cu rename to paddle/fluid/operators/sequence_ops/sequence_softmax_op.cu index e94ceaa170131e8bce7d1574b27f0baeaa8d1ffc..cc5e9821903fb7a726f52177df1d17757f697411 100644 --- a/paddle/fluid/operators/sequence_softmax_op.cu +++ b/paddle/fluid/operators/sequence_ops/sequence_softmax_op.cu @@ -14,7 +14,7 @@ limitations under the License. */ #include #include // NOLINT -#include "paddle/fluid/operators/sequence_softmax_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_softmax_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_softmax_op.h b/paddle/fluid/operators/sequence_ops/sequence_softmax_op.h similarity index 100% rename from paddle/fluid/operators/sequence_softmax_op.h rename to paddle/fluid/operators/sequence_ops/sequence_softmax_op.h diff --git a/paddle/fluid/operators/sequence_unpad_op.cc b/paddle/fluid/operators/sequence_ops/sequence_unpad_op.cc similarity index 98% rename from paddle/fluid/operators/sequence_unpad_op.cc rename to paddle/fluid/operators/sequence_ops/sequence_unpad_op.cc index e633e378a226ece8adea2e150cc6c1e9aa874331..2cf508e0b707ecc986886e72e5d42fde3c84894d 100644 --- a/paddle/fluid/operators/sequence_unpad_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_unpad_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/sequence_unpad_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_unpad_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_unpad_op.cu b/paddle/fluid/operators/sequence_ops/sequence_unpad_op.cu similarity index 95% rename from paddle/fluid/operators/sequence_unpad_op.cu rename to paddle/fluid/operators/sequence_ops/sequence_unpad_op.cu index 75248372237ec2cb23122f6b16e64f6ce750ebf9..bf54f77f5b55cf7eb19873e352359c028207308a 100644 --- a/paddle/fluid/operators/sequence_unpad_op.cu +++ b/paddle/fluid/operators/sequence_ops/sequence_unpad_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/sequence_unpad_op.h" +#include "paddle/fluid/operators/sequence_ops/sequence_unpad_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/sequence_unpad_op.h b/paddle/fluid/operators/sequence_ops/sequence_unpad_op.h similarity index 100% rename from paddle/fluid/operators/sequence_unpad_op.h rename to paddle/fluid/operators/sequence_ops/sequence_unpad_op.h diff --git a/paddle/fluid/operators/softmax_op.h b/paddle/fluid/operators/softmax_op.h index 2fea8a65bc5141b11549ef400f11b54278be35f9..8eb5c7691efe930e9f79ad6a381cb290107d1a14 100644 --- a/paddle/fluid/operators/softmax_op.h +++ b/paddle/fluid/operators/softmax_op.h @@ -35,8 +35,10 @@ class SoftmaxKernel : public framework::OpKernel { Tensor X_2d = framework::ReshapeToMatrix(*X, rank - 1); Tensor Out_2d = framework::ReshapeToMatrix(*Out, rank - 1); -#ifdef ON_INFER - math::SoftmaxFunctor()( +#ifdef PADDLE_ON_INFERENCE + math::SoftmaxFunctor< + DeviceContext, T, + std::is_same::value>()( context.template device_context(), &X_2d, &Out_2d); #else math::SoftmaxFunctor()( diff --git a/paddle/fluid/operators/space_to_depth_op.cc b/paddle/fluid/operators/space_to_depth_op.cc index f109dd685c87ab1b0776a855bb5f510eab1f5526..c047bc78ee315201d25a7294b7dae7d766a6c968 100644 --- a/paddle/fluid/operators/space_to_depth_op.cc +++ b/paddle/fluid/operators/space_to_depth_op.cc @@ -86,7 +86,7 @@ class SpaceToDepthOpMaker : public framework::OpProtoAndCheckerMaker { .GreaterThan(1); AddComment(R"DOC( reorg operator used in Yolo v2. - The equation is: C2 = C1/blocksize * blocksize, W2 = W1 ∗ blocksize + offset % blocksize, H2 = H1 ∗ blocksize + offset / blocksize, + The equation is: C2 = C1/blocksize * blocksize, W2 = W1 * blocksize + offset % blocksize, H2 = H1 * blocksize + offset / blocksize, Reshape Input(X) into the shape according to Attr(blocksize). The data in Input(X) are unchanged. diff --git a/paddle/fluid/operators/stack_op.h b/paddle/fluid/operators/stack_op.h index d236c5b943704683c27b9b155c11ca9113edf514..3d132e4397e837442d406e1668126da9163129ef 100644 --- a/paddle/fluid/operators/stack_op.h +++ b/paddle/fluid/operators/stack_op.h @@ -147,20 +147,32 @@ class StackKernel : public framework::OpKernel { auto &dim = x[0]->dims(); for (auto i = 0; i < axis; ++i) pre *= dim[i]; for (auto i = axis; i < dim.size(); ++i) post *= dim[i]; - int total_num = pre * n * post; - auto &dev_ctx = ctx.template device_context(); #ifdef __NVCC__ + int total_num = pre * n * post; + auto &dev_ctx = ctx.template device_context(); + thrust::device_vector device_x_vec(x_datas); auto x_data_arr = device_x_vec.data().get(); -#else - auto x_data_arr = x_datas.data(); -#endif + StackFunctorForRange(dev_ctx, x_data_arr, y_data, total_num, n, post); -#ifdef __NVCC__ + // Wait() must be called because device_x_vec may be destructed before // kernel ends dev_ctx.Wait(); +#else + auto x_data_arr = x_datas.data(); + + size_t x_offset = 0; + size_t y_offset = 0; + for (int i = 0; i < pre; i++) { + for (int j = 0; j < n; j++) { + std::memcpy(y_data + y_offset, x_data_arr[j] + x_offset, + post * sizeof(T)); + y_offset += post; + } + x_offset += post; + } #endif } }; diff --git a/paddle/fluid/operators/strided_memcpy_test.cc b/paddle/fluid/operators/strided_memcpy_test.cc index a6ca82d16f216c98055fb92b4575a357b8b10348..3a450773a9d749eb3f73baa46e681e588e1fbd0f 100644 --- a/paddle/fluid/operators/strided_memcpy_test.cc +++ b/paddle/fluid/operators/strided_memcpy_test.cc @@ -87,13 +87,16 @@ TEST(StridedMemcpy, GPUCrop) { platform::CUDADeviceContext ctx(gpu0); - int* gpu_src = reinterpret_cast(memory::Alloc(gpu0, sizeof(src))); + auto src_allocation = memory::Alloc(gpu0, sizeof(src)); + + int* gpu_src = reinterpret_cast(src_allocation->ptr()); memory::Copy(gpu0, gpu_src, cpu, src, sizeof(src), ctx.stream()); framework::DDim src_stride({5, 1}); int dst[4]; - int* gpu_dst = reinterpret_cast(memory::Alloc(gpu0, sizeof(dst))); + auto dst_allocation = memory::Alloc(gpu0, sizeof(dst)); + int* gpu_dst = reinterpret_cast(dst_allocation->ptr()); framework::DDim dst_dim({2, 2}); framework::DDim dst_stride({2, 1}); @@ -108,9 +111,6 @@ TEST(StridedMemcpy, GPUCrop) { ASSERT_EQ(2, dst[1]); ASSERT_EQ(3, dst[2]); ASSERT_EQ(4, dst[3]); - - memory::Free(gpu0, gpu_dst); - memory::Free(gpu0, gpu_src); } TEST(StridedMemcpy, GPUConcat) { @@ -124,12 +124,13 @@ TEST(StridedMemcpy, GPUConcat) { platform::CUDAPlace gpu0(0); platform::CPUPlace cpu; platform::CUDADeviceContext ctx(gpu0); - - int* gpu_src = reinterpret_cast(memory::Alloc(gpu0, sizeof(src))); + auto gpu_src_allocation = memory::Alloc(gpu0, sizeof(src)); + int* gpu_src = reinterpret_cast(gpu_src_allocation->ptr()); memory::Copy(gpu0, gpu_src, cpu, src, sizeof(src), ctx.stream()); int dst[8]; - int* gpu_dst = reinterpret_cast(memory::Alloc(gpu0, sizeof(dst))); + auto gpu_dst_allocation = memory::Alloc(gpu0, sizeof(dst)); + int* gpu_dst = reinterpret_cast(gpu_dst_allocation->ptr()); framework::DDim src_stride({2, 1}); framework::DDim dst_dim({2, 2}); @@ -151,9 +152,6 @@ TEST(StridedMemcpy, GPUConcat) { for (size_t i = 0; i < sizeof(expect_dst) / sizeof(int); ++i) { ASSERT_EQ(expect_dst[i], dst[i]); } - - memory::Free(gpu0, gpu_dst); - memory::Free(gpu0, gpu_src); } #endif diff --git a/paddle/fluid/operators/tensorrt/CMakeLists.txt b/paddle/fluid/operators/tensorrt/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..eee0b90fbae216e804e62993313796e914fcef5a --- /dev/null +++ b/paddle/fluid/operators/tensorrt/CMakeLists.txt @@ -0,0 +1,5 @@ +op_library(tensorrt_engine_op DEPS tensorrt_engine tensorrt_converter) +file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(tensorrt_engine);\n") +nv_test(test_tensorrt_engine_op SRCS tensorrt_engine_op_test.cc + DEPS tensorrt_engine_op + analysis) diff --git a/paddle/fluid/operators/tensorrt_engine_op.cc b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.cc similarity index 96% rename from paddle/fluid/operators/tensorrt_engine_op.cc rename to paddle/fluid/operators/tensorrt/tensorrt_engine_op.cc index 41a5786fe8c3295390144732221280e152d0a15a..3cf2ce3c7ef87dcf75548f7d9c3a55d06ed765e8 100644 --- a/paddle/fluid/operators/tensorrt_engine_op.cc +++ b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.cc @@ -17,7 +17,7 @@ #include #include -#include "paddle/fluid/operators/tensorrt_engine_op.h" +#include "paddle/fluid/operators/tensorrt/tensorrt_engine_op.h" namespace paddle { diff --git a/paddle/fluid/operators/tensorrt_engine_op.cu.cc b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.cu.cc similarity index 93% rename from paddle/fluid/operators/tensorrt_engine_op.cu.cc rename to paddle/fluid/operators/tensorrt/tensorrt_engine_op.cu.cc index e1ddfde6d51ef719ca0b89cf286b176195ee682a..cbe1b426f65386e722a7b02ec1fdfdf75bfd770c 100644 --- a/paddle/fluid/operators/tensorrt_engine_op.cu.cc +++ b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/tensorrt_engine_op.h" +#include "paddle/fluid/operators/tensorrt/tensorrt_engine_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/tensorrt_engine_op.h b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h similarity index 100% rename from paddle/fluid/operators/tensorrt_engine_op.h rename to paddle/fluid/operators/tensorrt/tensorrt_engine_op.h diff --git a/paddle/fluid/operators/tensorrt_engine_op_test.cc b/paddle/fluid/operators/tensorrt/tensorrt_engine_op_test.cc similarity index 99% rename from paddle/fluid/operators/tensorrt_engine_op_test.cc rename to paddle/fluid/operators/tensorrt/tensorrt_engine_op_test.cc index e21101e8d12f210af08284dbcebe5c14c1af6dd3..56bdd6c2f2801967829f2baf889b5517a1d9d8d9 100644 --- a/paddle/fluid/operators/tensorrt_engine_op_test.cc +++ b/paddle/fluid/operators/tensorrt/tensorrt_engine_op_test.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/tensorrt_engine_op.h" +#include "paddle/fluid/operators/tensorrt/tensorrt_engine_op.h" #include #include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/framework/lod_tensor.h" diff --git a/paddle/fluid/platform/CMakeLists.txt b/paddle/fluid/platform/CMakeLists.txt index 5af8af640e43a5b2e5ee9856f09f66a9fdf4463c..0d0613e1a4364e300640b62687c8a045e40b9ca9 100644 --- a/paddle/fluid/platform/CMakeLists.txt +++ b/paddle/fluid/platform/CMakeLists.txt @@ -73,3 +73,4 @@ cc_test(float16_test SRCS float16_test.cc DEPS lod_tensor) IF(WITH_GPU) nv_test(cuda_helper_test SRCS cuda_helper_test.cu) ENDIF() +nv_library(cuda_device_guard SRCS cuda_device_guard.cc DEPS gpu_info) diff --git a/paddle/fluid/platform/cpu_info.cc b/paddle/fluid/platform/cpu_info.cc index b5f472d20f40fa182a4aa55ff384b0954e4ba9e3..d466f28d1ea0a8327f8d7a45c3e55c5aacd61544 100644 --- a/paddle/fluid/platform/cpu_info.cc +++ b/paddle/fluid/platform/cpu_info.cc @@ -56,10 +56,17 @@ DEFINE_double( "Default use 50% of CPU memory as the pinned_memory for PaddlePaddle," "reserve the rest for page tables, etc"); +// If use_pinned_memory is true, CPUAllocator calls mlock, which +// returns pinned and locked memory as staging areas for data exchange +// between host and device. Allocates too much would reduce the amount +// of memory available to the system for paging. So, by default, we +// should set false to use_pinned_memory. +DEFINE_bool(use_pinned_memory, true, "If set, allocate cpu pinned memory."); + namespace paddle { namespace platform { -inline size_t CpuTotalPhysicalMemory() { +size_t CpuTotalPhysicalMemory() { #ifdef __APPLE__ int mib[2]; mib[0] = CTL_HW; diff --git a/paddle/fluid/platform/cpu_info.h b/paddle/fluid/platform/cpu_info.h index 6810a1651a14cdb2080af846b21cad242b70bf35..fd31ef77b46d5b5b641983a0421da31914c87c18 100644 --- a/paddle/fluid/platform/cpu_info.h +++ b/paddle/fluid/platform/cpu_info.h @@ -19,6 +19,8 @@ limitations under the License. */ namespace paddle { namespace platform { +size_t CpuTotalPhysicalMemory(); + //! Get the maximum allocation size for a machine. size_t CpuMaxAllocSize(); diff --git a/paddle/fluid/platform/cuda_device_guard.cc b/paddle/fluid/platform/cuda_device_guard.cc new file mode 100644 index 0000000000000000000000000000000000000000..8582ec9f604f96b244a0f2d650aa8d669d6fc66c --- /dev/null +++ b/paddle/fluid/platform/cuda_device_guard.cc @@ -0,0 +1,22 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/platform/cuda_device_guard.h" + +namespace paddle { +namespace platform { +// Even this source file does not contains any code, it is better to keep this +// source file for cmake dependency. +} // namespace platform +} // namespace paddle diff --git a/paddle/fluid/platform/cuda_device_guard.h b/paddle/fluid/platform/cuda_device_guard.h new file mode 100644 index 0000000000000000000000000000000000000000..a85ebf4b8136630712d39d98e2341ee919cf6e45 --- /dev/null +++ b/paddle/fluid/platform/cuda_device_guard.h @@ -0,0 +1,45 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "paddle/fluid/platform/gpu_info.h" + +namespace paddle { +namespace platform { + +class CUDADeviceGuard { + public: + explicit inline CUDADeviceGuard(int dev_id) { + int prev_id = platform::GetCurrentDeviceId(); + if (prev_id != dev_id) { + prev_id_ = prev_id; + platform::SetDeviceId(dev_id); + } + } + + inline ~CUDADeviceGuard() { + if (prev_id_ != -1) { + platform::SetDeviceId(prev_id_); + } + } + + CUDADeviceGuard(const CUDADeviceGuard& o) = delete; + CUDADeviceGuard& operator=(const CUDADeviceGuard& o) = delete; + + private: + int prev_id_{-1}; +}; + +} // namespace platform +} // namespace paddle diff --git a/paddle/fluid/platform/cudnn_helper.h b/paddle/fluid/platform/cudnn_helper.h index f174a7bc4867634df01367cc0091b0ec55849985..682b0c0ff39b71f08fe1a8b0c9c7b7d386b67738 100644 --- a/paddle/fluid/platform/cudnn_helper.h +++ b/paddle/fluid/platform/cudnn_helper.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include #include "paddle/fluid/framework/operator.h" @@ -81,6 +82,16 @@ enum class PoolingMode { kAverageInclusive, }; +enum ActivationMode { + kNone, // activation identity + kSigmoid, + kRelu, + kRelu6, + kReluX, + kTanh, + kBandPass, +}; + #if CUDNN_VERSION < 6000 #pragma message "CUDNN version under 6.0 is supported at best effort." #pragma message "We strongly encourage you to move to 6.0 and above." @@ -120,6 +131,26 @@ inline cudnnPoolingMode_t GetPoolingMode(const PoolingMode& mode) { } #endif // CUDNN_VERSION < 6000 +inline ActivationMode StringToActivationMode(const std::string& str) { + if (str == "identity") { + return ActivationMode::kNone; + } else if (str == "sigmoid") { + return ActivationMode::kSigmoid; + } else if (str == "relu") { + return ActivationMode::kRelu; + } else if (str == "relu6") { + return ActivationMode::kRelu6; + } else if (str == "relux") { + return ActivationMode::kReluX; + } else if (str == "tanh") { + return ActivationMode::kTanh; + } else if (str == "bandpass") { + return ActivationMode::kBandPass; + } else { + PADDLE_THROW("Unknown activation string: %s", str); + } +} + template class CudnnDataType; @@ -368,6 +399,58 @@ class ScopedSpatialTransformerDescriptor { DISABLE_COPY_AND_ASSIGN(ScopedSpatialTransformerDescriptor); }; +class ScopedActivationDescriptor { + public: + ScopedActivationDescriptor() { + PADDLE_ENFORCE(dynload::cudnnCreateActivationDescriptor(&desc_)); + } + ~ScopedActivationDescriptor() { + PADDLE_ENFORCE(dynload::cudnnDestroyActivationDescriptor(desc_)); + } + + template + inline cudnnActivationDescriptor_t descriptor( + const std::string& act, double value_max = static_cast(0.)) { + double relu_ceiling = 0.0; + ActivationMode activation_mode = StringToActivationMode(act); + cudnnActivationMode_t mode; + switch (activation_mode) { +#if CUDNN_VERSION >= 7100 + case ActivationMode::kNone: + mode = CUDNN_ACTIVATION_IDENTITY; + break; +#endif + case ActivationMode::kRelu6: + relu_ceiling = 6.0; + mode = CUDNN_ACTIVATION_CLIPPED_RELU; + break; + case ActivationMode::kReluX: + relu_ceiling = value_max; + mode = CUDNN_ACTIVATION_CLIPPED_RELU; + break; + case ActivationMode::kRelu: + mode = CUDNN_ACTIVATION_RELU; + break; + case ActivationMode::kSigmoid: + mode = CUDNN_ACTIVATION_SIGMOID; + break; + case ActivationMode::kTanh: + mode = CUDNN_ACTIVATION_TANH; + break; + default: + PADDLE_THROW("unrecognized activation mode: %d .", + static_cast(activation_mode)); + } + CUDNN_ENFORCE(dynload::cudnnSetActivationDescriptor( + desc_, mode, CUDNN_NOT_PROPAGATE_NAN, relu_ceiling)); + return desc_; + } + + private: + cudnnActivationDescriptor_t desc_; + DISABLE_COPY_AND_ASSIGN(ScopedActivationDescriptor); +}; + inline bool CanCUDNNBeUsed(const framework::ExecutionContext& ctx) { bool use_cudnn = ctx.Attr("use_cudnn"); use_cudnn &= paddle::platform::is_gpu_place(ctx.GetPlace()); diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc index f5541014af5170488efbb10f6e7e331ef015a848..d0a108f905f46135bcd2b68be19ab396ab897272 100644 --- a/paddle/fluid/platform/device_context.cc +++ b/paddle/fluid/platform/device_context.cc @@ -9,7 +9,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/platform/device_context.h" - #include #include #include @@ -18,6 +17,7 @@ limitations under the License. */ #include "paddle/fluid/memory/memory.h" #ifdef PADDLE_WITH_CUDA #include "paddle/fluid/framework/rw_lock.h" +#include "paddle/fluid/platform/cuda_device_guard.h" #endif namespace paddle { @@ -120,11 +120,15 @@ class EigenCudaStreamDevice : public Eigen::StreamInterface { } void* allocate(size_t num_bytes) const override { - return paddle::memory::Alloc(place_, num_bytes); + auto buf = paddle::memory::Alloc(place_, num_bytes, + memory::Allocator::kScratchpad); + void* retv = buf->ptr(); + allocations_[buf->ptr()] = std::move(buf); + return retv; } void deallocate(void* buffer) const override { - paddle::memory::Free(place_, buffer); + allocations_.erase(allocations_.find(buffer)); } void* scratchpad() const override { @@ -151,37 +155,35 @@ class EigenCudaStreamDevice : public Eigen::StreamInterface { const cudaDeviceProp* device_prop_; // not owned; mutable void* scratch_; mutable unsigned int* semaphore_; + mutable std::unordered_map allocations_; }; CudnnHolder::CudnnHolder(const cudaStream_t* stream, const CUDAPlace& place) - : workspace_(nullptr), workspace_len_(0), stream_(stream), place_(place) { + : workspace_(nullptr), stream_(stream), place_(place) { PADDLE_ENFORCE(dynload::cudnnCreate(&cudnn_handle_)); PADDLE_ENFORCE(dynload::cudnnSetStream(cudnn_handle_, *stream_)); } CudnnHolder::~CudnnHolder() { PADDLE_ENFORCE(dynload::cudnnDestroy(cudnn_handle_)); - if (workspace_ != nullptr) { - paddle::memory::Free(place_, workspace_); - } } void CudnnHolder::ReallocateWorkspace(size_t required_workspace_len) { - if (required_workspace_len <= workspace_len_) { + if (required_workspace_len <= WorkspaceSize()) { return; } if (workspace_ != nullptr) { // Maybe someone is using the current workspace PADDLE_ENFORCE(cudaStreamSynchronize(*stream_)); - paddle::memory::Free(place_, workspace_); + workspace_.reset(); } - workspace_ = paddle::memory::Alloc(place_, required_workspace_len); - workspace_len_ = required_workspace_len; + workspace_ = paddle::memory::Alloc(place_, required_workspace_len, + paddle::memory::Allocator::kScratchpad); } CUDADeviceContext::CUDADeviceContext(CUDAPlace place) : place_(place), cudnn_holder_(nullptr) { - SetDeviceId(place_.device); + CUDADeviceGuard guard(place_.device); compute_capability_ = GetCUDAComputeCapability(place_.device); multi_process_ = GetCUDAMultiProcessors(place_.device); max_threads_per_mp_ = GetCUDAMaxThreadsPerMultiProcessor(place_.device); diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h index df248f9bb15591d5015ad01278797ec7e31ef9d1..9a9018cdea6a9dcdebe20fd0faef8ff3d4e0e2a1 100644 --- a/paddle/fluid/platform/device_context.h +++ b/paddle/fluid/platform/device_context.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include #include - +#include "paddle/fluid/memory/malloc.h" #ifdef PADDLE_WITH_CUDA #include "paddle/fluid/platform/dynload/cublas.h" #include "paddle/fluid/platform/dynload/cudnn.h" @@ -85,17 +85,32 @@ class CudnnHolder { template void RunFuncImpl(Callback&& cudnn_func, size_t required_workspace_len) { - if (required_workspace_len > workspace_len_) { + if (required_workspace_len > WorkspaceSize()) { ReallocateWorkspace(required_workspace_len); } - cudnn_func(workspace_); + cudnn_func(WorkspacePtr()); + } + + inline void* WorkspacePtr() { + if (workspace_) { + return workspace_->ptr(); + } else { + return nullptr; + } + } + + inline size_t WorkspaceSize() { + if (workspace_) { + return workspace_->size(); + } else { + return 0; + } } std::mutex& Mutex() { return mtx_; } cudnnHandle_t cudnn_handle_; - void* workspace_; - size_t workspace_len_; + memory::AllocationPtr workspace_; const cudaStream_t* stream_; // not owned; const CUDAPlace place_; diff --git a/paddle/fluid/platform/dynload/cudnn.h b/paddle/fluid/platform/dynload/cudnn.h index db2e28bc911a05a077c786c57c4f0e4c34bd61f7..065b940b9ca6fb7522790d2145d1a93469169461 100644 --- a/paddle/fluid/platform/dynload/cudnn.h +++ b/paddle/fluid/platform/dynload/cudnn.h @@ -152,14 +152,15 @@ CUDNN_DNN_ROUTINE_EACH_R5(DECLARE_DYNAMIC_LOAD_CUDNN_WRAP) #endif #if CUDNN_VERSION >= 7001 -#define CUDNN_DNN_ROUTINE_EACH_R7(__macro) \ - __macro(cudnnSetConvolutionGroupCount); \ - __macro(cudnnSetConvolutionMathType); \ - __macro(cudnnCreateCTCLossDescriptor); \ - __macro(cudnnDestroyCTCLossDescriptor); \ - __macro(cudnnGetCTCLossDescriptor); \ - __macro(cudnnSetCTCLossDescriptor); \ - __macro(cudnnGetCTCLossWorkspaceSize); \ +#define CUDNN_DNN_ROUTINE_EACH_R7(__macro) \ + __macro(cudnnSetConvolutionGroupCount); \ + __macro(cudnnSetConvolutionMathType); \ + __macro(cudnnConvolutionBiasActivationForward); \ + __macro(cudnnCreateCTCLossDescriptor); \ + __macro(cudnnDestroyCTCLossDescriptor); \ + __macro(cudnnGetCTCLossDescriptor); \ + __macro(cudnnSetCTCLossDescriptor); \ + __macro(cudnnGetCTCLossWorkspaceSize); \ __macro(cudnnCTCLoss); CUDNN_DNN_ROUTINE_EACH_R7(DECLARE_DYNAMIC_LOAD_CUDNN_WRAP) #endif diff --git a/paddle/fluid/platform/init.cc b/paddle/fluid/platform/init.cc index 84d1b852cbe5a334ddfc27d404e879b178e341fe..9f7aa556988e8ab0ca87d0e7212fe27a209f6a32 100644 --- a/paddle/fluid/platform/init.cc +++ b/paddle/fluid/platform/init.cc @@ -19,6 +19,9 @@ limitations under the License. */ #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/platform/cpu_helper.h" #include "paddle/fluid/platform/cpu_info.h" +#ifdef PADDLE_WITH_CUDA +#include "paddle/fluid/platform/cuda_device_guard.h" +#endif #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/init.h" #include "paddle/fluid/platform/place.h" @@ -64,7 +67,7 @@ void InitP2P(std::vector devices) { LOG(WARNING) << "Cannot enable P2P access from " << devices[i] << " to " << devices[j]; } else { - cudaSetDevice(devices[i]); + platform::CUDADeviceGuard guard(devices[i]); cudaDeviceEnablePeerAccess(devices[j], 0); } } diff --git a/paddle/fluid/platform/lock_guard_ptr.h b/paddle/fluid/platform/lock_guard_ptr.h new file mode 100644 index 0000000000000000000000000000000000000000..bff24e74a7070b31d6385b2d5924bdc62d7219c9 --- /dev/null +++ b/paddle/fluid/platform/lock_guard_ptr.h @@ -0,0 +1,55 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include +#include // NOLINT +namespace paddle { +namespace platform { + +/** + * LockGuard for std::unique_ptr. It will do nothing when guarded ptr + * is nullptr. + * + * The advantage of using `LockGuardPtr` instead of + * std::unique> is this type is totally a stack + * variable. There is no heap allocation at all. + */ +template +class LockGuardPtr { + public: + explicit LockGuardPtr(std::unique_ptr& lock_ptr) // NOLINT + : lock_(lock_ptr.get()) { + if (lock_) { + lock_->lock(); + } + } + ~LockGuardPtr() { + if (lock_) { + lock_->unlock(); + } + } + + LockGuardPtr(const LockGuardPtr&) = delete; + LockGuardPtr& operator=(const LockGuardPtr&) = delete; + LockGuardPtr(LockGuardPtr&&) = delete; + LockGuardPtr& operator=(LockGuardPtr&&) = delete; + + private: + LockType* lock_; +}; + +} // namespace platform +} // namespace paddle diff --git a/paddle/fluid/platform/place.h b/paddle/fluid/platform/place.h index e3ee504f3d042d6a99036e34507c4c8bee306750..a095d4929ec2130b4af48d32bf016d9fe108b418 100644 --- a/paddle/fluid/platform/place.h +++ b/paddle/fluid/platform/place.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include #include diff --git a/paddle/fluid/platform/transform_test.cu b/paddle/fluid/platform/transform_test.cu index f65d1f60100edc85ba9745ed36f26a0ed160d80f..23f5865971246b2862f859885f5bfccd926b9697 100644 --- a/paddle/fluid/platform/transform_test.cu +++ b/paddle/fluid/platform/transform_test.cu @@ -18,8 +18,6 @@ limitations under the License. */ #include "paddle/fluid/platform/hostdevice.h" #include "paddle/fluid/platform/transform.h" -namespace { - template class Scale { public: @@ -36,10 +34,7 @@ class Multiply { HOSTDEVICE T operator()(const T& a, const T& b) const { return a * b; } }; -} // namespace - using paddle::memory::Alloc; -using paddle::memory::Free; using paddle::memory::Copy; using paddle::platform::CPUPlace; @@ -63,13 +58,13 @@ TEST(Transform, GPUUnary) { CUDAPlace gpu0(0); CUDADeviceContext ctx(gpu0); float cpu_buf[4] = {0.1, 0.2, 0.3, 0.4}; - float* gpu_buf = static_cast(Alloc(gpu0, sizeof(float) * 4)); + auto gpu_allocation = Alloc(gpu0, sizeof(float) * 4); + float* gpu_buf = static_cast(gpu_allocation->ptr()); Copy(gpu0, gpu_buf, CPUPlace(), cpu_buf, sizeof(cpu_buf), ctx.stream()); Transform trans; trans(ctx, gpu_buf, gpu_buf + 4, gpu_buf, Scale(10)); ctx.Wait(); Copy(CPUPlace(), cpu_buf, gpu0, gpu_buf, sizeof(cpu_buf), ctx.stream()); - Free(gpu0, gpu_buf); for (int i = 0; i < 4; ++i) { ASSERT_NEAR(cpu_buf[i], static_cast(i + 1), 1e-5); } @@ -89,13 +84,13 @@ TEST(Transform, GPUBinary) { int buf[4] = {1, 2, 3, 4}; CUDAPlace gpu0(0); CUDADeviceContext ctx(gpu0); - int* gpu_buf = static_cast(Alloc(gpu0, sizeof(buf))); + auto gpu_allocation = Alloc(gpu0, sizeof(buf)); + int* gpu_buf = static_cast(gpu_allocation->ptr()); Copy(gpu0, gpu_buf, CPUPlace(), buf, sizeof(buf), ctx.stream()); Transform trans; trans(ctx, gpu_buf, gpu_buf + 4, gpu_buf, gpu_buf, Multiply()); ctx.Wait(); Copy(CPUPlace(), buf, gpu0, gpu_buf, sizeof(buf), ctx.stream()); - Free(gpu0, gpu_buf); for (int i = 0; i < 4; ++i) { ASSERT_EQ((i + 1) * (i + 1), buf[i]); } diff --git a/paddle/fluid/platform/variant.h b/paddle/fluid/platform/variant.h index 42bff087d2bda90889a106bc5f4fb32bccaa8a9b..e9aef621acea44b0dab7a687c13223617d5603c0 100644 --- a/paddle/fluid/platform/variant.h +++ b/paddle/fluid/platform/variant.h @@ -41,6 +41,7 @@ limitations under the License. */ #include #include #include +#include #include // some platform-independent defintion diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt index 6afa53cd36dee2004ada707a52995ce33b3650e9..6417da077e63dd78857d29ddd3484c646849daf4 100644 --- a/paddle/fluid/pybind/CMakeLists.txt +++ b/paddle/fluid/pybind/CMakeLists.txt @@ -10,12 +10,12 @@ if(WITH_PYTHON) hip_library(paddle_pybind SHARED SRCS ${PYBIND_SRCS} DEPS ${PYBIND_DEPS} - ${GLOB_OP_LIB}) + ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS}) else() cc_library(paddle_pybind SHARED SRCS ${PYBIND_SRCS} DEPS ${PYBIND_DEPS} - ${GLOB_OP_LIB}) + ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS}) if(NOT APPLE AND NOT ANDROID AND NOT WIN32) target_link_libraries(paddle_pybind rt) endif(NOT APPLE AND NOT ANDROID AND NOT WIN32) diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 0d059d8aea756fbf85932d4d918ba7f77b772ccd..8ff6f6c85ace4bdfb14a2e9c82b1e07d01fc0f4c 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -43,6 +43,7 @@ limitations under the License. */ #include "paddle/fluid/framework/reader.h" #include "paddle/fluid/framework/selected_rows.h" #include "paddle/fluid/framework/version.h" +#include "paddle/fluid/memory/allocation/allocator_strategy.h" #include "paddle/fluid/operators/activation_op.h" #include "paddle/fluid/operators/reader/lod_tensor_blocking_queue.h" #include "paddle/fluid/platform/enforce.h" @@ -94,6 +95,7 @@ bool IsCompiledWithDIST() { } PYBIND11_PLUGIN(core) { + paddle::memory::allocation::UseAllocatorStrategyGFlag(); py::module m("core", "C++ core of PaddlePaddle"); // using framework in this function. Since it is inside a function, it will diff --git a/paddle/fluid/pybind/tensor_py.h b/paddle/fluid/pybind/tensor_py.h index 51614a6a3dd2f7f830cf533fc365b56a99d3b918..b39323f843f8dbf5a7e4bac841c8cb8ed7efdc07 100644 --- a/paddle/fluid/pybind/tensor_py.h +++ b/paddle/fluid/pybind/tensor_py.h @@ -21,6 +21,7 @@ limitations under the License. */ #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/float16.h" +#include "pybind11/common.h" #include "pybind11/numpy.h" #include "pybind11/pybind11.h" @@ -57,7 +58,8 @@ struct CastToPyBufferImpl { prod *= dims_outside[i - 1]; } framework::Tensor dst_tensor; - if (paddle::platform::is_gpu_place(tensor.place())) { + bool is_gpu = paddle::platform::is_gpu_place(tensor.place()); + if (is_gpu) { #ifdef PADDLE_WITH_CUDA auto *src_ptr = static_cast(tensor.data()); auto *dst_ptr = static_cast(dst_tensor.mutable_data( @@ -73,16 +75,44 @@ struct CastToPyBufferImpl { dst_tensor = tensor; } - if (std::type_index(typeid(CUR_TYPE)) == - std::type_index(typeid(platform::float16))) { - return pybind11::buffer_info( - dst_tensor.data(), sizeof(CUR_TYPE), - "e", /* np.dtype('e') == np.float16 */ - (size_t)framework::arity(dst_tensor.dims()), dims_outside, strides); + std::string dtype = std::type_index(typeid(CUR_TYPE)) == + std::type_index(typeid(platform::float16)) + ? std::string("e") // np.dtype('e') == np.float16 + : pybind11::format_descriptor::format(); + + if (is_gpu) { + // manually construct a py_buffer if is_gpu since gpu data is copied + // into CPU. + // TODO(yy): Is these following code memleak? + Py_buffer *py_buffer = + reinterpret_cast(malloc(sizeof(Py_buffer))); + py_buffer->format = strdup(dtype.c_str()); + py_buffer->itemsize = sizeof(CUR_TYPE); + py_buffer->ndim = framework::arity(dst_tensor.dims()); + py_buffer->len = tensor.numel(); + py_buffer->strides = reinterpret_cast( + malloc(sizeof(Py_ssize_t) * strides.size())); + for (size_t i = 0; i < strides.size(); ++i) { + py_buffer->strides[i] = strides[i]; + } + + py_buffer->shape = reinterpret_cast( + malloc(sizeof(Py_ssize_t) * tensor.dims().size())); + for (int i = 0; i < tensor.dims().size(); ++i) { + py_buffer->shape[i] = tensor.dims()[i]; + } + + py_buffer->readonly = false; + py_buffer->suboffsets = nullptr; + py_buffer->obj = nullptr; + py_buffer->buf = + malloc(static_cast(py_buffer->len * py_buffer->itemsize)); + memcpy(py_buffer->buf, dst_tensor.data(), + static_cast(py_buffer->len * py_buffer->itemsize)); + return pybind11::buffer_info(py_buffer, true); } else { return pybind11::buffer_info( - dst_tensor.data(), sizeof(CUR_TYPE), - pybind11::format_descriptor::format(), + dst_tensor.data(), sizeof(CUR_TYPE), dtype, (size_t)framework::arity(dst_tensor.dims()), dims_outside, strides); } } else { @@ -112,17 +142,16 @@ T TensorGetElement(const framework::Tensor &self, size_t offset) { } } -// TODO(dzhwinter) : fix the redundent Tensor allocate and free +// TODO(dzhwinter) : fix the redundant Tensor allocate and free template void TensorSetElement(framework::Tensor *self, size_t offset, T elem) { if (platform::is_gpu_place(self->place())) { - std::shared_ptr dst(new framework::Tensor); - framework::TensorCopySync(*self, platform::CPUPlace(), dst.get()); - dst->data()[offset] = elem; - framework::TensorCopySync(*dst.get(), self->place(), self); - + framework::Tensor dst; + framework::TensorCopySync(*self, platform::CPUPlace(), &dst); + dst.mutable_data(platform::CPUPlace())[offset] = elem; + framework::TensorCopySync(dst, self->place(), self); } else if (platform::is_cpu_place(self->place())) { - self->data()[offset] = elem; + self->mutable_data(self->place())[offset] = elem; } } diff --git a/paddle/testing/paddle_gtest_main.cc b/paddle/testing/paddle_gtest_main.cc index cfea2059c3ce20fb44732d990e9708ad6f8d81a1..598f435461b40ed07e97c0adde79dc1014b60a2e 100644 --- a/paddle/testing/paddle_gtest_main.cc +++ b/paddle/testing/paddle_gtest_main.cc @@ -16,10 +16,12 @@ limitations under the License. */ #include "gflags/gflags.h" #include "gtest/gtest.h" +#include "paddle/fluid/memory/allocation/allocator_strategy.h" #include "paddle/fluid/memory/memory.h" #include "paddle/fluid/platform/init.h" int main(int argc, char** argv) { + paddle::memory::allocation::UseAllocatorStrategyGFlag(); testing::InitGoogleTest(&argc, argv); std::vector new_argv; std::string gflags_env; @@ -28,21 +30,16 @@ int main(int argc, char** argv) { } #ifdef PADDLE_WITH_CUDA new_argv.push_back( - strdup("--tryfromenv=fraction_of_gpu_memory_to_use,use_pinned_memory")); + strdup("--tryfromenv=fraction_of_gpu_memory_to_use,allocator_strategy")); #else - new_argv.push_back(strdup( - "--tryfromenv=use_pinned_memory,use_mkldnn,initial_cpu_memory_in_mb")); + new_argv.push_back( + strdup("--tryfromenv=use_pinned_memory,use_mkldnn,initial_cpu_memory_in_" + "mb,allocator_strategy")); new_argv.push_back(strdup("--undefok=use_mkldnn,initial_cpu_memory_in_mb")); #endif int new_argc = static_cast(new_argv.size()); char** new_argv_address = new_argv.data(); google::ParseCommandLineFlags(&new_argc, &new_argv_address, false); - paddle::memory::Used(paddle::platform::CPUPlace()); - -#ifdef PADDLE_WITH_CUDA - paddle::memory::Used(paddle::platform::CUDAPlace(0)); -#endif - paddle::framework::InitDevices(true); return RUN_ALL_TESTS(); } diff --git a/python/paddle/dataset/wmt16.py b/python/paddle/dataset/wmt16.py index 4a0c1f8cb663ec105030ac2c5a70c5f906cf6d12..aa66696fae7d3adb44511417edf4a92b82a9151b 100644 --- a/python/paddle/dataset/wmt16.py +++ b/python/paddle/dataset/wmt16.py @@ -78,7 +78,8 @@ def __build_dict(tar_file, dict_size, save_path, lang): six.iteritems(word_dict), key=lambda x: x[1], reverse=True)): if idx + 3 == dict_size: break - fout.write("%s\n" % (cpt.to_bytes(word[0]))) + fout.write(word[0].encode('utf-8')) + fout.write('\n') def __load_dict(tar_file, dict_size, lang, reverse=False): diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py index 81299189160739e54f39348ad327ff2edd2ac0e0..f2f49f813a1840897024d851f2810786a153fb49 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/fluid/__init__.py @@ -115,8 +115,9 @@ def __bootstrap__(): 'use_pinned_memory', 'check_nan_inf', 'benchmark', 'eager_delete_scope', 'use_mkldnn', 'use_ngraph', 'initial_cpu_memory_in_mb', 'init_allocated_mem', 'free_idle_memory', 'paddle_num_threads', - 'dist_threadpool_size', 'eager_delete_tensor_gb', - 'reader_queue_speed_test_mode' + "dist_threadpool_size", 'cpu_deterministic', 'eager_delete_tensor_gb', + 'allocator_strategy', 'reader_queue_speed_test_mode', + 'print_sub_graph_dir' ] if os.name != 'nt': read_env_flags.append('warpctc_dir') diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py index 96b6705e26c0f8d8d223e9020192a8f330c2c727..3f17400a1432bb799e09accf2600ab6ec85e05a7 100644 --- a/python/paddle/fluid/layers/detection.py +++ b/python/paddle/fluid/layers/detection.py @@ -283,11 +283,7 @@ def detection_output(loc, prior_box_var=prior_box_var, target_box=loc, code_type='decode_center_size') - compile_shape = scores.shape - run_shape = nn.shape(scores) - scores = nn.flatten(x=scores, axis=2) scores = nn.softmax(input=scores) - scores = nn.reshape(x=scores, shape=compile_shape, actual_shape=run_shape) scores = nn.transpose(scores, perm=[0, 2, 1]) scores.stop_gradient = True nmsed_outs = helper.create_variable_for_type_inference( diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index af96f5de4f055ea49fee03612cfc1a1b3c6af2f8..99acd7e30884b46cb14e27ac4569af82af311a3a 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -5788,7 +5788,7 @@ def image_resize(input, Examples: .. code-block:: python - out = fluid.layers.image_resize(input, out_shape=[12, 12]) + out = fluid.layers.image_resize(input, out_shape=[12, 12], resample="NEAREST") """ resample_methods = { 'BILINEAR': 'bilinear', @@ -5891,6 +5891,11 @@ def resize_bilinear(input, Returns: ${out_comment}. + + Examples: + .. code-block:: python + + out = fluid.layers.resize_bilinear(input, out_shape=[12, 12]) """ return image_resize(input, out_shape, scale, name, 'BILINEAR', actual_shape) @@ -5937,6 +5942,11 @@ def resize_nearest(input, Returns: ${out_comment}. + + Examples: + .. code-block:: python + + out = fluid.layers.resize_nearest(input, out_shape=[12, 12]) """ return image_resize(input, out_shape, scale, name, 'NEAREST', actual_shape) @@ -6949,8 +6959,15 @@ def brelu(x, t_min=0.0, t_max=24.0, name=None): t_max(${t_max_type}|24.0): ${t_max_comment} name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. - Returns: + Returns: output(${out_type}): ${out_comment} + + Examples: + + .. code-block:: python + + x = fluid.layers.data(name="x", shape=[2,3,16,16], dtype="float32") + y = fluid.layers.brelu(x, t_min=1.0, t_max=20.0) """ helper = LayerHelper('brelu', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) @@ -6972,8 +6989,15 @@ def leaky_relu(x, alpha=0.02, name=None): alpha(${alpha_type}|0.02): ${alpha_comment} name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. - Returns: + Returns: output(${out_type}): ${out_comment} + + Examples: + + .. code-block:: python + + x = fluid.layers.data(name="x", shape=[2,3,16,16], dtype="float32") + y = fluid.layers.leaky_relu(x, alpha=0.01) """ helper = LayerHelper('leaky_relu', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) @@ -6994,8 +7018,15 @@ def soft_relu(x, threshold=40.0, name=None): threshold(${threshold_type}|40.0): ${threshold_comment} name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. - Returns: + Returns: output(${out_type}): ${out_comment} + + Examples: + + .. code-block:: python + + x = fluid.layers.data(name="x", shape=[2,3,16,16], dtype="float32") + y = fluid.layers.soft_relu(x, threshold=20.0) """ helper = LayerHelper('soft_relu', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) diff --git a/python/paddle/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/fluid/tests/book/test_label_semantic_roles.py index 42ab9b231153f7ede7b8f8dd4e754f8cc92f65fe..3d40b762281ae09d3214f2d2bc496c4966984866 100644 --- a/python/paddle/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/fluid/tests/book/test_label_semantic_roles.py @@ -38,7 +38,7 @@ depth = 8 mix_hidden_lr = 1e-3 IS_SPARSE = True -PASS_NUM = 1 +PASS_NUM = 2 BATCH_SIZE = 10 embedding_name = 'emb' @@ -196,7 +196,7 @@ def train(use_cuda, save_dirname=None, is_local=True): print("second per batch: " + str((time.time( ) - start_time) / batch_id)) # Set the threshold low to speed up the CI test - if float(cost) < 60.0: + if float(cost) < 80.0: if save_dirname is not None: # TODO(liuyiqun): Change the target to crf_decode fluid.io.save_inference_model(save_dirname, [ @@ -208,6 +208,10 @@ def train(use_cuda, save_dirname=None, is_local=True): batch_id = batch_id + 1 + raise RuntimeError( + "This model should save_inference_model and return, but not reach here, please check!" + ) + if is_local: train_loop(fluid.default_main_program()) else: diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_fusion_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_fusion_op.py new file mode 100644 index 0000000000000000000000000000000000000000..9f3f2f348166864be9583855fcd1949fd4ac818c --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_conv2d_fusion_op.py @@ -0,0 +1,158 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import unittest +import numpy as np + +import paddle.fluid.core as core +from op_test import OpTest + +from test_conv2d_op import conv2d_forward_naive + + +class TestConv2dFusionOp(OpTest): + def setUp(self): + self.op_type = "conv2d_fusion" + self.exhaustive_search = False + self.data_format = "AnyLayout" + self.dtype = np.float32 + self.activation = 'relu' + self.add_bias = True + self.add_residual_data = True + + self.init_group() + self.init_dilation() + self.init_test_case() + self.init_bias_residual() + self.init_activation() + self.set_search_method() + + conv2d_param = { + 'stride': self.stride, + 'pad': self.pad, + 'dilation': self.dilations + } + + input = np.random.random(self.input_size).astype(self.dtype) + filter = np.random.random(self.filter_size).astype(self.dtype) + + output = conv2d_forward_naive(input, filter, self.groups, + conv2d_param).astype(self.dtype) + + self.inputs = { + 'Input': OpTest.np_dtype_to_fluid_dtype(input), + 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) + } + + if self.add_residual_data: + residual_data = np.random.random(output.shape).astype(self.dtype) + self.inputs['ResidualData'] = OpTest.np_dtype_to_fluid_dtype( + residual_data) + output += residual_data + + if self.add_bias: + bias = np.random.random(self.filter_size[0]).astype(self.dtype) + self.inputs['Bias'] = OpTest.np_dtype_to_fluid_dtype(bias) + output = output + bias.reshape((1, bias.size, 1, 1)) + + assert self.activation in ['relu', 'identity'] + if self.activation == 'relu': + output = np.maximum(output, 0) + + self.attrs = { + 'strides': self.stride, + 'paddings': self.pad, + 'groups': self.groups, + 'dilations': self.dilations, + 'data_format': self.data_format, + 'exhaustive_search': self.exhaustive_search, + 'activation': self.activation + } + self.outputs = {'Output': output} + + def testcuda(self): + return core.is_compiled_with_cuda() + + def test_check_output(self): + if self.testcuda(): + place = core.CUDAPlace(0) + self.check_output_with_place(place, atol=1e-5) + else: + pass + + def init_test_case(self): + self.pad = [0, 0] + self.stride = [1, 1] + self.input_size = [2, 3, 5, 5] # NCHW + assert np.mod(self.input_size[1], self.groups) == 0 + f_c = self.input_size[1] // self.groups + self.filter_size = [6, f_c, 3, 3] + + def init_dilation(self): + self.dilations = [1, 1] + + def init_group(self): + self.groups = 1 + + def init_bias_residual(self): + self.add_bias = True + self.add_residual_data = True + + def init_activation(self): + self.activation = 'relu' + + def set_search_method(self): + self.exhaustive_search = False + + +class TestWithoutResidual(TestConv2dFusionOp): + def init_bias_residual(self): + self.add_residual_data = False + + +class TestIdentityActivation(TestConv2dFusionOp): + def init_activation(self): + self.activation = 'identity' + + +class TestWithGroup(TestConv2dFusionOp): + def init_group(self): + self.groups = 3 + + +class TestWithDilation(TestConv2dFusionOp): + def init_test_case(self): + self.pad = [0, 0] + self.stride = [1, 1] + self.input_size = [2, 3, 10, 10] # NCHW + assert np.mod(self.input_size[1], self.groups) == 0 + f_c = self.input_size[1] // self.groups + self.filter_size = [6, f_c, 3, 3] + + def init_dilation(self): + self.dilations = [2, 2] + + def init_group(self): + self.groups = 3 + + +class TestCUDNNExhaustiveSearch(TestConv2dFusionOp): + def set_search_method(self): + self.exhaustive_search = True + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_op.py index ebbbf3ab8b00ff49d55ea5d472a2f7c4eae0da52..bcb79f232bd28bcb534ff2a2a0b799297ff96b71 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_op.py @@ -117,7 +117,7 @@ class TestConv2dOp(OpTest): return place = core.CUDAPlace(0) if self.testcuda() else core.CPUPlace() self.check_grad_with_place( - place, set(['Input', 'Filter']), 'Output', max_relative_error=0.02) + place, {'Input', 'Filter'}, 'Output', max_relative_error=0.02) def test_check_grad_no_filter(self): if self.dtype == np.float16: diff --git a/python/paddle/fluid/tests/unittests/test_data_balance.py b/python/paddle/fluid/tests/unittests/test_data_balance.py index 4bd24510bc8ac7f0fbaad3fd1919ab589cd21c4b..aa19a5edc7814315edaacf6e76072f62fcf7eb55 100644 --- a/python/paddle/fluid/tests/unittests/test_data_balance.py +++ b/python/paddle/fluid/tests/unittests/test_data_balance.py @@ -116,7 +116,7 @@ class TestDataBalance(unittest.TestCase): print("WARNING: Unittest TestDataBalance skipped. \ For the result is not correct when device count \ is larger than batch size.") - exit(0) + return fetch_list = [image.name, label.name] data_appeared = [False] * self.total_ins_num diff --git a/python/paddle/fluid/tests/unittests/test_dist_simnet_bow.py b/python/paddle/fluid/tests/unittests/test_dist_simnet_bow.py index 102a4dab05fe1adc6a503920714f50415b29dc19..30a7ec095e66acf1292fbb6602533d04bec9d5bf 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_simnet_bow.py +++ b/python/paddle/fluid/tests/unittests/test_dist_simnet_bow.py @@ -42,11 +42,12 @@ class TestDistSimnetBow2x2DenseAsync(TestDistBase): self._sync_mode = False self._enforce_place = "CPU" - def no_test_simnet_bow(self): + #FIXME(typhoonzero): fix async tests later + def notest_simnet_bow(self): need_envs = { "IS_DISTRIBUTED": '0', "IS_SPARSE": '0', - 'IS_SELF_CONTAINED_LR': '1' + 'IS_SELF_CONTAINED_LR': '1', } self.check_with_place( "dist_simnet_bow.py", diff --git a/python/paddle/fluid/transpiler/details/checkport.py b/python/paddle/fluid/transpiler/details/checkport.py index 7bad4b427a2d53bd14c7a1f870ce74a883158d04..6b78ceeaeec4d9b3db6524a5b5e939f88267340c 100644 --- a/python/paddle/fluid/transpiler/details/checkport.py +++ b/python/paddle/fluid/transpiler/details/checkport.py @@ -34,6 +34,7 @@ def wait_server_ready(endpoints): """ while True: all_ok = True + not_ready_endpoints = [] for ep in endpoints: ip_port = ep.split(":") with closing(socket.socket(socket.AF_INET, @@ -42,8 +43,11 @@ def wait_server_ready(endpoints): result = sock.connect_ex((ip_port[0], int(ip_port[1]))) if result != 0: all_ok = False + not_ready_endpoints.append(ep) if not all_ok: sys.stderr.write("pserver not ready, wait 3 sec to retry...\n") + sys.stderr.write("not ready endpoints:" + str(not_ready_endpoints) + + "\n") sys.stderr.flush() time.sleep(3) else: diff --git a/python/paddle/v2/dataset/wmt16.py b/python/paddle/v2/dataset/wmt16.py index c8818f715beadd9499ae588f2c19a57fbf26f372..5793002091ba3eabc32dcc156e5bb8eb512d8dfb 100644 --- a/python/paddle/v2/dataset/wmt16.py +++ b/python/paddle/v2/dataset/wmt16.py @@ -72,7 +72,8 @@ def __build_dict(tar_file, dict_size, save_path, lang): sorted( word_dict.iteritems(), key=lambda x: x[1], reverse=True)): if idx + 3 == dict_size: break - fout.write("%s\n" % (word[0])) + fout.write(word[0].encode('utf-8')) + fout.write('\n') def __load_dict(tar_file, dict_size, lang, reverse=False): @@ -300,8 +301,10 @@ def get_dict(lang, dict_size, reverse=False): dict: The word dictionary for the specific language. """ - if lang == "en": dict_size = min(dict_size, TOTAL_EN_WORDS) - else: dict_size = min(dict_size, TOTAL_DE_WORDS) + if lang == "en": + dict_size = min(dict_size, TOTAL_EN_WORDS) + else: + dict_size = min(dict_size, TOTAL_DE_WORDS) dict_path = os.path.join(paddle.v2.dataset.common.DATA_HOME, "wmt16/%s_%d.dict" % (lang, dict_size))