diff --git a/CMakeLists.txt b/CMakeLists.txt index 9ec632e20690eafdc558e24f160270a89b29ee41..e4442d254901e2524385452ebe5ac6f6df3056f9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -212,7 +212,7 @@ endif() if (WITH_JEMALLOC) find_package(JeMalloc REQUIRED) include_directories(${JEMALLOC_INCLUDE_DIR}) - add_definitions(-DWITH_JEMALLOC) + add_definitions(-DPADDLE_WITH_JEMALLOC) endif() include(generic) # simplify cmake module @@ -276,9 +276,3 @@ add_subdirectory(paddle) if(WITH_PYTHON) add_subdirectory(python) endif() - -if(WITH_DOC) - find_package(Sphinx REQUIRED) - find_python_module(recommonmark REQUIRED) - add_subdirectory(doc) -endif() diff --git a/Dockerfile b/Dockerfile index acfd091265e26d6c29c561d166fed2504c0cff1c..fe0721e9b99b5e028df2f6228ff04cb56a567a3f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -11,12 +11,10 @@ RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ub # ENV variables ARG WITH_GPU ARG WITH_AVX -ARG WITH_DOC ENV WOBOQ OFF ENV WITH_GPU=${WITH_GPU:-ON} ENV WITH_AVX=${WITH_AVX:-ON} -ENV WITH_DOC=${WITH_DOC:-OFF} ENV HOME /root # Add bash enhancements diff --git a/cmake/FindSphinx.cmake b/cmake/FindSphinx.cmake deleted file mode 100644 index f74cd4ff8c9c2c52319b18ac37264167b3718eae..0000000000000000000000000000000000000000 --- a/cmake/FindSphinx.cmake +++ /dev/null @@ -1,147 +0,0 @@ -# - This module looks for Sphinx -# Find the Sphinx documentation generator -# -# This modules defines -# SPHINX_EXECUTABLE -# SPHINX_FOUND - -find_program(SPHINX_EXECUTABLE - NAMES sphinx-build - PATHS - /usr/bin - /usr/local/bin - /opt/local/bin - DOC "Sphinx documentation generator" -) - -if( NOT SPHINX_EXECUTABLE ) - set(_Python_VERSIONS - 2.7 2.6 2.5 2.4 2.3 2.2 2.1 2.0 1.6 1.5 - ) - - foreach( _version ${_Python_VERSIONS} ) - set( _sphinx_NAMES sphinx-build-${_version} ) - - find_program( SPHINX_EXECUTABLE - NAMES ${_sphinx_NAMES} - PATHS - /usr/bin - /usr/local/bin - /opt/loca/bin - DOC "Sphinx documentation generator" - ) - endforeach() -endif() - -include(FindPackageHandleStandardArgs) - -find_package_handle_standard_args(Sphinx DEFAULT_MSG - SPHINX_EXECUTABLE -) - - -option( SPHINX_HTML_OUTPUT "Build a single HTML with the whole content." ON ) -option( SPHINX_DIRHTML_OUTPUT "Build HTML pages, but with a single directory per document." OFF ) -option( SPHINX_HTMLHELP_OUTPUT "Build HTML pages with additional information for building a documentation collection in htmlhelp." OFF ) -option( SPHINX_QTHELP_OUTPUT "Build HTML pages with additional information for building a documentation collection in qthelp." OFF ) -option( SPHINX_DEVHELP_OUTPUT "Build HTML pages with additional information for building a documentation collection in devhelp." OFF ) -option( SPHINX_EPUB_OUTPUT "Build HTML pages with additional information for building a documentation collection in epub." OFF ) -option( SPHINX_LATEX_OUTPUT "Build LaTeX sources that can be compiled to a PDF document using pdflatex." OFF ) -option( SPHINX_MAN_OUTPUT "Build manual pages in groff format for UNIX systems." OFF ) -option( SPHINX_TEXT_OUTPUT "Build plain text files." 
OFF ) - - -mark_as_advanced( - SPHINX_EXECUTABLE - SPHINX_HTML_OUTPUT - SPHINX_DIRHTML_OUTPUT - SPHINX_HTMLHELP_OUTPUT - SPHINX_QTHELP_OUTPUT - SPHINX_DEVHELP_OUTPUT - SPHINX_EPUB_OUTPUT - SPHINX_LATEX_OUTPUT - SPHINX_MAN_OUTPUT - SPHINX_TEXT_OUTPUT -) - -function( Sphinx_add_target target_name builder conf cache source destination ) - add_custom_target( ${target_name} ALL - COMMAND ${SPHINX_EXECUTABLE} -b ${builder} - -d ${cache} - -c ${conf} - ${source} - ${destination} - COMMENT "Generating sphinx documentation: ${builder}" - COMMAND cd ${destination} && ln -sf ./index_*.html index.html - ) - - set_property( - DIRECTORY APPEND PROPERTY - ADDITIONAL_MAKE_CLEAN_FILES - ${destination} - ) -endfunction() - -# Target dependencies can be optionally listed at the end. -function( Sphinx_add_targets target_base_name conf source base_destination ) - - set( _dependencies ) - - foreach( arg IN LISTS ARGN ) - set( _dependencies ${_dependencies} ${arg} ) - endforeach() - - if( ${SPHINX_HTML_OUTPUT} ) - Sphinx_add_target( ${target_base_name}_html html ${conf} ${source} ${base_destination}/html ) - - add_dependencies( ${target_base_name}_html ${_dependencies} ) - endif() - - if( ${SPHINX_DIRHTML_OUTPUT} ) - Sphinx_add_target( ${target_base_name}_dirhtml dirhtml ${conf} ${source} ${base_destination}/dirhtml ) - - add_dependencies( ${target_base_name}_dirhtml ${_dependencies} ) - endif() - - if( ${SPHINX_QTHELP_OUTPUT} ) - Sphinx_add_target( ${target_base_name}_qthelp qthelp ${conf} ${source} ${base_destination}/qthelp ) - - add_dependencies( ${target_base_name}_qthelp ${_dependencies} ) - endif() - - if( ${SPHINX_DEVHELP_OUTPUT} ) - Sphinx_add_target( ${target_base_name}_devhelp devhelp ${conf} ${source} ${base_destination}/devhelp ) - - add_dependencies( ${target_base_name}_devhelp ${_dependencies} ) - endif() - - if( ${SPHINX_EPUB_OUTPUT} ) - Sphinx_add_target( ${target_base_name}_epub epub ${conf} ${source} ${base_destination}/epub ) - - add_dependencies( ${target_base_name}_epub ${_dependencies} ) - endif() - - if( ${SPHINX_LATEX_OUTPUT} ) - Sphinx_add_target( ${target_base_name}_latex latex ${conf} ${source} ${base_destination}/latex ) - - add_dependencies( ${target_base_name}_latex ${_dependencies} ) - endif() - - if( ${SPHINX_MAN_OUTPUT} ) - Sphinx_add_target( ${target_base_name}_man man ${conf} ${source} ${base_destination}/man ) - - add_dependencies( ${target_base_name}_man ${_dependencies} ) - endif() - - if( ${SPHINX_TEXT_OUTPUT} ) - Sphinx_add_target( ${target_base_name}_text text ${conf} ${source} ${base_destination}/text ) - - add_dependencies( ${target_base_name}_text ${_dependencies} ) - endif() - - if( ${BUILD_TESTING} ) - sphinx_add_target( ${target_base_name}_linkcheck linkcheck ${conf} ${source} ${base_destination}/linkcheck ) - - add_dependencies( ${target_base_name}_linkcheck ${_dependencies} ) - endif() -endfunction() diff --git a/cmake/operators.cmake b/cmake/operators.cmake index 59c40a0e5d18b753038f2b9301d1c9494e3901be..c2d04828564e69d7ac965881057f185194aa0475 100644 --- a/cmake/operators.cmake +++ b/cmake/operators.cmake @@ -52,8 +52,8 @@ function(op_library TARGET) endif() if(WITH_MKLDNN) string(REPLACE "_op" "_mkldnn_op" MKLDNN_FILE "${TARGET}") - if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${MKLDNN_FILE}.cc) - list(APPEND mkldnn_cc_srcs ${MKLDNN_FILE}.cc) + if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/mkldnn/${MKLDNN_FILE}.cc) + list(APPEND mkldnn_cc_srcs mkldnn/${MKLDNN_FILE}.cc) endif() endif() else() diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index 
afd3342768701adba4ff0040bd1c762b1cd8739d..fe8d6dd42595dfc9dc4bf17e336df801b582703e 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -122,7 +122,7 @@ paddle.fluid.layers.transpose ArgSpec(args=['x', 'perm', 'name'], varargs=None, paddle.fluid.layers.im2sequence ArgSpec(args=['input', 'filter_size', 'stride', 'padding', 'input_image_size', 'out_stride', 'name'], varargs=None, keywords=None, defaults=(1, 1, 0, None, 1, None)) paddle.fluid.layers.nce ArgSpec(args=['input', 'label', 'num_total_classes', 'sample_weight', 'param_attr', 'bias_attr', 'num_neg_samples', 'name', 'sampler', 'custom_dist', 'seed', 'is_sparse'], varargs=None, keywords=None, defaults=(None, None, None, None, None, 'uniform', None, 0, False)) paddle.fluid.layers.hsigmoid ArgSpec(args=['input', 'label', 'num_classes', 'param_attr', 'bias_attr', 'name', 'path_table', 'path_code', 'is_custom', 'is_sparse'], varargs=None, keywords=None, defaults=(None, None, None, None, None, False, False)) -paddle.fluid.layers.beam_search ArgSpec(args=['pre_ids', 'pre_scores', 'ids', 'scores', 'beam_size', 'end_id', 'level', 'is_accumulated', 'name'], varargs=None, keywords=None, defaults=(0, True, None)) +paddle.fluid.layers.beam_search ArgSpec(args=['pre_ids', 'pre_scores', 'ids', 'scores', 'beam_size', 'end_id', 'level', 'is_accumulated', 'name', 'return_parent_idx'], varargs=None, keywords=None, defaults=(0, True, None, False)) paddle.fluid.layers.row_conv ArgSpec(args=['input', 'future_context_size', 'param_attr', 'act'], varargs=None, keywords=None, defaults=(None, None)) paddle.fluid.layers.multiplex ArgSpec(args=['inputs', 'index'], varargs=None, keywords=None, defaults=None) paddle.fluid.layers.layer_norm ArgSpec(args=['input', 'scale', 'shift', 'begin_norm_axis', 'epsilon', 'param_attr', 'bias_attr', 'act', 'name'], varargs=None, keywords=None, defaults=(True, True, 1, 1e-05, None, None, None, None)) @@ -142,10 +142,10 @@ paddle.fluid.layers.label_smooth ArgSpec(args=['label', 'prior_dist', 'epsilon', paddle.fluid.layers.roi_pool ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale'], varargs=None, keywords=None, defaults=(1, 1, 1.0)) paddle.fluid.layers.roi_align ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale', 'sampling_ratio', 'name'], varargs=None, keywords=None, defaults=(1, 1, 1.0, -1, None)) paddle.fluid.layers.dice_loss ArgSpec(args=['input', 'label', 'epsilon'], varargs=None, keywords=None, defaults=(1e-05,)) -paddle.fluid.layers.image_resize ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'resample', 'actual_shape'], varargs=None, keywords=None, defaults=(None, None, None, 'BILINEAR', None)) +paddle.fluid.layers.image_resize ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'resample', 'actual_shape', 'align_corners', 'align_mode'], varargs=None, keywords=None, defaults=(None, None, None, 'BILINEAR', None, True, 1)) paddle.fluid.layers.image_resize_short ArgSpec(args=['input', 'out_short_len', 'resample'], varargs=None, keywords=None, defaults=('BILINEAR',)) -paddle.fluid.layers.resize_bilinear ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape'], varargs=None, keywords=None, defaults=(None, None, None, None)) -paddle.fluid.layers.resize_nearest ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape'], varargs=None, keywords=None, defaults=(None, None, None, None)) +paddle.fluid.layers.resize_bilinear ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape', 'align_corners', 'align_mode'], 
varargs=None, keywords=None, defaults=(None, None, None, None, True, 1)) +paddle.fluid.layers.resize_nearest ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape', 'align_corners'], varargs=None, keywords=None, defaults=(None, None, None, None, True)) paddle.fluid.layers.gather ArgSpec(args=['input', 'index'], varargs=None, keywords=None, defaults=None) paddle.fluid.layers.scatter ArgSpec(args=['input', 'index', 'updates', 'name'], varargs=None, keywords=None, defaults=(None,)) paddle.fluid.layers.sequence_scatter ArgSpec(args=['input', 'index', 'updates', 'name'], varargs=None, keywords=None, defaults=(None,)) @@ -322,9 +322,10 @@ paddle.fluid.layers.generate_proposal_labels ArgSpec(args=['rpn_rois', 'gt_class paddle.fluid.layers.generate_proposals ArgSpec(args=['scores', 'bbox_deltas', 'im_info', 'anchors', 'variances', 'pre_nms_top_n', 'post_nms_top_n', 'nms_thresh', 'min_size', 'eta', 'name'], varargs=None, keywords=None, defaults=(6000, 1000, 0.5, 0.1, 1.0, None)) paddle.fluid.layers.generate_mask_labels ArgSpec(args=['im_info', 'gt_classes', 'is_crowd', 'gt_segms', 'rois', 'labels_int32', 'num_classes', 'resolution'], varargs=None, keywords=None, defaults=None) paddle.fluid.layers.iou_similarity ArgSpec(args=['x', 'y', 'name'], varargs=None, keywords=None, defaults=(None,)) -paddle.fluid.layers.box_coder ArgSpec(args=['prior_box', 'prior_box_var', 'target_box', 'code_type', 'box_normalized', 'name'], varargs=None, keywords=None, defaults=('encode_center_size', True, None)) +paddle.fluid.layers.box_coder ArgSpec(args=['prior_box', 'prior_box_var', 'target_box', 'code_type', 'box_normalized', 'name', 'axis'], varargs=None, keywords=None, defaults=('encode_center_size', True, None, 0)) paddle.fluid.layers.polygon_box_transform ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,)) paddle.fluid.layers.yolov3_loss ArgSpec(args=['x', 'gtbox', 'gtlabel', 'anchors', 'class_num', 'ignore_thresh', 'loss_weight_xy', 'loss_weight_wh', 'loss_weight_conf_target', 'loss_weight_conf_notarget', 'loss_weight_class', 'name'], varargs=None, keywords=None, defaults=(None, None, None, None, None, None)) +paddle.fluid.layers.multiclass_nms ArgSpec(args=['bboxes', 'scores', 'score_threshold', 'nms_top_k', 'keep_top_k', 'nms_threshold', 'normalized', 'nms_eta', 'background_label', 'name'], varargs=None, keywords=None, defaults=(0.3, True, 1.0, 0, None)) paddle.fluid.layers.accuracy ArgSpec(args=['input', 'label', 'k', 'correct', 'total'], varargs=None, keywords=None, defaults=(1, None, None)) paddle.fluid.layers.auc ArgSpec(args=['input', 'label', 'curve', 'num_thresholds', 'topk', 'slide_steps'], varargs=None, keywords=None, defaults=('ROC', 4095, 1, 1)) paddle.fluid.layers.exponential_decay ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)) diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt index 2ba2437de66f31549a87f20360dbb97b48ea6fbe..66f11dedbaccd7febcd75fa7ade9c68b6c42022c 100644 --- a/paddle/fluid/framework/CMakeLists.txt +++ b/paddle/fluid/framework/CMakeLists.txt @@ -1,4 +1,3 @@ - #windows treat symbolic file as a real file, which is different with unix #We create a hidden file and compile it instead of origin source file. 
function(windows_symbolic TARGET) @@ -207,3 +206,24 @@ endif (NOT WIN32) cc_library(dlpack_tensor SRCS dlpack_tensor.cc DEPS tensor dlpack) cc_test(dlpack_tensor_test SRCS dlpack_tensor_test.cc DEPS dlpack_tensor glog) + +# Get the current working branch +execute_process( + COMMAND git rev-parse --abbrev-ref HEAD + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + OUTPUT_VARIABLE PADDLE_BRANCH + OUTPUT_STRIP_TRAILING_WHITESPACE +) + +# Get the latest abbreviated commit hash of the working branch +execute_process( + COMMAND git log -1 --format=%h + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + OUTPUT_VARIABLE PADDLE_COMMIT + OUTPUT_STRIP_TRAILING_WHITESPACE +) + +message(STATUS "commit: ${PADDLE_COMMIT}") +message(STATUS "branch: ${PADDLE_BRANCH}") + +configure_file(commit.h.in commit.h)
diff --git a/paddle/fluid/framework/commit.h.in b/paddle/fluid/framework/commit.h.in new file mode 100644 index 0000000000000000000000000000000000000000..3a33ece624443a99083ae29abb70254a5ac40a3d --- /dev/null +++ b/paddle/fluid/framework/commit.h.in @@ -0,0 +1,21 @@ +#pragma once + +#include <string> + +namespace paddle { +namespace framework { + +static std::string paddle_commit() { + return "@PADDLE_COMMIT@"; +} + +static std::string paddle_compile_branch() { + return "@PADDLE_BRANCH@"; +} + +static std::string paddle_version() { + return "@PADDLE_VERSION@"; +} + +} // namespace framework +} // namespace paddle
diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt index b118dccd1b3de881b4791bff6cd331726c8e05da..914bcce7755bcf0651da29dd669f5d6d14e081d2 100644 --- a/paddle/fluid/framework/ir/CMakeLists.txt +++ b/paddle/fluid/framework/ir/CMakeLists.txt @@ -10,8 +10,22 @@ function(pass_library TARGET DEST) set(options "") set(oneValueArgs "") set(multiValueArgs SRCS DEPS) + set(targetPrefix "") + + # Get optional argument + set(extraMacroArgs ${ARGN}) + list(LENGTH extraMacroArgs numExtraMacroArgs) + if(numExtraMacroArgs GREATER 0) + list(GET extraMacroArgs 0 targetPrefix) + endif() + cmake_parse_arguments(op_library "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - cc_library(${TARGET} SRCS ${TARGET}.cc DEPS graph_pattern_detector pass fuse_pass_base ${op_library_DEPS}) + if(targetPrefix) + cc_library(${TARGET} SRCS ${targetPrefix}/${TARGET}.cc DEPS graph_pattern_detector pass fuse_pass_base ${op_library_DEPS}) + else() + cc_library(${TARGET} SRCS ${TARGET}.cc DEPS graph_pattern_detector pass fuse_pass_base ${op_library_DEPS}) + endif() + # add more DEST here, such as train, dist and collect USE_PASS into a file automatically. if (${DEST} STREQUAL "base" OR ${DEST} STREQUAL "inference") message(STATUS "add pass ${TARGET} ${DEST}") @@ -62,11 +76,11 @@ foreach (index RANGE 3 6) endforeach() if(WITH_MKLDNN) - pass_library(mkldnn_placement_pass base) - pass_library(depthwise_conv_mkldnn_pass base) - pass_library(conv_bias_mkldnn_fuse_pass inference) - pass_library(conv_relu_mkldnn_fuse_pass inference) - pass_library(conv_elementwise_add_mkldnn_fuse_pass inference) + pass_library(mkldnn_placement_pass base mkldnn) + pass_library(depthwise_conv_mkldnn_pass base mkldnn) + pass_library(conv_bias_mkldnn_fuse_pass inference mkldnn) + pass_library(conv_relu_mkldnn_fuse_pass inference mkldnn) + pass_library(conv_elementwise_add_mkldnn_fuse_pass inference mkldnn) endif() cc_library(fuse_elewise_add_act_pass SRCS fuse_elewise_add_act_pass.cc DEPS pass graph_pattern_detector ) @@ -86,7 +100,7 @@ cc_test(test_fc_fuse_pass SRCS fc_fuse_pass_tester.cc DEPS fc_fuse_pass framewor cc_test(test_seqpool_concat_fuse_pass SRCS seqpool_concat_fuse_pass_tester.cc DEPS seqpool_concat_fuse_pass framework_proto) cc_test(test_is_test_pass SRCS is_test_pass_tester.cc DEPS is_test_pass) if (WITH_MKLDNN) - cc_test(test_depthwise_conv_mkldnn_pass SRCS depthwise_conv_mkldnn_pass_tester.cc DEPS depthwise_conv_mkldnn_pass) - cc_test(test_conv_relu_mkldnn_fuse_pass SRCS conv_relu_mkldnn_fuse_pass_tester.cc DEPS conv_relu_mkldnn_fuse_pass) - cc_test(test_conv_elementwise_add_mkldnn_fuse_pass SRCS conv_elementwise_add_mkldnn_fuse_pass_tester.cc DEPS conv_elementwise_add_mkldnn_fuse_pass) + cc_test(test_depthwise_conv_mkldnn_pass SRCS mkldnn/depthwise_conv_mkldnn_pass_tester.cc DEPS depthwise_conv_mkldnn_pass) + cc_test(test_conv_relu_mkldnn_fuse_pass SRCS mkldnn/conv_relu_mkldnn_fuse_pass_tester.cc DEPS conv_relu_mkldnn_fuse_pass) + cc_test(test_conv_elementwise_add_mkldnn_fuse_pass SRCS mkldnn/conv_elementwise_add_mkldnn_fuse_pass_tester.cc DEPS conv_elementwise_add_mkldnn_fuse_pass) endif ()
diff --git a/paddle/fluid/framework/ir/graph_traits.cc b/paddle/fluid/framework/ir/graph_traits.cc index 2ee12cc410393d1e1aa5fc9e5374d858eca1b901..929d9edc34ffb92f468d5b7af54a0b8da4121543 100644 --- a/paddle/fluid/framework/ir/graph_traits.cc +++ b/paddle/fluid/framework/ir/graph_traits.cc @@ -14,6 +14,7 @@ #include "paddle/fluid/framework/ir/graph_traits.h" +#include <set> #include <vector> namespace paddle { @@ -79,7 +80,7 @@ NodesTSIterator::NodesTSIterator(const std::vector<Node *> &source) { } std::unordered_set<Node *> visited; - std::unordered_set<Node *> to_visit{source.begin(), source.end()}; + std::set<Node *> to_visit{source.begin(), source.end()}; std::vector<Node *> inlink_visited; while (!to_visit.empty()) {
diff --git a/paddle/fluid/framework/ir/conv_bias_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc similarity index 98% rename from paddle/fluid/framework/ir/conv_bias_mkldnn_fuse_pass.cc rename to paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc index d4a701e0b173a96d8605dff308fee7007a0ecc0c..5d0b294f6fec5f14dcddb91f8ceffb27fc833d4e 100644 --- a/paddle/fluid/framework/ir/conv_bias_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License.
-#include "paddle/fluid/framework/ir/conv_bias_mkldnn_fuse_pass.h" +#include "paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.h" #include #include #include diff --git a/paddle/fluid/framework/ir/conv_bias_mkldnn_fuse_pass.h b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.h similarity index 100% rename from paddle/fluid/framework/ir/conv_bias_mkldnn_fuse_pass.h rename to paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.h diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc similarity index 99% rename from paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc rename to paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc index a8029e67e659a269f8492cf6e2f1f09040144283..fb3db81347b102cfa264082b36a2e22ea8c22982 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h" +#include "paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.h" #include #include #include diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h b/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.h similarity index 100% rename from paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h rename to paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.h diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass_tester.cc similarity index 98% rename from paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc rename to paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass_tester.cc index 61ba097fd8cb55e25bda1947ea97d53308c55bd3..9ef5c298b8cddfec094e9544dc6da9afdcaf0dab 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass_tester.cc @@ -15,8 +15,8 @@ #include #include -#include "paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h" #include "paddle/fluid/framework/ir/graph_traits.h" +#include "paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_relu_mkldnn_fuse_pass.cc similarity index 97% rename from paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass.cc rename to paddle/fluid/framework/ir/mkldnn/conv_relu_mkldnn_fuse_pass.cc index e359a3832ee8d549f8c58d63bc1cc6564ecadede..4f4605398a665e63662a64a3a925c32d48f10952 100644 --- a/paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/conv_relu_mkldnn_fuse_pass.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass.h" +#include "paddle/fluid/framework/ir/mkldnn/conv_relu_mkldnn_fuse_pass.h" #include #include #include "paddle/fluid/platform/enforce.h" diff --git a/paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass.h b/paddle/fluid/framework/ir/mkldnn/conv_relu_mkldnn_fuse_pass.h similarity index 100% rename from paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass.h rename to paddle/fluid/framework/ir/mkldnn/conv_relu_mkldnn_fuse_pass.h diff --git a/paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/conv_relu_mkldnn_fuse_pass_tester.cc similarity index 98% rename from paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass_tester.cc rename to paddle/fluid/framework/ir/mkldnn/conv_relu_mkldnn_fuse_pass_tester.cc index 19248b4dfee1da81d18cd2effac08ba68dde80fb..06d56f6222e4bb9a9969d4ab2d260c97d1ce6c72 100644 --- a/paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/conv_relu_mkldnn_fuse_pass_tester.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass.h" +#include "paddle/fluid/framework/ir/mkldnn/conv_relu_mkldnn_fuse_pass.h" #include #include "paddle/fluid/framework/op_proto_maker.h" diff --git a/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.cc b/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.cc similarity index 96% rename from paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.cc rename to paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.cc index 19056e18aa892dbc83dfbf7305b6ad8b6b6bc51c..7851e8c84bca2e3b05d3b1603eaa4c0ca5909e10 100644 --- a/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.h" +#include "paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.h" #include "paddle/fluid/framework/ir/graph_pattern_detector.h" namespace paddle { diff --git a/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.h b/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.h similarity index 100% rename from paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.h rename to paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.h diff --git a/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass_tester.cc similarity index 98% rename from paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass_tester.cc rename to paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass_tester.cc index 09d0b15f46a7e50afb6aea46383013ce6a6c6118..1783e3322b1df8125f580f09a12aefe64d246c1a 100644 --- a/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass_tester.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.h" +#include "paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.h" #include diff --git a/paddle/fluid/framework/ir/mkldnn_placement_pass.cc b/paddle/fluid/framework/ir/mkldnn/mkldnn_placement_pass.cc similarity index 95% rename from paddle/fluid/framework/ir/mkldnn_placement_pass.cc rename to paddle/fluid/framework/ir/mkldnn/mkldnn_placement_pass.cc index 951fcb066ce759ebfec0182e1e9dca887e343170..20e52410ffe3caa86450bc05bf3aabf5a5bce374 100644 --- a/paddle/fluid/framework/ir/mkldnn_placement_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/mkldnn_placement_pass.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/framework/ir/mkldnn_placement_pass.h" +#include "paddle/fluid/framework/ir/mkldnn/mkldnn_placement_pass.h" #include namespace paddle { diff --git a/paddle/fluid/framework/ir/mkldnn_placement_pass.h b/paddle/fluid/framework/ir/mkldnn/mkldnn_placement_pass.h similarity index 100% rename from paddle/fluid/framework/ir/mkldnn_placement_pass.h rename to paddle/fluid/framework/ir/mkldnn/mkldnn_placement_pass.h diff --git a/paddle/fluid/framework/ngraph_operator.cc b/paddle/fluid/framework/ngraph_operator.cc deleted file mode 100644 index 7e174c7def1ffa4089a94d9cc504b18843557c53..0000000000000000000000000000000000000000 --- a/paddle/fluid/framework/ngraph_operator.cc +++ /dev/null @@ -1,545 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include - -#include -#include - -#include "paddle/fluid/framework/feed_fetch_type.h" -#include "paddle/fluid/framework/framework.pb.h" -#include "paddle/fluid/framework/lod_tensor.h" -#include "paddle/fluid/framework/ngraph_bridge.h" -#include "paddle/fluid/framework/ngraph_operator.h" -#include "paddle/fluid/framework/tensor.h" -#include "paddle/fluid/framework/var_desc.h" -#include "paddle/fluid/framework/var_type.h" - -#include "ngraph/ngraph.hpp" - -namespace paddle { -namespace framework { - -static ngraph::Shape Ddim2Shape(const DDim& dims) { - ngraph::Shape sp; - for (int i = 0; i < dims.size(); ++i) { - int k = dims[i]; - k = k == 0 ? 
1 : k; - sp.push_back(k); - } - return sp; -} - -static std::map pd2ng_type_map = { - {proto::VarType::FP32, ngraph::element::f32}, - {proto::VarType::FP64, ngraph::element::f64}, - {proto::VarType::INT32, ngraph::element::i32}, - {proto::VarType::INT64, ngraph::element::i64}, - {proto::VarType::BOOL, ngraph::element::boolean}, -}; - -typedef enum { /* nGraph support state on ops */ - FULL_TRAIN, /* Support full ops for train */ - PARTIAL_TRAIN, /* Support partial ops for train */ - FULL_TEST, /* Support full list of ops for test */ - PARTIAL_TEST /* Support partial list of ops for test */ -} op_state; - -// perform graph build through bridge and execute computation -class NgraphEngine { - public: - explicit NgraphEngine(const Scope& scope, const platform::Place& place, - const std::vector>& ops, - const std::unordered_map< - std::string, ngraph::element::Type>& var_type_map, - const std::unordered_set& persist, - const std::unordered_set& fetches, - const std::unordered_set& post_op_inputs, - op_state ng_op_state) - : scope_(scope), - place_(place), - fused_ops_(ops), - var_type_map_(var_type_map), - persistables_(persist), - fetches_(fetches), - post_op_inputs_(post_op_inputs), - ng_op_state_(ng_op_state) { - var_in_node_map_ = std::make_shared< - std::unordered_map>>(); - - var_node_map_ = std::make_shared< - std::unordered_map>>(); - - BuildNgIO(); - - GetNgFunction(); - } - - void Run(const Scope& scope, const platform::Place& place) const; - - private: - static std::unordered_map> - func_cache_; - const Scope& scope_; - const platform::Place& place_; - std::vector> fused_ops_; - std::unordered_map var_type_map_; - std::unordered_set persistables_; - std::unordered_set fetches_; - std::unordered_set post_op_inputs_; - op_state ng_op_state_; - - // ngraph backend eg. 
CPU - static std::shared_ptr backend_; - // ngraph function to call and execute - std::shared_ptr ngraph_function_; - // var_name of inputs - std::vector var_in_; - // var_name of outputs from fetch in order - std::vector var_out_; - // map input vars to nodes - std::shared_ptr< - std::unordered_map>> - var_in_node_map_; - // map each var name with a ngraph node - std::shared_ptr< - std::unordered_map>> - var_node_map_; - // cache key to check if function is cached - std::shared_ptr GetCacheKey(); - // get ngraph input and define ngraph input parameters - void GetNgInputShape(std::shared_ptr op); - // Call ngraph bridge to map ops - void BuildNgNodes(); - // get the ngraph input and output var list - void BuildNgIO(); - // build ngraph function call - void BuildNgFunction(); - // Check cache for ngraph function or otherwise build the function - void GetNgFunction(); -}; - -std::vector>::iterator>> -NgraphOperator::NgraphOpIntervals( - std::vector>* ops) { - std::vector>::iterator>> - intervals; - if (ops->empty()) { - return intervals; - } - size_t size = ops->size(); - size_t left = 0; - while (left < size && ops->at(left)->Type() != kFeedOpType) { - ++left; - } - if (left == size) { - return intervals; - } - while (left < size && ops->at(left)->Type() == kFeedOpType) { - ++left; - } - - size_t right = left; - while (right < size && ops->at(right)->Type() != kFetchOpType) { - ++right; - } - if (right == size) { - return intervals; - } - if (left >= right) return intervals; - - // (left, right - 1) represents indices between feed and fetch - size_t pivot = left; - while (pivot < right) { - auto op_type = ops->at(pivot)->Type(); - if (paddle::framework::NgraphBridge::NG_NODE_MAP.find(op_type) == - paddle::framework::NgraphBridge::NG_NODE_MAP.end()) { - ++pivot; - } else { - size_t start = pivot, end = start; - while (pivot < right && - (paddle::framework::NgraphBridge::NG_NODE_MAP.find( - ops->at(pivot)->Type()) != - paddle::framework::NgraphBridge::NG_NODE_MAP.end())) { - ++pivot; - ++end; - } - std::vector>::iterator> - interval = {ops->begin() + start, ops->begin() + end}; - intervals.push_back(interval); - } - } // end while - - return intervals; -} - -NgraphOperator::NgraphOperator( - const ProgramDesc& prog, size_t block_id, - std::vector>::iterator start, - std::vector>::iterator end, - const std::string& type, const VariableNameMap& inputs, - const VariableNameMap& outputs, const AttributeMap& attrs) - : OperatorBase(type, inputs, outputs, attrs), - pdesc_(prog), - block_(block_id) { - for (std::vector>::iterator it = start; - it != end; ++it) { - fused_ops_.push_back(std::move(*it)); - } - - for (std::vector>::iterator it = end; - (*it)->Type() != kFetchOpType; ++it) { - for (auto& var_name_item : (*it)->Inputs()) { - for (auto& var_name : var_name_item.second) { - post_op_inputs_.insert(var_name); - } - } - } - - if ((*(start - 1))->Type() == kFeedOpType && (*end)->Type() == kFetchOpType) { - is_full_ = true; - } - - Process(); -} - -void NgraphOperator::Process() { - auto& bdesc = pdesc_.Block(block_); - for (auto& var : bdesc.AllVars()) { - if (!(var->GetType() == proto::VarType::SELECTED_ROWS || - var->GetType() == proto::VarType::LOD_TENSOR || - var->GetType() == proto::VarType::LOD_TENSOR_ARRAY)) { - continue; - } - - auto var_name = var->Name(); - if (var->Name() == framework::kEmptyVarName) { - continue; - } - - if (var_name != "fetch" && var_name != "feed") { - auto pd_type = var->GetDataType(); - if (pd2ng_type_map.find(pd_type) == pd2ng_type_map.end()) { - 
PADDLE_THROW("Data type of var %s not found in pd2ng_type_map", - var_name); - } - var_type_map_[var_name] = pd2ng_type_map[pd_type]; - } - - if (var->Persistable()) { - persistables_.insert(var->Name()); - } - } - - for (auto* op : bdesc.AllOps()) { - if (op->Type() == kFetchOpType) { - std::string fetch_target_name = op->Input("X")[0]; - fetches_.insert(fetch_target_name); - } - } -} - -void NgraphOperator::RunImpl(const Scope& scope, - const platform::Place& place) const { - op_state ng_op_state = PARTIAL_TEST; - auto& bdesc = pdesc_.Block(block_); - for (auto* op : bdesc.AllOps()) { - if (op->Type().find("_grad") != std::string::npos) { - ng_op_state = PARTIAL_TRAIN; - break; - } - } - - if (is_full_) { - ng_op_state = ng_op_state == PARTIAL_TEST ? FULL_TEST : FULL_TRAIN; - } - - NgraphEngine ngraph_engine(scope, place, fused_ops_, var_type_map_, - persistables_, fetches_, post_op_inputs_, - ng_op_state); - ngraph_engine.Run(scope, place); -} - -std::unordered_map> - NgraphEngine::func_cache_ = {}; - -std::shared_ptr NgraphEngine::backend_ = - ngraph::runtime::Backend::create("CPU"); - -void NgraphEngine::GetNgInputShape(std::shared_ptr op) { - RuntimeContext ctx(op->Inputs(), op->Outputs(), scope_); - op->RuntimeInferShape(scope_, place_, ctx); - for (auto& var_name_item : op->Inputs()) { - for (auto& var_name : var_name_item.second) { - auto* var = scope_.FindVar(var_name); - if (var && var->IsType()) { - auto* tensor_pd = GetLoDTensorOrSelectedRowsValueFromVar(*var); - auto sp = Ddim2Shape(tensor_pd->dims()); - if (std::find(var_in_.begin(), var_in_.end(), var_name) != - var_in_.end()) { - if (var_node_map_->find(var_name) == var_node_map_->end()) { - auto ng_type = var_type_map_.at(var_name); - auto prm = - std::make_shared(ng_type, sp, true); - (*var_node_map_)[var_name] = prm; - (*var_in_node_map_)[var_name] = prm; - } - } - } - } - } -} - -void NgraphEngine::BuildNgNodes() { - for (auto& var_name : var_out_) { - if (var_node_map_->find(var_name) == var_node_map_->end()) { - auto* var = scope_.FindVar(var_name); - if (var && var->IsType()) { - auto* tensor_pd = GetLoDTensorOrSelectedRowsValueFromVar(*var); - auto& ddim = tensor_pd->dims(); - auto ng_shape = Ddim2Shape(ddim); - auto ng_type = var_type_map_.at(var_name); - auto prm = - std::make_shared(ng_type, ng_shape, true); - (*var_node_map_)[var_name] = prm; - } - } - } - - paddle::framework::NgraphBridge ngb(var_node_map_); - for (auto& op : fused_ops_) { - ngb.BuildNgNode(op); - } -} - -void NgraphEngine::BuildNgIO() { - std::unordered_set inputs; - std::unordered_set outputs; - - for (auto& op : fused_ops_) { - for (auto& var_name_item : op->Inputs()) { - for (auto& var_name : var_name_item.second) { - inputs.insert(var_name); - const bool is_output = outputs.find(var_name) != outputs.end(); - if (!is_output && - std::find(var_in_.begin(), var_in_.end(), var_name) == - var_in_.end()) { - // fill var_in here to keep lhs and rhs order - var_in_.push_back(var_name); - } - } - } - - if (op->Type() != "fill_constant") { - GetNgInputShape(op); - } - - for (auto& var_name_item : op->Outputs()) { - PADDLE_ENFORCE_LE(var_name_item.second.size(), 1, - "op %s has more than 1 output - Not handling yet", - op->Type()); - for (auto& var_name : var_name_item.second) { - outputs.insert(var_name); - } - } - } - - // var_out.clear(); - for (auto& op : fused_ops_) { - for (auto& var_name_item : op->Outputs()) { - PADDLE_ENFORCE_LE(var_name_item.second.size(), 1, - "op %s has more than 1 output - Not handling yet", - op->Type()); - for (auto& 
var_name : var_name_item.second) { - switch (ng_op_state_) { - case PARTIAL_TEST: - if (post_op_inputs_.find(var_name) != post_op_inputs_.end() || - fetches_.find(var_name) != fetches_.end()) { - var_out_.push_back(var_name); - } - break; - case FULL_TEST: - if (fetches_.find(var_name) != fetches_.end()) { - var_out_.push_back(var_name); - } - break; - case PARTIAL_TRAIN: - if (fetches_.find(var_name) != fetches_.end() || - post_op_inputs_.find(var_name) != post_op_inputs_.end() || - persistables_.find(var_name) != persistables_.end()) { - var_out_.push_back(var_name); - } - break; - case FULL_TRAIN: - if (fetches_.find(var_name) != fetches_.end() || - persistables_.find(var_name) != persistables_.end()) { - var_out_.push_back(var_name); - } - break; - default: - var_out_.push_back(var_name); - } - } - } - } -} - -void NgraphEngine::BuildNgFunction() { - BuildNgNodes(); - ngraph_function_ = nullptr; - ngraph::NodeVector func_outputs; - ngraph::ParameterVector func_inputs; - - for (auto& vo : var_out_) { - func_outputs.push_back(var_node_map_->at(vo)); - } - - for (auto& vi : var_in_) { - std::shared_ptr prm = - std::dynamic_pointer_cast( - var_in_node_map_->at(vi)); - func_inputs.push_back(prm); - } - - ngraph_function_ = - std::make_shared(func_outputs, func_inputs); -} - -std::shared_ptr NgraphEngine::GetCacheKey() { - auto cache_key = std::make_shared(""); - *cache_key += std::to_string(fused_ops_.size()); - for (auto& op : fused_ops_) { - *cache_key += op->Type(); - } - for (auto& var_name : var_in_) { - auto shape = var_node_map_->at(var_name)->get_shape(); - *cache_key += var_name; - *cache_key += var_type_map_.at(var_name).c_type_string(); - for (size_t i = 0; i < shape.size(); ++i) { - *cache_key += std::to_string(shape.at(i)); - } - } - - for (auto& var_name : var_out_) { - auto* var = scope_.FindVar(var_name); - if (var && var->IsType()) { - auto* tensor_pd = GetLoDTensorOrSelectedRowsValueFromVar(*var); - auto& ddim = tensor_pd->dims(); - for (int i = 0; i < ddim.size(); ++i) { - *cache_key += std::to_string(ddim[i]); - } - } - } - return cache_key; -} - -void NgraphEngine::GetNgFunction() { - bool cache_on = true; - if (cache_on) { - std::string cache_key_val = *GetCacheKey(); - if (func_cache_.find(cache_key_val) != func_cache_.end()) { - ngraph_function_ = func_cache_.at(cache_key_val); - } else { - BuildNgFunction(); - func_cache_[cache_key_val] = ngraph_function_; - } - } else { - BuildNgFunction(); - } -} - -void NgraphEngine::Run(const Scope& scope, const platform::Place& place) const { - std::vector> t_in; - std::vector> t_out; - - for (size_t i = 0; i < var_in_.size(); ++i) { - auto vi = var_in_.at(i); - auto sp = var_node_map_->at(vi)->get_shape(); - std::shared_ptr ti; - auto* var = scope.FindVar(vi); - if (var && var->IsType()) { - auto* tensor_pd = GetLoDTensorOrSelectedRowsValueFromVar(*var); - PADDLE_ENFORCE(sp == Ddim2Shape(tensor_pd->dims()), - "Ensure ngraph tensor layout align with paddle tensor"); - if (tensor_pd->type() == proto::VarType::FP32) { - const float* arr = tensor_pd->data(); - ti = backend_->create_tensor(ngraph::element::f32, sp, - const_cast(arr)); - } else if (tensor_pd->type() == proto::VarType::INT32) { - const int* arr = tensor_pd->data(); - ti = backend_->create_tensor(ngraph::element::i32, sp, - const_cast(arr)); - } else if (tensor_pd->type() == proto::VarType::INT64) { - const int64_t* arr = tensor_pd->data(); - ti = backend_->create_tensor(ngraph::element::i64, sp, - const_cast(arr)); - } else if (tensor_pd->type() == 
proto::VarType::FP64) { - const double* arr = tensor_pd->data(); - ti = backend_->create_tensor(ngraph::element::f64, sp, - const_cast(arr)); - } else if (tensor_pd->type() == proto::VarType::BOOL) { - const bool* arr = tensor_pd->data(); - ti = backend_->create_tensor(ngraph::element::boolean, sp, - const_cast(arr)); - } else { - PADDLE_THROW("Data type not handling for var %s", vi); - } - } else { - PADDLE_THROW("Cannot find var or tensor with var name %s", vi); - } - bool is_test = (ng_op_state_ == PARTIAL_TEST || ng_op_state_ == FULL_TEST) - ? true - : false; - bool is_persistable = - (persistables_.find(vi) != persistables_.end()) ? true : false; - if (is_test && is_persistable) { - ti->set_stale(false); - } - t_in.push_back(ti); - } - - for (size_t i = 0; i < var_out_.size(); ++i) { - auto var_name = var_out_[i]; - auto* var = scope.FindVar(var_name); - std::shared_ptr to; - if (var && var->IsType()) { - auto* tensor_pd = GetMutableLoDTensorOrSelectedRowsValueFromVar(var); - auto dd = tensor_pd->dims(); - ngraph::Shape sp = Ddim2Shape(dd); - auto ng_type = var_type_map_.at(var_name); - if (ng_type == ngraph::element::f32) { - auto pd_arr = tensor_pd->mutable_data(place); - to = backend_->create_tensor(ngraph::element::f32, sp, pd_arr); - } else if (ng_type == ngraph::element::i64) { - auto pd_arr = tensor_pd->mutable_data(place); - to = backend_->create_tensor(ngraph::element::i64, sp, pd_arr); - } else if (ng_type == ngraph::element::f64) { - auto pd_arr = tensor_pd->mutable_data(place); - to = backend_->create_tensor(ngraph::element::f64, sp, pd_arr); - } else if (ng_type == ngraph::element::boolean) { - auto pd_arr = tensor_pd->mutable_data(place); - to = backend_->create_tensor(ngraph::element::boolean, sp, pd_arr); - } else { - PADDLE_THROW("Data type not handled in for var %s", var_name); - } - t_out.push_back(to); - } else { - PADDLE_THROW("Cannot find var or tensor with var name %s", var_name); - } - } - - backend_->call(backend_->compile(ngraph_function_), t_out, t_in); -} // NgraphEngine::RunImpl -} // namespace framework -} // namespace paddle diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index ab3cf308fc04e227d5402712f6bab226fea04711..9d6c10ab9e33d0e9888fa484030be9da7752512e 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -555,18 +555,17 @@ Tensor* ExecutionContext::LegacyOutput(const std::string& name) const { template <> std::vector ExecutionContext::MultiOutput( const std::string& name) const { - auto names = op().Outputs(name); + auto it = ctx_.outputs.find(name); + if (it == ctx_.outputs.end()) { + return {}; + } + const std::vector& vars = it->second; std::vector res; - res.reserve(names.size()); - std::transform(names.begin(), names.end(), std::back_inserter(res), - [&](const std::string& sub_name) -> Tensor* { - auto var = scope_.FindVar(sub_name); - if (var == nullptr) return nullptr; - PADDLE_ENFORCE( - var->IsType(), - "%s should be LoDTensor, but the received type is %s", - sub_name, ToTypeName(var->Type())); - return var->GetMutable(); + res.reserve(vars.size()); + std::transform(vars.begin(), vars.end(), std::back_inserter(res), + [&](Variable* var) -> Tensor* { + return var == nullptr ? 
nullptr + : var->GetMutable<LoDTensor>(); }); return res; }
diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc index 8029129b9a6a9fcbc0ff10daa1f25b210259e9d8..47488d4dea79f285769f29c93f7888a7f783f070 100644 --- a/paddle/fluid/imperative/layer.cc +++ b/paddle/fluid/imperative/layer.cc @@ -156,6 +156,8 @@ class Autograd { for (auto it : candidate->pre_ops_) { for (OpBase* pre_op : it.second) { if (!pre_op) continue; + VLOG(5) << "op dep " << candidate->op_desc_->Type() << " <---- " + << it.first << " <---- " << pre_op->op_desc_->Type(); if (visited.find(pre_op) == visited.end()) { visited.insert(pre_op); queue.push_back(pre_op); @@ -204,59 +206,68 @@ framework::LoDTensor& VarBase::GradValue() { } std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() { - if (!grad_op_desc_ && backward_id_ <= 0) { + if (grad_op_descs_.empty() && backward_id_ <= 0) { LOG(WARNING) << "op with no grad: " << op_desc_->Type(); return {}; } - std::map<std::string, std::vector<framework::Variable*>> grad_outputs; + std::vector<framework::VariableValueMap> grad_outputs; if (backward_id_ > 0) { VLOG(3) << "py_layer_grad"; - grad_outputs[framework::GradVarName(PyLayer::kFwdOut)] = PyLayer::ApplyGrad( - backward_id_, - grad_input_vars_[framework::GradVarName(PyLayer::kFwdInp)]); + grad_outputs.resize(1); + grad_outputs[0][framework::GradVarName(PyLayer::kFwdOut)] = + PyLayer::ApplyGrad( + backward_id_, + grad_input_vars_[0][framework::GradVarName(PyLayer::kFwdInp)]); } else { - VLOG(3) << "op grad " << grad_op_desc_->Type(); - for (auto it : grad_output_vars_) { - auto& outputs = grad_outputs[it.first]; - for (size_t i = 0; i < it.second.size(); ++i) { - // Allocate a new variable - Variable* tmp_var = new framework::Variable(); - tmp_var->GetMutable<framework::LoDTensor>(); - outputs.push_back(tmp_var); + grad_outputs.resize(grad_op_descs_.size()); + for (size_t k = 0; k < grad_op_descs_.size(); ++k) { + framework::OpDesc* grad_op_desc = grad_op_descs_[k]; + VLOG(3) << "op grad " << grad_op_desc->Type(); + for (auto it : grad_output_vars_[k]) { + auto& outputs = grad_outputs[k][it.first]; + for (size_t i = 0; i < it.second.size(); ++i) { + // Allocate a new variable + Variable* tmp_var = new framework::Variable(); + tmp_var->GetMutable<framework::LoDTensor>(); + outputs.push_back(tmp_var); + } } - } - framework::RuntimeContext ctx(grad_input_vars_, grad_outputs); + framework::RuntimeContext ctx(grad_input_vars_[k], grad_outputs[k]); - // No need to do compile time infer shape here. + // grad_op_desc_->InferShape(*block_); + grad_op_desc->InferVarType(block_); - std::unique_ptr<framework::OperatorBase> opbase = - framework::OpRegistry::CreateOp(*grad_op_desc_); - framework::OperatorWithKernel* op_kernel = - dynamic_cast<framework::OperatorWithKernel*>(opbase.get()); - PADDLE_ENFORCE_NOT_NULL(op_kernel, "only support op with kernel"); + std::unique_ptr<framework::OperatorBase> opbase = + framework::OpRegistry::CreateOp(*grad_op_desc); + framework::OperatorWithKernel* op_kernel = + dynamic_cast<framework::OperatorWithKernel*>(opbase.get()); + PADDLE_ENFORCE_NOT_NULL(op_kernel, "only support op with kernel"); - framework::Scope scope; - PreparedOp p = PreparedOp::Prepare(ctx, *op_kernel, place_); - p.op.RuntimeInferShape(scope, place_, ctx); - p.func(framework::ExecutionContext(p.op, scope, *p.dev_ctx, p.ctx)); + framework::Scope scope; + PreparedOp p = PreparedOp::Prepare(ctx, *op_kernel, place_); + p.op.RuntimeInferShape(scope, place_, ctx); + p.func(framework::ExecutionContext(p.op, scope, *p.dev_ctx, p.ctx)); + } } - for (auto it : grad_output_vars_) { - auto& outputs = grad_outputs[it.first]; - auto& origin_outputs = it.second; - PADDLE_ENFORCE_EQ(outputs.size(), origin_outputs.size()); - - for (size_t i = 0; i < outputs.size(); ++i) { - framework::Variable* grad = outputs[i]; - framework::Variable* orig_grad = origin_outputs[i]; - AddTo(grad, orig_grad, place_); - delete grad; + for (size_t k = 0; k < grad_output_vars_.size(); ++k) { + for (auto it : grad_output_vars_[k]) { + auto& outputs = grad_outputs[k][it.first]; + auto& origin_outputs = it.second; + PADDLE_ENFORCE_EQ(outputs.size(), origin_outputs.size()); + + for (size_t i = 0; i < outputs.size(); ++i) { + framework::Variable* grad = outputs[i]; + framework::Variable* orig_grad = origin_outputs[i]; + AddTo(grad, orig_grad, place_); + delete grad; + } } } + return input_vars_; }
diff --git a/paddle/fluid/imperative/layer.h b/paddle/fluid/imperative/layer.h index 633924aa417b8bd64bf4921054f82fdb7f7868fe..78205486c5534ac0c61cc6d545bdafa4dfc95695 100644 --- a/paddle/fluid/imperative/layer.h +++ b/paddle/fluid/imperative/layer.h @@ -28,6 +28,7 @@ #include "paddle/fluid/framework/var_desc.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/imperative/type_defs.h" @@ -140,16 +141,24 @@ class VarBase { void RunBackward(); void TrackPreOp(OpBase* pre_op, const std::string& pre_op_out_name, - int pre_op_out_idx, bool stop_gradient) { + int pre_op_out_idx, bool pre_op_stop_gradient) { pre_op_ = pre_op; pre_op_out_name_ = pre_op_out_name; pre_op_out_idx_ = pre_op_out_idx; - stop_gradient_ = stop_gradient; + if (pre_op_stop_gradient) { + stop_gradient_ = pre_op_stop_gradient; + } } void ClearGradient() { - delete grads_; - grads_ = new VarBase(true); + VLOG(1) << "clear gradient of " << var_desc_->Name(); + if (grads_ && grads_->var_ && grads_->var_->IsInitialized()) { + auto grads_t = grads_->var_->GetMutable<framework::LoDTensor>(); + operators::math::set_constant( + *(platform::DeviceContextPool::Instance().Get( + grads_->var_->Get<framework::LoDTensor>().place())), + grads_t, 0.0); + } } framework::LoDTensor& GradValue(); @@ -184,12 +193,13 @@ class OpBase { OpBase() : op_desc_(nullptr), forward_id_(-1), - grad_op_desc_(nullptr), backward_id_(-1), place_(platform::CPUPlace()) {} virtual ~OpBase() { - if (grad_op_desc_) delete grad_op_desc_; + for (framework::OpDesc* desc : grad_op_descs_) { + delete desc; + } } std::map<std::string, std::vector<VarBase*>> ApplyGrad(); @@ -198,9 +208,11 @@ // For pure python PyLayer, use `forward_id_`, otherwise, use op_desc_. framework::OpDesc* op_desc_; int forward_id_; - // When has backward, one of `grad_op_desc_` or `backward_id_` is set, + + // When has backward, one of `grad_op_descs_` or `backward_id_` is set, // not both. - framework::OpDesc* grad_op_desc_; + // Note: each fwd op corresponds to a vector of bwd ops. + std::vector<framework::OpDesc*> grad_op_descs_; int backward_id_; platform::Place place_; @@ -210,8 +222,11 @@ OpBasePtrMap pre_ops_; std::map<std::string, std::vector<int>> pre_ops_out_idx_; - framework::VariableValueMap grad_input_vars_; - framework::VariableValueMap grad_output_vars_; + // Inputs to a vector of bwd ops. + std::vector<framework::VariableValueMap> grad_input_vars_; + // Outputs to a vector of bwd ops. + std::vector<framework::VariableValueMap> grad_output_vars_; + framework::BlockDesc* block_; };
diff --git a/paddle/fluid/imperative/tracer.cc b/paddle/fluid/imperative/tracer.cc index 5b87839f457c24d5d6687a27faac6c0f52f5f90b..bc39d11ba00a6a7c386162a1f9201c6f992c8692 100644 --- a/paddle/fluid/imperative/tracer.cc +++ b/paddle/fluid/imperative/tracer.cc @@ -24,15 +24,17 @@ namespace imperative { void CreateGradOp(const framework::OpDesc& op_desc, const std::unordered_set<std::string>& no_grad_set, const std::vector<framework::BlockDesc*>& grad_sub_block, - framework::OpDesc** grad_op_desc, + std::vector<framework::OpDesc*>* grad_op_descs, std::unordered_map<std::string, std::string>* grad_to_var) { - std::vector<std::unique_ptr<framework::OpDesc>> grad_op_descs = + PADDLE_ENFORCE(grad_op_descs->empty()); + std::vector<std::unique_ptr<framework::OpDesc>> descs = framework::OpInfoMap::Instance() .Get(op_desc.Type()) .GradOpMaker()(op_desc, no_grad_set, grad_to_var, grad_sub_block); - PADDLE_ENFORCE(grad_op_descs.size() == 1, "Only support 1 grad op now."); - // TODO(panyx0718): Leak? - *grad_op_desc = grad_op_descs[0].release(); + + for (auto& desc : descs) { + grad_op_descs->emplace_back(desc.release()); + } } void InitVar(framework::Variable* var, framework::Variable* grad_var, @@ -83,11 +85,12 @@ void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs, op->input_vars_ = inputs; for (auto it : op->input_vars_) { auto& invars = invars_map[it.first]; + invars.reserve(it.second.size()); for (VarBase* inp : it.second) { PADDLE_ENFORCE_NOT_NULL(inp->var_, "op %s input %s nullptr", op->op_desc_->Type(), inp->var_desc_->Name()); - invars.push_back(inp->var_); + invars.emplace_back(inp->var_); vars[inp->var_desc_->Name()] = inp; if (inp->PreOp()) { op->pre_ops_[it.first].push_back(inp->PreOp()); @@ -104,9 +107,10 @@ for (auto it : op->output_vars_) { auto& outvars = outvars_map[it.first]; const std::vector<VarBase*>& outputs = it.second; + outvars.reserve(outputs.size()); for (size_t i = 0; i < outputs.size(); ++i) { VarBase* out = outputs[i]; - outvars.push_back(out->var_); + outvars.emplace_back(out->var_); vars[out->var_desc_->Name()] = out; framework::VarDesc* var_desc = block->FindVar(out->var_desc_->Name()); @@ -138,49 +142,52 @@ void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs, prepared_op.op, scope, *prepared_op.dev_ctx, prepared_op.ctx)); if (!stop_gradient) { - framework::OpDesc* grad_op_desc; - // TODO(panyx): Is this leaked? std::unique_ptr<std::unordered_map<std::string, std::string>> grad_to_var( new std::unordered_map<std::string, std::string>()); - CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var.get()); - op->grad_op_desc_ = grad_op_desc; - - for (auto it : grad_op_desc->Inputs()) { - auto& grad_in_vars = op->grad_input_vars_[it.first]; - for (const std::string& grad_invar : it.second) { - block->FindRecursiveOrCreateVar(grad_invar); - auto var_it = grad_to_var->find(grad_invar); - if (var_it == grad_to_var->end()) { - auto fwd_var_it = vars.find(grad_invar); - PADDLE_ENFORCE(fwd_var_it != vars.end()); - // Forward inputs or outputs. - grad_in_vars.push_back(fwd_var_it->second->var_); - } else { + CreateGradOp(*op_desc, {}, {block}, &op->grad_op_descs_, grad_to_var.get()); + + op->grad_input_vars_.resize(op->grad_op_descs_.size()); + op->grad_output_vars_.resize(op->grad_op_descs_.size()); + for (size_t i = 0; i < op->grad_op_descs_.size(); ++i) { + framework::OpDesc* grad_op_desc = op->grad_op_descs_[i]; + for (auto it : grad_op_desc->Inputs()) { + auto& grad_in_vars = op->grad_input_vars_[i][it.first]; + for (const std::string& grad_invar : it.second) { + block->FindRecursiveOrCreateVar(grad_invar); + auto var_it = grad_to_var->find(grad_invar); + if (var_it == grad_to_var->end()) { + auto fwd_var_it = vars.find(grad_invar); + PADDLE_ENFORCE(fwd_var_it != vars.end()); + // Forward inputs or outputs. + grad_in_vars.push_back(fwd_var_it->second->var_); + } else { + VarBase* var = vars[var_it->second]; + if (!var->grads_->var_->IsInitialized()) { + InitVar(var->var_, var->grads_->var_, + prepared_op.GetDeviceContext()); + } + // Douts. + grad_in_vars.push_back(var->grads_->var_); + } + } + } + + for (auto it : grad_op_desc->Outputs()) { + auto& grad_out_vars = op->grad_output_vars_[i][it.first]; + for (const std::string& grad_outvar : it.second) { + block->FindRecursiveOrCreateVar(grad_outvar); + auto var_it = grad_to_var->find(grad_outvar); + PADDLE_ENFORCE(var_it != grad_to_var->end(), + "Could not found the grad op output var, should this " + "operator %s's stop gradient be True", + op_desc->Type()); VarBase* var = vars[var_it->second]; if (!var->grads_->var_->IsInitialized()) { InitVar(var->var_, var->grads_->var_, prepared_op.GetDeviceContext()); } - // Douts. - grad_in_vars.push_back(var->grads_->var_); - } - } - } - - for (auto it : grad_op_desc->Outputs()) { - auto& grad_out_vars = op->grad_output_vars_[it.first]; - for (const std::string& grad_outvar : it.second) { - block->FindRecursiveOrCreateVar(grad_outvar); - auto var_it = grad_to_var->find(grad_outvar); - PADDLE_ENFORCE(var_it != grad_to_var->end(), - "Could not found the grad op output var, should this " - "operator %s's stop gradient be True", - op_desc->Type()); - VarBase* var = vars[var_it->second]; - if (!var->grads_->var_->IsInitialized()) { - InitVar(var->var_, var->grads_->var_, prepared_op.GetDeviceContext()); + grad_out_vars.push_back(var->grads_->var_); } - grad_out_vars.push_back(var->grads_->var_); } } } @@ -209,10 +216,12 @@ std::vector<VarBase*> Tracer::PyTrace(OpBase* op, out->TrackPreOp(op, PyLayer::kFwdOut, i, stop_gradient); } if (!stop_gradient) { + op->grad_input_vars_.resize(1); + op->grad_output_vars_.resize(1); auto& grad_input_vars = - op->grad_input_vars_[framework::GradVarName(PyLayer::kFwdInp)]; + op->grad_input_vars_[0][framework::GradVarName(PyLayer::kFwdInp)]; auto& grad_output_vars = - op->grad_output_vars_[framework::GradVarName(PyLayer::kFwdOut)]; + op->grad_output_vars_[0][framework::GradVarName(PyLayer::kFwdOut)]; for (const VarBase* inp : inputs) { grad_input_vars.push_back(inp->var_);
diff --git a/paddle/fluid/inference/analysis/argument.h b/paddle/fluid/inference/analysis/argument.h index 71c4a54dea08d9d5e53f182949854981fe36a41a..2f31b182af7293488719e41a92b2ea78709bda02 100644 --- a/paddle/fluid/inference/analysis/argument.h +++ b/paddle/fluid/inference/analysis/argument.h @@ -28,6 +28,7 @@ #include "paddle/fluid/framework/ir/graph.h" #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/inference/api/paddle_analysis_config.h" #include "paddle/fluid/platform/variant.h" namespace paddle { @@ -130,6 +131,8 @@ struct Argument { DECL_ARGUMENT_FIELD(tensorrt_max_batch_size, TensorRtMaxBatchSize, int); DECL_ARGUMENT_FIELD(tensorrt_workspace_size, TensorRtWorkspaceSize, int); DECL_ARGUMENT_FIELD(tensorrt_min_subgraph_size, TensorRtMinSubgraphSize, int); + DECL_ARGUMENT_FIELD(tensorrt_precision_mode, TensorRtPrecisionMode, + AnalysisConfig::Precision); // Memory optimized related. DECL_ARGUMENT_FIELD(enable_memory_optim, EnableMemoryOptim, bool);
diff --git a/paddle/fluid/inference/analysis/helper.cc b/paddle/fluid/inference/analysis/helper.cc index ca40c01fc57dbcc2ca16770a1b7d798de8b5625b..4f5c50d0d6b9ac94130cb82fb342ae5ee592f2c0 100644 --- a/paddle/fluid/inference/analysis/helper.cc +++ b/paddle/fluid/inference/analysis/helper.cc @@ -36,6 +36,14 @@ void SetAttr<int>(framework::proto::OpDesc *op, const std::string &name, attr->set_i(data); } template <> +void SetAttr<bool>(framework::proto::OpDesc *op, const std::string &name, + const bool &data) { + auto *attr = op->add_attrs(); + attr->set_name(name); + attr->set_type(paddle::framework::proto::AttrType::BOOLEAN); + attr->set_b(data); +} +template <> void SetAttr<int64_t>(framework::proto::OpDesc *op, const std::string &name, const int64_t &data) { auto *attr = op->add_attrs();
diff --git a/paddle/fluid/inference/analysis/helper.h b/paddle/fluid/inference/analysis/helper.h index de04713b531dc421b885473cc8956e8ba6b63574..59107f28080dceb0a58e17d42281db5f3773de56 100644 --- a/paddle/fluid/inference/analysis/helper.h +++ b/paddle/fluid/inference/analysis/helper.h @@ -17,6 +17,7 @@ limitations under the License.
*/ #include #include #include +#include #include #include #include @@ -29,9 +30,14 @@ limitations under the License. */ #include "paddle/fluid/platform/port.h" #ifdef _WIN32 -#define GCC_ATTRIBUTE(attr__) ; +#include +#include +#define GCC_ATTRIBUTE(attr__) +#define MKDIR(path) _mkdir(path) #else +#include #define GCC_ATTRIBUTE(attr__) __attribute__((attr__)); +#define MKDIR(path) mkdir(path, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH) #endif #define __SHOULD_USE_RESULT__ GCC_ATTRIBUTE(warn_unused_result) @@ -163,6 +169,54 @@ static bool PathExists(const std::string &path) { return false; } +static std::string GetDirRoot(const std::string &path) { + char sep = '/'; + +#ifdef _WIN32 + sep = '\\'; +#endif + + size_t i = path.rfind(sep, path.length()); + if (i != std::string::npos) { + return (path.substr(0, i)); + } + return path; +} + +static std::string GetOrCreateModelOptCacheDir(const std::string &model_root) { + std::string opt_cache_dir = model_root + "/_opt_cache/"; + if (!PathExists(opt_cache_dir)) { + PADDLE_ENFORCE(MKDIR(opt_cache_dir.c_str()) != -1, + "Can not create optimize cache directory: %s, Make sure you " + "have permission to write", + opt_cache_dir); + } + return opt_cache_dir; +} + +static std::string GetTrtCalibPath(const std::string &model_root, + const std::string &engine_key) { + return model_root + "/trt_calib_" + engine_key; +} + +// If there is no calib table data file in model_opt_cache_dir, return "". +static std::string GetTrtCalibTableData(const std::string &model_opt_cache_dir, + const std::string &engine_key, + bool enable_int8) { + std::string trt_calib_table_path = + GetTrtCalibPath(model_opt_cache_dir, engine_key); + if (enable_int8 && FileExists(trt_calib_table_path)) { + VLOG(3) << "Calibration table file: " << trt_calib_table_path + << "is found here"; + std::ifstream infile(trt_calib_table_path, std::ios::in); + std::stringstream buffer; + buffer << infile.rdbuf(); + std::string calibration_data(buffer.str()); + return calibration_data; + } + return ""; +} + } // namespace analysis } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/analysis/ir_pass_manager.cc b/paddle/fluid/inference/analysis/ir_pass_manager.cc index 4e1464226450b833e6d8dae2be2dcad89dd1e5e4..fe3c841186c35ea28c1d44007d91de5b997c1388 100644 --- a/paddle/fluid/inference/analysis/ir_pass_manager.cc +++ b/paddle/fluid/inference/analysis/ir_pass_manager.cc @@ -67,6 +67,20 @@ void IRPassManager::CreatePasses(Argument *argument, pass->Set("max_batch_size", new int(argument->tensorrt_max_batch_size())); pass->Set("min_subgraph_size", new int(argument->tensorrt_min_subgraph_size())); + pass->Set("program", + new framework::ProgramDesc *(&argument->main_program())); + + bool enable_int8 = argument->tensorrt_precision_mode() == + AnalysisConfig::Precision::kInt8; + + pass->Set("enable_int8", new bool(enable_int8)); + std::string model_opt_cache_dir = + argument->Has("model_dir") + ? 
argument->model_dir() + : GetDirRoot(argument->model_program_path()); + pass->Set( + "model_opt_cache_dir", + new std::string(GetOrCreateModelOptCacheDir(model_opt_cache_dir))); } // graph_ = pass->Apply(std::move(graph_)); @@ -91,11 +105,14 @@ std::unique_ptr IRPassManager::Apply(std::unique_ptr graph) { } framework::proto::ProgramDesc IRPassManager::AcquireProgram( - std::unique_ptr *graph, const ProgramDesc &program) const { + std::unique_ptr *graph, ProgramDesc *program) const { auto pass = framework::ir::PassRegistry::Instance().Get("graph_to_program_pass"); - ProgramDesc desc(program); + // Directly using ProgramDesc desc(argument->main_program()) may cause + // incomplete copies of information. + ProgramDesc desc; + desc.CopyFrom(*program->Proto()); pass->SetNotOwned("program", &desc); auto *the_graph = graph->release(); *graph = pass->Apply(std::unique_ptr(the_graph)); diff --git a/paddle/fluid/inference/analysis/ir_pass_manager.h b/paddle/fluid/inference/analysis/ir_pass_manager.h index 983a582649706fa6eedb5aa459b5ac53b98f658b..2a595cb36b8345157b3fd26afc62aabfa98b87bc 100644 --- a/paddle/fluid/inference/analysis/ir_pass_manager.h +++ b/paddle/fluid/inference/analysis/ir_pass_manager.h @@ -29,6 +29,7 @@ #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/inference/analysis/argument.h" +#include "paddle/fluid/inference/analysis/helper.h" namespace paddle { namespace inference { @@ -42,8 +43,8 @@ class IRPassManager final { std::unique_ptr Apply(std::unique_ptr graph); - framework::proto::ProgramDesc AcquireProgram( - std::unique_ptr *graph, const ProgramDesc &program) const; + framework::proto::ProgramDesc AcquireProgram(std::unique_ptr *graph, + ProgramDesc *program) const; framework::ir::Graph &graph() const { return *graph_; } diff --git a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc index 5f25303cc1eaa6b563f0f8f4289b38499eb487cc..69a9caec030600332c9f11ba255e4e642bd41e96 100644 --- a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc +++ b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc @@ -13,6 +13,7 @@ // limitations under the License. #include +#include #include #include @@ -67,12 +68,33 @@ std::unique_ptr analysis::TensorRtSubgraphPass::ApplyImpl( return graph; } +std::string GenerateEngineKey(const std::set &engine_inputs, + const std::set &engine_outputs) { + std::string engine_hash_key = ""; + for (auto name : engine_inputs) { + engine_hash_key += name; + } + for (auto name : engine_outputs) { + engine_hash_key += name; + } + auto engine_key = std::to_string(std::hash()(engine_hash_key)); + return engine_key; +} + void TensorRtSubgraphPass::CreateTensorRTOp(framework::ir::Node *node, Graph *graph) const { auto *op_desc = node->Op(); auto &subgraph = *Agent(node).subgraph(); PADDLE_ENFORCE(!subgraph.empty()); + framework::ProgramDesc *program_desc = + Get("program"); + // Add new block for TensorRTEngineOP + const framework::BlockDesc &main_block = + program_desc->Block(framework::kRootBlockIndex); + // const framework::BlockDesc& main_block = program_desc->Block(0); + framework::BlockDesc *new_block = program_desc->AppendBlock(main_block); + // A fake block desc.
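Editor's note: the cache-dir helpers added to analysis/helper.h earlier come together in the pass setup above. A hedged sketch of the lookup path; the model location and engine key are placeholders.

```cpp
#include <string>
#include "paddle/fluid/inference/analysis/helper.h"

std::string FindCalibTable() {
  using namespace paddle::inference::analysis;  // NOLINT, sketch only
  std::string model_root = GetDirRoot("/models/resnet50/__model__");
  std::string cache_dir = GetOrCreateModelOptCacheDir(model_root);  // _opt_cache/
  // Returns "" when no trt_calib_<key> file exists yet, i.e. the predictor
  // still has to run a calibration pass over real data first.
  return GetTrtCalibTableData(cache_dir, /*engine_key=*/"12345",
                              /*enable_int8=*/true);
}
```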
framework::proto::BlockDesc block_proto; framework::BlockDesc block_desc(nullptr, &block_proto); @@ -82,13 +104,18 @@ void TensorRtSubgraphPass::CreateTensorRTOp(framework::ir::Node *node, subgraph.size()); for (auto *node : subgraph) { + auto *new_block_op = new_block->AppendOp(); auto *op = block_desc.AppendOp(); + *new_block_op->Proto() = *node->Op()->Proto(); *op->Proto() = *node->Op()->Proto(); } - // collect inputs - std::unordered_set input_names; - std::unordered_set input_names_with_id; + // Then, we will use the input_names_with_id and output_names_with_id to + // generate the engine key. + // So, we use set instead of unordered_set here to ensure that the engine key + // is unique. + std::set input_names; + std::set input_names_with_id; for (auto *x : node->inputs) { input_names.insert(x->Name()); input_names_with_id.insert(x->Name() + std::to_string(x->id())); @@ -96,8 +123,8 @@ void TensorRtSubgraphPass::CreateTensorRTOp(framework::ir::Node *node, op_desc->SetInput( "Xs", std::vector(input_names.begin(), input_names.end())); - std::unordered_set output_names; - std::unordered_set output_names_with_id; + std::set output_names; + std::set output_names_with_id; for (auto *x : node->outputs) { output_names.insert(x->Name()); output_names_with_id.insert(x->Name() + std::to_string(x->id())); @@ -182,7 +209,6 @@ void TensorRtSubgraphPass::CreateTensorRTOp(framework::ir::Node *node, // to Tensor. std::vector output_mapping; for (auto name : output_names) { - // LOG(INFO) << name << " " << output_name_map.size(); PADDLE_ENFORCE(output_name_map.count(name) != 0); output_mapping.push_back(output_name_map[name]); } @@ -193,16 +219,29 @@ void TensorRtSubgraphPass::CreateTensorRTOp(framework::ir::Node *node, *vars->Add() = *node->Var()->Proto(); } } + PADDLE_ENFORCE(!block_desc.Proto()->vars().empty(), "the block has no var-desc"); PADDLE_ENFORCE(!output_mapping.empty()); - // Set attrs + op_desc->SetBlockAttr("sub_block", new_block); SetAttr(op_desc->Proto(), "subgraph", block_desc.Proto()->SerializeAsString()); + // Set attrs SetAttr(op_desc->Proto(), "max_batch_size", Get("max_batch_size")); SetAttr(op_desc->Proto(), "workspace_size", Get("workspace_size")); SetAttr(op_desc->Proto(), "parameters", ExtractParameters(graph->Nodes())); SetAttr(op_desc->Proto(), "output_name_mapping", output_mapping); + + auto enable_int8 = Get("enable_int8"); + auto engine_key = + GenerateEngineKey(input_names_with_id, output_names_with_id); + + std::string calibration_data = GetTrtCalibTableData( + Get("model_opt_cache_dir"), engine_key, enable_int8); + SetAttr(op_desc->Proto(), "calibration_data", calibration_data); + + SetAttr(op_desc->Proto(), "enable_int8", enable_int8); + SetAttr(op_desc->Proto(), "engine_key", engine_key); } std::vector ExtractParameters( diff --git a/paddle/fluid/inference/analysis/passes/ir_graph_to_program_pass.cc b/paddle/fluid/inference/analysis/passes/ir_graph_to_program_pass.cc index f1da37af3cc5fa55eb66a1822aefe96eda1dc4fb..6b3d80fcef0be1527062edbb37ea39cc5d95a168 100644 --- a/paddle/fluid/inference/analysis/passes/ir_graph_to_program_pass.cc +++ b/paddle/fluid/inference/analysis/passes/ir_graph_to_program_pass.cc @@ -31,7 +31,11 @@ void IrGraphToProgramPass::RunImpl(Argument *argument) { } std::unique_ptr graph(argument->main_graph_ptr()); - framework::ProgramDesc desc(argument->main_program()); + + // Directly using ProgramDesc desc(argument->main_program()) may cause + // incomplete copies of information.
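Editor's note: because `GenerateEngineKey` walks two ordered `std::set`s, the concatenated string, and therefore the hashed key, does not depend on the order in which the pass discovered the tensors; that is exactly why the comment above swaps `unordered_set` for `set`. A small sketch of the invariant, with made-up tensor names:

```cpp
#include <cassert>
#include <set>
#include <string>

// Assumes GenerateEngineKey from tensorrt_subgraph_pass.cc above.
void EngineKeyIgnoresDiscoveryOrder() {
  std::set<std::string> outs = {"z_2"};
  std::set<std::string> ins_a = {"x_0", "y_1"};
  std::set<std::string> ins_b = {"y_1", "x_0"};  // same names, other order
  // Both sets iterate as {"x_0", "y_1"}, so the keys match.
  assert(GenerateEngineKey(ins_a, outs) == GenerateEngineKey(ins_b, outs));
}
```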
+ framework::ProgramDesc desc; + desc.CopyFrom(*argument->main_program().Proto()); pass->SetNotOwned("program", &desc); auto thegraph = pass->Apply(std::move(graph)); thegraph.release(); // the argument still own the graph. diff --git a/paddle/fluid/inference/analysis/passes/memory_optimize_pass.h b/paddle/fluid/inference/analysis/passes/memory_optimize_pass.h index 216f416de0d1003b944337ee98fb4e6a22c66fc5..2da565f2ae15a50a207173b10d4c350456086582 100644 --- a/paddle/fluid/inference/analysis/passes/memory_optimize_pass.h +++ b/paddle/fluid/inference/analysis/passes/memory_optimize_pass.h @@ -13,7 +13,9 @@ // limitations under the License. #pragma once - +#include +#include +#include #include "paddle/fluid/inference/analysis/analysis_pass.h" #include "paddle/fluid/platform/port.h" diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc index e6008ba335ed89222247fc00033d1afbd6b28f16..eecab238a88e90399eb70f17caa57633af4e2a69 100644 --- a/paddle/fluid/inference/api/analysis_config.cc +++ b/paddle/fluid/inference/api/analysis_config.cc @@ -22,7 +22,7 @@ namespace paddle { -PassStrategy *contrib::AnalysisConfig::pass_builder() const { +PassStrategy *AnalysisConfig::pass_builder() const { if (!pass_builder_.get()) { if (use_gpu_) { LOG(INFO) << "Create GPU IR passes"; @@ -42,27 +42,27 @@ PassStrategy *contrib::AnalysisConfig::pass_builder() const { return pass_builder_.get(); } -contrib::AnalysisConfig::AnalysisConfig(const std::string &model_dir) { +AnalysisConfig::AnalysisConfig(const std::string &model_dir) { model_dir_ = model_dir; Update(); } -contrib::AnalysisConfig::AnalysisConfig(const std::string &prog_file, - const std::string ¶ms_file) { +AnalysisConfig::AnalysisConfig(const std::string &prog_file, + const std::string ¶ms_file) { prog_file_ = prog_file; params_file_ = params_file; Update(); } -void contrib::AnalysisConfig::SetModel(const std::string &prog_file_path, - const std::string ¶ms_file_path) { +void AnalysisConfig::SetModel(const std::string &prog_file_path, + const std::string ¶ms_file_path) { prog_file_ = prog_file_path; params_file_ = params_file_path; Update(); } -void contrib::AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb, - int device_id) { +void AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb, + int device_id) { #ifdef PADDLE_WITH_CUDA use_gpu_ = true; memory_pool_init_size_mb_ = memory_pool_init_size_mb; @@ -74,13 +74,13 @@ void contrib::AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb, Update(); } -void contrib::AnalysisConfig::DisableGpu() { +void AnalysisConfig::DisableGpu() { use_gpu_ = false; Update(); } -contrib::AnalysisConfig::AnalysisConfig(const contrib::AnalysisConfig &other) { +AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) { #define CP_MEMBER(member__) member__ = other.member__; // Model related. @@ -102,6 +102,7 @@ contrib::AnalysisConfig::AnalysisConfig(const contrib::AnalysisConfig &other) { CP_MEMBER(tensorrt_workspace_size_); CP_MEMBER(tensorrt_max_batchsize_); CP_MEMBER(tensorrt_min_subgraph_size_); + CP_MEMBER(tensorrt_precision_mode_); // MKLDNN releated. 
CP_MEMBER(use_mkldnn_); CP_MEMBER(mkldnn_enabled_op_types_); @@ -129,7 +130,7 @@ contrib::AnalysisConfig::AnalysisConfig(const contrib::AnalysisConfig &other) { Update(); } -void contrib::AnalysisConfig::EnableMKLDNN() { +void AnalysisConfig::EnableMKLDNN() { #ifdef PADDLE_WITH_MKLDNN pass_builder()->EnableMKLDNN(); use_mkldnn_ = true; @@ -141,9 +142,9 @@ void contrib::AnalysisConfig::EnableMKLDNN() { Update(); } -void contrib::AnalysisConfig::EnableTensorRtEngine(int workspace_size, - int max_batch_size, - int min_subgraph_size) { +void AnalysisConfig::EnableTensorRtEngine( + int workspace_size, int max_batch_size, int min_subgraph_size, + AnalysisConfig::Precision precision_mode) { #ifdef PADDLE_WITH_CUDA if (!use_gpu()) { LOG(ERROR) << "To use TensorRT engine, please call EnableGpu() first"; @@ -154,6 +155,7 @@ void contrib::AnalysisConfig::EnableTensorRtEngine(int workspace_size, tensorrt_workspace_size_ = workspace_size; tensorrt_max_batchsize_ = max_batch_size; tensorrt_min_subgraph_size_ = min_subgraph_size; + tensorrt_precision_mode_ = precision_mode; Update(); #else @@ -163,7 +165,7 @@ void contrib::AnalysisConfig::EnableTensorRtEngine(int workspace_size, } // TODO(Superjomn) refactor this, buggy. -void contrib::AnalysisConfig::Update() { +void AnalysisConfig::Update() { auto info = SerializeInfoCache(); if (info == serialized_info_cache_) return; @@ -223,7 +225,7 @@ void contrib::AnalysisConfig::Update() { } } -std::string contrib::AnalysisConfig::SerializeInfoCache() { +std::string AnalysisConfig::SerializeInfoCache() { std::stringstream ss; ss << model_dir_; ss << prog_file_; @@ -258,14 +260,14 @@ std::string contrib::AnalysisConfig::SerializeInfoCache() { return ss.str(); } -void contrib::AnalysisConfig::SetCpuMathLibraryNumThreads( +void AnalysisConfig::SetCpuMathLibraryNumThreads( int cpu_math_library_num_threads) { cpu_math_library_num_threads_ = cpu_math_library_num_threads; Update(); } -float contrib::AnalysisConfig::fraction_of_gpu_memory_for_pool() const { +float AnalysisConfig::fraction_of_gpu_memory_for_pool() const { #ifdef PADDLE_WITH_CUDA // Get the GPU memory details and calculate the fraction of memory for the // GPU memory pool. 
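Editor's note: with the extended `EnableTensorRtEngine` signature above, switching to INT8 is a one-argument change for callers, and the new `precision` parameter defaults to `kFloat32` so existing code is unaffected. A minimal configuration sketch; the model path is a placeholder.

```cpp
#include "paddle/fluid/inference/api/paddle_inference_api.h"

void ConfigureInt8Trt() {
  paddle::AnalysisConfig config("/models/mobilenet");  // placeholder model dir
  config.EnableUseGpu(/*memory_pool_init_size_mb=*/100, /*device_id=*/0);
  config.EnableTensorRtEngine(/*workspace_size=*/1 << 20,
                              /*max_batch_size=*/1,
                              /*min_subgraph_size=*/3,
                              paddle::AnalysisConfig::Precision::kInt8);
}
```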
@@ -280,8 +282,8 @@ float contrib::AnalysisConfig::fraction_of_gpu_memory_for_pool() const { #endif } -void contrib::AnalysisConfig::EnableMemoryOptim( - bool static_optim, bool force_update_static_cache) { +void AnalysisConfig::EnableMemoryOptim(bool static_optim, + bool force_update_static_cache) { enable_memory_optim_ = true; static_memory_optim_ = static_optim; static_memory_optim_force_update_ = force_update_static_cache; @@ -289,14 +291,14 @@ void contrib::AnalysisConfig::EnableMemoryOptim( Update(); } -bool contrib::AnalysisConfig::enable_memory_optim() const { +bool AnalysisConfig::enable_memory_optim() const { return enable_memory_optim_; } -void contrib::AnalysisConfig::SetModelBuffer(const char *prog_buffer, - size_t prog_buffer_size, - const char *param_buffer, - size_t param_buffer_size) { +void AnalysisConfig::SetModelBuffer(const char *prog_buffer, + size_t prog_buffer_size, + const char *param_buffer, + size_t param_buffer_size) { prog_file_ = std::string(prog_buffer, prog_buffer + prog_buffer_size); params_file_ = std::string(param_buffer, param_buffer + param_buffer_size); model_from_memory_ = true; @@ -304,7 +306,7 @@ void contrib::AnalysisConfig::SetModelBuffer(const char *prog_buffer, Update(); } -NativeConfig contrib::AnalysisConfig::ToNativeConfig() const { +NativeConfig AnalysisConfig::ToNativeConfig() const { NativeConfig config; config.model_dir = model_dir_; config.prog_file = prog_file_; diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index 9f8a78f7abc37d17b9806ea766da132f9bf4b28d..14d6ba8c56dc3fe04e27bccadd5a5155547398a4 100644 --- a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -15,6 +15,7 @@ #include "paddle/fluid/inference/api/analysis_predictor.h" #include #include +#include #include #include #include @@ -25,6 +26,7 @@ #include "paddle/fluid/framework/naive_executor.h" #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/framework/var_type_traits.h" +#include "paddle/fluid/inference/analysis/helper.h" #include "paddle/fluid/inference/analysis/passes/memory_optimize_pass.h" #include "paddle/fluid/inference/api/helper.h" #include "paddle/fluid/inference/api/paddle_inference_api.h" @@ -37,13 +39,20 @@ #if PADDLE_WITH_TENSORRT #include "paddle/fluid/inference/tensorrt/convert/op_converter.h" +#include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h" + #endif DECLARE_bool(profile); namespace paddle { -using contrib::AnalysisConfig; +using inference::Singleton; +#if PADDLE_WITH_TENSORRT +using inference::tensorrt::TRTInt8Calibrator; +using inference::tensorrt::TRTCalibratorEngine; +using inference::tensorrt::TRTCalibratorEngineManager; +#endif namespace { bool IsPersistable(const framework::VarDesc *var) { @@ -113,6 +122,15 @@ bool AnalysisPredictor::PrepareProgram( if (!program) { if (!LoadProgramDesc()) return false; + // If not cloned, the parameters should be loaded. + // If config_.ir_optim() is True, parameters is loaded in + // OptimizeInferenceProgram(), but other persistable variables + // (like RAW type var) are not created in scope. + // If config_.ir_optim() is False, parameters is loaded in LoadParameters(), + // still need to create other persistable variables. + // So in both case, create persistable variables at first. + executor_->CreateVariables(*inference_program_, 0, true, sub_scope_); + // Optimize the program, and load parameters and modify them in the // scope_. 
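Editor's note: a condensed sketch (not a verbatim excerpt) of the control flow that the ordering comment above pins down; the names are the predictor's own members, and the `CreateVariables` parameter names are assumed.

```cpp
if (!LoadProgramDesc()) return false;
// Always create persistable variables first, whichever branch follows.
executor_->CreateVariables(*inference_program_, /*block_id=*/0,
                           /*persistable=*/true, sub_scope_);
if (config_.ir_optim()) {
  OptimizeInferenceProgram();  // loads parameters and rewrites them in scope_
} else {
  LoadParameters();            // parameters only; persistables already exist
}
```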
// This will change the scope_ address. @@ -120,15 +138,6 @@ bool AnalysisPredictor::PrepareProgram( status_ir_optim_enabled_ = true; OptimizeInferenceProgram(); } else { - // If the parent_scope is passed, we assert that the persistable variables - // are already created, so just create the no persistable variables. - - // If not cloned, the parameters should be loaded - // OptimizeInferenceProgram. - // So in both cases, just the local variables are needed to load, not the - // parematers. - executor_->CreateVariables(*inference_program_, 0, true, sub_scope_); - // Load parameters LOG(INFO) << "load parameters "; LoadParameters(); @@ -339,6 +348,8 @@ void AnalysisPredictor::OptimizeInferenceProgram() { !config_.params_file().empty(), "Either model_dir or (param_file, prog_file) should be set."); PADDLE_ENFORCE(!config_.prog_file().empty()); + std::string dir = inference::analysis::GetDirRoot(config_.prog_file()); + argument_.SetModelProgramPath(config_.prog_file()); argument_.SetModelParamsPath(config_.params_file()); } @@ -349,6 +360,7 @@ void AnalysisPredictor::OptimizeInferenceProgram() { argument_.SetTensorRtWorkspaceSize(config_.tensorrt_workspace_size_); argument_.SetTensorRtMaxBatchSize(config_.tensorrt_max_batchsize_); argument_.SetTensorRtMinSubgraphSize(config_.tensorrt_min_subgraph_size_); + argument_.SetTensorRtPrecisionMode(config_.tensorrt_precision_mode_); } if (config_.use_mkldnn_) { @@ -363,7 +375,7 @@ void AnalysisPredictor::OptimizeInferenceProgram() { } argument_.SetIrAnalysisPasses(passes); argument_.SetAnalysisPasses(config_.pass_builder()->AnalysisPasses()); - argument_.SetScopeNotOwned(const_cast(scope_.get())); + argument_.SetScopeNotOwned(scope_.get()); Analyzer().Run(&argument_); PADDLE_ENFORCE(argument_.scope_valid()); @@ -569,7 +581,67 @@ bool AnalysisPredictor::LoadParameters() { return true; } +#if PADDLE_WITH_TENSORRT +bool AnalysisPredictor::SaveTrtCalibToDisk() { + PADDLE_ENFORCE(config_.tensorrt_engine_enabled(), + "This func can be invoked only in trt mode"); + auto &block = inference_program_->Block(0); + for (auto &op_desc : block.AllOps()) { + if (op_desc->Type() == "tensorrt_engine") { + std::string engine_name = + boost::get(op_desc->GetAttr("engine_key")); + if (!Singleton::Global().Has(engine_name)) { + LOG(ERROR) << "You should run the predictor(with trt) on the real data " + "to generate calibration info"; + return false; + } + TRTCalibratorEngine *calib_engine = + Singleton::Global().Get(engine_name); + LOG(INFO) << "Wait for calib threads done."; + calib_engine->calib_->waitAndSetDone(); + LOG(INFO) << "Generating TRT Calibration table data, this may cost a lot " + "of time..."; + calib_engine->thr_->join(); + std::string calibration_table_data = + calib_engine->calib_->getCalibrationTableAsString(); + + if (calibration_table_data.empty()) { + LOG(ERROR) << "the calibration table is empty."; + return false; + } + + std::string model_opt_cache_dir = + argument_.Has("model_dir") + ? argument_.model_dir() + : inference::analysis::GetDirRoot(argument_.model_program_path()); + + std::string calibration_table_data_path = + inference::analysis::GetTrtCalibPath( + inference::analysis::GetOrCreateModelOptCacheDir( + model_opt_cache_dir), + engine_name); + + std::ofstream ofile(calibration_table_data_path, std::ios::out); + LOG(INFO) << "Write Paddle-TRT INT8 calibration table data to file " + << calibration_table_data_path; + ofile << calibration_table_data; + ofile.close(); + } + } + // Free all calibrator resources. 
+ Singleton::Global().DeleteALL(); + return true; +} +#endif + AnalysisPredictor::~AnalysisPredictor() { +#if PADDLE_WITH_TENSORRT + if (config_.tensorrt_engine_enabled() && + config_.tensorrt_precision_mode_ == AnalysisConfig::Precision::kInt8 && + Singleton::Global().Has()) { + SaveTrtCalibToDisk(); + } +#endif if (FLAGS_profile) { platform::DisableProfiler(platform::EventSortingKey::kTotal, "./profile.log"); @@ -653,11 +725,15 @@ bool AnalysisPredictor::need_collect_var_shapes_for_memory_optim() { return need; } +std::string AnalysisPredictor::GetSeriazlizedProgram() const { + return inference_program_->Proto()->SerializeAsString(); +} + template <> -std::unique_ptr CreatePaddlePredictor( - const contrib::AnalysisConfig &config) { - return CreatePaddlePredictor(config); +std::unique_ptr CreatePaddlePredictor( + const AnalysisConfig &config) { + return CreatePaddlePredictor( + config); } } // namespace paddle diff --git a/paddle/fluid/inference/api/analysis_predictor.h b/paddle/fluid/inference/api/analysis_predictor.h index a8ea67d4bd332b5614f4f6593e8397829d28c5a6..014df4ee8b6d86232212736c43a9aff32ffee011 100644 --- a/paddle/fluid/inference/api/analysis_predictor.h +++ b/paddle/fluid/inference/api/analysis_predictor.h @@ -33,7 +33,6 @@ using inference::analysis::Argument; using inference::analysis::Analyzer; using framework::proto::ProgramDesc; using framework::NaiveExecutor; -using contrib::AnalysisConfig; /** \brief This predictor is based on the original native predictor with IR and * Analysis support. @@ -75,6 +74,8 @@ class AnalysisPredictor : public PaddlePredictor { void SetMkldnnThreadID(int tid); + std::string GetSeriazlizedProgram() const override; + protected: // For memory optimization. bool need_collect_var_shapes_for_memory_optim(); @@ -97,6 +98,21 @@ class AnalysisPredictor : public PaddlePredictor { void GetFetchOne(const framework::LoDTensor &fetchs, PaddleTensor *output_data); +#if PADDLE_WITH_TENSORRT + // When we use Paddle-TRT INT8 engine, we need to generate calibration table + // data first, + // the calibration table contains the range for each op's input and output, + // this whole process can be divided into several steps: + // + // 1. Builds a 32-bit engine, runs it on the calibration set, and records a + // histogram for each + // tensor of the distribution of activation values. + // 2. Builds a calibration table from the histograms. + // + // After step 2, we need to store the calibration table on disk + bool SaveTrtCalibToDisk(); +#endif + // Some more detailed tests, they are made the friends of the predictor, so that // the all the details can be tested. #if PADDLE_WITH_TESTING @@ -106,7 +122,7 @@ class AnalysisPredictor : public PaddlePredictor { #endif private: - contrib::AnalysisConfig config_; + AnalysisConfig config_; Argument argument_; std::unique_ptr executor_; platform::Place place_; diff --git a/paddle/fluid/inference/api/analysis_predictor_tester.cc b/paddle/fluid/inference/api/analysis_predictor_tester.cc index 4688e93d7102109d2c7ece9ba37bc8f2d311dcf1..6d11b461082d0ed8ba08c9e280bba86737b86e71 100644 --- a/paddle/fluid/inference/api/analysis_predictor_tester.cc +++ b/paddle/fluid/inference/api/analysis_predictor_tester.cc @@ -24,7 +24,6 @@ DEFINE_string(dirname, "", "dirname to tests."); namespace paddle { -using contrib::AnalysisConfig; TEST(AnalysisPredictor, analysis_off) { AnalysisConfig config; @@ -215,6 +214,8 @@ TEST(AnalysisPredictor, memory_optim) { { // The first predictor help to cache the memory optimize strategy. 
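Editor's note: taken together, the destructor hook and `SaveTrtCalibToDisk` above give INT8 a two-phase life cycle. A usage sketch with placeholder paths and feeds:

```cpp
#include "paddle/fluid/inference/api/paddle_inference_api.h"

void CalibrateThenDeploy() {
  // Phase 1: calibration run. Real data drives the TRTInt8Calibrator; the
  // predictor's destructor then calls SaveTrtCalibToDisk(), writing
  // <model_root>/_opt_cache/trt_calib_<engine_key>.
  {
    paddle::AnalysisConfig config("/models/mobilenet");  // placeholder
    config.EnableUseGpu(100, 0);
    config.EnableTensorRtEngine(1 << 20, 1, 3,
                                paddle::AnalysisConfig::Precision::kInt8);
    auto predictor = paddle::CreatePaddlePredictor(config);
    // for (auto &feed : calib_feeds) predictor->Run(feed, &outputs);
  }
  // Phase 2: recreate the predictor with the same config. The subgraph pass
  // now finds the table via GetTrtCalibTableData() and builds a true INT8
  // engine, so the calibration pass is not rerun.
}
```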
auto predictor = CreatePaddlePredictor(config); + LOG(INFO) << "serialized program: " << predictor->GetSeriazlizedProgram(); + ASSERT_FALSE(predictor->GetSeriazlizedProgram().empty()); // Run several times to check the parameters are not reused by mistake. for (int i = 0; i < 5; i++) { diff --git a/paddle/fluid/inference/api/api.cc b/paddle/fluid/inference/api/api.cc index 9be059c73e20ebeeff2c4b6e8e5502e4a56fd0d6..6cd18277d63200f5bccf180a7ae3196b0ce126ff 100644 --- a/paddle/fluid/inference/api/api.cc +++ b/paddle/fluid/inference/api/api.cc @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +#include +#include "paddle/fluid/framework/commit.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/inference/api/paddle_inference_api.h" @@ -97,4 +99,12 @@ void PaddleBuf::Free() { } } +std::string get_version() { + std::stringstream ss; + ss << "version: " << framework::paddle_version() << "\n"; + ss << "commit: " << framework::paddle_commit() << "\n"; + ss << "branch: " << framework::paddle_compile_branch() << "\n"; + return ss.str(); +} + } // namespace paddle diff --git a/paddle/fluid/inference/api/api_impl_tester.cc b/paddle/fluid/inference/api/api_impl_tester.cc index 54895679ca37362c7267677af80274b8de95e296..e82cb53bf073d3d1ab9a518218edaf430728463f 100644 --- a/paddle/fluid/inference/api/api_impl_tester.cc +++ b/paddle/fluid/inference/api/api_impl_tester.cc @@ -295,7 +295,7 @@ TEST(inference_api_native, image_classification_gpu) { #endif TEST(PassBuilder, Delete) { - contrib::AnalysisConfig config; + AnalysisConfig config; config.DisableGpu(); config.pass_builder()->DeletePass("attention_lstm_fuse_pass"); const auto& passes = config.pass_builder()->AllPasses(); diff --git a/paddle/fluid/inference/api/api_tester.cc b/paddle/fluid/inference/api/api_tester.cc index 7a579610eefda24c911edd28b5f3a178aa10ab1e..2c450ef7cead4d5c3870d5e9186eb221e5dc19a0 100644 --- a/paddle/fluid/inference/api/api_tester.cc +++ b/paddle/fluid/inference/api/api_tester.cc @@ -61,4 +61,10 @@ TEST(paddle_inference_api, demo) { predictor->Run({}, &outputs); } +TEST(paddle_inference_api, get_version) { + LOG(INFO) << "paddle version:\n" << get_version(); + auto version = get_version(); + ASSERT_FALSE(version.empty()); +} + } // namespace paddle diff --git a/paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc b/paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc index 338a0cec161f352781f132aea71dd56f68840c62..f7da55c9ae368763786c1b1fd3e86d942c5e9fe8 100644 --- a/paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc +++ b/paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc @@ -36,7 +36,7 @@ namespace demo { */ void Main() { std::unique_ptr predictor; - paddle::contrib::AnalysisConfig config; + paddle::AnalysisConfig config; config.EnableUseGpu(100, 0); config.SetModel(FLAGS_modeldir + "/__model__", FLAGS_modeldir + "/__params__"); diff --git a/paddle/fluid/inference/api/demo_ci/vis_demo.cc b/paddle/fluid/inference/api/demo_ci/vis_demo.cc index 5320992b7e78f4aa0ea8950af03038c1953dd027..0d2c418c56db620c71d99b64ee79b18be427cc34 100644 --- a/paddle/fluid/inference/api/demo_ci/vis_demo.cc +++ b/paddle/fluid/inference/api/demo_ci/vis_demo.cc @@ -34,7 +34,6 @@ DEFINE_bool(use_gpu, false, "Whether use gpu."); namespace paddle { namespace demo { -using contrib::AnalysisConfig; /* * Use the native and analysis fluid engine to inference the demo. 
*/ diff --git a/paddle/fluid/inference/api/paddle_analysis_config.h b/paddle/fluid/inference/api/paddle_analysis_config.h index f89eaeaadcc50fd7979d6807e8f2c7556e048e6c..9d9ed6a39d8324002a8850deae9bb8dd5af7ef9b 100644 --- a/paddle/fluid/inference/api/paddle_analysis_config.h +++ b/paddle/fluid/inference/api/paddle_analysis_config.h @@ -29,11 +29,6 @@ namespace paddle { class AnalysisPredictor; -// == -// -// ----------------------------------------------------------------------------------- -// NOTE: The following APIs are not mature yet, we are still working on them. -namespace contrib { // NOTE WIP, not stable yet. struct AnalysisConfig { @@ -42,6 +37,10 @@ struct AnalysisConfig { explicit AnalysisConfig(const std::string& model_dir); explicit AnalysisConfig(const std::string& prog_file, const std::string& params_file); + enum class Precision { + kFloat32 = 0, + kInt8, + }; /** Set model with a directory. */ @@ -135,7 +134,8 @@ struct AnalysisConfig { * subgraph is less than this, it will not transfer to TensorRT engine. */ void EnableTensorRtEngine(int workspace_size = 1 << 20, - int max_batch_size = 1, int min_subgraph_size = 3); + int max_batch_size = 1, int min_subgraph_size = 3, + Precision precision = Precision::kFloat32); /** A boolean state telling whether the TensorRT engine is used. */ bool tensorrt_engine_enabled() const { return use_tensorrt_; } @@ -229,6 +229,7 @@ struct AnalysisConfig { // We set this variable to control the minimum number of nodes in the // subgraph, 3 as default value. int tensorrt_min_subgraph_size_{3}; + Precision tensorrt_precision_mode_; // memory reuse related. bool enable_memory_optim_{false}; @@ -254,5 +255,4 @@ struct AnalysisConfig { mutable std::unique_ptr pass_builder_; }; -} // namespace contrib } // namespace paddle diff --git a/paddle/fluid/inference/api/paddle_api.h b/paddle/fluid/inference/api/paddle_api.h index 46b510fd1ec94c59032b8f41a2ac4d6aa87dc150..8ac8bc529183edc2f8f888ca7ba14611acaadc10 100644 --- a/paddle/fluid/inference/api/paddle_api.h +++ b/paddle/fluid/inference/api/paddle_api.h @@ -215,6 +215,14 @@ class PaddlePredictor { */ virtual ~PaddlePredictor() = default; + /** \brief Get the serialized model program that executes in inference phase. + * Its data type is ProgramDesc, which is a protobuf message. + */ + virtual std::string GetSeriazlizedProgram() const { + assert(false); // Force raise error. + return "NotImplemented"; + } + /** The common configs for all the predictors. 
*/ struct Config { @@ -288,4 +296,6 @@ std::unique_ptr CreatePaddlePredictor(const ConfigT& config); int PaddleDtypeSize(PaddleDType dtype); +std::string get_version(); + } // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/CMakeLists.txt b/paddle/fluid/inference/tensorrt/CMakeLists.txt index 9afeafd176c70bc03166ec7732ae5e2faf67ea54..f4977d08c4d051b8a528e122c47948c3c81d153c 100644 --- a/paddle/fluid/inference/tensorrt/CMakeLists.txt +++ b/paddle/fluid/inference/tensorrt/CMakeLists.txt @@ -1,4 +1,4 @@ -nv_library(tensorrt_engine SRCS engine.cc DEPS ${GLOB_OPERATOR_DEPS} framework_proto device_context) +nv_library(tensorrt_engine SRCS engine.cc trt_int8_calibrator.cc DEPS ${GLOB_OPERATOR_DEPS} framework_proto device_context) nv_library(tensorrt_op_teller SRCS op_teller.cc DEPS framework_proto) nv_test(test_tensorrt SRCS test_tensorrt.cc DEPS dynload_cuda device_context dynamic_loader) nv_test(test_tensorrt_engine SRCS test_engine.cc DEPS dynload_cuda tensorrt_engine) diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc index 78b590f15d639f7b21b403413760948c6343d998..10f48462cfaf8073a4f5537d654d614d36b74db4 100644 --- a/paddle/fluid/inference/tensorrt/engine.cc +++ b/paddle/fluid/inference/tensorrt/engine.cc @@ -69,6 +69,13 @@ void TensorRTEngine::FreezeNetwork() { // build engine. infer_builder_->setMaxBatchSize(max_batch_); infer_builder_->setMaxWorkspaceSize(max_workspace_); + if (enable_int8_) { + infer_builder_->setInt8Mode(true); + PADDLE_ENFORCE( + calibrator_ != nullptr, + "The precision mode is 'INT8', the calibrator should not be nullptr"); + infer_builder_->setInt8Calibrator(calibrator_); + } infer_engine_.reset(infer_builder_->buildCudaEngine(*infer_network_)); PADDLE_ENFORCE(infer_engine_ != nullptr, "build cuda engine failed!"); diff --git a/paddle/fluid/inference/tensorrt/engine.h b/paddle/fluid/inference/tensorrt/engine.h index 65ab7f3caaa746cf339de67706939070a0b7d87d..cdfe09b5a7fd2d1f8548dab9421f671f5a345153 100644 --- a/paddle/fluid/inference/tensorrt/engine.h +++ b/paddle/fluid/inference/tensorrt/engine.h @@ -23,12 +23,14 @@ limitations under the License. */ #include "paddle/fluid/inference/engine.h" #include "paddle/fluid/inference/tensorrt/helper.h" #include "paddle/fluid/inference/tensorrt/plugin/trt_plugin.h" +#include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h" #include "paddle/fluid/inference/utils/singleton.h" namespace paddle { namespace inference { namespace tensorrt { +class TRTInt8Calibrator; /* * TensorRT Engine. * @@ -55,13 +57,16 @@ class TensorRTEngine : public EngineBase { }; TensorRTEngine(int max_batch, int max_workspace, cudaStream_t stream, - int device = 0, + int device = 0, bool enable_int8 = false, + TRTInt8Calibrator* calibrator = nullptr, nvinfer1::ILogger& logger = NaiveLogger::Global()) : max_batch_(max_batch), max_workspace_(max_workspace), stream_(stream), - logger_(logger), - device_(device) {} + device_(device), + enable_int8_(enable_int8), + calibrator_(calibrator), + logger_(logger) {} virtual ~TensorRTEngine(); @@ -139,8 +144,8 @@ class TensorRTEngine : public EngineBase { // In the normal case, the paddle-trt exists bug when runing the googlenet. // When there are more than two convolutions of 1 * 1 with the same input, the // paddle-tensorrt will do the merging optimization, which fuse those conv - // into - // one conv, and then trigger bug. So, We should use strategy to avoid this + // into one conv, and then trigger bug. 
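Editor's note: `FreezeNetwork` above flips the builder into INT8 mode whenever the engine was constructed with `enable_int8` and a calibrator. A construction sketch, with placeholder batch, workspace, and device values:

```cpp
#include <cuda_runtime_api.h>
#include "paddle/fluid/inference/tensorrt/engine.h"

using paddle::inference::tensorrt::TensorRTEngine;
using paddle::inference::tensorrt::TRTInt8Calibrator;

void BuildInt8Engine(TRTInt8Calibrator *calibrator) {
  cudaStream_t stream;
  cudaStreamCreate(&stream);
  TensorRTEngine engine(/*max_batch=*/1, /*max_workspace=*/1 << 20, stream,
                        /*device=*/0, /*enable_int8=*/true, calibrator);
  // ... declare the network ...
  // engine.FreezeNetwork();  // calls setInt8Mode(true) + setInt8Calibrator()
}
```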
So, We should use strategy to avoid + // this // optimization for the time being. This bug will be fixed in the future. std::unordered_map itensor_quote_num; @@ -153,9 +158,14 @@ class TensorRTEngine : public EngineBase { // the max memory size the engine uses int max_workspace_; + cudaStream_t stream_; + // The specific GPU id that the TensorRTEngine bounded to. + int device_; + + bool enable_int8_; + TRTInt8Calibrator* calibrator_; // batch size of the current data, will be updated each Executation. int batch_size_{-1}; - cudaStream_t stream_; nvinfer1::ILogger& logger_; @@ -165,8 +175,6 @@ class TensorRTEngine : public EngineBase { std::unordered_map itensor_map_; - // The specific GPU id that the TensorRTEngine bounded to. - int device_; std::vector> owned_plugin_; // TensorRT related internal members diff --git a/paddle/fluid/inference/tensorrt/trt_int8_calibrator.cc b/paddle/fluid/inference/tensorrt/trt_int8_calibrator.cc new file mode 100644 index 0000000000000000000000000000000000000000..4a85c8b8fe6d70052edd3be59f98582c9b2e86b9 --- /dev/null +++ b/paddle/fluid/inference/tensorrt/trt_int8_calibrator.cc @@ -0,0 +1,147 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h" +#include "glog/logging.h" + +namespace paddle { +namespace inference { +namespace tensorrt { + +// set the batch size before constructing the thread to execute engine +int TRTInt8Calibrator::getBatchSize() const { return batch_size_; } + +TRTInt8Calibrator::TRTInt8Calibrator( + const std::unordered_map& buffers, int batch_size, + std::string engine_name, const platform::Place place) + : batch_size_(batch_size), engine_name_(engine_name) { + int i = 0; + VLOG(4) << "Init a new calibrator: " << engine_name_; + for (const auto it : buffers) { + framework::Tensor temp_tensor; + std::string input_name = it.first; + int data_size = it.second; + int num_ele = data_size / sizeof(int16_t); + framework::DDim data_shape = framework::make_ddim({num_ele}); + temp_tensor.Resize(data_shape); + data_tensors_.push_back(temp_tensor); + data_buffers_[input_name] = std::pair( + static_cast(temp_tensor.mutable_data(place)), num_ele); + i += 1; + } +} + +TRTInt8Calibrator::TRTInt8Calibrator(const std::string& calib_data) + : batch_size_(0), + calib_running_(false), + data_is_set_(false), + done_(true), + calibration_table_(calib_data) {} + +void TRTInt8Calibrator::waitAndSetDone() { + std::unique_lock lk(mut_); + while ((calib_running_ || data_is_set_) && !done_) cond_.wait(lk); + if (!done_) { + done_ = true; + cond_.notify_all(); + } +} + +// There might be more than one input for trt subgraph, +// So, we use a map to store input information. +bool TRTInt8Calibrator::setBatch( + const std::unordered_map& data) { + VLOG(3) << "set batch: " << engine_name_; + std::unique_lock lk(mut_); + // There is a producer and a consumer. The producer set the batch data and + // the consumer get the batch data. 
The size of the data pool is one. + // So, the producer has to wait for the consumer to finish processing before + // they can set the data. + while ((calib_running_ || data_is_set_) && (!done_)) cond_.wait(lk); + // The done_ is set to true using waitAndSetDone, When all calibration data + // are processed. + if (done_) return false; + + // Sets the batch. + for (const auto& it : data) { + auto dataptr = data_buffers_.find(it.first); + if (dataptr == data_buffers_.end()) { + LOG(FATAL) << "FATAL " << engine_name_ << " input name '" << it.first + << "' does not match with the buffer names"; + } + const auto& d = dataptr->second; + PADDLE_ENFORCE( + cudaMemcpy(d.first, it.second, d.second, cudaMemcpyDeviceToDevice), + "Fail to cudaMemcpy %s for %s", engine_name_, it.first); + } + + data_is_set_ = true; + cond_.notify_all(); + return true; +} + +bool TRTInt8Calibrator::getBatch(void** bindings, const char** names, + int num_bindings) { + VLOG(4) << "get batch: " << engine_name_; + std::unique_lock lk(mut_); + // The consumer has just finished processing a data. + // The producer can set the data again. + calib_running_ = false; + cond_.notify_all(); + + // As long as there is data in the pool, the consumer can get it. + while (!data_is_set_ && !done_) cond_.wait(lk); + if (done_) return false; + + // Gets the batch + for (int i = 0; i < num_bindings; i++) { + auto it = data_buffers_.find(names[i]); + if (it == data_buffers_.end()) { + LOG(FATAL) << "Calibration engine asked for unknown tensor name '" + << names[i] << "' at position " << i; + } + bindings[i] = it->second.first; + } + + data_is_set_ = false; + calib_running_ = true; + VLOG(4) << "get batch done: " << engine_name_; + return true; +} + +void TRTInt8Calibrator::setDone() { + std::unique_lock lk(mut_); + done_ = true; + cond_.notify_all(); +} + +const void* TRTInt8Calibrator::readCalibrationCache(size_t& length) { + if (calibration_table_.empty()) return nullptr; + length = calibration_table_.size(); + return calibration_table_.data(); +} + +void TRTInt8Calibrator::writeCalibrationCache(const void* ptr, + std::size_t length) { + calibration_table_ = std::string((const char*)ptr, length); + VLOG(4) << "Got calibration data for " << engine_name_ << " " << ptr + << " length=" << length; +} +TRTInt8Calibrator::~TRTInt8Calibrator() { + VLOG(4) << "Destroying calibrator for " << engine_name_; +} + +} // namespace tensorrt +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/trt_int8_calibrator.h b/paddle/fluid/inference/tensorrt/trt_int8_calibrator.h new file mode 100644 index 0000000000000000000000000000000000000000..5815bc9a1464293e0a56f05e34183580eac96cea --- /dev/null +++ b/paddle/fluid/inference/tensorrt/trt_int8_calibrator.h @@ -0,0 +1,128 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
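Editor's note: the `setBatch`/`getBatch` pair above implements a one-slot hand-off between the framework thread feeding data and the thread running the TensorRT builder. A threading sketch of that protocol; the batch map (input name to device buffer) and batch count are placeholders.

```cpp
#include <string>
#include <thread>  // NOLINT
#include <unordered_map>
#include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h"

void DriveCalibration(paddle::inference::tensorrt::TRTInt8Calibrator *calib,
                      int num_batches) {
  // Consumer: TensorRT's builder pulls data through getBatch() while it
  // builds the INT8 engine (opaque here).
  std::thread builder([] { /* engine build -> repeated calib->getBatch() */ });

  // Producer: publish one batch at a time. setBatch() blocks until the
  // previous batch was consumed, and returns false once done_ is set.
  std::unordered_map<std::string, void *> batch;  // filled with device ptrs
  for (int i = 0; i < num_batches && calib->setBatch(batch); ++i) {
    /* refill `batch` with the next calibration data */
  }

  calib->waitAndSetDone();  // let the pending getBatch() return "no more data"
  builder.join();
}
```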
+ +#pragma once + +#include +#include +#include +#include +#include // NOLINT +#include +#include +#include +#include +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/inference/tensorrt/engine.h" +#include "paddle/fluid/platform/place.h" + +namespace paddle { +namespace inference { +namespace tensorrt { + +class TensorRTEngine; + +struct TRTInt8Calibrator : public nvinfer1::IInt8EntropyCalibrator { + public: + TRTInt8Calibrator(const std::unordered_map& buffers, + int batch_size, std::string engine_name, + const platform::Place place); + + explicit TRTInt8Calibrator(const std::string& calibration_data); + ~TRTInt8Calibrator(); + + int getBatchSize() const override; + + bool getBatch(void* bindings[], const char* names[], + int num_bindings) override; + + bool setBatch(const std::unordered_map& data); + void setDone(); + void waitAndSetDone(); + + const void* readCalibrationCache(std::size_t& length) override; + void writeCalibrationCache(const void* ptr, std::size_t length) override; + const std::string& getCalibrationTableAsString() { + return calibration_table_; + } + + private: + const int batch_size_; + + bool calib_running_{true}; + bool data_is_set_{false}; + bool done_{false}; + + std::mutex mut_; + std::condition_variable cond_; + + std::unordered_map> data_buffers_; + std::vector data_tensors_; + + std::string engine_name_; + std::string calibration_table_; +}; + +class TRTCalibratorEngine { + public: + TRTCalibratorEngine() {} + std::unique_ptr calib_; + std::unique_ptr thr_; + std::unique_ptr engine_; +}; +/* + * Manager to control the TensorRT Int8 calibration creation and deletion. + */ +class TRTCalibratorEngineManager { + public: + bool Has() const { return res_.size() > 0; } + bool Has(const std::string& name) const { + if (res_.count(name) == 0) return false; + return res_.at(name).get() != nullptr; + } + + // Get Int8Calibrator via name + TRTCalibratorEngine* Get(const std::string& name) const { + return res_.at(name).get(); + } + + // Look up or create a calibrator.
+ TRTCalibratorEngine* LookupOrCreate(const std::string& engine_name) { + if (res_.count(engine_name) == 0) { + auto* p = new TRTCalibratorEngine; + res_[engine_name].reset(p); + } + return res_.at(engine_name).get(); + } + + // Create an Int8Calibrator + TRTCalibratorEngine* Create(const std::string& engine_name) { + auto* p = new TRTCalibratorEngine; + res_[engine_name].reset(p); + return p; + } + + void DeleteALL() { + for (auto& item : res_) { + item.second.reset(nullptr); + } + } + + private: + std::unordered_map> res_; +}; + +} // namespace tensorrt +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt index 423c39813f05af0d6aaade184914e6777c9b8a83..aa3da397ff67dd06dd750d336a49056baedaaab6 100644 --- a/paddle/fluid/inference/tests/api/CMakeLists.txt +++ b/paddle/fluid/inference/tests/api/CMakeLists.txt @@ -54,6 +54,7 @@ else() message(WARNING "These tests has been disabled in OSX or WITH_MKL=OFF before being fixed: \n test_analyzer_seq_pool1") endif() + # RNN2 set(RNN2_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/rnn2") download_model_and_data(${RNN2_INSTALL_DIR} "rnn2_model.tar.gz" "rnn2_data.txt.tar.gz") @@ -115,6 +116,10 @@ if (NOT EXISTS ${MOBILENET_INSTALL_DIR}) endif() inference_analysis_api_test_with_refer_result(test_analyzer_mobilenet_transpose ${MOBILENET_INSTALL_DIR} analyzer_vis_tester.cc SERIAL) +# googlenet +inference_analysis_api_test_with_fake_data(test_analyzer_googlenet + "${INFERENCE_DEMO_INSTALL_DIR}/googlenet" analyzer_resnet50_tester.cc "googlenet.tar.gz" SERIAL) + # resnet50 inference_analysis_api_test_with_fake_data(test_analyzer_resnet50 "${INFERENCE_DEMO_INSTALL_DIR}/resnet50" analyzer_resnet50_tester.cc "resnet50_model.tar.gz" SERIAL) @@ -123,6 +128,11 @@ inference_analysis_api_test_with_fake_data(test_analyzer_resnet50 inference_analysis_api_test_with_fake_data(test_analyzer_mobilenet_depthwise_conv "${INFERENCE_DEMO_INSTALL_DIR}/mobilenet_depthwise_conv" analyzer_resnet50_tester.cc "mobilenet_model.tar.gz" SERIAL) +# bert, max_len=20 +set(BERT_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/bert20") +download_model_and_data(${BERT_INSTALL_DIR} "bert_model.tar.gz" "bert_data_len20.txt.tar.gz") +inference_analysis_api_test(test_analyzer_bert ${BERT_INSTALL_DIR} analyzer_bert_tester.cc SERIAL) + # anakin if (WITH_ANAKIN AND WITH_MKL) # only needed in CI # anakin rnn1 diff --git a/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc b/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc new file mode 100644 index 0000000000000000000000000000000000000000..f646fd6d91c81b6738e4fc5278739307fa5f99b5 --- /dev/null +++ b/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc @@ -0,0 +1,223 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
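Editor's note: the manager above is a process-wide registry keyed by engine key; on the first INT8 run the tensorrt_engine op is expected to fetch or create its calibrator through it. A sketch of that pattern; the buffer map's template arguments were stripped in this view, so `<std::string, size_t>` (name to byte size) is my assumption, and the key and batch size are placeholders.

```cpp
#include <string>
#include <unordered_map>
#include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h"
#include "paddle/fluid/inference/utils/singleton.h"

using paddle::inference::Singleton;
using paddle::inference::tensorrt::TRTCalibratorEngine;
using paddle::inference::tensorrt::TRTCalibratorEngineManager;
using paddle::inference::tensorrt::TRTInt8Calibrator;

TRTInt8Calibrator *GetOrCreateCalibrator(
    const std::unordered_map<std::string, size_t> &buffers,  // assumed types
    const std::string &engine_key, const paddle::platform::Place &place) {
  auto &manager = Singleton<TRTCalibratorEngineManager>::Global();
  TRTCalibratorEngine *res = manager.LookupOrCreate(engine_key);
  if (res->calib_ == nullptr) {
    res->calib_.reset(
        new TRTInt8Calibrator(buffers, /*batch_size=*/1, engine_key, place));
  }
  return res->calib_.get();
}
```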
+ +#include "paddle/fluid/inference/tests/api/tester_helper.h" + +namespace paddle { +namespace inference { + +using paddle::PaddleTensor; + +template +void GetValueFromStream(std::stringstream *ss, T *t) { + (*ss) >> (*t); +} + +template <> +void GetValueFromStream(std::stringstream *ss, std::string *t) { + *t = ss->str(); +} + +// Split string to vector +template +void Split(const std::string &line, char sep, std::vector *v) { + std::stringstream ss; + T t; + for (auto c : line) { + if (c != sep) { + ss << c; + } else { + GetValueFromStream(&ss, &t); + v->push_back(std::move(t)); + ss.str({}); + ss.clear(); + } + } + + if (!ss.str().empty()) { + GetValueFromStream(&ss, &t); + v->push_back(std::move(t)); + ss.str({}); + ss.clear(); + } +} + +template +constexpr paddle::PaddleDType GetPaddleDType(); + +template <> +constexpr paddle::PaddleDType GetPaddleDType() { + return paddle::PaddleDType::INT64; +} + +template <> +constexpr paddle::PaddleDType GetPaddleDType() { + return paddle::PaddleDType::FLOAT32; +} + +// Parse tensor from string +template +bool ParseTensor(const std::string &field, paddle::PaddleTensor *tensor) { + std::vector data; + Split(field, ':', &data); + if (data.size() < 2) return false; + + std::string shape_str = data[0]; + + std::vector shape; + Split(shape_str, ' ', &shape); + + std::string mat_str = data[1]; + + std::vector mat; + Split(mat_str, ' ', &mat); + + tensor->shape = shape; + auto size = + std::accumulate(shape.begin(), shape.end(), 1, std::multiplies()) * + sizeof(T); + tensor->data.Resize(size); + std::copy(mat.begin(), mat.end(), static_cast(tensor->data.data())); + tensor->dtype = GetPaddleDType(); + + return true; +} + +// Parse input tensors from string +bool ParseLine(const std::string &line, + std::vector *tensors) { + std::vector fields; + Split(line, ';', &fields); + + if (fields.size() < 5) return false; + + tensors->clear(); + tensors->reserve(5); + + int i = 0; + // src_id + paddle::PaddleTensor src_id; + ParseTensor(fields[i++], &src_id); + tensors->push_back(src_id); + + // pos_id + paddle::PaddleTensor pos_id; + ParseTensor(fields[i++], &pos_id); + tensors->push_back(pos_id); + + // segment_id + paddle::PaddleTensor segment_id; + ParseTensor(fields[i++], &segment_id); + tensors->push_back(segment_id); + + // self_attention_bias + paddle::PaddleTensor self_attention_bias; + ParseTensor(fields[i++], &self_attention_bias); + tensors->push_back(self_attention_bias); + + // next_segment_index + paddle::PaddleTensor next_segment_index; + ParseTensor(fields[i++], &next_segment_index); + tensors->push_back(next_segment_index); + + return true; +} + +bool LoadInputData(std::vector> *inputs) { + if (FLAGS_infer_data.empty()) { + LOG(ERROR) << "please set input data path"; + return false; + } + + std::ifstream fin(FLAGS_infer_data); + std::string line; + int sample = 0; + + // The unit-test dataset only have 10 samples, each sample have 5 feeds. 
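Editor's note: `ParseLine` above expects at least five ';'-separated fields, each of the form "&lt;space-separated shape&gt;:&lt;space-separated values&gt;". A made-up line the parser would accept (real samples come from bert_data_len20.txt):

```cpp
// Fields: src_id; pos_id; segment_id; self_attention_bias; next_segment_index
const std::string kSampleLine =
    "1 4:5 8 9 2;"      // src_id, shape [1, 4]
    "1 4:0 1 2 3;"      // pos_id
    "1 4:0 0 1 1;"      // segment_id
    "1 4 1:0 0 0 0;"    // self_attention_bias, shape [1, 4, 1]
    "1 1:0";            // next_segment_index

std::vector<paddle::PaddleTensor> feeds;
bool ok = ParseLine(kSampleLine, &feeds);  // ok == true, feeds.size() == 5
```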
+ while (std::getline(fin, line)) { + std::vector feed_data; + ParseLine(line, &feed_data); + inputs->push_back(std::move(feed_data)); + sample++; + if (!FLAGS_test_all_data && sample == FLAGS_batch_size) break; + } + LOG(INFO) << "number of samples: " << sample; + + return true; +} + +void SetConfig(AnalysisConfig *config) { config->SetModel(FLAGS_infer_model); } + +void profile(bool use_mkldnn = false) { + AnalysisConfig config; + SetConfig(&config); + + if (use_mkldnn) { + config.EnableMKLDNN(); + } + + std::vector outputs; + std::vector> inputs; + LoadInputData(&inputs); + TestPrediction(reinterpret_cast(&config), + inputs, &outputs, FLAGS_num_threads); +} + +TEST(Analyzer_bert, profile) { profile(); } +#ifdef PADDLE_WITH_MKLDNN +TEST(Analyzer_bert, profile_mkldnn) { profile(true); } +#endif + +// Check the fuse status +TEST(Analyzer_bert, fuse_statis) { + AnalysisConfig cfg; + SetConfig(&cfg); + int num_ops; + auto predictor = CreatePaddlePredictor(cfg); + auto fuse_statis = GetFuseStatis( + static_cast(predictor.get()), &num_ops); + LOG(INFO) << "num_ops: " << num_ops; +} + +// Compare result of NativeConfig and AnalysisConfig +void compare(bool use_mkldnn = false) { + AnalysisConfig cfg; + SetConfig(&cfg); + if (use_mkldnn) { + cfg.EnableMKLDNN(); + } + + std::vector> inputs; + LoadInputData(&inputs); + CompareNativeAndAnalysis( + reinterpret_cast(&cfg), inputs); +} + +TEST(Analyzer_bert, compare) { compare(); } +#ifdef PADDLE_WITH_MKLDNN +TEST(Analyzer_bert, compare_mkldnn) { compare(true /* use_mkldnn */); } +#endif + +// Compare Deterministic result +TEST(Analyzer_bert, compare_determine) { + AnalysisConfig cfg; + SetConfig(&cfg); + + std::vector> inputs; + LoadInputData(&inputs); + CompareDeterministic(reinterpret_cast(&cfg), + inputs); +} +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc b/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc index e78ab942d113323fecf5510dca85fb5db734efc8..735e4fb563788438ee49ff6308d11f4dbe4962be 100644 --- a/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc @@ -19,7 +19,6 @@ DEFINE_int32(max_turn_num, 9, namespace paddle { namespace inference { -using contrib::AnalysisConfig; constexpr int32_t kMaxTurnLen = 50; @@ -165,7 +164,7 @@ void PrepareInputs(std::vector *input_slots, DataRecord *data, input_slots->push_back(std::move(response_mask_tensor)); } -void SetConfig(contrib::AnalysisConfig *cfg) { +void SetConfig(AnalysisConfig *cfg) { cfg->SetModel(FLAGS_infer_model + "/__model__", FLAGS_infer_model + "/param"); cfg->SwitchSpecifyInputNames(); cfg->SwitchIrOptim(true); @@ -187,7 +186,7 @@ void SetInput(std::vector> *inputs) { // Easy for profiling independently. void profile(bool use_mkldnn = false) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); if (use_mkldnn) { @@ -223,7 +222,7 @@ TEST(Analyzer_dam, profile_mkldnn) { profile(true /* use_mkldnn */); } // Check the fuse status TEST(Analyzer_dam, fuse_statis) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); int num_ops; @@ -256,7 +255,7 @@ void compare(bool use_mkldnn = false) { TEST(Analyzer_dam, compare_with_static_memory_optim) { // The small dam will core in CI, but works in local. 
if (FLAGS_max_turn_num == 9) { - contrib::AnalysisConfig cfg, cfg1; + AnalysisConfig cfg, cfg1; DataRecord data(FLAGS_infer_data, FLAGS_batch_size); std::vector> input_slots_all; @@ -282,7 +281,7 @@ TEST(Analyzer_dam, compare_with_static_memory_optim) { TEST(Analyzer_dam, compare_with_dynamic_memory_optim) { // The small dam will core in CI, but works in local. if (FLAGS_max_turn_num == 9) { - contrib::AnalysisConfig cfg, cfg1; + AnalysisConfig cfg, cfg1; DataRecord data(FLAGS_infer_data, FLAGS_batch_size); std::vector> input_slots_all; diff --git a/paddle/fluid/inference/tests/api/analyzer_lac_tester.cc b/paddle/fluid/inference/tests/api/analyzer_lac_tester.cc index b9666e01adb23e0cbd9257bc55081c3a5001e887..347672eaae314aa42096d48a3b044014f2ddbf84 100644 --- a/paddle/fluid/inference/tests/api/analyzer_lac_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_lac_tester.cc @@ -18,8 +18,6 @@ namespace paddle { namespace inference { namespace analysis { -using contrib::AnalysisConfig; - struct DataRecord { std::vector data; std::vector lod; diff --git a/paddle/fluid/inference/tests/api/analyzer_mm_dnn_tester.cc b/paddle/fluid/inference/tests/api/analyzer_mm_dnn_tester.cc index 529a0174c8542f5226e70ef4a47bde069220ecc2..089f655c180d784af66af60277bdbf32a6019599 100644 --- a/paddle/fluid/inference/tests/api/analyzer_mm_dnn_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_mm_dnn_tester.cc @@ -16,7 +16,6 @@ namespace paddle { namespace inference { -using contrib::AnalysisConfig; struct DataRecord { std::vector> query, title; @@ -75,7 +74,7 @@ void PrepareInputs(std::vector *input_slots, DataRecord *data, } } -void SetConfig(contrib::AnalysisConfig *cfg) { +void SetConfig(AnalysisConfig *cfg) { cfg->SetModel(FLAGS_infer_model); cfg->DisableGpu(); cfg->SwitchSpecifyInputNames(); @@ -95,7 +94,7 @@ void SetInput(std::vector> *inputs) { // Easy for profiling independently. void profile(bool use_mkldnn = false) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); std::vector outputs; @@ -130,7 +129,7 @@ TEST(Analyzer_MM_DNN, profile_mkldnn) { profile(true /* use_mkldnn */); } // Check the fuse status TEST(Analyzer_MM_DNN, fuse_statis) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); int num_ops; @@ -141,7 +140,7 @@ TEST(Analyzer_MM_DNN, fuse_statis) { // Compare result of NativeConfig and AnalysisConfig void compare(bool use_mkldnn = false) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); if (use_mkldnn) { diff --git a/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc b/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc index 6fef79dc4608acd6eee679ad4939e7684db98f5b..a70aa7a6ac41121a0c8ea397ebc7e24e4b206d12 100644 --- a/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc @@ -16,7 +16,6 @@ namespace paddle { namespace inference { -using contrib::AnalysisConfig; struct DataRecord { std::vector> word, mention; @@ -76,7 +75,7 @@ void PrepareInputs(std::vector *input_slots, DataRecord *data) { } } -void SetConfig(contrib::AnalysisConfig *cfg, bool memory_load = false) { +void SetConfig(AnalysisConfig *cfg, bool memory_load = false) { if (memory_load) { std::string buffer_prog, buffer_param; ReadBinaryFile(FLAGS_infer_model + "/__model__", &buffer_prog); @@ -105,7 +104,7 @@ void SetInput(std::vector> *inputs) { // Easy for profiling independently. 
void profile(bool memory_load = false) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg, memory_load); std::vector outputs; @@ -136,7 +135,7 @@ TEST(Analyzer_Chinese_ner, profile_memory_load) { // Check the fuse status TEST(Analyzer_Chinese_ner, fuse_statis) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); int num_ops; @@ -152,7 +151,7 @@ TEST(Analyzer_Chinese_ner, fuse_statis) { // Compare result of NativeConfig and AnalysisConfig TEST(Analyzer_Chinese_ner, compare) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); std::vector> input_slots_all; diff --git a/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc b/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc index ad2c46e48d5a34a457a615f313f1ac3cc916b200..3f6c933f2bcc6ed5410cb95a48f5ee6869280fe4 100644 --- a/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc @@ -16,7 +16,6 @@ namespace paddle { namespace inference { -using contrib::AnalysisConfig; struct DataRecord { std::vector> query_basic, query_phrase, title_basic, @@ -103,7 +102,7 @@ void PrepareInputs(std::vector *input_slots, DataRecord *data, } } -void SetConfig(contrib::AnalysisConfig *cfg) { +void SetConfig(AnalysisConfig *cfg) { cfg->SetModel(FLAGS_infer_model); cfg->DisableGpu(); cfg->SwitchSpecifyInputNames(); @@ -123,7 +122,7 @@ void SetInput(std::vector> *inputs) { // Easy for profiling independently. TEST(Analyzer_Pyramid_DNN, profile) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); std::vector outputs; @@ -147,7 +146,7 @@ TEST(Analyzer_Pyramid_DNN, profile) { // Check the fuse status TEST(Analyzer_Pyramid_DNN, fuse_statis) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); int num_ops; @@ -158,7 +157,7 @@ TEST(Analyzer_Pyramid_DNN, fuse_statis) { // Compare result of NativeConfig and AnalysisConfig TEST(Analyzer_Pyramid_DNN, compare) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); std::vector> input_slots_all; diff --git a/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc b/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc index 22e6366fb5cba6c7a0cde9c0c5f50f56c2e23b05..c27c39f40a2067dd2bd2150e4b1e53eab7cdf06e 100644 --- a/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc @@ -20,7 +20,6 @@ namespace paddle { namespace inference { using namespace framework; // NOLINT -using namespace contrib; // NOLINT struct DataRecord { std::vector>> link_step_data_all; @@ -223,7 +222,7 @@ void SetInput(std::vector> *inputs) { // Easy for profiling independently. TEST(Analyzer_rnn1, profile) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); cfg.DisableGpu(); cfg.SwitchIrDebug(); @@ -237,7 +236,7 @@ TEST(Analyzer_rnn1, profile) { // Check the fuse status TEST(Analyzer_rnn1, fuse_statis) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); int num_ops; @@ -254,7 +253,7 @@ TEST(Analyzer_rnn1, fuse_statis) { // Compare result of NativeConfig and AnalysisConfig TEST(Analyzer_rnn1, compare) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); std::vector> input_slots_all; @@ -276,7 +275,7 @@ TEST(Analyzer_rnn1, compare_determine) { // Test Multi-Thread. 
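The `memory_load` path in `SetConfig` above depends on first slurping `__model__` and `param` into strings. Outside the framework, that read boils down to the sketch below; note the real `ReadBinaryFile` helper visible in the diff takes an output-string pointer rather than returning by value.

```cpp
#include <fstream>
#include <sstream>
#include <stdexcept>
#include <string>

// Sketch of the file slurp behind the memory_load path: read a whole binary
// file into a std::string so the model can be handed over as an in-memory
// buffer instead of a path.
std::string ReadWholeFile(const std::string &path) {
  std::ifstream fin(path, std::ios::in | std::ios::binary);
  if (!fin) throw std::runtime_error("cannot open " + path);
  std::ostringstream buffer;
  buffer << fin.rdbuf();  // copies every byte, including embedded '\0'
  return buffer.str();
}
```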
TEST(Analyzer_rnn1, multi_thread) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); std::vector outputs; diff --git a/paddle/fluid/inference/tests/api/analyzer_vis_tester.cc b/paddle/fluid/inference/tests/api/analyzer_vis_tester.cc index f3e75ffbb5962885bd926af50b764bec561cc454..ca04c1365cbbffcb4a2786cde9ab240cc20aa3d8 100644 --- a/paddle/fluid/inference/tests/api/analyzer_vis_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_vis_tester.cc @@ -20,7 +20,6 @@ limitations under the License. */ namespace paddle { namespace inference { namespace analysis { -using contrib::AnalysisConfig; struct Record { std::vector data; diff --git a/paddle/fluid/inference/tests/api/config_printer.h b/paddle/fluid/inference/tests/api/config_printer.h index ecc10bafd650e52dfb73e8dd4329c697ff4f4ccc..b0c23fbd534847c8aad244749761e9c072148796 100644 --- a/paddle/fluid/inference/tests/api/config_printer.h +++ b/paddle/fluid/inference/tests/api/config_printer.h @@ -58,9 +58,8 @@ std::ostream &operator<<(std::ostream &os, const NativeConfig &config) { return os; } -std::ostream &operator<<(std::ostream &os, - const contrib::AnalysisConfig &config) { - os << GenSpaces(num_spaces) << "contrib::AnalysisConfig {\n"; +std::ostream &operator<<(std::ostream &os, const AnalysisConfig &config) { + os << GenSpaces(num_spaces) << "AnalysisConfig {\n"; num_spaces++; os << config.ToNativeConfig(); if (!config.model_from_memory()) { diff --git a/paddle/fluid/inference/tests/api/tester_helper.h b/paddle/fluid/inference/tests/api/tester_helper.h index b1f7a3464ac6027faffe283bccaf9793eae939e1..2811eb4946ea025cf6c7ab197c4e603df86f6f2d 100644 --- a/paddle/fluid/inference/tests/api/tester_helper.h +++ b/paddle/fluid/inference/tests/api/tester_helper.h @@ -56,16 +56,9 @@ DECLARE_int32(paddle_num_threads); namespace paddle { namespace inference { -float Random(float low, float high) { - static std::random_device rd; - static std::mt19937 mt(rd()); - std::uniform_real_distribution dist(low, high); - return dist(mt); -} - void PrintConfig(const PaddlePredictor::Config *config, bool use_analysis) { const auto *analysis_config = - reinterpret_cast(config); + reinterpret_cast(config); if (use_analysis) { LOG(INFO) << *analysis_config; return; @@ -109,9 +102,9 @@ void CompareResult(const std::vector &outputs, std::unique_ptr CreateTestPredictor( const PaddlePredictor::Config *config, bool use_analysis = true) { const auto *analysis_config = - reinterpret_cast(config); + reinterpret_cast(config); if (use_analysis) { - return CreatePaddlePredictor(*analysis_config); + return CreatePaddlePredictor(*analysis_config); } auto native_config = analysis_config->ToNativeConfig(); return CreatePaddlePredictor(native_config); @@ -146,7 +139,8 @@ void SetFakeImageInput(std::vector> *inputs, const std::string &dirname, bool is_combined = true, std::string model_filename = "model", std::string params_filename = "params", - const std::vector *feed_names = nullptr) { + const std::vector *feed_names = nullptr, + const int continuous_inuput_index = 0) { // Set fake_image_data PADDLE_ENFORCE_EQ(FLAGS_test_all_data, 0, "Only have single batch of data."); std::vector> feed_target_shapes = GetFeedTargetShapes( @@ -183,7 +177,8 @@ void SetFakeImageInput(std::vector> *inputs, float *input_data = static_cast(input.data.data()); // fill input data, for profile easily, do not use random data here. 
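The tester_helper hunk that follows replaces the `Random(0.0, 1.0) / 10.` fill with a deterministic ramp, so profiles are reproducible while `compare_continuous_input` can still vary the data per iteration through the shift index. As a standalone illustration (the `FakeInput` name is hypothetical):

```cpp
#include <cstdio>
#include <vector>

// Deterministic input fill: values cycle through [0, 1) and shift with the
// iteration index, so batches differ across iterations but not across runs.
std::vector<float> FakeInput(size_t len, int shift) {
  std::vector<float> data(len);
  for (size_t j = 0; j < len; ++j)
    data[j] = static_cast<float>((j + shift) % len) / len;
  return data;
}

int main() {
  for (int i = 0; i < 2; ++i) {
    for (float v : FakeInput(4, i)) std::printf("%.2f ", v);
    std::printf("\n");  // shift 0: 0.00 0.25 0.50 0.75; shift 1: 0.25 ... 0.00
  }
}
```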
for (size_t j = 0; j < len; ++j) { - *(input_data + j) = Random(0.0, 1.0) / 10.; + *(input_data + j) = + static_cast((j + continuous_inuput_index) % len) / len; } } (*inputs).emplace_back(input_slots); diff --git a/paddle/fluid/inference/tests/api/trt_models_tester.cc b/paddle/fluid/inference/tests/api/trt_models_tester.cc index db7109b7505d4fe4dcfcf88f303aa262bc5b44fb..17a433c9d98768dbda4ba93bdceb6cc1717adc07 100644 --- a/paddle/fluid/inference/tests/api/trt_models_tester.cc +++ b/paddle/fluid/inference/tests/api/trt_models_tester.cc @@ -42,9 +42,9 @@ void SetConfig(ConfigType* config, std::string model_dir, bool use_gpu, } template <> -void SetConfig(contrib::AnalysisConfig* config, - std::string model_dir, bool use_gpu, - bool use_tensorrt, int batch_size) { +void SetConfig(AnalysisConfig* config, std::string model_dir, + bool use_gpu, bool use_tensorrt, + int batch_size) { if (!FLAGS_prog_filename.empty() && !FLAGS_param_filename.empty()) { config->SetModel(model_dir + "/" + FLAGS_prog_filename, model_dir + "/" + FLAGS_param_filename); @@ -75,11 +75,11 @@ void profile(std::string model_dir, bool use_analysis, bool use_tensorrt) { std::vector outputs; if (use_analysis || use_tensorrt) { - contrib::AnalysisConfig config; + AnalysisConfig config; config.EnableUseGpu(100, 0); config.pass_builder()->TurnOnDebug(); - SetConfig(&config, model_dir, true, use_tensorrt, - FLAGS_batch_size); + SetConfig(&config, model_dir, true, use_tensorrt, + FLAGS_batch_size); TestPrediction(reinterpret_cast(&config), inputs_all, &outputs, FLAGS_num_threads, true); } else { @@ -99,18 +99,18 @@ void compare(std::string model_dir, bool use_tensorrt) { SetFakeImageInput(&inputs_all, model_dir, false, "__model__", ""); } - contrib::AnalysisConfig analysis_config; - SetConfig(&analysis_config, model_dir, true, - use_tensorrt, FLAGS_batch_size); + AnalysisConfig analysis_config; + SetConfig(&analysis_config, model_dir, true, use_tensorrt, + FLAGS_batch_size); CompareNativeAndAnalysis( reinterpret_cast(&analysis_config), inputs_all); } void compare_continuous_input(std::string model_dir, bool use_tensorrt) { - contrib::AnalysisConfig analysis_config; - SetConfig(&analysis_config, model_dir, true, - use_tensorrt, FLAGS_batch_size); + AnalysisConfig analysis_config; + SetConfig(&analysis_config, model_dir, true, use_tensorrt, + FLAGS_batch_size); auto config = reinterpret_cast(&analysis_config); auto native_pred = CreateTestPredictor(config, false); @@ -119,9 +119,10 @@ void compare_continuous_input(std::string model_dir, bool use_tensorrt) { std::vector> inputs_all; if (!FLAGS_prog_filename.empty() && !FLAGS_param_filename.empty()) { SetFakeImageInput(&inputs_all, model_dir, true, FLAGS_prog_filename, - FLAGS_param_filename); + FLAGS_param_filename, nullptr, i); } else { - SetFakeImageInput(&inputs_all, model_dir, false, "__model__", ""); + SetFakeImageInput(&inputs_all, model_dir, false, "__model__", "", nullptr, + i); } CompareNativeAndAnalysis(native_pred.get(), analysis_pred.get(), inputs_all); diff --git a/paddle/fluid/memory/allocation/legacy_allocator.cc b/paddle/fluid/memory/allocation/legacy_allocator.cc index 5d8684f083bda8499000c9fd0a7617cf129db13b..8759ec8096cf102ab85d2c2a91eddc23a6ed0e50 100644 --- a/paddle/fluid/memory/allocation/legacy_allocator.cc +++ b/paddle/fluid/memory/allocation/legacy_allocator.cc @@ -13,9 +13,15 @@ // limitations under the License. 
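The next hunk routes CPU allocation through plain `malloc`/`free` whenever `PADDLE_WITH_JEMALLOC` is defined, letting the linked-in jemalloc interpose them. A minimal sketch of that compile-time switch, with a trivial fallback standing in for the buddy allocator:

```cpp
#include <cstdlib>

// Compile-time allocator switch: with -DPADDLE_WITH_JEMALLOC (and jemalloc
// linked in), plain malloc/free resolve to jemalloc; otherwise the framework
// allocator would be used, mocked here as malloc/free as well.
void *AllocCPU(std::size_t size) {
#ifdef PADDLE_WITH_JEMALLOC
  return std::malloc(size);
#else
  return std::malloc(size);  // stand-in for GetCPUBuddyAllocator()->Alloc(size)
#endif
}

void FreeCPU(void *p) {
#ifdef PADDLE_WITH_JEMALLOC
  std::free(p);
#else
  std::free(p);  // stand-in for GetCPUBuddyAllocator()->Free(p)
#endif
}

int main() {
  void *p = AllocCPU(64);
  FreeCPU(p);
}
```

The matching `Used()` change in the hunk simply reports `0U` under jemalloc, since the buddy allocator's bookkeeping is bypassed.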
#include "paddle/fluid/memory/allocation/legacy_allocator.h" + #include #include #include + +#ifdef PADDLE_WITH_JEMALLOC +#include +#endif + #include "glog/logging.h" #include "paddle/fluid/memory/detail/buddy_allocator.h" #include "paddle/fluid/memory/detail/system_allocator.h" @@ -95,7 +101,11 @@ struct NaiveAllocator { template <> void *Alloc(const platform::CPUPlace &place, size_t size) { VLOG(10) << "Allocate " << size << " bytes on " << platform::Place(place); +#ifdef PADDLE_WITH_JEMALLOC + void *p = malloc(size); +#else void *p = GetCPUBuddyAllocator()->Alloc(size); +#endif if (FLAGS_init_allocated_mem) { memset(p, 0xEF, size); } @@ -107,12 +117,21 @@ template <> void Free(const platform::CPUPlace &place, void *p, size_t size) { VLOG(10) << "Free pointer=" << p << " on " << platform::Place(place); +#ifdef PADDLE_WITH_JEMALLOC + free(p); +#else GetCPUBuddyAllocator()->Free(p); +#endif } template <> size_t Used(const platform::CPUPlace &place) { +#ifdef PADDLE_WITH_JEMALLOC + // fake the result of used memory when PADDLE_WITH_JEMALLOC is ON + return 0U; +#else return GetCPUBuddyAllocator()->Used(); +#endif } #ifdef PADDLE_WITH_CUDA diff --git a/paddle/fluid/operators/activation_op.cc b/paddle/fluid/operators/activation_op.cc index 9c5b8604f40ae56c463b54c71623feb61bd8d297..7ec9d2fed53c9c73952db7dcdfc2d8e634f3f84e 100644 --- a/paddle/fluid/operators/activation_op.cc +++ b/paddle/fluid/operators/activation_op.cc @@ -14,7 +14,7 @@ limitations under the License. */ #include "paddle/fluid/operators/activation_op.h" #include -#include "paddle/fluid/operators/mkldnn_activation_op.h" +#include "paddle/fluid/operators/mkldnn/mkldnn_activation_op.h" #include "paddle/fluid/platform/port.h" namespace paddle { diff --git a/paddle/fluid/operators/beam_search_op.cc b/paddle/fluid/operators/beam_search_op.cc index e78ecc1a12309fe084a4165e5bb0d8bfb1dcf957..e93cd8615e052e4dfc6255549bf7a9b84b7dd657 100644 --- a/paddle/fluid/operators/beam_search_op.cc +++ b/paddle/fluid/operators/beam_search_op.cc @@ -51,6 +51,9 @@ class BeamSearchOpMaker : public framework::OpProtoAndCheckerMaker { AddOutput("selected_scores", "A LoDTensor containing the accumulated scores corresponding to " "Output(selected_ids)."); + AddOutput( + "parent_idx", + "A Tensor preserving the selected_ids' parent indice in pre_ids."); // Attributes stored in AttributeMap AddAttr("level", "the level of LoDTensor"); diff --git a/paddle/fluid/operators/beam_search_op.h b/paddle/fluid/operators/beam_search_op.h index 1b939e742de06aedf187d25d002d19e0a4fafc9d..f808020cc765585d1633c6c3bf528080a7e83f07 100644 --- a/paddle/fluid/operators/beam_search_op.h +++ b/paddle/fluid/operators/beam_search_op.h @@ -41,13 +41,15 @@ class BeamSearchOpKernel : public framework::OpKernel { auto selected_ids = context.Output("selected_ids"); auto selected_scores = context.Output("selected_scores"); + auto* parent_idx = context.Output("parent_idx"); PADDLE_ENFORCE_NOT_NULL(selected_ids); PADDLE_ENFORCE_NOT_NULL(selected_scores); + PADDLE_ENFORCE_NOT_NULL(parent_idx); math::BeamSearchFunctor alg; alg(context.template device_context(), pre_ids, pre_scores, - ids, scores, selected_ids, selected_scores, level, beam_size, end_id, - is_accumulated); + ids, scores, selected_ids, selected_scores, parent_idx, level, + beam_size, end_id, is_accumulated); } }; diff --git a/paddle/fluid/operators/detection/box_coder_op.cc b/paddle/fluid/operators/detection/box_coder_op.cc index 06fbb9815c52ea69e3aa9e893512e039853b9514..fdcff62e1fe59b3a2f4925bdff98632f71220abb 100644 --- 
a/paddle/fluid/operators/detection/box_coder_op.cc +++ b/paddle/fluid/operators/detection/box_coder_op.cc @@ -10,6 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/detection/box_coder_op.h" +#include <vector> namespace paddle { namespace operators { @@ -32,32 +33,57 @@ class BoxCoderOp : public framework::OperatorWithKernel { if (ctx->IsRuntime()) { PADDLE_ENFORCE_EQ(prior_box_dims.size(), 2, - "The rank of Input of PriorBoxVar must be 2"); + "The rank of Input PriorBox must be 2"); PADDLE_ENFORCE_EQ(prior_box_dims[1], 4, "The shape of PriorBox is [N, 4]"); if (ctx->HasInput("PriorBoxVar")) { auto prior_box_var_dims = ctx->GetInputDim("PriorBoxVar"); - PADDLE_ENFORCE_EQ(prior_box_dims, prior_box_var_dims); + PADDLE_ENFORCE( + prior_box_var_dims.size() == 1 || prior_box_var_dims.size() == 2, + "The rank of Input(PriorBoxVar) of BoxCoderOp should be 1 or 2."); + if (prior_box_var_dims.size() == 1) { + PADDLE_ENFORCE_EQ( + prior_box_var_dims[0], 4, + "The 1st dimension of Input(PriorBoxVar) should be 4 " + "when the rank is 1."); + } else { + PADDLE_ENFORCE_EQ( + prior_box_dims, prior_box_var_dims, + "The dimension of Input(PriorBoxVar) should be equal to " + "the dimension of Input(PriorBox) when the rank is 2."); + } } + } - auto code_type = - GetBoxCodeType(ctx->Attrs().Get<std::string>("code_type")); - if (code_type == BoxCodeType::kEncodeCenterSize) { - PADDLE_ENFORCE_EQ(target_box_dims.size(), 2, - "The rank of Input of TargetBox must be 2"); - PADDLE_ENFORCE_EQ(target_box_dims[1], 4, - "The shape of TargetBox is [M, 4]"); - } else if (code_type == BoxCodeType::kDecodeCenterSize) { - PADDLE_ENFORCE_EQ(target_box_dims.size(), 3, - "The rank of Input of TargetBox must be 3"); + auto code_type = GetBoxCodeType(ctx->Attrs().Get<std::string>("code_type")); + int axis = ctx->Attrs().Get<int>("axis"); + if (code_type == BoxCodeType::kEncodeCenterSize) { + PADDLE_ENFORCE_EQ(target_box_dims.size(), 2, + "The rank of Input TargetBox must be 2"); + PADDLE_ENFORCE_EQ(target_box_dims[1], 4, + "The shape of TargetBox is [M, 4]"); + ctx->SetOutputDim( + "OutputBox", + framework::make_ddim({target_box_dims[0], prior_box_dims[0], 4})); + } else if (code_type == BoxCodeType::kDecodeCenterSize) { + PADDLE_ENFORCE_EQ(target_box_dims.size(), 3, + "The rank of Input TargetBox must be 3"); + if (axis == 0) { PADDLE_ENFORCE_EQ(target_box_dims[1], prior_box_dims[0]); - PADDLE_ENFORCE_EQ(target_box_dims[2], prior_box_dims[1]); + } else if (axis == 1) { + PADDLE_ENFORCE_EQ(target_box_dims[0], prior_box_dims[0]); + } else { + PADDLE_THROW("axis must be 0 or 1."); } + PADDLE_ENFORCE_EQ(target_box_dims[2], prior_box_dims[1]); + ctx->ShareDim("TargetBox", /*->*/ "OutputBox"); + } + + if (code_type == BoxCodeType::kDecodeCenterSize && axis == 1) { + ctx->ShareLoD("PriorBox", /*->*/ "OutputBox"); + } else { + ctx->ShareLoD("TargetBox", /*->*/ "OutputBox"); } - ctx->SetOutputDim( - "OutputBox", - framework::make_ddim({target_box_dims[0], prior_box_dims[0], 4})); - ctx->ShareLoD("TargetBox", /*->*/ "OutputBox"); } }; @@ -100,6 +126,21 @@ class BoxCoderOpMaker : public framework::OpProtoAndCheckerMaker { "(bool, default true) " "whether treat the priorbox as a normalized box") .SetDefault(true); + AddAttr<int>("axis", + "(int, default 0) " + "which axis in PriorBox to broadcast for box decode, " + "for example, if axis is 0 and TargetBox has shape " + "[N, M, 4] and PriorBox has shape [M, 4], then PriorBox " + "will broadcast to [N, M, 4] for decoding. It is only valid " + "when code type is decode_center_size") + .SetDefault(0) + .InEnum({0, 1}); + AddAttr<std::vector<float>>( + "variance", + "(vector<float>, default {}), " + "variance of prior box with shape [4]. PriorBoxVar and variance can" + "not be provided at the same time.") + .SetDefault(std::vector<float>{}); AddOutput("OutputBox", "(LoDTensor or Tensor) " "When code_type is 'encode_center_size', the output tensor of " @@ -138,7 +179,11 @@ where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates, width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote the priorbox's (anchor) center coordinates, width and height. `pxv`, `pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`, `ow`, `oh` denote the -encoded/decoded coordinates, width and height. +encoded/decoded coordinates, width and height. + +During Box Decoding, two modes for broadcast are supported. Say target box has +shape [N, M, 4], and the shape of prior box can be [N, 4] or [M, 4]. Then prior +box will broadcast to target box along the assigned axis. )DOC"); } }; diff --git a/paddle/fluid/operators/detection/box_coder_op.cu b/paddle/fluid/operators/detection/box_coder_op.cu index a7af111f63d654319dd1d90d2032956951dfe49e..e078af3eb478a8bebc6a7fc6460d169d803a3c4b 100644 --- a/paddle/fluid/operators/detection/box_coder_op.cu +++ b/paddle/fluid/operators/detection/box_coder_op.cu @@ -9,6 +9,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include +#include +#include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/operators/detection/box_coder_op.h" #include "paddle/fluid/platform/cuda_primitives.h" @@ -16,11 +19,11 @@ namespace paddle { namespace operators { template <typename T> -__global__ void EncodeCenterSizeKernel(const T* prior_box_data, - const T* prior_box_var_data, - const T* target_box_data, const int row, - const int col, const int len, - const bool normalized, T* output) { +__global__ void EncodeCenterSizeKernel( + const T* prior_box_data, const T* prior_box_var_data, + const T* target_box_data, const int row, const int col, const int len, + const bool normalized, const T prior_box_var_size, const float* variance, + const int var_size, T* output) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < row * col) { const int row_idx = idx / col; @@ -30,11 +33,9 @@ __global__ void EncodeCenterSizeKernel(const T* prior_box_data, T prior_box_height = prior_box_data[col_idx * len + 3] - prior_box_data[col_idx * len + 1] + (normalized == false); - T prior_box_center_x = - (prior_box_data[col_idx * len + 2] + prior_box_data[col_idx * len]) / 2; - T prior_box_center_y = (prior_box_data[col_idx * len + 3] + - prior_box_data[col_idx * len + 1]) / - 2; + T prior_box_center_x = prior_box_data[col_idx * len] + prior_box_width / 2; + T prior_box_center_y = + prior_box_data[col_idx * len + 1] + prior_box_height / 2; T target_box_center_x = (target_box_data[row_idx * len + 2] + target_box_data[row_idx * len]) / @@ -55,58 +56,73 @@ __global__ void EncodeCenterSizeKernel(const T* prior_box_data, output[idx * len + 2] = log(fabs(target_box_width / prior_box_width)); output[idx * len + 3] = log(fabs(target_box_height / prior_box_height)); if (prior_box_var_data) { - output[idx * len] /= prior_box_var_data[col_idx * len]; - output[idx * len + 1] /= prior_box_var_data[col_idx * len + 1]; - output[idx * len + 2] /= prior_box_var_data[col_idx * len + 2]; - output[idx * len + 3] /=
prior_box_var_data[col_idx * len + 3]; + int prior_var_offset = 0; + if (prior_box_var_size == 2) { + prior_var_offset = col_idx * len; + } + output[idx * len] /= prior_box_var_data[prior_var_offset]; + output[idx * len + 1] /= prior_box_var_data[prior_var_offset + 1]; + output[idx * len + 2] /= prior_box_var_data[prior_var_offset + 2]; + output[idx * len + 3] /= prior_box_var_data[prior_var_offset + 3]; + } else if (var_size == 4) { + for (int k = 0; k < 4; ++k) { + output[idx * len + k] /= static_cast(variance[k]); + } } } } template -__global__ void DecodeCenterSizeKernel(const T* prior_box_data, - const T* prior_box_var_data, - const T* target_box_data, const int row, - const int col, const int len, - const bool normalized, T* output) { +__global__ void DecodeCenterSizeKernel( + const T* prior_box_data, const T* prior_box_var_data, + const T* target_box_data, const int row, const int col, const int len, + const bool normalized, const T prior_box_var_size, const float* variance, + const int var_size, const int axis, T* output) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; + int prior_box_offset = 0; if (idx < row * col) { const int col_idx = idx % col; - T prior_box_width = prior_box_data[col_idx * len + 2] - - prior_box_data[col_idx * len] + (normalized == false); - T prior_box_height = prior_box_data[col_idx * len + 3] - - prior_box_data[col_idx * len + 1] + + const int row_idx = idx / col; + prior_box_offset = axis == 0 ? col_idx * len : row_idx * len; + T prior_box_width = prior_box_data[prior_box_offset + 2] - + prior_box_data[prior_box_offset] + + (normalized == false); + T prior_box_height = prior_box_data[prior_box_offset + 3] - + prior_box_data[prior_box_offset + 1] + (normalized == false); T prior_box_center_x = - (prior_box_data[col_idx * len + 2] + prior_box_data[col_idx * len]) / 2; - T prior_box_center_y = (prior_box_data[col_idx * len + 3] + - prior_box_data[col_idx * len + 1]) / - 2; + prior_box_data[prior_box_offset] + prior_box_width / 2; + T prior_box_center_y = + prior_box_data[prior_box_offset + 1] + prior_box_height / 2; T target_box_width, target_box_height; T target_box_center_x, target_box_center_y; + T box_var_x = T(1), box_var_y = T(1); + T box_var_w = T(1), box_var_h = T(1); if (prior_box_var_data) { - target_box_width = exp(prior_box_var_data[col_idx * len + 2] * - target_box_data[idx * len + 2]) * - prior_box_width; - target_box_height = exp(prior_box_var_data[col_idx * len + 3] * - target_box_data[idx * len + 3]) * - prior_box_height; - target_box_center_x = prior_box_var_data[col_idx * len] * - target_box_data[idx * len] * prior_box_width + - prior_box_center_x; - target_box_center_y = prior_box_var_data[col_idx * len + 1] * - target_box_data[idx * len + 1] * - prior_box_height + - prior_box_center_y; - } else { - target_box_width = exp(target_box_data[idx * len + 2]) * prior_box_width; - target_box_height = - exp(target_box_data[idx * len + 3]) * prior_box_height; - target_box_center_x = - target_box_data[idx * len] * prior_box_width + prior_box_center_x; - target_box_center_y = target_box_data[idx * len + 1] * prior_box_height + - prior_box_center_y; + int prior_var_offset = 0; + if (prior_box_var_size == 2) { + prior_var_offset = axis == 0 ? 
col_idx * len : row_idx * len; + } + box_var_x = prior_box_var_data[prior_var_offset]; + box_var_y = prior_box_var_data[prior_var_offset + 1]; + box_var_w = prior_box_var_data[prior_var_offset + 2]; + box_var_h = prior_box_var_data[prior_var_offset + 3]; + } else if (var_size == 4) { + box_var_x = static_cast(variance[0]); + box_var_y = static_cast(variance[1]); + box_var_w = static_cast(variance[2]); + box_var_h = static_cast(variance[3]); } + target_box_width = + exp(box_var_w * target_box_data[idx * len + 2]) * prior_box_width; + target_box_height = + exp(box_var_h * target_box_data[idx * len + 3]) * prior_box_height; + target_box_center_x = + box_var_x * target_box_data[idx * len] * prior_box_width + + prior_box_center_x; + target_box_center_y = + box_var_y * target_box_data[idx * len + 1] * prior_box_height + + prior_box_center_y; output[idx * len] = target_box_center_x - target_box_width / 2; output[idx * len + 1] = target_box_center_y - target_box_height / 2; @@ -127,36 +143,64 @@ class BoxCoderCUDAKernel : public framework::OpKernel { auto* prior_box_var = context.Input("PriorBoxVar"); auto* target_box = context.Input("TargetBox"); auto* output_box = context.Output("OutputBox"); - + std::vector variance = context.Attr>("variance"); const T* prior_box_data = prior_box->data(); const T* target_box_data = target_box->data(); const T* prior_box_var_data = nullptr; - if (prior_box_var) prior_box_var_data = prior_box_var->data(); + auto prior_box_var_size = 0; + if (prior_box_var) { + PADDLE_ENFORCE(variance.empty(), + "Input 'PriorBoxVar' and attribute 'variance' should not" + "be used at the same time."); + prior_box_var_data = prior_box_var->data(); + prior_box_var_size = prior_box_var->dims().size(); + } + if (!(variance.empty())) { + PADDLE_ENFORCE(static_cast(variance.size()) == 4, + "Size of attribute 'variance' should be 4"); + } if (target_box->lod().size()) { PADDLE_ENFORCE_EQ(target_box->lod().size(), 1, "Only support 1 level of LoD."); } + const int var_size = static_cast(variance.size()); + + auto code_type = GetBoxCodeType(context.Attr("code_type")); + bool normalized = context.Attr("box_normalized"); + int axis = context.Attr("axis"); + auto row = target_box->dims()[0]; auto col = prior_box->dims()[0]; + if (code_type == BoxCodeType::kDecodeCenterSize) { + col = target_box->dims()[1]; + } auto len = prior_box->dims()[1]; int block = 512; int grid = (row * col + block - 1) / block; auto& device_ctx = context.cuda_device_context(); + auto& allocator = + platform::DeviceTemporaryAllocator::Instance().Get(device_ctx); + int bytes = var_size * sizeof(float); + auto dev_var = allocator.Allocate(bytes); + float* dev_var_data = reinterpret_cast(dev_var->ptr()); + auto cplace = platform::CPUPlace(); + const auto gplace = boost::get(context.GetPlace()); + memory::Copy(gplace, dev_var_data, cplace, &variance[0], bytes, + device_ctx.stream()); + output_box->mutable_data({row, col, len}, context.GetPlace()); T* output = output_box->data(); - auto code_type = GetBoxCodeType(context.Attr("code_type")); - bool normalized = context.Attr("box_normalized"); if (code_type == BoxCodeType::kEncodeCenterSize) { EncodeCenterSizeKernel<<>>( prior_box_data, prior_box_var_data, target_box_data, row, col, len, - normalized, output); + normalized, prior_box_var_size, dev_var_data, var_size, output); } else if (code_type == BoxCodeType::kDecodeCenterSize) { DecodeCenterSizeKernel<<>>( prior_box_data, prior_box_var_data, target_box_data, row, col, len, - normalized, output); + normalized, 
prior_box_var_size, dev_var_data, var_size, axis, output); } } }; diff --git a/paddle/fluid/operators/detection/box_coder_op.h b/paddle/fluid/operators/detection/box_coder_op.h index b2a2bcdce932032a761a1fc064fe622f7629f9bf..a0b1faf7bdc7001eba2d92b4d03fbaf9feb7bcbb 100644 --- a/paddle/fluid/operators/detection/box_coder_op.h +++ b/paddle/fluid/operators/detection/box_coder_op.h @@ -11,6 +11,7 @@ limitations under the License. */ #pragma once #include +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" @@ -34,7 +35,8 @@ class BoxCoderKernel : public framework::OpKernel { void EncodeCenterSize(const framework::Tensor* target_box, const framework::Tensor* prior_box, const framework::Tensor* prior_box_var, - const bool normalized, T* output) const { + const bool normalized, + const std::vector variance, T* output) const { int64_t row = target_box->dims()[0]; int64_t col = prior_box->dims()[0]; int64_t len = prior_box->dims()[1]; @@ -53,10 +55,9 @@ class BoxCoderKernel : public framework::OpKernel { T prior_box_height = prior_box_data[j * len + 3] - prior_box_data[j * len + 1] + (normalized == false); - T prior_box_center_x = - (prior_box_data[j * len + 2] + prior_box_data[j * len]) / 2; + T prior_box_center_x = prior_box_data[j * len] + prior_box_width / 2; T prior_box_center_y = - (prior_box_data[j * len + 3] + prior_box_data[j * len + 1]) / 2; + prior_box_data[j * len + 1] + prior_box_height / 2; T target_box_center_x = (target_box_data[i * len + 2] + target_box_data[i * len]) / 2; @@ -78,10 +79,18 @@ class BoxCoderKernel : public framework::OpKernel { output[offset + 3] = std::log(std::fabs(target_box_height / prior_box_height)); if (prior_box_var) { - output[offset] /= prior_box_var_data[j * len]; - output[offset + 1] /= prior_box_var_data[j * len + 1]; - output[offset + 2] /= prior_box_var_data[j * len + 2]; - output[offset + 3] /= prior_box_var_data[j * len + 3]; + int prior_var_offset = 0; + if (prior_box_var->dims().size() == 2) { + prior_var_offset = j * len; + } + output[offset] /= prior_box_var_data[prior_var_offset]; + output[offset + 1] /= prior_box_var_data[prior_var_offset + 1]; + output[offset + 2] /= prior_box_var_data[prior_var_offset + 2]; + output[offset + 3] /= prior_box_var_data[prior_var_offset + 3]; + } else if (!(variance.empty())) { + for (int k = 0; k < 4; ++k) { + output[offset + k] /= static_cast(variance[k]); + } } } } @@ -89,58 +98,71 @@ class BoxCoderKernel : public framework::OpKernel { void DecodeCenterSize(const framework::Tensor* target_box, const framework::Tensor* prior_box, const framework::Tensor* prior_box_var, - const bool normalized, T* output) const { + const bool normalized, const int axis, + const std::vector variance, T* output) const { int64_t row = target_box->dims()[0]; - int64_t col = prior_box->dims()[0]; - int64_t len = prior_box->dims()[1]; + int64_t col = target_box->dims()[1]; + int64_t len = target_box->dims()[2]; auto* target_box_data = target_box->data(); auto* prior_box_data = prior_box->data(); const T* prior_box_var_data = nullptr; if (prior_box_var) prior_box_var_data = prior_box_var->data(); - + int prior_box_offset = 0; #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(2) #endif for (int64_t i = 0; i < row; ++i) { for (int64_t j = 0; j < col; ++j) { size_t offset = i * col * len + j * len; - T prior_box_width = prior_box_data[j * len + 2] - - prior_box_data[j * len] + (normalized == false); - T prior_box_height = prior_box_data[j * len + 3] - - prior_box_data[j 
* len + 1] + + if (axis == 0) { + prior_box_offset = j * len; + } else if (axis == 1) { + prior_box_offset = i * len; + } + T prior_box_width = prior_box_data[prior_box_offset + 2] - + prior_box_data[prior_box_offset] + + (normalized == false); + T prior_box_height = prior_box_data[prior_box_offset + 3] - + prior_box_data[prior_box_offset + 1] + (normalized == false); T prior_box_center_x = - (prior_box_data[j * len + 2] + prior_box_data[j * len]) / 2; + prior_box_data[prior_box_offset] + prior_box_width / 2; T prior_box_center_y = - (prior_box_data[j * len + 3] + prior_box_data[j * len + 1]) / 2; + prior_box_data[prior_box_offset + 1] + prior_box_height / 2; T target_box_center_x = 0, target_box_center_y = 0; T target_box_width = 0, target_box_height = 0; + T box_var_x = T(1), box_var_y = T(1); + T box_var_w = T(1), box_var_h = T(1); if (prior_box_var) { - target_box_center_x = prior_box_var_data[j * len] * - target_box_data[offset] * prior_box_width + - prior_box_center_x; - target_box_center_y = prior_box_var_data[j * len + 1] * - target_box_data[offset + 1] * - prior_box_height + - prior_box_center_y; - target_box_width = std::exp(prior_box_var_data[j * len + 2] * - target_box_data[offset + 2]) * - prior_box_width; - target_box_height = std::exp(prior_box_var_data[j * len + 3] * - target_box_data[offset + 3]) * - prior_box_height; - } else { - target_box_center_x = - target_box_data[offset] * prior_box_width + prior_box_center_x; - target_box_center_y = target_box_data[offset + 1] * prior_box_height + - prior_box_center_y; - target_box_width = - std::exp(target_box_data[offset + 2]) * prior_box_width; - target_box_height = - std::exp(target_box_data[offset + 3]) * prior_box_height; + int prior_var_offset = 0; + if (prior_box_var->dims().size() == 2) { + if (axis == 0) + prior_var_offset = j * len; + else if (axis == 1) + prior_var_offset = i * len; + } + box_var_x = prior_box_var_data[prior_var_offset]; + box_var_y = prior_box_var_data[prior_var_offset + 1]; + box_var_w = prior_box_var_data[prior_var_offset + 2]; + box_var_h = prior_box_var_data[prior_var_offset + 3]; + } else if (!(variance.empty())) { + box_var_x = static_cast(variance[0]); + box_var_y = static_cast(variance[1]); + box_var_w = static_cast(variance[2]); + box_var_h = static_cast(variance[3]); } + target_box_center_x = + box_var_x * target_box_data[offset] * prior_box_width + + prior_box_center_x; + target_box_center_y = + box_var_y * target_box_data[offset + 1] * prior_box_height + + prior_box_center_y; + target_box_width = + std::exp(box_var_w * target_box_data[offset + 2]) * prior_box_width; + target_box_height = std::exp(box_var_h * target_box_data[offset + 3]) * + prior_box_height; output[offset] = target_box_center_x - target_box_width / 2; output[offset + 1] = target_box_center_y - target_box_height / 2; @@ -157,26 +179,40 @@ class BoxCoderKernel : public framework::OpKernel { auto* prior_box_var = context.Input("PriorBoxVar"); auto* target_box = context.Input("TargetBox"); auto* output_box = context.Output("OutputBox"); - + std::vector variance = context.Attr>("variance"); + const int axis = context.Attr("axis"); if (target_box->lod().size()) { PADDLE_ENFORCE_EQ(target_box->lod().size(), 1UL, "Only support 1 level of LoD."); } + if (prior_box_var) { + PADDLE_ENFORCE(variance.empty(), + "Input 'PriorBoxVar' and attribute 'variance' should not" + "be used at the same time."); + } + if (!(variance.empty())) { + PADDLE_ENFORCE(static_cast(variance.size()) == 4, + "Size of attribute 'variance' should be 4"); + } + 
auto code_type = GetBoxCodeType(context.Attr("code_type")); + bool normalized = context.Attr("box_normalized"); + auto row = target_box->dims()[0]; auto col = prior_box->dims()[0]; + if (code_type == BoxCodeType::kDecodeCenterSize) { + col = target_box->dims()[1]; + } auto len = prior_box->dims()[1]; output_box->mutable_data({row, col, len}, context.GetPlace()); - auto code_type = GetBoxCodeType(context.Attr("code_type")); - bool normalized = context.Attr("box_normalized"); T* output = output_box->data(); if (code_type == BoxCodeType::kEncodeCenterSize) { EncodeCenterSize(target_box, prior_box, prior_box_var, normalized, - output); + variance, output); } else if (code_type == BoxCodeType::kDecodeCenterSize) { - DecodeCenterSize(target_box, prior_box, prior_box_var, normalized, - output); + DecodeCenterSize(target_box, prior_box, prior_box_var, normalized, axis, + variance, output); } } }; diff --git a/paddle/fluid/operators/detection/multiclass_nms_op.cc b/paddle/fluid/operators/detection/multiclass_nms_op.cc index 2395b181485429784e0f3dff6d056b84268ef245..f357e3ccf905309e6656f3fa87fbee45dc357c1e 100644 --- a/paddle/fluid/operators/detection/multiclass_nms_op.cc +++ b/paddle/fluid/operators/detection/multiclass_nms_op.cc @@ -9,9 +9,9 @@ http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - limitations under the License. */ +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/detection/poly_util.h" @@ -35,30 +35,45 @@ class MultiClassNMSOp : public framework::OperatorWithKernel { auto box_dims = ctx->GetInputDim("BBoxes"); auto score_dims = ctx->GetInputDim("Scores"); + auto score_size = score_dims.size(); if (ctx->IsRuntime()) { + PADDLE_ENFORCE(score_size == 2 || score_size == 3, + "The rank of Input(Scores) must be 2 or 3"); PADDLE_ENFORCE_EQ(box_dims.size(), 3, - "The rank of Input(BBoxes) must be 3."); - PADDLE_ENFORCE_EQ(score_dims.size(), 3, - "The rank of Input(Scores) must be 3."); - PADDLE_ENFORCE(box_dims[2] == 4 || box_dims[2] == 8 || - box_dims[2] == 16 || box_dims[2] == 24 || - box_dims[2] == 32, - "The 2nd dimension of Input(BBoxes) must be 4 or 8, " - "represents the layout of coordinate " - "[xmin, ymin, xmax, ymax] or " - "4 points: [x1, y1, x2, y2, x3, y3, x4, y4] or " - "8 points: [xi, yi] i= 1,2,...,8 or " - "12 points: [xi, yi] i= 1,2,...,12 or " - "16 points: [xi, yi] i= 1,2,...,16"); - PADDLE_ENFORCE_EQ(box_dims[1], score_dims[2], - "The 1st dimensiong of Input(BBoxes) must be equal to " - "3rd dimension of Input(Scores), which represents the " - "predicted bboxes."); + "The rank of Input(BBoxes) must be 3"); + if (score_size == 3) { + PADDLE_ENFORCE(box_dims[2] == 4 || box_dims[2] == 8 || + box_dims[2] == 16 || box_dims[2] == 24 || + box_dims[2] == 32, + "The last dimension of Input(BBoxes) must be 4 or 8, " + "represents the layout of coordinate " + "[xmin, ymin, xmax, ymax] or " + "4 points: [x1, y1, x2, y2, x3, y3, x4, y4] or " + "8 points: [xi, yi] i= 1,2,...,8 or " + "12 points: [xi, yi] i= 1,2,...,12 or " + "16 points: [xi, yi] i= 1,2,...,16"); + PADDLE_ENFORCE_EQ( + box_dims[1], score_dims[2], + "The 2nd dimension of Input(BBoxes) must be equal to " + "last dimension of Input(Scores), which represents the " + "predicted bboxes."); + } else { + PADDLE_ENFORCE(box_dims[2] == 4, + "The last dimension of Input(BBoxes) must be 4"); 
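Pulling the `decode_center_size` arithmetic from the CUDA and CPU kernels above into one self-contained function makes the variance handling easier to see. In the sketch below the variance is assumed to be already resolved to a 4-vector (per the PriorBoxVar/attribute rules shown earlier), and the corner adjustment for non-normalized boxes mirrors the CPU kernel.

```cpp
#include <array>
#include <cmath>
#include <cstdio>

// Standalone version of the decode_center_size arithmetic: given a prior box
// [pxmin, pymin, pxmax, pymax], an encoded target [tx, ty, tw, th] and a
// resolved variance 4-vector, recover the output corner coordinates.
std::array<float, 4> DecodeCenterSize(const std::array<float, 4> &prior,
                                      const std::array<float, 4> &target,
                                      const std::array<float, 4> &var,
                                      bool normalized) {
  const float norm = normalized ? 0.f : 1.f;  // pixel boxes get the +1 term
  const float pw = prior[2] - prior[0] + norm;
  const float ph = prior[3] - prior[1] + norm;
  const float pcx = prior[0] + pw / 2;
  const float pcy = prior[1] + ph / 2;
  const float ocx = var[0] * target[0] * pw + pcx;     // decoded center
  const float ocy = var[1] * target[1] * ph + pcy;
  const float ow = std::exp(var[2] * target[2]) * pw;  // decoded size
  const float oh = std::exp(var[3] * target[3]) * ph;
  return {ocx - ow / 2, ocy - oh / 2, ocx + ow / 2 - norm, ocy + oh / 2 - norm};
}

int main() {
  auto out = DecodeCenterSize({0.f, 0.f, 2.f, 2.f}, {0.f, 0.f, 0.f, 0.f},
                              {1.f, 1.f, 1.f, 1.f}, true);
  // A zero target decodes back to the prior box itself: 0.0 0.0 2.0 2.0
  std::printf("%.1f %.1f %.1f %.1f\n", out[0], out[1], out[2], out[3]);
}
```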
+ PADDLE_ENFORCE_EQ(box_dims[1], score_dims[1], + "The 2nd dimension of Input(BBoxes) " + "must be equal to the 2nd dimension" + " of Input(Scores)"); + } } // Here the box_dims[0] is not the real dimension of output. // It will be rewritten in the computing kernel. - ctx->SetOutputDim("Out", {box_dims[1], box_dims[2] + 2}); + if (score_size == 3) { + ctx->SetOutputDim("Out", {box_dims[1], box_dims[2] + 2}); + } else { + ctx->SetOutputDim("Out", {-1, box_dims[2] + 2}); + } } protected: @@ -123,8 +138,9 @@ static inline T JaccardOverlap(const T* box1, const T* box2, const T inter_ymin = std::max(box1[1], box2[1]); const T inter_xmax = std::min(box1[2], box2[2]); const T inter_ymax = std::min(box1[3], box2[3]); - const T inter_w = inter_xmax - inter_xmin; - const T inter_h = inter_ymax - inter_ymin; + T norm = normalized ? static_cast<T>(0.) : static_cast<T>(1.); + T inter_w = inter_xmax - inter_xmin + norm; + T inter_h = inter_ymax - inter_ymin + norm; const T inter_area = inter_w * inter_h; const T bbox1_area = BBoxArea(box1, normalized); const T bbox2_area = BBoxArea(box2, normalized); @@ -139,7 +155,7 @@ T PolyIoU(const T* box1, const T* box2, const size_t box_size, T bbox2_area = PolyArea(box2, box_size, normalized); T inter_area = PolyOverlapArea(box1, box2, box_size, normalized); if (bbox1_area == 0 || bbox2_area == 0 || inter_area == 0) { - // If coordinate values are is invalid + // If coordinate values are invalid // if area size <= 0, return 0. return T(0.); } else { @@ -147,12 +163,35 @@ T PolyIoU(const T* box1, const T* box2, const size_t box_size, } } +template <typename T> +void SliceOneClass(const platform::DeviceContext& ctx, + const framework::Tensor& items, const int class_id, + framework::Tensor* one_class_item) { + T* item_data = one_class_item->mutable_data<T>(ctx.GetPlace()); + const T* items_data = items.data<T>(); + const int64_t num_item = items.dims()[0]; + const int class_num = items.dims()[1]; + if (items.dims().size() == 3) { + int item_size = items.dims()[2]; + for (int i = 0; i < num_item; ++i) { + std::memcpy(item_data + i * item_size, + items_data + i * class_num * item_size + class_id * item_size, + sizeof(T) * item_size); + } + } else { + for (int i = 0; i < num_item; ++i) { + item_data[i] = items_data[i * class_num + class_id]; + } + } +} + template <typename T> class MultiClassNMSKernel : public framework::OpKernel<T> { public: void NMSFast(const Tensor& bbox, const Tensor& scores, const T score_threshold, const T nms_threshold, const T eta, - const int64_t top_k, std::vector<int>* selected_indices) const { + const int64_t top_k, std::vector<int>* selected_indices, + const bool normalized) const { // The total boxes for each instance.
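`NMSFast` now threads `normalized` through to `JaccardOverlap`, whose changed arithmetic appears in the hunk above. In isolation the IoU computation looks like the sketch below; the `max(0, ...)` clamp is an addition for this standalone version, since the original guards against disjoint boxes separately before computing areas.

```cpp
#include <algorithm>
#include <cstdio>

// IoU with the normalized flag from the hunk above: pixel-coordinate boxes
// (normalized == false) get a +1 on widths and heights.
float JaccardOverlap(const float *b1, const float *b2, bool normalized) {
  const float norm = normalized ? 0.f : 1.f;
  const float inter_w =
      std::max(0.f, std::min(b1[2], b2[2]) - std::max(b1[0], b2[0]) + norm);
  const float inter_h =
      std::max(0.f, std::min(b1[3], b2[3]) - std::max(b1[1], b2[1]) + norm);
  const float inter = inter_w * inter_h;
  const float area1 = (b1[2] - b1[0] + norm) * (b1[3] - b1[1] + norm);
  const float area2 = (b2[2] - b2[0] + norm) * (b2[3] - b2[1] + norm);
  return inter / (area1 + area2 - inter);
}

int main() {
  const float a[4] = {0, 0, 10, 10}, b[4] = {5, 5, 15, 15};
  // inter 25, union 175 -> ~0.143
  std::printf("normalized IoU: %.3f\n", JaccardOverlap(a, b, true));
}
```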
int64_t num_boxes = bbox.dims()[0]; // 4: [xmin ymin xmax ymax] @@ -178,15 +217,16 @@ class MultiClassNMSKernel : public framework::OpKernel { T overlap = T(0.); // 4: [xmin ymin xmax ymax] if (box_size == 4) { - overlap = JaccardOverlap(bbox_data + idx * box_size, - bbox_data + kept_idx * box_size, true); + overlap = + JaccardOverlap(bbox_data + idx * box_size, + bbox_data + kept_idx * box_size, normalized); } // 8: [x1 y1 x2 y2 x3 y3 x4 y4] or 16, 24, 32 if (box_size == 8 || box_size == 16 || box_size == 24 || box_size == 32) { - overlap = - PolyIoU(bbox_data + idx * box_size, - bbox_data + kept_idx * box_size, box_size, true); + overlap = PolyIoU(bbox_data + idx * box_size, + bbox_data + kept_idx * box_size, box_size, + normalized); } keep = overlap <= adaptive_threshold; } else { @@ -205,37 +245,58 @@ class MultiClassNMSKernel : public framework::OpKernel { void MultiClassNMS(const framework::ExecutionContext& ctx, const Tensor& scores, const Tensor& bboxes, + const int scores_size, std::map>* indices, int* num_nmsed_out) const { int64_t background_label = ctx.Attr("background_label"); int64_t nms_top_k = ctx.Attr("nms_top_k"); int64_t keep_top_k = ctx.Attr("keep_top_k"); + bool normalized = ctx.Attr("normalized"); T nms_threshold = static_cast(ctx.Attr("nms_threshold")); T nms_eta = static_cast(ctx.Attr("nms_eta")); T score_threshold = static_cast(ctx.Attr("score_threshold")); + auto& dev_ctx = ctx.template device_context(); - int64_t class_num = scores.dims()[0]; - int64_t predict_dim = scores.dims()[1]; int num_det = 0; + + int64_t class_num = scores_size == 3 ? scores.dims()[0] : scores.dims()[1]; + Tensor bbox_slice, score_slice; for (int64_t c = 0; c < class_num; ++c) { if (c == background_label) continue; - Tensor score = scores.Slice(c, c + 1); - NMSFast(bboxes, score, score_threshold, nms_threshold, nms_eta, nms_top_k, - &((*indices)[c])); + if (scores_size == 3) { + score_slice = scores.Slice(c, c + 1); + bbox_slice = bboxes; + } else { + score_slice.Resize({scores.dims()[0], 1}); + bbox_slice.Resize({scores.dims()[0], 4}); + SliceOneClass(dev_ctx, scores, c, &score_slice); + SliceOneClass(dev_ctx, bboxes, c, &bbox_slice); + } + NMSFast(bbox_slice, score_slice, score_threshold, nms_threshold, nms_eta, + nms_top_k, &((*indices)[c]), normalized); + if (scores_size == 2) { + std::stable_sort((*indices)[c].begin(), (*indices)[c].end()); + } num_det += (*indices)[c].size(); } *num_nmsed_out = num_det; const T* scores_data = scores.data(); if (keep_top_k > -1 && num_det > keep_top_k) { + const T* sdata; std::vector>> score_index_pairs; for (const auto& it : *indices) { int label = it.first; - const T* sdata = scores_data + label * predict_dim; + if (scores_size == 3) { + sdata = scores_data + label * scores.dims()[1]; + } else { + score_slice.Resize({scores.dims()[0], 1}); + SliceOneClass(dev_ctx, scores, label, &score_slice); + sdata = score_slice.data(); + } const std::vector& label_indices = it.second; for (size_t j = 0; j < label_indices.size(); ++j) { int idx = label_indices[j]; - PADDLE_ENFORCE_LT(idx, predict_dim); score_index_pairs.push_back( std::make_pair(sdata[idx], std::make_pair(label, idx))); } @@ -252,31 +313,55 @@ class MultiClassNMSKernel : public framework::OpKernel { int idx = score_index_pairs[j].second.second; new_indices[label].push_back(idx); } + if (scores_size == 2) { + for (const auto& it : new_indices) { + int label = it.first; + std::stable_sort(new_indices[label].begin(), + new_indices[label].end()); + } + } new_indices.swap(*indices); *num_nmsed_out = 
keep_top_k; } } - void MultiClassOutput(const Tensor& scores, const Tensor& bboxes, + void MultiClassOutput(const platform::DeviceContext& ctx, + const Tensor& scores, const Tensor& bboxes, const std::map>& selected_indices, - Tensor* outs) const { + const int scores_size, Tensor* outs) const { + int64_t class_num = scores.dims()[1]; int64_t predict_dim = scores.dims()[1]; int64_t box_size = bboxes.dims()[1]; - int64_t out_dim = bboxes.dims()[1] + 2; + if (scores_size == 2) { + box_size = bboxes.dims()[2]; + } + int64_t out_dim = box_size + 2; auto* scores_data = scores.data(); auto* bboxes_data = bboxes.data(); auto* odata = outs->data(); - + const T* sdata; + Tensor bbox; + bbox.Resize({scores.dims()[0], box_size}); int count = 0; for (const auto& it : selected_indices) { int label = it.first; - const T* sdata = scores_data + label * predict_dim; const std::vector& indices = it.second; + if (scores_size == 2) { + SliceOneClass(ctx, bboxes, label, &bbox); + } else { + sdata = scores_data + label * predict_dim; + } for (size_t j = 0; j < indices.size(); ++j) { int idx = indices[j]; - const T* bdata = bboxes_data + idx * box_size; - odata[count * out_dim] = label; // label - odata[count * out_dim + 1] = sdata[idx]; // score + odata[count * out_dim] = label; // label + const T* bdata; + if (scores_size == 3) { + bdata = bboxes_data + idx * box_size; + odata[count * out_dim + 1] = sdata[idx]; // score + } else { + bdata = bbox.data() + idx * box_size; + odata[count * out_dim + 1] = *(scores_data + idx * class_num + label); + } // xmin, ymin, xmax, ymax or multi-points coordinates std::memcpy(odata + count * out_dim + 2, bdata, box_size * sizeof(T)); count++; @@ -285,52 +370,64 @@ class MultiClassNMSKernel : public framework::OpKernel { } void Compute(const framework::ExecutionContext& ctx) const override { - auto* boxes = ctx.Input("BBoxes"); - auto* scores = ctx.Input("Scores"); + auto* boxes = ctx.Input("BBoxes"); + auto* scores = ctx.Input("Scores"); auto* outs = ctx.Output("Out"); auto score_dims = scores->dims(); - - int64_t batch_size = score_dims[0]; - int64_t class_num = score_dims[1]; - int64_t predict_dim = score_dims[2]; - int64_t box_dim = boxes->dims()[2]; - int64_t out_dim = boxes->dims()[2] + 2; + auto score_size = score_dims.size(); + auto& dev_ctx = ctx.template device_context(); std::vector>> all_indices; std::vector batch_starts = {0}; - for (int64_t i = 0; i < batch_size; ++i) { - Tensor ins_score = scores->Slice(i, i + 1); - ins_score.Resize({class_num, predict_dim}); - - Tensor ins_boxes = boxes->Slice(i, i + 1); - ins_boxes.Resize({predict_dim, box_dim}); - + int64_t batch_size = score_dims[0]; + int64_t box_dim = boxes->dims()[2]; + int64_t out_dim = box_dim + 2; + int num_nmsed_out = 0; + Tensor boxes_slice, scores_slice; + int n = score_size == 3 ? 
batch_size : boxes->lod().back().size() - 1; + for (int i = 0; i < n; ++i) { + if (score_size == 3) { + scores_slice = scores->Slice(i, i + 1); + scores_slice.Resize({score_dims[1], score_dims[2]}); + boxes_slice = boxes->Slice(i, i + 1); + boxes_slice.Resize({score_dims[2], box_dim}); + } else { + auto boxes_lod = boxes->lod().back(); + scores_slice = scores->Slice(boxes_lod[i], boxes_lod[i + 1]); + boxes_slice = boxes->Slice(boxes_lod[i], boxes_lod[i + 1]); + } std::map> indices; - int num_nmsed_out = 0; - MultiClassNMS(ctx, ins_score, ins_boxes, &indices, &num_nmsed_out); + MultiClassNMS(ctx, scores_slice, boxes_slice, score_size, &indices, + &num_nmsed_out); all_indices.push_back(indices); batch_starts.push_back(batch_starts.back() + num_nmsed_out); } int num_kept = batch_starts.back(); if (num_kept == 0) { - T* od = outs->mutable_data({1}, ctx.GetPlace()); + T* od = outs->mutable_data({1, 1}, ctx.GetPlace()); od[0] = -1; + batch_starts = {0, 1}; } else { outs->mutable_data({num_kept, out_dim}, ctx.GetPlace()); - for (int64_t i = 0; i < batch_size; ++i) { - Tensor ins_score = scores->Slice(i, i + 1); - ins_score.Resize({class_num, predict_dim}); - - Tensor ins_boxes = boxes->Slice(i, i + 1); - ins_boxes.Resize({predict_dim, box_dim}); - + for (int i = 0; i < n; ++i) { + if (score_size == 3) { + scores_slice = scores->Slice(i, i + 1); + boxes_slice = boxes->Slice(i, i + 1); + scores_slice.Resize({score_dims[1], score_dims[2]}); + boxes_slice.Resize({score_dims[2], box_dim}); + } else { + auto boxes_lod = boxes->lod().back(); + scores_slice = scores->Slice(boxes_lod[i], boxes_lod[i + 1]); + boxes_slice = boxes->Slice(boxes_lod[i], boxes_lod[i + 1]); + } int64_t s = batch_starts[i]; int64_t e = batch_starts[i + 1]; if (e > s) { Tensor out = outs->Slice(s, e); - MultiClassOutput(ins_score, ins_boxes, all_indices[i], &out); + MultiClassOutput(dev_ctx, scores_slice, boxes_slice, all_indices[i], + score_dims.size(), &out); } } } @@ -346,17 +443,24 @@ class MultiClassNMSOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { AddInput("BBoxes", - "(Tensor) A 3-D Tensor with shape " + "Two types of bboxes are supported:" + "1. (Tensor) A 3-D Tensor with shape " "[N, M, 4 or 8 16 24 32] represents the " "predicted locations of M bounding bboxes, N is the batch size. " "Each bounding box has four coordinate values and the layout is " - "[xmin, ymin, xmax, ymax], when box size equals to 4."); + "[xmin, ymin, xmax, ymax], when box size equals to 4." + "2. (LoDTensor) A 3-D Tensor with shape [M, C, 4]" + "M is the number of bounding boxes, C is the class number"); AddInput("Scores", - "(Tensor) A 3-D Tensor with shape [N, C, M] represents the " + "Two types of scores are supported:" + "1. (Tensor) A 3-D Tensor with shape [N, C, M] represents the " "predicted confidence predictions. N is the batch size, C is the " "class number, M is number of bounding boxes. For each category " "there are total M scores which corresponding M bounding boxes. " - " Please note, M is equal to the 1st dimension of BBoxes. "); + " Please note, M is equal to the 2nd dimension of BBoxes. " + "2. (LoDTensor) A 2-D LoDTensor with shape [M, C]. " + "M is the number of bbox, C is the class number. In this case, " + "Input BBoxes should be the second case with shape [M, C, 4]."); AddAttr( "background_label", "(int, defalut: 0) " @@ -384,6 +488,10 @@ class MultiClassNMSOpMaker : public framework::OpProtoAndCheckerMaker { "(int64_t) " "Number of total bboxes to be kept per image after NMS " "step. 
-1 means keeping all bboxes after NMS step."); + AddAttr("normalized", + "(bool, default true) " + "Whether detections are normalized.") + .SetDefault(true); AddOutput("Out", "(LoDTensor) A 2-D LoDTensor with shape [No, 6] represents the " "detections. Each row has 6 values: " @@ -399,24 +507,21 @@ class MultiClassNMSOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( This operator is to do multi-class non maximum suppression (NMS) on a batched of boxes and scores. - In the NMS step, this operator greedily selects a subset of detection bounding boxes that have high scores larger than score_threshold, if providing this threshold, then selects the largest nms_top_k confidences scores if nms_top_k is larger than -1. Then this operator pruns away boxes that have high IOU (intersection over union) overlap with already selected boxes by adaptive threshold NMS based on parameters of nms_threshold and nms_eta. - Aftern NMS step, at most keep_top_k number of total bboxes are to be kept per image if keep_top_k is larger than -1. - This operator support multi-class and batched inputs. It applying NMS independently for each class. The outputs is a 2-D LoDTenosr, for each image, the offsets in first dimension of LoDTensor are called LoD, the number of offset is N + 1, where N is the batch size. If LoD[i + 1] - LoD[i] == 0, means there is no detected bbox for this image. If there is no detected boxes -for all images, all the elements in LoD are 0, and the Out only contains one -value which is -1. +for all images, all the elements in LoD are set to {1}, and the Out only +contains one value which is -1. )DOC"); } }; diff --git a/paddle/fluid/operators/distributed/proto_encoder_helper.h b/paddle/fluid/operators/distributed/proto_encoder_helper.h index 27ca1f4edc04f5fca54b1a6340243634a596939c..e9f06f54327875c0568c571627e9effb998e15be 100644 --- a/paddle/fluid/operators/distributed/proto_encoder_helper.h +++ b/paddle/fluid/operators/distributed/proto_encoder_helper.h @@ -85,7 +85,7 @@ class ProtoEncodeHelper { #define REPLACE_ENFORCE_GLOG 1 // Make sure callers didn't do operations that went over max_size promised if (paddle::platform::is_error(p_ <= limit_)) { - paddle::platform::throw_on_error(p_ <= limit_); + paddle::platform::throw_on_error(p_ <= limit_, ""); } #undef REPLACE_ENFORCE_GLOG } diff --git a/paddle/fluid/operators/dropout_op.cu b/paddle/fluid/operators/dropout_op.cu index d65491267de1ce3495d8b8250cf0cff570dfcc6a..7a6927d3e54b4ece8f17d7a1e7e431ba836edff9 100644 --- a/paddle/fluid/operators/dropout_op.cu +++ b/paddle/fluid/operators/dropout_op.cu @@ -114,4 +114,5 @@ REGISTER_OP_CUDA_KERNEL( ops::GPUDropoutKernel); REGISTER_OP_CUDA_KERNEL( dropout_grad, ops::DropoutGradKernel, + ops::DropoutGradKernel, ops::DropoutGradKernel); diff --git a/paddle/fluid/operators/elementwise/elementwise_add_mkldnn_op.cc b/paddle/fluid/operators/elementwise/mkldnn/elementwise_add_mkldnn_op.cc similarity index 100% rename from paddle/fluid/operators/elementwise/elementwise_add_mkldnn_op.cc rename to paddle/fluid/operators/elementwise/mkldnn/elementwise_add_mkldnn_op.cc diff --git a/paddle/fluid/operators/elementwise/elementwise_mul_mkldnn_op.cc b/paddle/fluid/operators/elementwise/mkldnn/elementwise_mul_mkldnn_op.cc similarity index 100% rename from paddle/fluid/operators/elementwise/elementwise_mul_mkldnn_op.cc rename to paddle/fluid/operators/elementwise/mkldnn/elementwise_mul_mkldnn_op.cc diff --git a/paddle/fluid/operators/gather_op.cu b/paddle/fluid/operators/gather_op.cu index 
9f4aef08cd58e72ce344a640e6564b9e360ce169..490ba9a585ee8fac82a9e1178f506a6d39e5fd1c 100644 --- a/paddle/fluid/operators/gather_op.cu +++ b/paddle/fluid/operators/gather_op.cu @@ -31,7 +31,7 @@ class GatherOpCUDAKernel : public framework::OpKernel { auto *output = ctx.Output("Out"); output->mutable_data(ctx.GetPlace()); - + if (x->numel() == 0) return; GPUGather(ctx.device_context(), *x, *index, output); } }; @@ -45,14 +45,13 @@ class GatherGradOpCUDAKernel : public framework::OpKernel { auto *Index = ctx.Input("Index"); auto *dX = ctx.Output(framework::GradVarName("X")); auto *dO = ctx.Input(framework::GradVarName("Out")); - auto *x = ctx.Input("X"); dX->mutable_data(ctx.GetPlace()); auto dxt = framework::EigenVector::Flatten(*dX); auto &place = *ctx.template device_context() .eigen_device(); dxt.device(place) = dxt.constant(static_cast(0)); - + if (dO->numel() == 0) return; GPUScatterAssign(ctx.device_context(), *dO, *Index, dX); } }; @@ -61,11 +60,14 @@ class GatherGradOpCUDAKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; +namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(gather, ops::GatherOpCUDAKernel, ops::GatherOpCUDAKernel, ops::GatherOpCUDAKernel, - ops::GatherOpCUDAKernel); + ops::GatherOpCUDAKernel, + ops::GatherOpCUDAKernel); REGISTER_OP_CUDA_KERNEL(gather_grad, ops::GatherGradOpCUDAKernel, ops::GatherGradOpCUDAKernel, ops::GatherGradOpCUDAKernel, - ops::GatherGradOpCUDAKernel); + ops::GatherGradOpCUDAKernel, + ops::GatherGradOpCUDAKernel); diff --git a/paddle/fluid/operators/gather_op.h b/paddle/fluid/operators/gather_op.h index 2dd726bebb1bc2e4d83844c0b98df01c390e622f..2e18298cf8e34d5f70369c89b3b3b2a9ced0ce62 100644 --- a/paddle/fluid/operators/gather_op.h +++ b/paddle/fluid/operators/gather_op.h @@ -35,7 +35,7 @@ class GatherOpKernel : public framework::OpKernel { auto *output = ctx.Output("Out"); output->mutable_data(ctx.GetPlace()); - + if (x->numel() == 0) return; CPUGather(ctx.device_context(), *x, *index, output); } }; @@ -56,7 +56,7 @@ class GatherGradientOpKernel : public framework::OpKernel { auto &place = *ctx.template device_context() .eigen_device(); dxt.device(place) = dxt.constant(static_cast(0)); - + if (dO->numel() == 0) return; ScatterAssign(ctx.device_context(), *dO, *Index, dX); } }; diff --git a/paddle/fluid/operators/hierarchical_sigmoid_op.h b/paddle/fluid/operators/hierarchical_sigmoid_op.h index 1a7ca963010112bbcab69f1ceeb9cb8d19ca9b9e..4d5a84bcafed1ab0739349e1dbc7b5a9f9ad64ec 100644 --- a/paddle/fluid/operators/hierarchical_sigmoid_op.h +++ b/paddle/fluid/operators/hierarchical_sigmoid_op.h @@ -136,7 +136,7 @@ class HierarchicalSigmoidOpKernel : public framework::OpKernel { sum.mutable_data(framework::make_ddim(sum_dims), ctx.GetPlace()); auto sum_mat = EigenMatrix::From(sum); out->mutable_data(ctx.GetPlace()); - auto out_mat = framework::EigenVector::Flatten(*out); + auto out_mat = framework::EigenMatrix::From(*out); if (bias) { bit_code->Add(*bias, pre_out); } diff --git a/paddle/fluid/operators/interpolate_op.cc b/paddle/fluid/operators/interpolate_op.cc index 93dd3f794f6087a3158fee1f262795871f21611a..de91ba6270ac2ed22c8380878c0a0037fb1629c0 100644 --- a/paddle/fluid/operators/interpolate_op.cc +++ b/paddle/fluid/operators/interpolate_op.cc @@ -82,6 +82,18 @@ class InterpolateOpMaker : public framework::OpProtoAndCheckerMaker { "bilinear interpolation and \"nearest\" for nearest " "neighbor interpolation.") .SetDefault("bilinear"); + AddAttr( + "align_corners", + "an optinal bool. 
Defaults to True. " + "If True, the centers of 4 corner pixels of the input and output " + "tensors are aligned, preserving the values at the corner pixels; " + "if False, they are not aligned") + .SetDefault(true); + AddAttr<int>("align_mode", + "(int, default \'1\'), optional for bilinear interpolation, " + "can be \'0\' for src_idx = scale*(dst_index+0.5)-0.5, " + "can be \'1\' for src_idx = scale*dst_index.") + .SetDefault(1); AddComment(R"DOC( This operator samples input X to given output shape by using specified interpolation method, the interpolation methods can be \"nearest\" @@ -98,6 +110,64 @@ class InterpolateOpMaker : public framework::OpProtoAndCheckerMaker { to perform linear interpolation first in one direction, and then again in the other direction. + align_corners and align_mode are optional parameters; the calculation method + of interpolation can be selected by them. + + Example: + + For scale: + + if align_corners = True and out_{size} > 1 : + + scale_{factor} = (in_{size}-1.0)/(out_{size}-1.0) + + else: + + scale_{factor} = float(in_{size}/out_{size}) + + + Nearest neighbor interpolation: + + if: + align_corners = False + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_{out} = \left \lfloor H_{in} * scale_{factor} \right \rfloor + W_{out} = \left \lfloor W_{in} * scale_{factor} \right \rfloor + + else: + align_corners = True + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_{out} = round(H_{in} * scale_{factor}) + W_{out} = round(W_{in} * scale_{factor}) + + Bilinear interpolation: + + if: + align_corners = False , align_mode = 0 + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_{out} = (H_{in}+0.5) * scale_{factor} - 0.5 + W_{out} = (W_{in}+0.5) * scale_{factor} - 0.5 + + + else: + + input : (N,C,H_in,W_in) + output: (N,C,H_out,W_out) where: + + H_{out} = H_{in} * scale_{factor} + W_{out} = W_{in} * scale_{factor} + + + For details of nearest neighbor interpolation, please refer to Wikipedia: https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation
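The scale and source-index rules in this DOC string reduce to two small functions; a minimal illustrative sketch (function names are assumptions, and the clamp to 0 mirrors the kernels changed below):

#include <algorithm>

// Scale factor exactly as the DOC above defines it.
float ScaleFactor(int in_size, int out_size, bool align_corners) {
  if (out_size <= 1) return 0.f;
  return align_corners ? (in_size - 1.f) / (out_size - 1.f)
                       : static_cast<float>(in_size) / out_size;
}

// Source coordinate for one destination index under the bilinear modes:
// align_mode == 0 with align_corners == false uses half-pixel centers.
float SrcIndex(int dst_index, float scale, bool align_corners, int align_mode) {
  if (align_mode == 0 && !align_corners) {
    return std::max(0.f, scale * (dst_index + 0.5f) - 0.5f);
  }
  return scale * dst_index;
}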
diff --git a/paddle/fluid/operators/interpolate_op.cu b/paddle/fluid/operators/interpolate_op.cu index 99ac725f73bf60ab0fb9a467432e8a57c646ef35..b887878ea2291d6c56fec91738784e338606b84f 100644 --- a/paddle/fluid/operators/interpolate_op.cu +++ b/paddle/fluid/operators/interpolate_op.cu @@ -23,7 +23,8 @@ __global__ void KeNearestNeighborInterpFw( const T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, - const size_t num_channels, const float ratio_h, const float ratio_w) { + const size_t num_channels, const float ratio_h, const float ratio_w, + const bool align_corners) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; @@ -35,10 +36,14 @@ __global__ void KeNearestNeighborInterpFw( int channel_id = out_id_w / out_img_size; int out_img_idy = (out_id_w % out_img_size) / out_img_w; - int in_img_idy = static_cast<int>(ratio_h * out_img_idy + 0.5); + int in_img_idy = (align_corners) + ? static_cast<int>(ratio_h * out_img_idy + 0.5) + : static_cast<int>(ratio_h * out_img_idy); int out_img_idx = tid % out_img_w; - int in_img_idx = static_cast<int>(ratio_w * out_img_idx + 0.5); + int in_img_idx = (align_corners) + ? static_cast<int>(ratio_w * out_img_idx + 0.5) + : static_cast<int>(ratio_w * out_img_idx); out[tid] = in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; @@ -50,7 +55,8 @@ __global__ void KeNearestNeighborInterpBw( T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, - const size_t num_channels, const float ratio_h, const float ratio_w) { + const size_t num_channels, const float ratio_h, const float ratio_w, + const bool align_corners) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; @@ -62,10 +68,14 @@ __global__ void KeNearestNeighborInterpBw( int channel_id = out_id_w / out_img_size; int out_img_idy = (out_id_w % out_img_size) / out_img_w; - int in_img_idy = static_cast<int>(ratio_h * out_img_idy + 0.5); + int in_img_idy = (align_corners) + ? static_cast<int>(ratio_h * out_img_idy + 0.5) + : static_cast<int>(ratio_h * out_img_idy); int out_img_idx = tid % out_img_w; - int in_img_idx = static_cast<int>(ratio_w * out_img_idx + 0.5); + int in_img_idx = (align_corners) + ? static_cast<int>(ratio_w * out_img_idx + 0.5) + : static_cast<int>(ratio_w * out_img_idx); T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; @@ -79,10 +89,12 @@ __global__ void KeBilinearInterpFw( const T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, - const size_t num_channels, const float ratio_h, const float ratio_w) { + const size_t num_channels, const float ratio_h, const float ratio_w, + const bool align_corners, const int align_mode) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; + bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; @@ -91,15 +103,23 @@ __global__ void KeBilinearInterpFw( int channel_id = out_id_w / out_img_size; int out_img_idy = (out_id_w % out_img_size) / out_img_w; - int in_img_idy = ratio_h * out_img_idy; + int in_img_idy = align_flag + ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5) + : static_cast<int>(ratio_h * out_img_idy); + in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; - T h1lambda = ratio_h * out_img_idy - in_img_idy; + T h1lambda = align_flag ? ratio_h * (out_img_idy + 0.5) - 0.5 - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int out_img_idx = tid % out_img_w; - int in_img_idx = ratio_w * out_img_idx; + int in_img_idx = align_flag + ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) + : static_cast<int>(ratio_w * out_img_idx); + in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; - T w1lambda = ratio_w * out_img_idx - in_img_idx; + T w1lambda = align_flag ?
ratio_w * (out_img_idx + 0.5) - 0.5 - in_img_idx + : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; const T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size + @@ -118,10 +138,12 @@ __global__ void KeBilinearInterpBw( T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, - const size_t num_channels, const T ratio_h, const T ratio_w) { + const size_t num_channels, const T ratio_h, const T ratio_w, + const bool align_corners, const int align_mode) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; + bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; @@ -130,15 +152,22 @@ __global__ void KeBilinearInterpBw( int channel_id = out_id_w / out_img_size; int out_img_idy = (out_id_w % out_img_size) / out_img_w; - int in_img_idy = ratio_h * out_img_idy; + int in_img_idy = align_flag ? ratio_h * (out_img_idy + 0.5) - 0.5 + : ratio_h * out_img_idy; + in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; - T h1lambda = ratio_h * out_img_idy - in_img_idy; + T h1lambda = align_flag ? ratio_h * (out_img_idy + 0.5) - 0.5 - in_img_idy + : ratio_h * out_img_idy - in_img_idy; + T h2lambda = 1.f - h1lambda; int out_img_idx = tid % out_img_w; - int in_img_idx = ratio_w * out_img_idx; + int in_img_idx = align_flag ? ratio_w * (out_img_idx + 0.5) - 0.5 + : ratio_w * out_img_idx; + in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; - T w1lambda = ratio_w * out_img_idx - in_img_idx; + T w1lambda = align_flag ? ratio_w * (out_img_idx + 0.5) - 0.5 - in_img_idx + : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size + @@ -175,6 +204,9 @@ class InterpolateOpCUDAKernel : public framework::OpKernel { out_w = size_data[1]; } + bool align_corners = ctx.Attr("align_corners"); + int align_mode = ctx.Attr("align_mode"); + int n = input->dims()[0]; int c = input->dims()[1]; int in_h = input->dims()[2]; @@ -188,10 +220,16 @@ class InterpolateOpCUDAKernel : public framework::OpKernel { int in_chw = c * in_hw; int out_chw = c * out_hw; - float ratio_h = - (out_h > 1) ? static_cast(in_h - 1) / (out_h - 1) : 0.f; - float ratio_w = - (out_w > 1) ? static_cast(in_w - 1) / (out_w - 1) : 0.f; + float ratio_h = 0.f; + float ratio_w = 0.f; + if (out_h > 1) { + ratio_h = (align_corners) ? static_cast(in_h - 1) / (out_h - 1) + : static_cast(in_h) / out_h; + } + if (out_w > 1) { + ratio_w = (align_corners) ? 
static_cast(in_w - 1) / (out_w - 1) + : static_cast(in_w) / out_w; + } if (in_h == out_h && in_w == out_w) { framework::TensorCopy(*input, ctx.GetPlace(), output); @@ -206,12 +244,12 @@ class InterpolateOpCUDAKernel : public framework::OpKernel { KeNearestNeighborInterpFw< T><<>>( input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n, - out_chw, c, ratio_h, ratio_w); + out_chw, c, ratio_h, ratio_w, align_corners); } else if ("bilinear" == interp_method) { KeBilinearInterpFw< T><<>>( input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n, - out_chw, c, ratio_h, ratio_w); + out_chw, c, ratio_h, ratio_w, align_corners, align_mode); } } }; @@ -234,6 +272,10 @@ class InterpolateGradOpCUDAKernel : public framework::OpKernel { int out_h = ctx.Attr("out_h"); int out_w = ctx.Attr("out_w"); auto out_size = ctx.Input("OutSize"); + + bool align_corners = ctx.Attr("align_corners"); + int align_mode = ctx.Attr("align_mode"); + if (out_size != nullptr) { Tensor sizes; framework::TensorCopy(*out_size, platform::CPUPlace(), &sizes); @@ -252,10 +294,16 @@ class InterpolateGradOpCUDAKernel : public framework::OpKernel { int in_chw = c * in_hw; int out_chw = c * out_hw; - float ratio_h = - (out_h > 1) ? static_cast(in_h - 1) / (out_h - 1) : 0.f; - float ratio_w = - (out_w > 1) ? static_cast(in_w - 1) / (out_w - 1) : 0.f; + float ratio_h = 0.f; + float ratio_w = 0.f; + if (out_h > 1) { + ratio_h = (align_corners) ? static_cast(in_h - 1) / (out_h - 1) + : static_cast(in_h) / out_h; + } + if (out_w > 1) { + ratio_w = (align_corners) ? static_cast(in_w - 1) / (out_w - 1) + : static_cast(in_w) / out_w; + } if (in_h == out_h && in_w == out_w) { framework::TensorCopy(*output_grad, ctx.GetPlace(), input_grad); @@ -270,12 +318,12 @@ class InterpolateGradOpCUDAKernel : public framework::OpKernel { KeNearestNeighborInterpBw< T><<>>( input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h, - out_w, n, out_chw, c, ratio_h, ratio_w); + out_w, n, out_chw, c, ratio_h, ratio_w, align_corners); } else if ("bilinear" == interp_method) { KeBilinearInterpBw< T><<>>( input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h, - out_w, n, out_chw, c, ratio_h, ratio_w); + out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, align_mode); } } }; diff --git a/paddle/fluid/operators/interpolate_op.h b/paddle/fluid/operators/interpolate_op.h index 7fdb3e1f5a2ff82284d89dd0759e357978e1d873..c631ad1dd158ce114169602f073d69b2291b5b3b 100644 --- a/paddle/fluid/operators/interpolate_op.h +++ b/paddle/fluid/operators/interpolate_op.h @@ -26,14 +26,17 @@ template static void NearestNeighborInterpolate(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int n, const int c, - const int out_h, const int out_w) { + const int out_h, const int out_w, + const bool align_corners) { auto input_t = EigenTensor::From(input); auto output_t = EigenTensor::From(*output); for (int k = 0; k < out_h; k++) { // loop for images - int in_k = static_cast(ratio_h * k + 0.5); + int in_k = (align_corners) ? static_cast(ratio_h * k + 0.5) + : static_cast(ratio_h * k); for (int l = 0; l < out_w; l++) { - int in_l = static_cast(ratio_w * l + 0.5); + int in_l = (align_corners) ? 
static_cast(ratio_w * l + 0.5) + : static_cast(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels @@ -48,20 +51,29 @@ template static void BilinearInterpolation(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, - const int c, const int out_h, - const int out_w) { + const int c, const int out_h, const int out_w, + const bool align_corners, + const bool align_mode) { auto input_t = EigenTensor::From(input); auto output_t = EigenTensor::From(*output); + bool align_flag = (align_mode == 0 && !align_corners); for (int k = 0; k < out_h; k++) { // loop for images - int y_n = static_cast(ratio_h * k); + int y_n = align_flag ? static_cast(ratio_h * (k + 0.5) - 0.5) + : static_cast(ratio_h * k); + y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); - float d_n = ratio_h * k - y_n; + float d_n = + align_flag ? ratio_h * (k + 0.5) - 0.5 - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; for (int l = 0; l < out_w; l++) { - int x_w = static_cast(ratio_w * l); + int x_w = (align_mode == 0 && !align_corners) + ? static_cast(ratio_w * (l + 0.5) - 0.5) + : static_cast(ratio_w * l); + x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); - float d_w = ratio_w * l - x_w; + float d_w = + align_flag ? ratio_w * (l + 0.5) - 0.5 - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; for (int i = 0; i < n; i++) { // loop for batches @@ -78,19 +90,20 @@ static void BilinearInterpolation(const Tensor& input, Tensor* output, } template -static void NearestNeighborInterpolateGrad(const Tensor& output_grad, - Tensor* input_grad, - const float ratio_h, - const float ratio_w, const int n, - const int c, const int out_h, - const int out_w) { +static void NearestNeighborInterpolateGrad( + const Tensor& output_grad, Tensor* input_grad, const float ratio_h, + const float ratio_w, const int n, const int c, const int out_h, + const int out_w, const bool align_corners) { auto input_grad_t = EigenTensor::From(*input_grad); auto output_grad_t = EigenTensor::From(output_grad); + for (int k = 0; k < out_h; k++) { // loop for images - int in_k = static_cast(ratio_h * k + 0.5); + int in_k = (align_corners) ? static_cast(ratio_h * k + 0.5) + : static_cast(ratio_h * k); for (int l = 0; l < out_w; l++) { - int in_l = static_cast(ratio_w * l + 0.5); + int in_l = (align_corners) ? static_cast(ratio_w * l + 0.5) + : static_cast(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels @@ -106,19 +119,28 @@ static void BilinearInterpolationGrad(const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, - const int out_h, const int out_w) { + const int out_h, const int out_w, + const bool align_corners, + const int align_mode) { auto input_grad_t = EigenTensor::From(*input_grad); auto output_grad_t = EigenTensor::From(output_grad); + bool align_flag = (align_mode == 0 && !align_corners); for (int k = 0; k < out_h; k++) { // loop for images - int y_n = static_cast(ratio_h * k); + int y_n = align_flag ? static_cast(ratio_h * (k + 0.5) - 0.5) + : static_cast(ratio_h * k); + y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); - float d_n = ratio_h * k - y_n; + float d_n = + align_flag ? 
ratio_h * (k + 0.5) - 0.5 - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; for (int l = 0; l < out_w; l++) { - int x_w = static_cast(ratio_w * l); + int x_w = align_flag ? static_cast(ratio_w * (l + 0.5) - 0.5) + : static_cast(ratio_w * l); + x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); - float d_w = ratio_w * l - x_w; + float d_w = + align_flag ? ratio_w * (l + 0.5) - 0.5 - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; for (int i = 0; i < n; i++) { // loop for batches @@ -134,7 +156,6 @@ static void BilinearInterpolationGrad(const Tensor& output_grad, } } } - template class InterpolateKernel : public framework::OpKernel { public: @@ -151,6 +172,8 @@ class InterpolateKernel : public framework::OpKernel { out_h = out_size_data[0]; out_w = out_size_data[1]; } + bool align_corners = ctx.Attr("align_corners"); + int align_mode = ctx.Attr("align_mode"); const int n = input->dims()[0]; const int c = input->dims()[1]; @@ -168,17 +191,24 @@ class InterpolateKernel : public framework::OpKernel { return; } - float ratio_h = - (out_h > 1) ? static_cast(in_h - 1) / (out_h - 1) : 0.f; - float ratio_w = - (out_w > 1) ? static_cast(in_w - 1) / (out_w - 1) : 0.f; + float ratio_h = 0.f; + float ratio_w = 0.f; + + if (out_h > 1) { + ratio_h = (align_corners) ? static_cast(in_h - 1) / (out_h - 1) + : static_cast(in_h) / out_h; + } + if (out_w > 1) { + ratio_w = (align_corners) ? static_cast(in_w - 1) / (out_w - 1) + : static_cast(in_w) / out_w; + } if ("bilinear" == interp_method) { BilinearInterpolation(*input, output, ratio_h, ratio_w, in_h, in_w, n, - c, out_h, out_w); + c, out_h, out_w, align_corners, align_mode); } else if ("nearest" == interp_method) { NearestNeighborInterpolate(*input, output, ratio_h, ratio_w, n, c, - out_h, out_w); + out_h, out_w, align_corners); } } }; @@ -200,6 +230,8 @@ class InterpolateGradKernel : public framework::OpKernel { out_h = out_size_data[0]; out_w = out_size_data[1]; } + bool align_corners = ctx.Attr("align_corners"); + int align_mode = ctx.Attr("align_mode"); const int n = input->dims()[0]; const int c = input->dims()[1]; @@ -217,17 +249,26 @@ class InterpolateGradKernel : public framework::OpKernel { return; } - float ratio_h = - (out_h > 1) ? static_cast(in_h - 1) / (out_h - 1) : 0.f; - float ratio_w = - (out_w > 1) ? static_cast(in_w - 1) / (out_w - 1) : 0.f; + float ratio_h = 0.f; + float ratio_w = 0.f; + + if (out_h > 1) { + ratio_h = (align_corners) ? static_cast(in_h - 1) / (out_h - 1) + : static_cast(in_h) / out_h; + } + if (out_w > 1) { + ratio_w = (align_corners) ? static_cast(in_w - 1) / (out_w - 1) + : static_cast(in_w) / out_w; + } if ("bilinear" == interp_method) { BilinearInterpolationGrad(*output_grad, input_grad, ratio_h, ratio_w, - in_h, in_w, n, c, out_h, out_w); + in_h, in_w, n, c, out_h, out_w, + align_corners, align_mode); } else if ("nearest" == interp_method) { NearestNeighborInterpolateGrad(*output_grad, input_grad, ratio_h, - ratio_w, n, c, out_h, out_w); + ratio_w, n, c, out_h, out_w, + align_corners); } } }; diff --git a/paddle/fluid/operators/jit/benchmark.cc b/paddle/fluid/operators/jit/benchmark.cc index 186c37c56ec9410ac9a31503e33e7e334d0afc40..5c5a61f64093802697eb21452267471129c7fcf3 100644 --- a/paddle/fluid/operators/jit/benchmark.cc +++ b/paddle/fluid/operators/jit/benchmark.cc @@ -158,7 +158,7 @@ void BenchAllImpls(const typename KernelTuples::attr_type& attr, Args... 
args) { using Tensor = paddle::framework::Tensor; -template +template void BenchXYZNKernel() { for (int d : TestSizes()) { Tensor x, y, z; @@ -175,7 +175,7 @@ void BenchXYZNKernel() { } } -template +template void BenchAXYNKernel() { for (int d : TestSizes()) { const T a = static_cast(3); @@ -187,10 +187,23 @@ void BenchAXYNKernel() { RandomVec(d, x_data); BenchAllImpls, PlaceType>(d, &a, x.data(), y_data, d); + // test inplace + BenchAllImpls, PlaceType>(d, &a, x.data(), x_data, + d); + } +} + +template +void BenchXRNKernel() { + for (int d : TestSizes()) { + Tensor x; + RandomVec(d, x.mutable_data({d}, PlaceType())); + T res; + BenchAllImpls, PlaceType>(d, x.data(), &res, d); } } -template +template void BenchXYNKernel() { for (int d : TestSizes()) { Tensor x, y; @@ -203,7 +216,7 @@ void BenchXYNKernel() { } } -template +template void BenchLSTMKernel() { for (bool use_peephole : {true, false}) { for (int d : TestSizes()) { @@ -240,7 +253,7 @@ void BenchLSTMKernel() { } } -template +template void BenchGRUKernel() { for (int d : TestSizes()) { const jit::gru_attr_t attr(d, jit::kVSigmoid, jit::kVTanh); @@ -262,7 +275,7 @@ void BenchGRUKernel() { } } -template +template void BenchSeqPoolKernel() { std::vector pool_types = { jit::SeqPoolType::kSum, jit::SeqPoolType::kAvg, jit::SeqPoolType::kSqrt}; @@ -284,7 +297,7 @@ void BenchSeqPoolKernel() { } } -template +template void BenchMatMulKernel() { for (int m : {1, 2, 3, 4}) { for (int n : TestSizes()) { @@ -305,57 +318,64 @@ void BenchMatMulKernel() { } } +template +void BenchSoftmaxKernel() { + for (int bs : {1, 2, 10}) { + for (int n : TestSizes()) { + Tensor x, y; + x.Resize({bs, n}); + y.Resize({bs, n}); + RandomVec(bs * n, x.mutable_data(PlaceType()), -2.f, 2.f); + const T* x_data = x.data(); + T* y_data = y.mutable_data(PlaceType()); + BenchAllImpls, PlaceType>(n, x_data, y_data, n, + bs); + } + } +} + using T = float; -using PlaceType = paddle::platform::CPUPlace; +using CPUPlace = paddle::platform::CPUPlace; // xyzn -BENCH_FP32_CPU(kVMul) { BenchXYZNKernel(); } - -BENCH_FP32_CPU(kVAdd) { BenchXYZNKernel(); } - -BENCH_FP32_CPU(kVAddRelu) { BenchXYZNKernel(); } - -BENCH_FP32_CPU(kVSub) { BenchXYZNKernel(); } +BENCH_FP32_CPU(kVMul) { BenchXYZNKernel(); } +BENCH_FP32_CPU(kVAdd) { BenchXYZNKernel(); } +BENCH_FP32_CPU(kVAddRelu) { BenchXYZNKernel(); } +BENCH_FP32_CPU(kVSub) { BenchXYZNKernel(); } // axyn -BENCH_FP32_CPU(kVScal) { BenchAXYNKernel(); } +BENCH_FP32_CPU(kVScal) { BenchAXYNKernel(); } +BENCH_FP32_CPU(kVAddBias) { BenchAXYNKernel(); } -BENCH_FP32_CPU(kVAddBias) { BenchAXYNKernel(); } +// xrn +BENCH_FP32_CPU(kHSum) { BenchXRNKernel(); } +BENCH_FP32_CPU(kHMax) { BenchXRNKernel(); } // xyn -BENCH_FP32_CPU(kVRelu) { BenchXYNKernel(); } - -BENCH_FP32_CPU(kVIdentity) { BenchXYNKernel(); } - -BENCH_FP32_CPU(kVSquare) { BenchXYNKernel(); } - -BENCH_FP32_CPU(kVExp) { BenchXYNKernel(); } - -BENCH_FP32_CPU(kVSigmoid) { BenchXYNKernel(); } - -BENCH_FP32_CPU(kVTanh) { BenchXYNKernel(); } +BENCH_FP32_CPU(kVRelu) { BenchXYNKernel(); } +BENCH_FP32_CPU(kVIdentity) { BenchXYNKernel(); } +BENCH_FP32_CPU(kVSquare) { BenchXYNKernel(); } +BENCH_FP32_CPU(kVExp) { BenchXYNKernel(); } +BENCH_FP32_CPU(kVSigmoid) { BenchXYNKernel(); } +BENCH_FP32_CPU(kVTanh) { BenchXYNKernel(); } // lstm and peephole -BENCH_FP32_CPU(kLSTMCtHt) { BenchLSTMKernel(); } - -BENCH_FP32_CPU(kLSTMC1H1) { BenchLSTMKernel(); } +BENCH_FP32_CPU(kLSTMCtHt) { BenchLSTMKernel(); } +BENCH_FP32_CPU(kLSTMC1H1) { BenchLSTMKernel(); } // gru functions -BENCH_FP32_CPU(kGRUH1) { BenchGRUKernel(); 
} - -BENCH_FP32_CPU(kGRUHtPart1) { - BenchGRUKernel(); -} - -BENCH_FP32_CPU(kGRUHtPart2) { - BenchGRUKernel(); -} +BENCH_FP32_CPU(kGRUH1) { BenchGRUKernel(); } +BENCH_FP32_CPU(kGRUHtPart1) { BenchGRUKernel(); } +BENCH_FP32_CPU(kGRUHtPart2) { BenchGRUKernel(); } // seq pool function -BENCH_FP32_CPU(kSeqPool) { BenchSeqPoolKernel(); } +BENCH_FP32_CPU(kSeqPool) { BenchSeqPoolKernel(); } // matmul -BENCH_FP32_CPU(kMatMul) { BenchMatMulKernel(); } +BENCH_FP32_CPU(kMatMul) { BenchMatMulKernel(); } + +// softmax +BENCH_FP32_CPU(kSoftmax) { BenchSoftmaxKernel(); } // Benchmark all jit kernels including jitcode, mkl and refer. // To use this tool, run command: ./benchmark [options...] diff --git a/paddle/fluid/operators/jit/gen/CMakeLists.txt b/paddle/fluid/operators/jit/gen/CMakeLists.txt index 40310c2d2b372a414054f75348e8e1b4471bf3d2..2ea8f927e1a13867fa2065841fac05e766735237 100644 --- a/paddle/fluid/operators/jit/gen/CMakeLists.txt +++ b/paddle/fluid/operators/jit/gen/CMakeLists.txt @@ -28,3 +28,5 @@ USE_JITKERNEL_GEN(kGRUHtPart1) USE_JITKERNEL_GEN(kGRUHtPart2) USE_JITKERNEL_GEN(kNCHW16CMulNC) USE_JITKERNEL_GEN(kSeqPool) +USE_JITKERNEL_GEN(kHMax) +USE_JITKERNEL_GEN(kHSum) diff --git a/paddle/fluid/operators/jit/gen/act.cc b/paddle/fluid/operators/jit/gen/act.cc index a2a5661b93ad3d885983c502566860aa313d110f..e7a7375879064eb27c94315fe7b93eece7866b92 100644 --- a/paddle/fluid/operators/jit/gen/act.cc +++ b/paddle/fluid/operators/jit/gen/act.cc @@ -81,9 +81,7 @@ void VActJitCode::genCode() { #define DECLARE_ACT_CREATOR(name) \ class name##Creator : public JitCodeCreator { \ public: \ - bool UseMe(const int& attr) const override { \ - return platform::MayIUse(platform::avx); \ - } \ + bool UseMe(const int& attr) const override; \ size_t CodeSize(const int& d) const override; \ std::unique_ptr CreateJitCode(const int& attr) const override { \ return make_unique(attr, CodeSize(attr)); \ @@ -98,6 +96,30 @@ DECLARE_ACT_CREATOR(VSigmoid); DECLARE_ACT_CREATOR(VTanh); // TODO(TJ): tuning use me +bool VReluCreator::UseMe(const int& d) const { + return platform::MayIUse(platform::avx); +} + +bool VSquareCreator::UseMe(const int& d) const { + return platform::MayIUse(platform::avx); +} + +bool VIdentityCreator::UseMe(const int& d) const { + return platform::MayIUse(platform::avx); +} + +bool VExpCreator::UseMe(const int& d) const { + return platform::MayIUse(platform::avx) && d < 32; +} + +bool VSigmoidCreator::UseMe(const int& d) const { + return platform::MayIUse(platform::avx); +} + +bool VTanhCreator::UseMe(const int& d) const { + return platform::MayIUse(platform::avx); +} + size_t VReluCreator::CodeSize(const int& d) const { return 96 /* init size */ + (d / YMM_FLOAT_BLOCK + 3) * 4 /* instructions */ * diff --git a/paddle/fluid/operators/jit/gen/hopv.cc b/paddle/fluid/operators/jit/gen/hopv.cc new file mode 100644 index 0000000000000000000000000000000000000000..e7884017198623d996fe98a55691da6e342d656a --- /dev/null +++ b/paddle/fluid/operators/jit/gen/hopv.cc @@ -0,0 +1,103 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. */ + +#include "paddle/fluid/operators/jit/gen/hopv.h" +#include "paddle/fluid/operators/jit/registry.h" +#include "paddle/fluid/platform/cpu_info.h" + +namespace paddle { +namespace operators { +namespace jit { +namespace gen { + +void HOPVJitCode::genCode() { + const int num_blocks = num_ / YMM_FLOAT_BLOCK; + int offset = 0; + + if (num_blocks > 0) { + // load one firstly + vmovups(ymm_tmp, ptr[param_src]); + offset += sizeof(float) * YMM_FLOAT_BLOCK; + for (int i = 1; i < num_blocks; ++i) { + vmovups(ymm_src, ptr[param_src + offset]); + process(ymm_tmp, ymm_src, ymm_tmp); + offset += sizeof(float) * YMM_FLOAT_BLOCK; + } + vextractf128(xmm_dst, ymm_tmp, 1); + process(xmm_dst, xmm_dst, xmm_tmp); + } else { + if (type_ == operand_type::MAX) { + vbroadcastss(ymm_dst, ptr[param_src]); + } else if (type_ == operand_type::ADD) { + vxorps(ymm_dst, ymm_dst, ymm_dst); + } + } + + int rest = num_ % YMM_FLOAT_BLOCK; + if (rest >= 4) { + vmovups(xmm_src, ptr[param_src + offset]); + offset += sizeof(float) * 4; + rest -= 4; + process(xmm_dst, xmm_dst, xmm_src); + } + + vpermilps(xmm_tmp, xmm_dst, 16 + 8 + 3); + process(xmm_dst, xmm_dst, xmm_tmp); + + if (rest >= 2) { + vmovq(xmm_src, ptr[param_src + offset]); + offset += sizeof(float) * 2; + rest -= 2; + process(xmm_dst, xmm_dst, xmm_src); + } + + vpermilps(xmm_tmp, xmm_dst, 1); + process(xmm_dst, xmm_dst, xmm_tmp); + + if (rest >= 1) { + vmovss(xmm_src, ptr[param_src + offset]); + process(xmm_dst, xmm_dst, xmm_src); + } + vmovss(ptr[param_dst], xmm_dst); + ret(); +} + +#define DECLARE_HOP_CREATOR(name) \ + class name##Creator : public JitCodeCreator { \ + public: \ + bool UseMe(const int& attr) const override { \ + return platform::MayIUse(platform::avx); \ + } \ + size_t CodeSize(const int& d) const override { \ + return 96 + d / YMM_FLOAT_BLOCK * 4 * 8; \ + } \ + std::unique_ptr CreateJitCode(const int& attr) const override { \ + return make_unique(attr, CodeSize(attr)); \ + } \ + } + +DECLARE_HOP_CREATOR(HMax); +DECLARE_HOP_CREATOR(HSum); + +#undef DECLARE_HOP_CREATOR + +} // namespace gen +} // namespace jit +} // namespace operators +} // namespace paddle + +namespace gen = paddle::operators::jit::gen; + +REGISTER_JITKERNEL_GEN(kHMax, gen::HMaxCreator); +REGISTER_JITKERNEL_GEN(kHSum, gen::HSumCreator); diff --git a/paddle/fluid/operators/jit/gen/hopv.h b/paddle/fluid/operators/jit/gen/hopv.h new file mode 100644 index 0000000000000000000000000000000000000000..d3bc94b63d3f962cd655367a2afe1a08582b06fa --- /dev/null +++ b/paddle/fluid/operators/jit/gen/hopv.h @@ -0,0 +1,90 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ + +#pragma once + +#include <string> + +#include "glog/logging.h" +#include "paddle/fluid/operators/jit/gen/jitcode.h" + +namespace paddle { +namespace operators { +namespace jit { +namespace gen { + +// horizontal operand vector +class HOPVJitCode : public JitCode { + public: + explicit HOPVJitCode(int d, operand_type type, size_t code_size = 256 * 1024, + void* code_ptr = nullptr) + : JitCode(code_size, code_ptr), num_(d), type_(type) { + if (!(type_ == operand_type::MAX || type_ == operand_type::ADD)) { + LOG(FATAL) << "Do not support this operand type: " << type_; + } + this->genCode(); + } + + virtual const char* name() const { + // return string literals: the c_str() of a function-local std::string + // would dangle once this method returns + return type_ == operand_type::MAX ? "VXXJitCode_MAX" : "VXXJitCode_SUM"; + } + void genCode() override; + + protected: + template <typename JMM> + void process(JMM& dst, JMM& src1, JMM& src2) { // NOLINT + if (type_ == operand_type::MAX) { + vmaxps(dst, src1, src2); + } else if (type_ == operand_type::ADD) { + vaddps(dst, src1, src2); + } + } + + private: + int num_; + operand_type type_; + reg64_t param_src{abi_param1}; + reg64_t param_dst{abi_param2}; + reg64_t param_attr{abi_param3}; + + ymm_t ymm_tmp = ymm_t(0); + ymm_t ymm_src = ymm_t(1); + ymm_t ymm_dst = ymm_t(2); + + xmm_t xmm_tmp = xmm_t(0); + xmm_t xmm_src = xmm_t(1); + xmm_t xmm_dst = xmm_t(2); +}; + +#define DECLARE_HOP_JITCODE(name, op_type) \ class name##JitCode : public HOPVJitCode { \ public: \ explicit name##JitCode(int d, size_t code_size, void* code_ptr = nullptr) \ : HOPVJitCode(d, op_type, code_size, code_ptr) {} \ }; + +DECLARE_HOP_JITCODE(HMax, operand_type::MAX); +DECLARE_HOP_JITCODE(HSum, operand_type::ADD); + +#undef DECLARE_HOP_JITCODE + +} // namespace gen +} // namespace jit +} // namespace operators +} // namespace paddle
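In scalar terms, the horizontal op this JIT code implements is a plain fold over the input: genCode() combines full 8-float blocks with the chosen op, folds the surviving lanes 8 -> 4 -> 2 -> 1 via vextractf128/vpermilps, and picks up the 4/2/1-wide tail. A minimal scalar reference, equivalent to the refer kernels registered later in this patch (illustrative only, not part of the patch):

#include <algorithm>

// Scalar equivalent of HOPVJitCode: horizontal max (kHMax) or
// horizontal sum (kHSum) over n floats, n >= 1.
inline float HorizontalReduce(const float* x, int n, bool is_max) {
  float acc = x[0];
  for (int i = 1; i < n; ++i) {
    acc = is_max ? std::max(acc, x[i]) : acc + x[i];
  }
  return acc;
}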
diff --git a/paddle/fluid/operators/jit/gen/jitcode.h b/paddle/fluid/operators/jit/gen/jitcode.h index f63d40ad5a559ab87a9b3735406671cfd936d9e4..c388109604bc57e8475e79a6c57eecb5bfebfb52 100644 --- a/paddle/fluid/operators/jit/gen/jitcode.h +++ b/paddle/fluid/operators/jit/gen/jitcode.h @@ -47,6 +47,7 @@ using Label = Xbyak::Label; typedef enum { MUL = 0, + MAX, ADD, SUB, RELU,
diff --git a/paddle/fluid/operators/jit/helper.cc b/paddle/fluid/operators/jit/helper.cc index 5dbe22a81b4866bdf60a03710d8ffd0b7bcb597b..4dac2f2460f72c7da63f48c82549b948cc253153 100644 --- a/paddle/fluid/operators/jit/helper.cc +++ b/paddle/fluid/operators/jit/helper.cc @@ -49,6 +49,9 @@ const char* to_string(KernelType kt) { ONE_CASE(kNCHW16CMulNC); ONE_CASE(kSeqPool); ONE_CASE(kMatMul); + ONE_CASE(kHMax); + ONE_CASE(kHSum); + ONE_CASE(kSoftmax); default: PADDLE_THROW("Not support type: %d, or forget to add it.", kt); return "NOT JITKernel";
diff --git a/paddle/fluid/operators/jit/helper.h b/paddle/fluid/operators/jit/helper.h index fbf34fc4b3db49596b6be0360c00e77c12fab9b8..7bdc45779b7d39d36db0d52ca9361943cdcdef3e 100644 --- a/paddle/fluid/operators/jit/helper.h +++ b/paddle/fluid/operators/jit/helper.h @@ -118,6 +118,28 @@ typename KernelTuples::func_type Get( return GetRefer<KT, KernelTuples>(); } +template <KernelType KT, typename KernelTuples> +class KernelFuncsCache { + public: + KernelFuncsCache() = default; + static KernelFuncsCache& Instance() { + static thread_local KernelFuncsCache g_func_cache; + return g_func_cache; + } + + bool Has(int key) const { return funcs_.find(key) != funcs_.end(); } + + typename KernelTuples::func_type At(int key) { return funcs_.at(key); } + + void Insert(int key, typename KernelTuples::func_type func) { + funcs_.emplace(key, func); + } + + private: + std::unordered_map<int, typename KernelTuples::func_type> funcs_; + DISABLE_COPY_AND_ASSIGN(KernelFuncsCache); +}; + const char* to_string(KernelType kt); const char* to_string(SeqPoolType kt);
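KernelFuncsCache memoizes resolved kernel function pointers per thread, keyed by the int attribute (the vector width here), so dispatch through Get runs once per distinct size instead of on every call. The lookup-or-insert pattern that mix::Softmax repeats below for each sub-kernel could be wrapped like this (an illustrative helper written against the signatures above, not part of the patch; the template parameters of KernelFuncsCache are reconstructed from the garbled extraction and are an assumption):

// Fetch the cached function for `attr`, or resolve it once via Get<> and
// memoize it in the per-thread, per-kernel-type cache.
template <KernelType KT, typename KernelTuples, typename PlaceType>
typename KernelTuples::func_type GetCached(
    const typename KernelTuples::attr_type& attr) {
  auto& cache = KernelFuncsCache<KT, KernelTuples>::Instance();
  if (!cache.Has(attr)) {
    cache.Insert(attr, Get<KT, KernelTuples, PlaceType>(attr));
  }
  return cache.At(attr);
}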
diff --git a/paddle/fluid/operators/jit/kernel_base.h b/paddle/fluid/operators/jit/kernel_base.h index adb101bd5cdf231ac330dbf44beb4c24c1fcf29e..42a58580f7b1e0832af57398ba9c29882b6cc6fb 100644 --- a/paddle/fluid/operators/jit/kernel_base.h +++ b/paddle/fluid/operators/jit/kernel_base.h @@ -20,6 +20,7 @@ namespace paddle { namespace operators { namespace jit { +// TODO(TJ): reorder by alphabet typedef enum { kNone = 0, kVMul = 1, @@ -44,6 +45,9 @@ typedef enum { kNCHW16CMulNC, kSeqPool, kMatMul, + kHSum, // horizontal sum + kHMax, // horizontal max + kSoftmax, } KernelType; typedef enum { @@ -70,6 +74,10 @@ struct XYNTuples { typedef void (*func_type)(const T*, T*, int); }; +// x, return and int +template <typename T> +struct XRNTuples : public XYNTuples<T> {}; + typedef struct { void* gates; // gates: x_ch, x_ih, x_fh, x_oh const void* ct_1; @@ -159,6 +167,13 @@ struct LayerNormTuples { const float, int); }; +template <typename T> +struct SoftmaxTuples { + typedef T data_type; + typedef int attr_type; + typedef void (*func_type)(const T*, T*, int, int); +}; + // nChw16c = nChw16c .* NC template struct NCHW16CMulNCTuples {
diff --git a/paddle/fluid/operators/jit/more/mix/CMakeLists.txt b/paddle/fluid/operators/jit/more/mix/CMakeLists.txt index e05f204b1eebd03c7a00157d96d0482f4a44a7fb..dd039d29152961210958470a48f086a133ab640c 100644 --- a/paddle/fluid/operators/jit/more/mix/CMakeLists.txt +++ b/paddle/fluid/operators/jit/more/mix/CMakeLists.txt @@ -12,3 +12,4 @@ USE_JITKERNEL_MORE(kLSTMC1H1, mix) USE_JITKERNEL_MORE(kGRUH1, mix) USE_JITKERNEL_MORE(kGRUHtPart1, mix) USE_JITKERNEL_MORE(kGRUHtPart2, mix) +USE_JITKERNEL_MORE(kSoftmax, mix)
diff --git a/paddle/fluid/operators/jit/more/mix/mix.cc b/paddle/fluid/operators/jit/more/mix/mix.cc index df0a85256b1f546d5f64be73925cf58b87a25bd7..0f42ac158ca7926981df55936cb903d5f4ae4806 100644 --- a/paddle/fluid/operators/jit/more/mix/mix.cc +++ b/paddle/fluid/operators/jit/more/mix/mix.cc @@ -48,6 +48,65 @@ void VTanh(const T* x, T* y, int n) { compute_addbias(&b, y, y, n); } +void Softmax(const T* x, T* y, int n, int bs) { + typename XRNTuples<T>::func_type compute_hmax{nullptr}; + typename XRNTuples<T>::func_type compute_hsum{nullptr}; + typename AXYNTuples<T>::func_type compute_vscal{nullptr}; + typename AXYNTuples<T>::func_type compute_vaddbias{nullptr}; + typename XYNTuples<T>::func_type compute_vexp{nullptr}; + + if (!KernelFuncsCache<kHMax, XRNTuples<T>>::Instance().Has(n)) { + compute_hmax = Get<kHMax, XRNTuples<T>, platform::CPUPlace>(n); + KernelFuncsCache<kHMax, XRNTuples<T>>::Instance().Insert(n, compute_hmax); + } else { + compute_hmax = KernelFuncsCache<kHMax, XRNTuples<T>>::Instance().At(n); + } + + if (!KernelFuncsCache<kHSum, XRNTuples<T>>::Instance().Has(n)) { + compute_hsum = Get<kHSum, XRNTuples<T>, platform::CPUPlace>(n); + KernelFuncsCache<kHSum, XRNTuples<T>>::Instance().Insert(n, compute_hsum); + } else { + compute_hsum = KernelFuncsCache<kHSum, XRNTuples<T>>::Instance().At(n); + } + + if (!KernelFuncsCache<kVScal, AXYNTuples<T>>::Instance().Has(n)) { + compute_vscal = Get<kVScal, AXYNTuples<T>, platform::CPUPlace>(n); + KernelFuncsCache<kVScal, AXYNTuples<T>>::Instance().Insert(n, + compute_vscal); + } else { + compute_vscal = KernelFuncsCache<kVScal, AXYNTuples<T>>::Instance().At(n); + } + + if (!KernelFuncsCache<kVAddBias, AXYNTuples<T>>::Instance().Has(n)) { + compute_vaddbias = Get<kVAddBias, AXYNTuples<T>, platform::CPUPlace>(n); + KernelFuncsCache<kVAddBias, AXYNTuples<T>>::Instance().Insert( + n, compute_vaddbias); + } else { + compute_vaddbias = + KernelFuncsCache<kVAddBias, AXYNTuples<T>>::Instance().At(n); + } + + if (!KernelFuncsCache<kVExp, XYNTuples<T>>::Instance().Has(n)) { + compute_vexp = Get<kVExp, XYNTuples<T>, platform::CPUPlace>(n); + KernelFuncsCache<kVExp, XYNTuples<T>>::Instance().Insert(n, compute_vexp); + } else { + compute_vexp = KernelFuncsCache<kVExp, XYNTuples<T>>::Instance().At(n); + } + + for (int i = 0; i < bs; ++i) { + T scalar; + compute_hmax(x, &scalar, n); + scalar = static_cast<T>(0) - scalar; + compute_vaddbias(&scalar, x, y, n); // x - max + compute_vexp(y, y, n); + compute_hsum(y, &scalar, n); + scalar = static_cast<T>(1) / scalar; + compute_vscal(&scalar, y, y, n); + x += n; + y += n; + } +} + void (*getActFunc(KernelType type, int d))(const T*, T*, int) { // NOLINT if (type == kVSigmoid) { return Get<kVSigmoid, XYNTuples<T>, platform::CPUPlace>(d); @@ -184,6 +243,8 @@ bool VSigmoidKernel::UseMe(const int& d) const { return true; } bool VTanhKernel::UseMe(const int& d) const { return true; } +bool SoftmaxKernel::UseMe(const int& d) const { return true; } + bool LSTMCtHtKernel::UseMe(const lstm_attr_t& attr) const { return true; } bool LSTMC1H1Kernel::UseMe(const lstm_attr_t& attr) const { return true; } @@ -207,6 +268,7 @@ namespace mix = paddle::operators::jit::more::mix; REGISTER_MORE_KERNEL(kVSigmoid, VSigmoid); REGISTER_MORE_KERNEL(kVTanh, VTanh); +REGISTER_MORE_KERNEL(kSoftmax, Softmax); REGISTER_MORE_KERNEL(kLSTMCtHt, LSTMCtHt); REGISTER_MORE_KERNEL(kLSTMC1H1, LSTMC1H1); REGISTER_MORE_KERNEL(kGRUH1, GRUH1);
diff --git a/paddle/fluid/operators/jit/more/mix/mix.h b/paddle/fluid/operators/jit/more/mix/mix.h index a70ecdf9348f511311307b4c27bb4506222a7439..d64af192197a0b339a39a1862c028875da2f3900 100644 --- a/paddle/fluid/operators/jit/more/mix/mix.h +++ b/paddle/fluid/operators/jit/more/mix/mix.h @@ -26,6 +26,7 @@ using T = float; void VSigmoid(const T* x, T* y, int n); void VTanh(const T* x, T* y, int n); +void Softmax(const T* x, T* y, int n, int bs); void LSTMCtHt(lstm_t* step, const lstm_attr_t* attr); void LSTMC1H1(lstm_t* step, const lstm_attr_t* attr); @@ -45,6 +46,9 @@ void GRUHtPart2(gru_t* step, const gru_attr_t* attr); DECLARE_MORE_KERNEL(VSigmoid, XYNTuples); DECLARE_MORE_KERNEL(VTanh, XYNTuples); +// XRN +DECLARE_MORE_KERNEL(Softmax, SoftmaxTuples); + DECLARE_MORE_KERNEL(LSTMCtHt, LSTMTuples); DECLARE_MORE_KERNEL(LSTMC1H1, LSTMTuples);
diff --git a/paddle/fluid/operators/jit/more/mkl/CMakeLists.txt b/paddle/fluid/operators/jit/more/mkl/CMakeLists.txt index 667c6dfad6676d00ab994564bff57c90caa0cb41..f9e5aea32e7cd48e9b39c4c3ee0e30f4a5c84f6f 100644 --- a/paddle/fluid/operators/jit/more/mkl/CMakeLists.txt +++ b/paddle/fluid/operators/jit/more/mkl/CMakeLists.txt @@ -12,3 +12,4 @@ USE_JITKERNEL_MORE(kVSquare, mkl) USE_JITKERNEL_MORE(kVSigmoid, mkl) USE_JITKERNEL_MORE(kVTanh, mkl) USE_JITKERNEL_MORE(kSeqPool, mkl) +USE_JITKERNEL_MORE(kSoftmax, mkl)
diff --git a/paddle/fluid/operators/jit/more/mkl/mkl.cc b/paddle/fluid/operators/jit/more/mkl/mkl.cc index fccdc68f5efa34bac6f5a34a41569d2f77416284..28a37198dae19a57509934ec784746bc23436e7a 100644 --- a/paddle/fluid/operators/jit/more/mkl/mkl.cc +++ b/paddle/fluid/operators/jit/more/mkl/mkl.cc @@ -116,6 +116,16 @@ void VAXPY(double a, const double* x, double* y, int n) { platform::dynload::cblas_daxpy(n, a, x, 1, y, 1); } +template <> +void ASum<float>(const float* x, float* res, int n) { + res[0] = platform::dynload::cblas_sasum(n, x, 1); +} + +template <> +void ASum<double>(const double* x, double* res, int n) { + res[0] = platform::dynload::cblas_dasum(n, x, 1); +} + // TODO(TJ): tuning me carefully on AVX, AVX2 and AVX512 template <> bool MatMulKernel<float>::UseMe(const int& d) const { @@ -167,6 +177,12 @@ bool SeqPoolKernel<float>::UseMe(const seq_pool_attr_t& attr) const { return true; } +template <> +bool SoftmaxKernel<float>::UseMe(const int& d) const { + // tuned on avx2 + return
platform::MayIUse(platform::avx) && d < 60; +} + #define AWALYS_USE_ME_WITH_DOUBLE(func) \ template <> \ bool func##Kernel::UseMe(const int& d) const { \ @@ -181,6 +197,7 @@ AWALYS_USE_ME_WITH_DOUBLE(VExp); AWALYS_USE_ME_WITH_DOUBLE(VSigmoid); AWALYS_USE_ME_WITH_DOUBLE(VTanh); AWALYS_USE_ME_WITH_DOUBLE(VSquare); +AWALYS_USE_ME_WITH_DOUBLE(Softmax); #undef AWALYS_USE_ME_WITH_DOUBLE } // namespace mkl @@ -204,5 +221,6 @@ REGISTER_MKL_KERNEL(kVSquare, VSquare); REGISTER_MKL_KERNEL(kVSigmoid, VSigmoid); REGISTER_MKL_KERNEL(kVTanh, VTanh); REGISTER_MKL_KERNEL(kSeqPool, SeqPool); +REGISTER_MKL_KERNEL(kSoftmax, Softmax); #undef REGISTER_MKL_KERNEL diff --git a/paddle/fluid/operators/jit/more/mkl/mkl.h b/paddle/fluid/operators/jit/more/mkl/mkl.h index a27196fa19f1d3e9aa6c414b6b9f99a21ef49025..6b95b9c872dc12cccaef0b0737edd760447a47d0 100644 --- a/paddle/fluid/operators/jit/more/mkl/mkl.h +++ b/paddle/fluid/operators/jit/more/mkl/mkl.h @@ -16,6 +16,7 @@ #include #include +#include #include "paddle/fluid/operators/jit/kernel_base.h" namespace paddle { @@ -90,6 +91,30 @@ void SeqPool(const T* x, T* y, const seq_pool_attr_t* attr) { } } +template +void ASum(const T* x, T* res, int n); + +template +void Softmax(const T* x, T* y, int n, int bs) { + std::vector entities(bs); + for (int i = 0; i < bs; ++i) { + entities[i] = x[i * n]; + for (int c = 1; c < n; ++c) { + entities[i] = x[i * n + c] > entities[i] ? x[i * n + c] : entities[i]; + } + for (int c = 0; c < n; ++c) { + y[i * n + c] = x[i * n + c] - entities[i]; + } + } + VExp(y, y, n * bs); + for (int i = 0; i < bs; ++i) { + T sum; + ASum(&y[i * n], &sum, n); + sum = static_cast(1) / sum; + VScal(&sum, &y[i * n], &y[i * n], n); + } +} + #define DECLARE_MKL_KERNEL(name, tuples) \ template \ class name##Kernel : public KernelMore> { \ @@ -117,6 +142,8 @@ DECLARE_MKL_KERNEL(VSquare, XYNTuples); DECLARE_MKL_KERNEL(SeqPool, SeqPoolTuples); +DECLARE_MKL_KERNEL(Softmax, SoftmaxTuples); + #undef DECLARE_MKL_KERNEL } // namespace mkl diff --git a/paddle/fluid/operators/jit/refer/CMakeLists.txt b/paddle/fluid/operators/jit/refer/CMakeLists.txt index 4b9bc5e8d49c62404d5d4ef99b7c50987fcb415a..9f2935828ca300dbdb71b0fefb6b9883cb45e4b0 100644 --- a/paddle/fluid/operators/jit/refer/CMakeLists.txt +++ b/paddle/fluid/operators/jit/refer/CMakeLists.txt @@ -29,3 +29,6 @@ USE_JITKERNEL_REFER(kNCHW16CMulNC) USE_JITKERNEL_REFER(kSeqPool) USE_JITKERNEL_REFER(kMatMul) USE_JITKERNEL_REFER(kVSquare) +USE_JITKERNEL_REFER(kHSum) +USE_JITKERNEL_REFER(kHMax) +USE_JITKERNEL_REFER(kSoftmax) diff --git a/paddle/fluid/operators/jit/refer/refer.cc b/paddle/fluid/operators/jit/refer/refer.cc index 3512ad7fe7921381afb6152330fff6be34de5ad7..b8adb40ec7e1b64df2b04a3201292db235af7b19 100644 --- a/paddle/fluid/operators/jit/refer/refer.cc +++ b/paddle/fluid/operators/jit/refer/refer.cc @@ -52,4 +52,9 @@ REGISTER_REFER_KERNEL(kSeqPool, SeqPool); REGISTER_REFER_KERNEL(kMatMul, MatMul); +REGISTER_REFER_KERNEL(kHMax, HMax); +REGISTER_REFER_KERNEL(kHSum, HSum); + +REGISTER_REFER_KERNEL(kSoftmax, Softmax); + #undef REGISTER_REFER_KERNEL diff --git a/paddle/fluid/operators/jit/refer/refer.h b/paddle/fluid/operators/jit/refer/refer.h index 97d029358594d757f0e1874e9c87ecb8f97c9d50..5a074db7e0e8ab49dc281e1809edef23e6a25c42 100644 --- a/paddle/fluid/operators/jit/refer/refer.h +++ b/paddle/fluid/operators/jit/refer/refer.h @@ -378,6 +378,40 @@ void MatMul(const T* A, const T* B, T* C, int M, int N, int K) { } } +template +void HMax(const T* x, T* res, int n) { + res[0] = x[0]; + for (int i = 1; i < n; 
++i) { + res[0] = res[0] < x[i] ? x[i] : res[0]; + } +} + +template +void HSum(const T* x, T* res, int n) { + res[0] = x[0]; + for (int i = 1; i < n; ++i) { + res[0] += x[i]; + } +} + +// y = e^(x - max(x)) +// y = y / sum(y) +template +void Softmax(const T* x, T* y, int n, int bs = 1) { + for (int i = 0; i < bs; ++i) { + T scalar; + HMax(x, &scalar, n); + scalar = static_cast(0) - scalar; + VAddBias(&scalar, x, y, n); // x - max + VExp(y, y, n); + HSum(y, &scalar, n); + scalar = static_cast(1) / scalar; + VScal(&scalar, y, y, n); + x += n; + y += n; + } +} + #define DECLARE_REFER_KERNEL(name, tuples) \ template \ class name##Kernel : public ReferKernel> { \ @@ -421,6 +455,11 @@ DECLARE_REFER_KERNEL(SeqPool, SeqPoolTuples); DECLARE_REFER_KERNEL(MatMul, MatMulTuples); +DECLARE_REFER_KERNEL(HMax, XRNTuples); +DECLARE_REFER_KERNEL(HSum, XRNTuples); + +DECLARE_REFER_KERNEL(Softmax, SoftmaxTuples); + #undef DECLARE_REFER_KERNEL } // namespace refer diff --git a/paddle/fluid/operators/jit/test.cc b/paddle/fluid/operators/jit/test.cc index 68a79b6314e4cf86f5b715b9c6694924126b12da..cc461552898fc68661ce548a520d65215d3572b4 100644 --- a/paddle/fluid/operators/jit/test.cc +++ b/paddle/fluid/operators/jit/test.cc @@ -61,6 +61,7 @@ std::vector TestSizes() { } namespace jit = paddle::operators::jit; +using CPUPlace = paddle::platform::CPUPlace; template struct TestFuncWithRefer { @@ -121,6 +122,40 @@ struct TestFuncWithRefer, T, std::vector, } }; +template +struct TestFuncWithRefer, std::vector, std::vector, + int, int> { + void operator()(const typename jit::SoftmaxTuples::func_type tgt, + const std::vector& x, const std::vector& yref, int n, + int bs) { + EXPECT_TRUE(tgt != nullptr); + EXPECT_EQ(yref.size(), x.size()); + EXPECT_EQ(x.size(), static_cast(n * bs)); + const T* x_data = x.data(); + const T* yref_data = yref.data(); + std::vector ytgt(n * bs); + T* ytgt_data = ytgt.data(); + // test normal + tgt(x_data, ytgt_data, n, bs); + ExpectEQ(ytgt_data, yref_data, n * bs); + // test inplace x + std::copy(x.begin(), x.end(), ytgt.begin()); + tgt(ytgt_data, ytgt_data, n, bs); + ExpectEQ(ytgt_data, yref_data, n * bs); + } +}; + +template +struct TestFuncWithRefer, std::vector, T> { + void operator()(const typename jit::XRNTuples::func_type tgt, + const std::vector& x, const T ref_res) { + EXPECT_TRUE(tgt != nullptr); + T tgt_res; + tgt(x.data(), &tgt_res, x.size()); + ExpectEQ(&tgt_res, &ref_res, 1); + } +}; + template struct TestFuncWithRefer, std::vector, std::vector> { void operator()(const typename jit::XYNTuples::func_type tgt, @@ -172,7 +207,7 @@ struct TestFuncWithRefer, std::vector, std::vector, T* ht_data = ht.data(); T* checked_data = checked.data(); - paddle::operators::jit::lstm_t step; + jit::lstm_t step; step.gates = x_data; step.ct_1 = ct_1_data; step.ct = ct_data; @@ -208,7 +243,7 @@ struct TestFuncWithRefer, std::vector, std::vector, const T* ht_ref_data = ht_ref.data(); T* x_data = x.data(); T* ht_data = ht.data(); - paddle::operators::jit::gru_t step; + jit::gru_t step; step.gates = x_data; step.ht_1 = ht_1_data; step.ht = ht_data; @@ -255,8 +290,8 @@ struct TestFuncWithRefer, std::vector, std::vector, } }; -template +template void TestAllImpls(const typename KernelTuples::attr_type& attr, Args... args) { TestFuncWithRefer test; // test jitcode @@ -286,9 +321,8 @@ void TestAllImpls(const typename KernelTuples::attr_type& attr, Args... 
args) { test(tgt, args...); } -template +template void TestXYZNKernel() { - namespace jit = paddle::operators::jit; VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); for (int d : TestSizes()) { auto ref = jit::GetRefer>(); @@ -320,9 +354,8 @@ void TestXYZNKernel() { } } -template +template void TestAXYNKernel() { - namespace jit = paddle::operators::jit; VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); for (int d : TestSizes()) { auto ref = jit::GetRefer>(); @@ -347,9 +380,26 @@ void TestAXYNKernel() { } } -template +template +void TestXRNKernel() { + VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); + auto last_acc = acc; + acc = 1e-4; + for (int d : TestSizes()) { + auto ref = jit::GetRefer>(); + EXPECT_TRUE(ref != nullptr); + std::vector x(d); + RandomVec(d, x.data(), -2.f, 2.f); + T ref_res; + ref(x.data(), &ref_res, d); + TestAllImpls, PlaceType, std::vector, T>(d, x, + ref_res); + } + acc = last_acc; +} + +template void TestXYNKernel() { - namespace jit = paddle::operators::jit; VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); for (int d : TestSizes()) { auto ref = jit::GetRefer>(); @@ -373,9 +423,8 @@ void TestXYNKernel() { } } -template +template void TestLSTMKernel() { - namespace jit = paddle::operators::jit; VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); std::vector all_acts = {"sigmoid", "tanh", "relu", "identity"}; for (int d : TestSizes()) { @@ -424,9 +473,8 @@ void TestLSTMKernel() { } } -template +template void TestGRUKernel() { - namespace jit = paddle::operators::jit; VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); std::vector all_acts = {"sigmoid", "tanh", "relu", "identity"}; for (int d : TestSizes()) { @@ -459,7 +507,7 @@ void TestGRUKernel() { } } -template +template void TestSeqPoolKernel() { VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); std::vector pool_types = { @@ -484,7 +532,7 @@ void TestSeqPoolKernel() { } } -template +template void TestMatMulKernel() { VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); auto last_acc = acc; @@ -510,7 +558,32 @@ void TestMatMulKernel() { acc = last_acc; } -template +template +void TestSoftmaxKernel() { + VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); + for (int bs : {1, 2, 10}) { + for (int n : TestSizes()) { + auto ref = jit::GetRefer>(); + EXPECT_TRUE(ref != nullptr); + std::vector x(bs * n), y(bs * n); + RandomVec(bs * n, x.data(), -2.f, 2.f); + const T* x_data = x.data(); + T* y_data = y.data(); + + std::vector xinp(x.size()); // inplace test + std::copy(x.begin(), x.end(), xinp.begin()); + ref(x_data, y_data, n, bs); + T* xinp_data = xinp.data(); + ref(xinp_data, xinp_data, n, bs); + ExpectEQ(xinp_data, y_data, n * bs); + + TestAllImpls, PlaceType, std::vector, + std::vector>(n, x, y, n, bs); + } + } +} + +template void TestNCHW16CMulNCKernel() { VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); const int n = 3, c = 16 * 4, h = 10, w = 10; @@ -565,129 +638,123 @@ void TestNCHW16CMulNCKernel() { // XYZNTuple TEST(JITKernel, kVMul) { - namespace jit = paddle::operators::jit; - TestXYZNKernel(); - TestXYZNKernel(); + TestXYZNKernel(); + TestXYZNKernel(); } TEST(JITKernel, kVAdd) { - namespace jit = paddle::operators::jit; - TestXYZNKernel(); - TestXYZNKernel(); + TestXYZNKernel(); + TestXYZNKernel(); } TEST(JITKernel, kVAddRelu) { - namespace jit = paddle::operators::jit; - TestXYZNKernel(); - TestXYZNKernel(); + TestXYZNKernel(); + TestXYZNKernel(); } TEST(JITKernel, kVSub) { - namespace jit = paddle::operators::jit; - 
TestXYZNKernel(); - TestXYZNKernel(); + TestXYZNKernel(); + TestXYZNKernel(); } // AXYNTuples TEST(JITKernel, kVScal) { - namespace jit = paddle::operators::jit; - TestAXYNKernel(); - TestAXYNKernel(); + TestAXYNKernel(); + TestAXYNKernel(); } TEST(JITKernel, kVAddBias) { - namespace jit = paddle::operators::jit; - TestAXYNKernel(); - TestAXYNKernel(); + TestAXYNKernel(); + TestAXYNKernel(); +} + +// XRNTuples +TEST(JITKernel, kHMax) { + TestXRNKernel(); + TestXRNKernel(); +} + +TEST(JITKernel, kHSum) { + TestXRNKernel(); + TestXRNKernel(); } // XYNTuples TEST(JITKernel, kVRelu) { - namespace jit = paddle::operators::jit; - TestXYNKernel(); - TestXYNKernel(); + TestXYNKernel(); + TestXYNKernel(); } TEST(JITKernel, kVIdentity) { - namespace jit = paddle::operators::jit; - TestXYNKernel(); - TestXYNKernel(); + TestXYNKernel(); + TestXYNKernel(); } TEST(JITKernel, kVSquare) { - namespace jit = paddle::operators::jit; - TestXYNKernel(); - TestXYNKernel(); + TestXYNKernel(); + TestXYNKernel(); } TEST(JITKernel, kVExp) { - namespace jit = paddle::operators::jit; - TestXYNKernel(); - TestXYNKernel(); + TestXYNKernel(); + TestXYNKernel(); } TEST(JITKernel, kVSigmoid) { - namespace jit = paddle::operators::jit; - TestXYNKernel(); - TestXYNKernel(); + TestXYNKernel(); + TestXYNKernel(); } TEST(JITKernel, kVTanh) { - namespace jit = paddle::operators::jit; - TestXYNKernel(); - TestXYNKernel(); + TestXYNKernel(); + TestXYNKernel(); } // LSTM TEST(JITKernel, kLSTMCtHt) { - namespace jit = paddle::operators::jit; - TestLSTMKernel(); - TestLSTMKernel(); + TestLSTMKernel(); + TestLSTMKernel(); } TEST(JITKernel, kLSTMC1H1) { - namespace jit = paddle::operators::jit; - TestLSTMKernel(); - TestLSTMKernel(); + TestLSTMKernel(); + TestLSTMKernel(); } // GRU TEST(JITKernel, kGRUH1) { - namespace jit = paddle::operators::jit; - TestGRUKernel(); - TestGRUKernel(); + TestGRUKernel(); + TestGRUKernel(); } TEST(JITKernel, kGRUHtPart1) { - namespace jit = paddle::operators::jit; - TestGRUKernel(); - TestGRUKernel(); + TestGRUKernel(); + TestGRUKernel(); } TEST(JITKernel, kGRUHtPart2) { - namespace jit = paddle::operators::jit; - TestGRUKernel(); - TestGRUKernel(); + TestGRUKernel(); + TestGRUKernel(); } TEST(JITKernel, kSeqPool) { - namespace jit = paddle::operators::jit; - TestSeqPoolKernel(); - TestSeqPoolKernel(); + TestSeqPoolKernel(); + TestSeqPoolKernel(); } TEST(JITKernel, kMatMul) { - namespace jit = paddle::operators::jit; - TestMatMulKernel(); - TestMatMulKernel(); + TestMatMulKernel(); + TestMatMulKernel(); +} + +TEST(JITKernel, kSoftmax) { + TestSoftmaxKernel(); + TestSoftmaxKernel(); } TEST(JITKernel, kNCHW16CMulNC) { - namespace jit = paddle::operators::jit; - TestNCHW16CMulNCKernel(); - TestNCHW16CMulNCKernel(); + TestNCHW16CMulNCKernel(); + TestNCHW16CMulNCKernel(); } // TODO(yihua/TJ): add crf decoding and layer norm unit tests diff --git a/paddle/fluid/operators/lookup_table_op.cu b/paddle/fluid/operators/lookup_table_op.cu index fd15539f7b6727496988c9b13d0d2551659a420a..0af8b9e69cfe09890f28ef2028baa19319a5c379 100644 --- a/paddle/fluid/operators/lookup_table_op.cu +++ b/paddle/fluid/operators/lookup_table_op.cu @@ -17,6 +17,7 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/lookup_table_op.h" #include "paddle/fluid/platform/assert.h" #include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { @@ -193,8 +194,11 @@ class LookupTableGradCUDAKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; +namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(lookup_table, ops::LookupTableCUDAKernel, - ops::LookupTableCUDAKernel); + ops::LookupTableCUDAKernel, + ops::LookupTableCUDAKernel); REGISTER_OP_CUDA_KERNEL(lookup_table_grad, ops::LookupTableGradCUDAKernel, - ops::LookupTableGradCUDAKernel); + ops::LookupTableGradCUDAKernel, + ops::LookupTableGradCUDAKernel); diff --git a/paddle/fluid/operators/math/CMakeLists.txt b/paddle/fluid/operators/math/CMakeLists.txt index 6bbb7155dda9b2c844f793a63adb861c2ed956e8..e20524012a5839fd250b7426a5efc42b7e87fe87 100644 --- a/paddle/fluid/operators/math/CMakeLists.txt +++ b/paddle/fluid/operators/math/CMakeLists.txt @@ -53,7 +53,7 @@ math_library(sequence2batch) math_library(sequence_padding) math_library(sequence_pooling DEPS math_function jit_kernel_helper) math_library(sequence_scale) -math_library(softmax DEPS math_function) +math_library(softmax DEPS math_function jit_kernel_helper) math_library(beam_search DEPS math_function) math_library(matrix_bit_code) diff --git a/paddle/fluid/operators/math/beam_search.cc b/paddle/fluid/operators/math/beam_search.cc index fb7119273a734feba870fdabade6a4faa1d5e9a3..69971ef7423eff6bc3f8543a491edb6b0bbd00ca 100644 --- a/paddle/fluid/operators/math/beam_search.cc +++ b/paddle/fluid/operators/math/beam_search.cc @@ -29,8 +29,9 @@ class BeamSearchFunctor { const framework::LoDTensor *ids, const framework::LoDTensor *scores, framework::LoDTensor *selected_ids, - framework::LoDTensor *selected_scores, size_t level, - size_t beam_size, int end_id, bool is_accumulated) { + framework::LoDTensor *selected_scores, + framework::Tensor *parent_idx, size_t level, size_t beam_size, + int end_id, bool is_accumulated) { auto abs_lod = framework::ToAbsOffset(scores->lod()); auto &high_level = abs_lod[level]; @@ -57,11 +58,13 @@ class BeamSearchFunctor { std::vector({static_cast(num_instances), 1})); selected_ids->Resize(dims); selected_scores->Resize(dims); + parent_idx->Resize({static_cast(num_instances)}); auto *selected_ids_data = selected_ids->mutable_data(platform::CPUPlace()); auto *selected_scores_data = selected_scores->mutable_data(platform::CPUPlace()); + auto *parent_idx_data = parent_idx->mutable_data(platform::CPUPlace()); // fill in data std::vector low_level; @@ -69,6 +72,7 @@ class BeamSearchFunctor { for (auto &items : selected_items) { low_level.push_back(low_offset); for (auto &item : items) { + parent_idx_data[low_offset] = static_cast(low_level.size() - 1); selected_ids_data[low_offset] = item.id; selected_scores_data[low_offset] = item.score; low_offset++; diff --git a/paddle/fluid/operators/math/beam_search.cu b/paddle/fluid/operators/math/beam_search.cu index d94e3023ce537cb9fa456e079c4fa3cf57fb954d..61d021ef627f1ccd90b992c2078a7f3ca879422d 100644 --- a/paddle/fluid/operators/math/beam_search.cu +++ b/paddle/fluid/operators/math/beam_search.cu @@ -157,10 +157,10 @@ __device__ __forceinline__ bool PruneEndBeams(Triple* top_beam_local, } __device__ __forceinline__ void WriteBack( - int64_t* selected_ids, float* selected_scores, size_t* selected_offsets, - Triple* top_beam_local, const int seq_offset_start, - const int seq_offset_end, 
const int selected_seq_start, - const int selected_seq_length) { + int64_t* selected_ids, float* selected_scores, int* parent_idx, + size_t* selected_offsets, Triple* top_beam_local, + const int seq_offset_start, const int seq_offset_end, + const int selected_seq_start, const int selected_seq_length) { const int tid = threadIdx.x; // use 1 thread only for each sequence int global_index = selected_seq_start; for (int global_offset = seq_offset_start; global_offset < seq_offset_end; @@ -171,6 +171,7 @@ __device__ __forceinline__ void WriteBack( selected_ids[global_index] = static_cast(top_beam_local[local_index].id); selected_scores[global_index] = top_beam_local[local_index].score; + parent_idx[global_index] = static_cast(global_offset); global_index++; } } @@ -180,11 +181,11 @@ __device__ __forceinline__ void WriteBack( template __device__ void BeamSearchDetails( - int64_t* selected_ids, float* selected_scores, size_t* selected_offsets, - const int64_t* pre_ids, const float* pre_scores, const int64_t* ids, - const float* scores, const int seq_offset_start, const int seq_offset_end, - const int seq_width, int beam_size, int end_id, bool is_accumulated, - int num_used_threads) { + int64_t* selected_ids, float* selected_scores, int* parent_idx, + size_t* selected_offsets, const int64_t* pre_ids, const float* pre_scores, + const int64_t* ids, const float* scores, const int seq_offset_start, + const int seq_offset_end, const int seq_width, int beam_size, int end_id, + bool is_accumulated, int num_used_threads) { __shared__ Triple top_beam[MaxLength]; int num_items = 0; @@ -228,15 +229,15 @@ __device__ void BeamSearchDetails( selected_offsets[0] = 0; } - WriteBack(selected_ids, selected_scores, selected_offsets, top_beam_local, - seq_offset_start, seq_offset_end, selected_seq_start, - selected_seq_length); + WriteBack(selected_ids, selected_scores, parent_idx, selected_offsets, + top_beam_local, seq_offset_start, seq_offset_end, + selected_seq_start, selected_seq_length); } } template __global__ void BeamSearchKernel(int64_t* selected_ids, float* selected_scores, - size_t* selected_offsets, + int* parent_idx, size_t* selected_offsets, const int64_t* pre_ids, const float* pre_scores, const int64_t* ids, const float* scores, const size_t* seq_offsets, @@ -250,24 +251,25 @@ __global__ void BeamSearchKernel(int64_t* selected_ids, float* selected_scores, int seq_offset_end = static_cast(seq_offsets[seq_id + 1]); BeamSearchDetails( - selected_ids, selected_scores, selected_offsets, pre_ids, pre_scores, ids, - scores, seq_offset_start, seq_offset_end, seq_width, beam_size, end_id, - is_accumulated, num_used_threads); + selected_ids, selected_scores, parent_idx, selected_offsets, pre_ids, + pre_scores, ids, scores, seq_offset_start, seq_offset_end, seq_width, + beam_size, end_id, is_accumulated, num_used_threads); } template __global__ void BeamSearchKernelSingle( - int64_t* selected_ids, float* selected_scores, size_t* selected_offsets, - const int64_t* pre_ids, const float* pre_scores, const int64_t* ids, - const float* scores, const int seq_length, const int seq_width, - int beam_size, int end_id, bool is_accumulated, int num_used_threads) { + int64_t* selected_ids, float* selected_scores, int* parent_idx, + size_t* selected_offsets, const int64_t* pre_ids, const float* pre_scores, + const int64_t* ids, const float* scores, const int seq_length, + const int seq_width, int beam_size, int end_id, bool is_accumulated, + int num_used_threads) { const int seq_offset_start = 0; const int seq_offset_end = 
seq_length; BeamSearchDetails( - selected_ids, selected_scores, selected_offsets, pre_ids, pre_scores, ids, - scores, seq_offset_start, seq_offset_end, seq_width, beam_size, end_id, - is_accumulated, num_used_threads); + selected_ids, selected_scores, parent_idx, selected_offsets, pre_ids, + pre_scores, ids, scores, seq_offset_start, seq_offset_end, seq_width, + beam_size, end_id, is_accumulated, num_used_threads); } static inline int GetNumUsedThreads(const int max_threads_per_seq, @@ -300,8 +302,9 @@ class BeamSearchFunctor { const framework::LoDTensor* ids, const framework::LoDTensor* scores, framework::LoDTensor* selected_ids, - framework::LoDTensor* selected_scores, size_t level, - size_t beam_size, int end_id, bool is_accumulated) { + framework::LoDTensor* selected_scores, + framework::Tensor* parent_idx, size_t level, size_t beam_size, + int end_id, bool is_accumulated) { auto abs_lod = framework::ToAbsOffset(scores->lod()); const int64_t* pre_ids_data = pre_ids->data(); @@ -322,6 +325,8 @@ class BeamSearchFunctor { selected_ids->mutable_data(selected_dims, context.GetPlace()); float* selected_scores_data = selected_scores->mutable_data(selected_dims, context.GetPlace()); + int* parent_idx_data = parent_idx->mutable_data( + {static_cast(num_seqs * beam_size)}, context.GetPlace()); framework::LoD selected_lod(2); selected_lod[0].assign(abs_lod[level].begin(), abs_lod[level].end()); @@ -339,9 +344,9 @@ class BeamSearchFunctor { CUDA_LAUNCH_KERNEL_HELPER( BeamSearchKernelSingle<<< 1, kMaxThreadsPerSeq, 0, context.stream()>>>( - selected_ids_data, selected_scores_data, selected_offsets, - pre_ids_data, pre_scores_data, ids_data, scores_data, - seq_length, static_cast(seq_width), + selected_ids_data, selected_scores_data, parent_idx_data, + selected_offsets, pre_ids_data, pre_scores_data, ids_data, + scores_data, seq_length, static_cast(seq_width), static_cast(beam_size), static_cast(end_id), is_accumulated, num_used_threads)); } @@ -357,9 +362,9 @@ class BeamSearchFunctor { CUDA_LAUNCH_KERNEL_HELPER( BeamSearchKernel<<< 1, num_seqs * kMaxThreadsPerSeq, 0, context.stream()>>>( - selected_ids_data, selected_scores_data, selected_offsets, - pre_ids_data, pre_scores_data, ids_data, scores_data, - seq_offsets, static_cast(num_seqs), + selected_ids_data, selected_scores_data, parent_idx_data, + selected_offsets, pre_ids_data, pre_scores_data, ids_data, + scores_data, seq_offsets, static_cast(num_seqs), static_cast(seq_width), static_cast(beam_size), end_id, is_accumulated, num_used_threads)); } @@ -379,6 +384,7 @@ class BeamSearchFunctor { {static_cast(selected_lod[1].back()), 1}); selected_ids->Resize(final_selected_dims); selected_scores->Resize(final_selected_dims); + parent_idx->Resize({static_cast(selected_lod[1].back())}); } } }; diff --git a/paddle/fluid/operators/math/beam_search.h b/paddle/fluid/operators/math/beam_search.h index 3cd17f426c5596582c91f2b3f0cc5ba513e3aa4b..4474e7ea52affed792572d02202ec2577c471e50 100644 --- a/paddle/fluid/operators/math/beam_search.h +++ b/paddle/fluid/operators/math/beam_search.h @@ -104,14 +104,12 @@ class BeamSearchFunctor { * Return false if all the input tensors are empty; in a machine translation task * that means no candidate is provided, and the task will stop running.
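The parent_idx output threaded through the CPU and GPU functors above exists so a decoder can realign its recurrent state after every beam step with a single gather. A minimal NumPy sketch of that gather (shapes and values are invented for illustration; this is not Paddle code):

.. code-block:: python

    import numpy as np

    # The previous step kept three live beam entries, one row of cell state each.
    cell_states = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])

    # For each newly selected candidate, parent_idx records the row of the
    # previous step's output it descended from (filled by WriteBack above).
    parent_idx = np.array([0, 0, 2], dtype=np.int32)

    # One gather realigns the states for the next time step, replacing the
    # slower LoD-based sequence_expand path.
    next_states = cell_states[parent_idx]
    assert (next_states == np.array([[0., 0.], [0., 0.], [2., 2.]])).all()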
*/ - void operator()(const DeviceContext& context, - const framework::LoDTensor* pre_ids, - const framework::LoDTensor* pre_scores, - const framework::LoDTensor* ids, - const framework::LoDTensor* scores, - framework::LoDTensor* selected_ids, - framework::LoDTensor* selected_scores, size_t level, - size_t beam_size, int end_id, bool is_accumulated); + void operator()( + const DeviceContext& context, const framework::LoDTensor* pre_ids, + const framework::LoDTensor* pre_scores, const framework::LoDTensor* ids, + const framework::LoDTensor* scores, framework::LoDTensor* selected_ids, + framework::LoDTensor* selected_scores, framework::Tensor* parent_idx, + size_t level, size_t beam_size, int end_id, bool is_accumulated); }; } // namespace math diff --git a/paddle/fluid/operators/math/beam_search_test.cc b/paddle/fluid/operators/math/beam_search_test.cc index 1c29ee95f6b109209316e4e8c8f3cda37eac62ae..7ea8eb8b00db328ca13d3d33d751aca4eac66dae 100644 --- a/paddle/fluid/operators/math/beam_search_test.cc +++ b/paddle/fluid/operators/math/beam_search_test.cc @@ -93,13 +93,14 @@ void TestBeamSearch() { paddle::framework::LoDTensor selected_ids; paddle::framework::LoDTensor selected_scores; + paddle::framework::LoDTensor parent_idx; size_t level = 0; size_t beam_size = 2; int end_id = 0; paddle::operators::math::BeamSearchFunctor beamsearch; beamsearch(*context, &pre_ids, &pre_scores, &ids, &scores, &selected_ids, - &selected_scores, level, beam_size, end_id, true); + &selected_scores, &parent_idx, level, beam_size, end_id, true); ASSERT_EQ(selected_ids.lod(), selected_scores.lod()); diff --git a/paddle/fluid/operators/math/softmax_impl.h b/paddle/fluid/operators/math/softmax_impl.h index 1d9d98b10646af9e199f6c481740d30745888707..1ff9ff684fc8001afb0f768a033b4c5bd1592702 100644 --- a/paddle/fluid/operators/math/softmax_impl.h +++ b/paddle/fluid/operators/math/softmax_impl.h @@ -16,8 +16,8 @@ limitations under the License. */ #include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/operators/jit/kernels.h" -#include "paddle/fluid/operators/math/blas.h" namespace paddle { namespace operators { namespace math { @@ -81,28 +81,10 @@ class SoftmaxFunctor> { const int kBatchDim = 0; const int kClassDim = 1; // 2D data. Batch x C - const int batch_size = in_dims[kBatchDim]; - const int num_classes = in_dims[kClassDim]; - std::vector entities(batch_size); - auto blas = math::GetBlas(context); - for (int n = 0; n < batch_size; ++n) { - entities[n] = in_data[n * num_classes]; - for (int c = 1; c < num_classes; ++c) { - entities[n] = in_data[n * num_classes + c] > entities[n] - ? 
in_data[n * num_classes + c] - : entities[n]; - } - for (int c = 0; c < num_classes; ++c) { - out_data[n * num_classes + c] = - in_data[n * num_classes + c] - entities[n]; - } - } - - blas.VEXP(num_classes * batch_size, out_data, out_data); - for (int n = 0; n < batch_size; ++n) { - auto sum = blas.ASUM(num_classes, &out_data[n * num_classes], 1); - blas.SCAL(num_classes, 1.0f / sum, &out_data[n * num_classes]); - } + auto compute_softmax = + jit::Get, platform::CPUPlace>( + in_dims[kClassDim]); + compute_softmax(in_data, out_data, in_dims[kClassDim], in_dims[kBatchDim]); } }; diff --git a/paddle/fluid/operators/activation_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc similarity index 100% rename from paddle/fluid/operators/activation_mkldnn_op.cc rename to paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc diff --git a/paddle/fluid/operators/batch_norm_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc similarity index 100% rename from paddle/fluid/operators/batch_norm_mkldnn_op.cc rename to paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc diff --git a/paddle/fluid/operators/concat_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/concat_mkldnn_op.cc similarity index 100% rename from paddle/fluid/operators/concat_mkldnn_op.cc rename to paddle/fluid/operators/mkldnn/concat_mkldnn_op.cc diff --git a/paddle/fluid/operators/conv_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc similarity index 100% rename from paddle/fluid/operators/conv_mkldnn_op.cc rename to paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc diff --git a/paddle/fluid/operators/conv_transpose_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc similarity index 100% rename from paddle/fluid/operators/conv_transpose_mkldnn_op.cc rename to paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc diff --git a/paddle/fluid/operators/dequantize_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/dequantize_mkldnn_op.cc similarity index 100% rename from paddle/fluid/operators/dequantize_mkldnn_op.cc rename to paddle/fluid/operators/mkldnn/dequantize_mkldnn_op.cc diff --git a/paddle/fluid/operators/fc_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc similarity index 100% rename from paddle/fluid/operators/fc_mkldnn_op.cc rename to paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc diff --git a/paddle/fluid/operators/gaussian_random_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc similarity index 100% rename from paddle/fluid/operators/gaussian_random_mkldnn_op.cc rename to paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc diff --git a/paddle/fluid/operators/lrn_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc similarity index 88% rename from paddle/fluid/operators/lrn_mkldnn_op.cc rename to paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc index 4e4f977fcc742856b877ef0b7f9a3cc9879aefce..097ba01d401dbc7969e30f576cac2567c874ed99 100644 --- a/paddle/fluid/operators/lrn_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc @@ -67,7 +67,13 @@ class LRNMKLDNNOpKernel : public paddle::framework::OpKernel { mid->mutable_data(ctx.GetPlace()); const int n = ctx.Attr("n"); - const float alpha = ctx.Attr("alpha"); + // MKL-DNN implements LRN in a caffe way: + // http://caffe.berkeleyvision.org/tutorial/layers/lrn.html + // Where sum of squares is divided by size of normalization window + // this is not the case for PaddlePaddle LRN. 
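Before the compensation itself, a scalar NumPy sketch of the mismatch this comment describes (stand-in numbers, not the real kernel): MKL-DNN divides the accumulated square sum by the window size n, PaddlePaddle does not, so passing alpha * n to MKL-DNN makes the two denominators agree.

.. code-block:: python

    import numpy as np

    def denom_paddle(sqr_sum, alpha, beta, k):
        # PaddlePaddle LRN: raw sum of squares scaled by alpha
        return (k + alpha * sqr_sum) ** beta

    def denom_mkldnn(sqr_sum, alpha, beta, k, n):
        # MKL-DNN / Caffe LRN: sum of squares divided by window size n
        return (k + alpha / n * sqr_sum) ** beta

    s, alpha, beta, k, n = 3.7, 1e-4, 0.75, 1.0, 5
    assert np.isclose(denom_paddle(s, alpha, beta, k),
                      denom_mkldnn(s, alpha * n, beta, k, n))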
+ // Hence we need to compensate for this difference by + // multiplying alpha by the size of the window (n) + const float alpha = ctx.Attr("alpha") * static_cast(n); const float beta = ctx.Attr("beta"); const float k = ctx.Attr("k"); const bool is_test = ctx.Attr("is_test"); @@ -78,10 +84,7 @@ class LRNMKLDNNOpKernel : public paddle::framework::OpKernel { auto dims = paddle::framework::vectorize2int(x->dims()); auto src_md = paddle::platform::MKLDNNMemDesc( - dims, mkldnn::memory::data_type::f32, mkldnn::memory::format::nchw); - - auto dst_md = paddle::platform::MKLDNNMemDesc( - dims, mkldnn::memory::data_type::f32, mkldnn::memory::format::nchw); + dims, mkldnn::memory::data_type::f32, x->format()); auto forward_desc = mkldnn::lrn_forward::desc{mkldnn::prop_kind::forward, mkldnn::lrn_across_channels, @@ -92,8 +95,6 @@ class LRNMKLDNNOpKernel : public paddle::framework::OpKernel { k}; auto src_memory_pd = mkldnn::memory::primitive_desc{src_md, mkldnn_engine}; - auto dst_memory = mkldnn::memory{{dst_md, mkldnn_engine}, - static_cast(output_data)}; if (!is_test) { const std::string key = ctx.op().Output("Out"); @@ -110,11 +111,16 @@ class LRNMKLDNNOpKernel : public paddle::framework::OpKernel { src_memory->set_data_handle( static_cast(const_cast(input_data))); + auto dst_memory = mkldnn::memory(forward_pd->dst_primitive_desc(), + static_cast(output_data)); auto workspace_memory = insert_to_context( key_workspace_memory, dev_ctx, forward_pd->workspace_primitive_desc()); run_primitive(*forward_pd, *src_memory, *workspace_memory, dst_memory); + + out->set_layout(framework::DataLayout::kMKLDNN); + out->set_format(platform::GetMKLDNNFormat(dst_memory)); } else { auto forward_pd = mkldnn::lrn_forward::primitive_desc{forward_desc, mkldnn_engine}; @@ -122,8 +128,13 @@ class LRNMKLDNNOpKernel : public paddle::framework::OpKernel { src_memory_pd, static_cast(const_cast(input_data))}; auto workspace_memory = mkldnn::memory{forward_pd.workspace_primitive_desc()}; + auto dst_memory = mkldnn::memory(forward_pd.dst_primitive_desc(), + static_cast(output_data)); run_primitive(forward_pd, src_memory, workspace_memory, dst_memory); + + out->set_layout(framework::DataLayout::kMKLDNN); + out->set_format(platform::GetMKLDNNFormat(dst_memory)); } } }; @@ -151,7 +162,7 @@ class LRNMKLDNNGradOpKernel : public paddle::framework::OpKernel { const std::string key_workspace_memory = key + "@lrn_workspace_memory"; const int n = ctx.Attr("n"); - const float alpha = ctx.Attr("alpha"); + const float alpha = ctx.Attr("alpha") * static_cast(n); const float beta = ctx.Attr("beta"); const float k = ctx.Attr("k"); diff --git a/paddle/fluid/operators/mkldnn_activation_op.h b/paddle/fluid/operators/mkldnn/mkldnn_activation_op.h similarity index 100% rename from paddle/fluid/operators/mkldnn_activation_op.h rename to paddle/fluid/operators/mkldnn/mkldnn_activation_op.h diff --git a/paddle/fluid/operators/pool_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc similarity index 100% rename from paddle/fluid/operators/pool_mkldnn_op.cc rename to paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc diff --git a/paddle/fluid/operators/quantize_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/quantize_mkldnn_op.cc similarity index 100% rename from paddle/fluid/operators/quantize_mkldnn_op.cc rename to paddle/fluid/operators/mkldnn/quantize_mkldnn_op.cc diff --git a/paddle/fluid/operators/softmax_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc similarity index 100% rename from paddle/fluid/operators/softmax_mkldnn_op.cc rename
to paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc diff --git a/paddle/fluid/operators/sum_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/sum_mkldnn_op.cc similarity index 100% rename from paddle/fluid/operators/sum_mkldnn_op.cc rename to paddle/fluid/operators/mkldnn/sum_mkldnn_op.cc diff --git a/paddle/fluid/operators/transpose_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc similarity index 100% rename from paddle/fluid/operators/transpose_mkldnn_op.cc rename to paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc diff --git a/paddle/fluid/operators/ngraph/ngraph_bridge.cc b/paddle/fluid/operators/ngraph/ngraph_bridge.cc index d6e897ed4666261cdd0bd6565f61abb218d971e5..13b168ce4553c3377a62d9781b185fa7303c1136 100644 --- a/paddle/fluid/operators/ngraph/ngraph_bridge.cc +++ b/paddle/fluid/operators/ngraph/ngraph_bridge.cc @@ -38,6 +38,8 @@ std::map +#include + +#include "ngraph/ngraph.hpp" +#include "paddle/fluid/platform/ngraph_helper.h" + +namespace paddle { +namespace operators { +namespace ngraphs { + +void BuildPool2dNode( + const std::shared_ptr& op, + std::shared_ptr< + std::unordered_map>> + ngb_node_map) { + auto op_attrs = paddle::framework::AttrReader(op->Attrs()); + auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); + auto x_shape = x->get_shape(); + + std::string pooling_type = op_attrs.Get("pooling_type"); + std::vector ksize = op_attrs.Get>("ksize"); + std::vector strides = op_attrs.Get>("strides"); + std::vector paddings = op_attrs.Get>("paddings"); + + PADDLE_ENFORCE_EQ(x_shape.size() - 2, ksize.size(), + "Handling 2d pooling only"); + + if (op_attrs.Get("global_pooling")) { + for (size_t i = 0; i < ksize.size(); ++i) { + paddings[i] = 0; + ksize[i] = static_cast(x_shape.at(i + 2)); + } + } + + ngraph::Shape ng_padding_below{static_cast(paddings.at(0)), + static_cast(paddings.at(1))}; + ngraph::Shape ng_padding_above{static_cast(paddings.at(0)), + static_cast(paddings.at(1))}; + ngraph::Shape ng_ksize_shape{static_cast(ksize.at(0)), + static_cast(ksize.at(1))}; + ngraph::Strides ng_strides{static_cast(strides.at(0)), + static_cast(strides.at(1))}; + + auto ComputeCeiledOutput = [](size_t in, size_t k, size_t p, size_t s) { + return (in - k + 2 * p) / s + 1; + }; + + if (op_attrs.Get("ceil_mode")) { + auto dummy_out = paddle::platform::GetOutputNode(op, "Out", ngb_node_map); + auto dummpy_shape = dummy_out->get_shape(); + for (size_t i = 0; i < ng_padding_above.size(); ++i) { + auto desired_size = ComputeCeiledOutput(x_shape[i + 2], ksize[i], + paddings[i], strides[i]); + if (desired_size != dummpy_shape[i + 2]) { + ng_padding_above[i] += strides[i]; + } + } + } + + bool padding_exclusive = op_attrs.Get("exclusive"); + if (pooling_type == "max") { + auto pool2d = std::make_shared( + x, ng_ksize_shape, ng_strides, ng_padding_below, ng_padding_above); + paddle::platform::SetOutputNode(op, "Out", pool2d, ngb_node_map); + } else if (pooling_type == "avg") { + std::shared_ptr pool2d; + if (op_attrs.Get("adaptive")) { + auto ComputeAdaptive = [](size_t in, size_t k) { + return std::floor(in / k); + }; + ng_strides[0] = x_shape.size() == 4 + ? ComputeAdaptive(x_shape[3], ksize[0]) + : ng_strides[0]; + ng_strides[1] = x_shape.size() == 4 + ? 
ComputeAdaptive(x_shape[3], ksize[0]) + : ng_strides[1]; + pool2d = + std::make_shared(x, ng_ksize_shape, ng_strides); + } else { + pool2d = std::make_shared( + x, ng_ksize_shape, ng_strides, ng_padding_below, ng_padding_above, + !padding_exclusive); + } + paddle::platform::SetOutputNode(op, "Out", pool2d, ngb_node_map); + } else { + PADDLE_THROW("Support max and avg pooling only"); + } +} + +void BuildPool2dGradNode( + const std::shared_ptr& op, + std::shared_ptr< + std::unordered_map>> + ngb_node_map) { + auto op_attrs = paddle::framework::AttrReader(op->Attrs()); + auto out = paddle::platform::GetInputNode(op, "Out", ngb_node_map); + auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map); + auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); + auto x_shape = x->get_shape(); + + std::string pooling_type = op_attrs.Get("pooling_type"); + std::vector ksize = op_attrs.Get>("ksize"); + std::vector strides = op_attrs.Get>("strides"); + std::vector paddings = op_attrs.Get>("paddings"); + + PADDLE_ENFORCE_EQ(x_shape.size() - 2, ksize.size(), + "Handling 2d pooling only"); + + if (op_attrs.Get("global_pooling")) { + for (size_t i = 0; i < ksize.size(); ++i) { + paddings[i] = 0; + ksize[i] = static_cast(x_shape.at(i + 2)); + } + } + + ngraph::Shape ng_padding_below{static_cast(paddings.at(0)), + static_cast(paddings.at(1))}; + ngraph::Shape ng_padding_above{static_cast(paddings.at(0)), + static_cast(paddings.at(1))}; + ngraph::Shape ng_ksize_shape{static_cast(ksize.at(0)), + static_cast(ksize.at(1))}; + ngraph::Strides ng_strides{static_cast(strides.at(0)), + static_cast(strides.at(1))}; + + bool padding_exclusive = op_attrs.Get("exclusive"); + if (pooling_type == "max") { + auto pool2d_grad = std::make_shared( + x, dout, out, ng_ksize_shape, ng_strides, ng_padding_below, + ng_padding_above); + paddle::platform::SetOutputNode(op, "X@GRAD", pool2d_grad, ngb_node_map); + } else if (pooling_type == "avg") { + std::shared_ptr pool2d_grad; + if (op_attrs.Get("adaptive")) { + auto ComputeAdaptive = [](size_t in, size_t k) { + return std::floor(in / k); + }; + ng_strides[0] = x_shape.size() == 4 + ? ComputeAdaptive(x_shape[3], ksize[0]) + : ng_strides[0]; + ng_strides[1] = x_shape.size() == 4 + ? 
ComputeAdaptive(x_shape[3], ksize[0]) + : ng_strides[1]; + pool2d_grad = std::make_shared( + x->get_shape(), dout, ng_ksize_shape, ng_strides, ng_padding_below, + ng_padding_above, !padding_exclusive); + } else { + pool2d_grad = std::make_shared( + x->get_shape(), dout, ng_ksize_shape, ng_strides, ng_padding_below, + ng_padding_above, !padding_exclusive); + } + paddle::platform::SetOutputNode(op, "X@GRAD", pool2d_grad, ngb_node_map); + } else { + PADDLE_THROW("Support max and avg pooling only"); + } +} +} // namespace ngraphs +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc index 8eab3a6f891f1dfa91c5ce316f1419df2cd42248..32365d6a9602fa8ad2c01b59c8cd361d52ed973f 100644 --- a/paddle/fluid/operators/reshape_op.cc +++ b/paddle/fluid/operators/reshape_op.cc @@ -330,6 +330,7 @@ class Reshape2GradOp : public framework::OperatorWithKernel { } // namespace operators } // namespace paddle namespace ops = paddle::operators; +namespace plat = paddle::platform; REGISTER_OPERATOR(reshape, ops::ReshapeOp, ops::ReshapeOpMaker, paddle::framework::DefaultGradOpDescMaker); @@ -356,16 +357,20 @@ REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape2_grad, float, ops::ReshapeGradKernel, #ifdef PADDLE_WITH_CUDA REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape, float, ops::ReshapeKernel, double, ops::ReshapeKernel, int, ops::ReshapeKernel, - int64_t, ops::ReshapeKernel); + int64_t, ops::ReshapeKernel, plat::float16, + ops::ReshapeKernel); REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape_grad, float, ops::ReshapeGradKernel, double, ops::ReshapeGradKernel, int, ops::ReshapeGradKernel, int64_t, + ops::ReshapeGradKernel, plat::float16, ops::ReshapeGradKernel); REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape2, float, ops::ReshapeKernel, double, ops::ReshapeKernel, int, ops::ReshapeKernel, - int64_t, ops::ReshapeKernel); + int64_t, ops::ReshapeKernel, plat::float16, + ops::ReshapeKernel); REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape2_grad, float, ops::ReshapeGradKernel, double, ops::ReshapeGradKernel, int, ops::ReshapeGradKernel, int64_t, + ops::ReshapeGradKernel, plat::float16, ops::ReshapeGradKernel); #endif diff --git a/paddle/fluid/operators/slice_op.cc b/paddle/fluid/operators/slice_op.cc index 789e61b2d332b9391ef45a8ebe58ad0f1a4d2bf0..94995fc99612adb1164e60f1a51747f74eacfb73 100644 --- a/paddle/fluid/operators/slice_op.cc +++ b/paddle/fluid/operators/slice_op.cc @@ -54,6 +54,9 @@ class SliceOp : public framework::OperatorWithKernel { out_dims[axes[i]] = end - start; } ctx->SetOutputDim("Out", out_dims); + if (axes[0] != 0) { + ctx->ShareLoD("Input", /*->*/ "Out"); + } } protected: diff --git a/paddle/fluid/operators/stack_op.cu b/paddle/fluid/operators/stack_op.cu index bf2a9e5b3d22996e688621727cb280dc9aed7859..24d0b2f906a8e0b360c3f477c9290ebe5d57a3ff 100644 --- a/paddle/fluid/operators/stack_op.cu +++ b/paddle/fluid/operators/stack_op.cu @@ -17,13 +17,16 @@ namespace plat = paddle::platform; namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL(stack, ops::StackKernel, - ops::StackKernel, - ops::StackKernel, - ops::StackKernel); +REGISTER_OP_CUDA_KERNEL( + stack, ops::StackKernel, + ops::StackKernel, + ops::StackKernel, + ops::StackKernel, + ops::StackKernel); -REGISTER_OP_CUDA_KERNEL(stack_grad, - ops::StackGradKernel, - ops::StackGradKernel, - ops::StackGradKernel, - ops::StackGradKernel); +REGISTER_OP_CUDA_KERNEL( + stack_grad, ops::StackGradKernel, + ops::StackGradKernel, + ops::StackGradKernel, + ops::StackGradKernel, + ops::StackGradKernel); diff --git 
a/paddle/fluid/operators/tensorrt/tensorrt_engine_op.cc b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.cc index b993c55fad13e892efd51648b78704bec83bf2b4..031335009b692f9d1f73070c88e8e79d852cbe36 100644 --- a/paddle/fluid/operators/tensorrt/tensorrt_engine_op.cc +++ b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.cc @@ -29,8 +29,14 @@ class TensorRTEngineOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("Xs", "A list of inputs.").AsDuplicable(); AddOutput("Ys", "A list of outputs").AsDuplicable(); AddAttr("subgraph", "the subgraph."); + AddAttr("calibration_data", "the calibration data for int8"); + AddAttr( + "engine_key", + "The engine_key here is used to distinguish different TRT Engines"); AddAttr("max_batch_size", "the maximum batch size."); AddAttr("workspace_size", "the workspace size."); + AddAttr("sub_block", "the trt block"); + AddAttr("enable_int8", "whether to switch to int8 mode"); AddComment("TensorRT engine operator."); } }; @@ -47,6 +53,6 @@ class TensorRTEngineInferVarType : public framework::VarTypeInference { namespace ops = paddle::operators; REGISTER_OPERATOR(tensorrt_engine, ops::TensorRTEngineOp, - ops::TensorRTEngineOpMaker); + ops::TensorRTEngineOpMaker, ops::TensorRTEngineOpMaker); #endif // PADDLE_WITH_CUDA diff --git a/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h index e7e990f759ba411f6954c51fb697a6befbad31b1..2ff35c7c6ac6409d529de5b794bfc322b1f5dd9b 100644 --- a/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h +++ b/paddle/fluid/operators/tensorrt/tensorrt_engine_op.h @@ -17,8 +17,10 @@ #ifdef PADDLE_WITH_CUDA #include +#include #include +#include "paddle/fluid/framework/executor.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/inference/analysis/helper.h" @@ -62,6 +64,9 @@ nvinfer1::Dims Vec2TRT_Dims(const std::vector &shape) { using inference::Singleton; using inference::tensorrt::TensorRTEngine; +using inference::tensorrt::TRTInt8Calibrator; +using inference::tensorrt::TRTCalibratorEngine; +using inference::tensorrt::TRTCalibratorEngineManager; class TensorRTEngineOp : public framework::OperatorBase { private: @@ -70,6 +75,11 @@ class TensorRTEngineOp : public framework::OperatorBase { mutable std::unique_ptr trt_engine_; int max_batch_size_; int workspace_size_; + std::unique_ptr calibrator_; + bool enable_int8_; + std::string calibration_data_; + std::string engine_key_; + bool calibration_mode_; public: TensorRTEngineOp(const std::string &type, @@ -80,19 +90,96 @@ class TensorRTEngineOp : public framework::OperatorBase { input_names_ = Inputs("Xs"); max_batch_size_ = Attr("max_batch_size"); workspace_size_ = Attr("workspace_size"); + enable_int8_ = Attr("enable_int8"); + calibration_data_ = Attr("calibration_data"); + engine_key_ = Attr("engine_key"); auto params = Attr>("parameters"); for (const auto &param : params) { param_names_.insert(param); } + // calibration_mode being true means we need to + // generate the calibration table data.
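In plain Python terms, the mode selection implemented by the constructor below amounts to the following sketch (a hypothetical helper, not Paddle API): calibration runs only when int8 is requested and no calibration table has been serialized yet.

.. code-block:: python

    def select_mode(enable_int8, calibration_data):
        # No table yet: run the native subgraph and record activation ranges.
        if enable_int8 and not calibration_data:
            return "calibration"
        # Table present (or plain fp32): build and run the TRT engine.
        return "trt"

    assert select_mode(True, "") == "calibration"
    assert select_mode(True, "<serialized table>") == "trt"
    assert select_mode(False, "") == "trt"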
+ calibration_mode_ = (enable_int8_ && calibration_data_.size() == 0); + + VLOG(4) << "calibration_mode: " << calibration_mode_; + if (enable_int8_ && calibration_data_.size()) { + calibrator_.reset(new TRTInt8Calibrator(calibration_data_)); + } } protected: + void RunNativeImpl(const framework::Scope &scope, + const platform::Place &dev_place) const { + framework::Executor executor(dev_place); + auto *block = Attr("sub_block"); + auto *program = block->Program(); + auto &current_scope = scope.NewScope(); + auto ctx = executor.Prepare(*program, block->ID()); + executor.RunPreparedContext(ctx.get(), &current_scope, false, true, true); + } + void RunImpl(const framework::Scope &scope, const platform::Place &dev_place) const override { + if (calibration_mode_ == true) { + RunCalibration(scope, dev_place); + return; + } RunTrt(scope, dev_place); } + void RunCalibration(const framework::Scope &scope, + const platform::Place &dev_place) const { + // This process builds a 32-bit trt engine, runs it on the calibration + // set, and records a histogram for each + // tensor of the distribution of activation values. + LOG_FIRST_N(INFO, 1) << "The TRT engine: " << engine_key_ + << " is running calibration trt int8... "; + int runtime_batch = 1; + platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); + auto &dev_ctx = *pool.Get(dev_place); + auto stream = + reinterpret_cast(dev_ctx).stream(); + if (!Singleton::Global().Has(engine_key_)) { + TRTCalibratorEngine *calib_res = + Singleton::Global().Create(engine_key_); + std::unordered_map calib_buffers; + for (auto &x : input_names_) { + if (param_names_.count(x)) continue; + auto &t = + inference::analysis::GetFromScope(scope, x); + calib_buffers[x] = t.memory_size(); + auto t_shape = framework::vectorize(t.dims()); + runtime_batch = t_shape[0]; + } + calib_res->calib_.reset(new TRTInt8Calibrator( + calib_buffers, runtime_batch, engine_key_, dev_place)); + calib_res->thr_.reset(new std::thread([&]() { + calib_res->engine_.reset(new TensorRTEngine( + max_batch_size_, workspace_size_, stream, + boost::get(dev_place).device, enable_int8_, + calib_res->calib_.get())); + VLOG(3) << "start the calib trt engine thread"; + Prepare(scope, dev_place, calib_res->engine_.get()); + })); + } + + TRTInt8Calibrator *temp_calibrator = + Singleton::Global() + .Get(engine_key_) + ->calib_.get(); + std::unordered_map calib_data; + + for (auto &x : Inputs("Xs")) { + if (param_names_.count(x)) continue; + auto &t = + inference::analysis::GetFromScope(scope, x); + calib_data.emplace(x, t.data()); + } + temp_calibrator->setBatch(calib_data); + RunNativeImpl(scope, dev_place); + } + void RunTrt(const framework::Scope &scope, const platform::Place &dev_place) const { int runtime_batch = 1; @@ -101,9 +188,10 @@ class TensorRTEngineOp : public framework::OperatorBase { auto stream = reinterpret_cast(dev_ctx).stream(); if (trt_engine_.get() == nullptr) { - trt_engine_.reset(new TensorRTEngine( - max_batch_size_, workspace_size_, stream, - boost::get(dev_place).device)); + trt_engine_.reset( + new TensorRTEngine(max_batch_size_, workspace_size_, stream, + boost::get(dev_place).device, + enable_int8_, calibrator_.get())); Prepare(scope, dev_place, trt_engine_.get()); } @@ -173,7 +261,8 @@ class TensorRTEngineOp : public framework::OperatorBase { void Prepare(const framework::Scope &scope, const platform::Place &dev_place, TensorRTEngine *engine) const { - VLOG(4) << "Prepare engine"; + LOG(INFO) << "Prepare TRT engine (Optimize model structure, Select OP " + "kernel
etc). This process may cost a lot of time."; framework::proto::BlockDesc block_desc; block_desc.ParseFromString(Attr("subgraph")); diff --git a/paddle/fluid/operators/tensorrt/tensorrt_engine_op_test.cc b/paddle/fluid/operators/tensorrt/tensorrt_engine_op_test.cc index 391e7a1c070e040f6e90f820634c0d8b7cd40a96..5a3d9d2c1a3e8111acbad2ddcf4f5469a3a99751 100644 --- a/paddle/fluid/operators/tensorrt/tensorrt_engine_op_test.cc +++ b/paddle/fluid/operators/tensorrt/tensorrt_engine_op_test.cc @@ -96,19 +96,20 @@ TEST(TensorRTEngineOp, manual) { engine_op_desc.SetType("tensorrt_engine"); engine_op_desc.SetInput("Xs", std::vector({"x"})); engine_op_desc.SetOutput("Ys", std::vector({"z0"})); - SetAttr(engine_op_desc.Proto(), "subgraph", - block_->SerializeAsString()); - SetAttr(engine_op_desc.Proto(), "max_batch_size", 2); - SetAttr(engine_op_desc.Proto(), "workspace_size", 1 << 20); - SetAttr(engine_op_desc.Proto(), "engine_uniq_key", "a_engine"); - SetAttr>(engine_op_desc.Proto(), "parameters", - std::vector({})); - SetAttr>(engine_op_desc.Proto(), - "output_name_mapping", - std::vector({"z0"})); + + engine_op_desc.SetBlockAttr("sub_block", &block_desc); + engine_op_desc.SetAttr("max_batch_size", static_cast(2)); + engine_op_desc.SetAttr("workspace_size", static_cast(1 << 20)); + engine_op_desc.SetAttr("parameters", std::vector({})); + engine_op_desc.SetAttr("engine_key", std::string("a_engine")); + engine_op_desc.SetAttr("calibration_data", std::string("")); + engine_op_desc.SetAttr("enable_int8", static_cast(false)); + engine_op_desc.SetAttr("output_name_mapping", + std::vector({"z0"})); + engine_op_desc.SetAttr("subgraph", std::string(block_->SerializeAsString())); LOG(INFO) << "create engine op"; - auto engine_op = framework::OpRegistry::CreateOp(*engine_op_desc.Proto()); + auto engine_op = framework::OpRegistry::CreateOp(engine_op_desc); LOG(INFO) << "engine_op " << engine_op.get(); framework::Scope scope; @@ -190,20 +191,19 @@ void Execute(int batch_size, int input_dim, int output_dim, int nlayers = 1) { engine_op_desc.SetInput("Xs", std::vector({"x0"})); engine_op_desc.SetOutput("Ys", std::vector({"z3"})); - SetAttr(engine_op_desc.Proto(), "subgraph", - block_->SerializeAsString()); - SetAttr(engine_op_desc.Proto(), "max_batch_size", batch_size); - SetAttr(engine_op_desc.Proto(), "workspace_size", 1 << 20); - SetAttr>( - engine_op_desc.Proto(), "parameters", - std::vector({"y0", "y1", "y2", "y3"})); - SetAttr(engine_op_desc.Proto(), "engine_uniq_key", "b_engine"); - - SetAttr>(engine_op_desc.Proto(), - "output_name_mapping", - std::vector({"z3"})); - - auto engine_op = framework::OpRegistry::CreateOp(*engine_op_desc.Proto()); + engine_op_desc.SetBlockAttr("sub_block", &block_desc); + engine_op_desc.SetAttr("max_batch_size", static_cast(batch_size)); + engine_op_desc.SetAttr("workspace_size", static_cast(1 << 20)); + engine_op_desc.SetAttr("parameters", + std::vector({"y0", "y1", "y2", "y3"})); + engine_op_desc.SetAttr("engine_key", std::string("b_engine")); + engine_op_desc.SetAttr("calibration_data", std::string("")); + engine_op_desc.SetAttr("enable_int8", static_cast(false)); + engine_op_desc.SetAttr("output_name_mapping", + std::vector({"z3"})); + engine_op_desc.SetAttr("subgraph", std::string(block_->SerializeAsString())); + + auto engine_op = framework::OpRegistry::CreateOp(engine_op_desc); // Execute them. 
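Stepping out of the C++ test for a moment: the pybind changes later in this patch expose the same int8 switch to Python. A hedged usage sketch (the model directory is a placeholder, and the first int8 runs are expected to generate the calibration table):

.. code-block:: python

    from paddle.fluid.core import AnalysisConfig

    config = AnalysisConfig("./mobilenet_model_dir")  # placeholder path
    config.enable_tensorrt_engine(
        workspace_size=1 << 20,
        max_batch_size=1,
        min_subgraph_size=3,
        precision_mode=AnalysisConfig.Precision.Int8)
    assert config.tensorrt_engine_enabled()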
engine_op->Run(scope, place); diff --git a/paddle/fluid/operators/transpose_op.cu.cc b/paddle/fluid/operators/transpose_op.cu.cc index b4025350fa9f3610bde43eee91cd059f3063813f..915774e5f3624f26dbd1451a99d7bf0bf75a72c8 100644 --- a/paddle/fluid/operators/transpose_op.cu.cc +++ b/paddle/fluid/operators/transpose_op.cu.cc @@ -15,19 +15,27 @@ limitations under the License. */ #include "paddle/fluid/operators/transpose_op.h" namespace ops = paddle::operators; +namespace plat = paddle::platform; + REGISTER_OP_CUDA_KERNEL( transpose, ops::TransposeKernel, - ops::TransposeKernel); + ops::TransposeKernel, + ops::TransposeKernel); REGISTER_OP_CUDA_KERNEL( transpose_grad, ops::TransposeGradKernel, - ops::TransposeGradKernel); + ops::TransposeGradKernel, + ops::TransposeGradKernel); REGISTER_OP_CUDA_KERNEL( transpose2, ops::TransposeKernel, - ops::TransposeKernel); + ops::TransposeKernel, + ops::TransposeKernel); REGISTER_OP_CUDA_KERNEL( transpose2_grad, ops::TransposeGradKernel, - ops::TransposeGradKernel); + ops::TransposeGradKernel, + ops::TransposeGradKernel); diff --git a/paddle/fluid/platform/dynload/mklml.h b/paddle/fluid/platform/dynload/mklml.h index d0619293acf2d2df0d925e969bdeb8e45cda6e2b..a260cda49138580b209e647af459e9392d9f18f1 100644 --- a/paddle/fluid/platform/dynload/mklml.h +++ b/paddle/fluid/platform/dynload/mklml.h @@ -70,6 +70,8 @@ extern void* mklml_dso_handle; __macro(cblas_ddot); \ __macro(cblas_sasum); \ __macro(cblas_dasum); \ + __macro(cblas_isamax); \ + __macro(cblas_idamax); \ __macro(cblas_sscal); \ __macro(cblas_dscal); \ __macro(vsAdd); \ diff --git a/paddle/fluid/platform/enforce.h b/paddle/fluid/platform/enforce.h index 15413785bab3c0fd77244141e8f1840ca0cc1356..142d38f0609d963ce3ff45c595b8432b0e5edd21 100644 --- a/paddle/fluid/platform/enforce.h +++ b/paddle/fluid/platform/enforce.h @@ -71,9 +71,8 @@ struct EnforceNotMet : public std::exception { } } - template - EnforceNotMet(const char* f, int l, ARGS... args) { - Init(string::Sprintf(args...), f, l); + EnforceNotMet(const std::string& str, const char* f, int l) { + Init(str, f, l); } const char* what() const noexcept override { return err_str_.c_str(); } @@ -142,28 +141,23 @@ struct EOFException : public std::exception { inline bool is_error(bool stat) { return !stat; } -template -inline typename std::enable_if::type throw_on_error( - bool stat, const Args&... args) { +inline void throw_on_error(bool stat, const std::string& msg) { #ifndef REPLACE_ENFORCE_GLOG - throw std::runtime_error(string::Sprintf(args...)); + throw std::runtime_error(msg); #else - LOG(FATAL) << string::Sprintf(args...); + LOG(FATAL) << msg; #endif } #ifdef PADDLE_WITH_CUDA -inline bool is_error(cudaError_t e) { return UNLIKELY(e); } +inline bool is_error(cudaError_t e) { return e != cudaSuccess; } -template -inline typename std::enable_if::type throw_on_error( - cudaError_t e, const Args&... args) { +inline void throw_on_error(cudaError_t e, const std::string& msg) { #ifndef REPLACE_ENFORCE_GLOG - throw thrust::system_error(e, thrust::cuda_category(), - string::Sprintf(args...)); + throw thrust::system_error(e, thrust::cuda_category(), msg); #else - LOG(FATAL) << string::Sprintf(args...); + LOG(FATAL) << msg; #endif } @@ -171,14 +165,12 @@ inline bool is_error(curandStatus_t stat) { return stat != CURAND_STATUS_SUCCESS; } -template -inline typename std::enable_if::type throw_on_error( - curandStatus_t stat, const Args&... 
args) { +inline void throw_on_error(curandStatus_t stat, const std::string& msg) { #ifndef REPLACE_ENFORCE_GLOG throw thrust::system_error(cudaErrorLaunchFailure, thrust::cuda_category(), - string::Sprintf(args...)); + msg); #else - LOG(FATAL) << string::Sprintf(args...); + LOG(FATAL) << msg; #endif } @@ -186,14 +178,11 @@ inline bool is_error(cudnnStatus_t stat) { return stat != CUDNN_STATUS_SUCCESS; } -template -inline typename std::enable_if::type throw_on_error( - cudnnStatus_t stat, const Args&... args) { +inline void throw_on_error(cudnnStatus_t stat, const std::string& msg) { #ifndef REPLACE_ENFORCE_GLOG - throw std::runtime_error(platform::dynload::cudnnGetErrorString(stat) + - string::Sprintf(args...)); + throw std::runtime_error(platform::dynload::cudnnGetErrorString(stat) + msg); #else - LOG(FATAL) << string::Sprintf(args...); + LOG(FATAL) << platform::dynload::cudnnGetErrorString(stat) << msg; #endif } @@ -201,9 +190,7 @@ inline bool is_error(cublasStatus_t stat) { return stat != CUBLAS_STATUS_SUCCESS; } -template -inline typename std::enable_if::type throw_on_error( - cublasStatus_t stat, const Args&... args) { +inline void throw_on_error(cublasStatus_t stat, const std::string& msg) { std::string err; if (stat == CUBLAS_STATUS_NOT_INITIALIZED) { err = "CUBLAS: not initialized, "; @@ -225,87 +212,45 @@ inline typename std::enable_if::type throw_on_error( err = "CUBLAS: license error, "; } #ifndef REPLACE_ENFORCE_GLOG - throw std::runtime_error(err + string::Sprintf(args...)); + throw std::runtime_error(err + msg); #else - LOG(FATAL) << err << string::Sprintf(args...); + LOG(FATAL) << err << msg; #endif } #if !defined(__APPLE__) && !defined(_WIN32) -template -inline typename std::enable_if::type throw_on_error( - ncclResult_t stat, const Args&... args) { - if (stat == ncclSuccess) { - return; - } else { +inline bool is_error(ncclResult_t nccl_result) { + return nccl_result != ncclSuccess; +} + +inline void throw_on_error(ncclResult_t stat, const std::string& msg) { #ifndef REPLACE_ENFORCE_GLOG - throw std::runtime_error(platform::dynload::ncclGetErrorString(stat) + - string::Sprintf(args...)); + throw std::runtime_error(platform::dynload::ncclGetErrorString(stat) + msg); #else - LOG(FATAL) << platform::dynload::ncclGetErrorString(stat) - << string::Sprintf(args...); + LOG(FATAL) << platform::dynload::ncclGetErrorString(stat) << msg; #endif - } } #endif // __APPLE__ and windows #endif // PADDLE_WITH_CUDA -template -inline void throw_on_error(T e) { - throw_on_error(e, ""); -} - -#define PADDLE_THROW(...) \ - throw ::paddle::platform::EnforceNotMet(__FILE__, __LINE__, __VA_ARGS__) - -#define __PADDLE_THROW_ERROR_I(_, _9, _8, _7, _6, _5, _4, _3, _2, X_, ...) X_; - -#define __THROW_ON_ERROR_ONE_ARG(COND, ARG) \ - ::paddle::platform::throw_on_error(COND, ::paddle::string::Sprintf(ARG)); - -#ifdef _WIN32 -#define __PADDLE_THROW_ON_ERROR(COND, ...) \ - __THROW_ON_ERROR_ONE_ARG(COND, __VA_ARGS__) -#else // _WIN32 -#define __PADDLE_THROW_ON_ERROR(COND, ...) 
\ - __PADDLE_THROW_ERROR_I( \ - __VA_ARGS__, ::paddle::platform::throw_on_error(COND, __VA_ARGS__), \ - ::paddle::platform::throw_on_error(COND, __VA_ARGS__), \ - ::paddle::platform::throw_on_error(COND, __VA_ARGS__), \ - ::paddle::platform::throw_on_error(COND, __VA_ARGS__), \ - ::paddle::platform::throw_on_error(COND, __VA_ARGS__), \ - ::paddle::platform::throw_on_error(COND, __VA_ARGS__), \ - ::paddle::platform::throw_on_error(COND, __VA_ARGS__), \ - ::paddle::platform::throw_on_error(COND, __VA_ARGS__), \ - __THROW_ON_ERROR_ONE_ARG(COND, __VA_ARGS__)) -#endif // _WIN32 - -#define __PADDLE_UNARY_COMPARE(COND, ...) \ - do { \ - auto __cond = COND; \ - if (UNLIKELY(::paddle::platform::is_error(__cond))) { \ - __PADDLE_THROW_ON_ERROR(__cond, __VA_ARGS__); \ - } \ +#define PADDLE_THROW(...) \ + throw ::paddle::platform::EnforceNotMet( \ + ::paddle::string::Sprintf(__VA_ARGS__), __FILE__, __LINE__) + +#define PADDLE_ENFORCE(COND, ...) \ + do { \ + auto __cond__ = (COND); \ + if (UNLIKELY(::paddle::platform::is_error(__cond__))) { \ + try { \ + ::paddle::platform::throw_on_error( \ + __cond__, ::paddle::string::Sprintf(__VA_ARGS__)); \ + } catch (...) { \ + throw ::paddle::platform::EnforceNotMet(std::current_exception(), \ + __FILE__, __LINE__); \ + } \ + } \ } while (0) -#ifndef REPLACE_ENFORCE_GLOG -#define __PADDLE_ENFORCE_I(COND, ...) \ - do { \ - try { \ - __PADDLE_UNARY_COMPARE(COND, __VA_ARGS__); \ - } catch (...) { \ - throw ::paddle::platform::EnforceNotMet(std::current_exception(), \ - __FILE__, __LINE__); \ - } \ - } while (0) - -#else -#define __PADDLE_ENFORCE_I(COND, ...) __PADDLE_UNARY_COMPARE(COND, __VA_ARGS__); -#endif // REPLACE_ENFORCE_GLOG - -#define __PADDLE_ENFORCE(__args) __PADDLE_ENFORCE_I __args -#define PADDLE_ENFORCE(...) __PADDLE_ENFORCE((__VA_ARGS__)) - #define PADDLE_THROW_EOF() \ do { \ throw ::paddle::platform::EOFException("There is no next data.", __FILE__, \ diff --git a/paddle/fluid/platform/nccl_helper.h b/paddle/fluid/platform/nccl_helper.h index 8df8e32098697540f02d488c873f5ae7fb29828e..6ae21ee8294bedc388f837aad3e20a2b9aca98a2 100644 --- a/paddle/fluid/platform/nccl_helper.h +++ b/paddle/fluid/platform/nccl_helper.h @@ -64,7 +64,7 @@ class NCCLGroupGuard { } inline ~NCCLGroupGuard() { - CHECK_EQ(dynload::ncclGroupEnd(), ncclSuccess); + PADDLE_ENFORCE(dynload::ncclGroupEnd()); NCCLMutex().unlock(); } }; diff --git a/paddle/fluid/pybind/inference_api.cc b/paddle/fluid/pybind/inference_api.cc index 26247026667158a2f43cdac21bf5600479455e16..39e47be606c07ed216c9fe2ff8fa75552b8b7c76 100644 --- a/paddle/fluid/pybind/inference_api.cc +++ b/paddle/fluid/pybind/inference_api.cc @@ -33,7 +33,6 @@ using paddle::PaddlePredictor; using paddle::NativeConfig; using paddle::NativePaddlePredictor; using paddle::AnalysisPredictor; -using paddle::contrib::AnalysisConfig; static void BindPaddleDType(py::module *m); static void BindPaddleBuf(py::module *m); @@ -180,8 +179,14 @@ void BindNativePredictor(py::module *m) { } void BindAnalysisConfig(py::module *m) { - py::class_(*m, "AnalysisConfig") - .def(py::init()) + py::class_ analysis_config(*m, "AnalysisConfig"); + + py::enum_(analysis_config, "Precision") + .value("Float32", AnalysisConfig::Precision::kFloat32) + .value("Int8", AnalysisConfig::Precision::kInt8) + .export_values(); + + analysis_config.def(py::init()) .def(py::init()) .def(py::init()) .def("set_model", (void (AnalysisConfig::*)(const std::string &)) & @@ -215,7 +220,8 @@ void BindAnalysisConfig(py::module *m) { .def("specify_input_name", 
&AnalysisConfig::specify_input_name) .def("enable_tensorrt_engine", &AnalysisConfig::EnableTensorRtEngine, py::arg("workspace_size") = 1 << 20, py::arg("max_batch_size") = 1, - py::arg("min_subgraph_size") = 3) + py::arg("min_subgraph_size") = 3, + py::arg("precision_mode") = AnalysisConfig::Precision::kFloat32) .def("tensorrt_engine_enabled", &AnalysisConfig::tensorrt_engine_enabled) .def("switch_ir_debug", &AnalysisConfig::SwitchIrDebug, py::arg("x") = true) diff --git a/paddle/fluid/string/printf.h b/paddle/fluid/string/printf.h index 0b94b60018aac3a61edfda4d7ecb762e9fe70673..16bb3771f2e9bcc07028ef2039fed8691f9aab97 100644 --- a/paddle/fluid/string/printf.h +++ b/paddle/fluid/string/printf.h @@ -84,6 +84,8 @@ void Fprintf(std::ostream& out, const char* fmt, const Args&... args) { tinyformat::vformat(out, fmt, tinyformat::makeFormatList(args...)); } +inline std::string Sprintf() { return ""; } + template std::string Sprintf(const Args&... args) { std::ostringstream oss; diff --git a/paddle/scripts/fast_install.sh b/paddle/scripts/fast_install.sh index ddeb3a1a3deb9edd0db894d4017195c3330b88cc..e2b2eb2a90bffcdcb30004a51c71451537071ce1 100644 --- a/paddle/scripts/fast_install.sh +++ b/paddle/scripts/fast_install.sh @@ -40,9 +40,11 @@ function checkMacPython2(){ else python_version="" fi + check_python=`echo $python_version | grep "Python 2"` + echo $check_python if [ "$python_version" == "" ] || [ "$python_root" == "/usr/bin/python" -a "$python_version" == "Python 2.7.10" ] ;then python_version="" - else + elif [ -n "$check_python" ];then while true do read -p "Found: $python_version. Use it? (y/n) Enter n to provide a custom python path, or press ctrl + c to exit: " use_python @@ -60,6 +62,9 @@ function checkMacPython2(){ if [ "$use_python" == "y" ];then break fi + else + echo "The Python you provided is not Python 2" + python_version="" fi done } @@ -77,9 +82,10 @@ function checkMacPython3(){ else python_version="" fi + check_python=`echo $python_version | grep "Python 3"` if [ "$python_version" == "" ] || [ "$python_root" == "/usr/bin/python" -a "$python_version" == "Python 2.7.10" ] ;then python_version="" - else + elif [ -n "$check_python" ] ;then while true do read -p "Found: $python_version. Use it? (y/n) Enter n to provide a custom python path, or press ctrl + c to exit: " use_python @@ -97,6 +103,9 @@ function checkMacPython3(){ if [ "$use_python" == "y" ];then break fi + else + echo "The Python you provided is not Python 3" + python_version="" fi done } diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index c2156a436ec73d03082fa08b6250dc77b2cee19f..1135caf4f8c32901d93270d372fdaac702acf006 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -173,7 +173,6 @@ function cmake_gen() { -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Release} ${PYTHON_FLAGS} -DWITH_DSO=ON - -DWITH_DOC=${WITH_DOC:-OFF} -DWITH_GPU=${WITH_GPU:-OFF} -DWITH_AMD_GPU=${WITH_AMD_GPU:-OFF} -DWITH_DISTRIBUTE=${distibuted_flag} @@ -208,7 +207,6 @@ EOF -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Release} \ ${PYTHON_FLAGS} \ -DWITH_DSO=ON \ - -DWITH_DOC=${WITH_DOC:-OFF} \ -DWITH_GPU=${WITH_GPU:-OFF} \ -DWITH_AMD_GPU=${WITH_AMD_GPU:-OFF} \ -DWITH_DISTRIBUTE=${distibuted_flag} \ @@ -528,31 +526,6 @@ function bind_test() { wait } - -function gen_docs() { - mkdir -p ${PADDLE_ROOT}/build - cd ${PADDLE_ROOT}/build - cat <= 0 else ( + size[0] + padding_idx) + + self._param_attr = param_attr + self._dtype = dtype + self._remote_prefetch = self._is_sparse and (not self._is_distributed) + if self._remote_prefetch: + assert self._is_sparse is True and self._is_distributed is False + + from
..layer_helper import LayerHelper + self._helper = LayerHelper('embedding', param_attr=param_attr) + self._w = self._helper.create_parameter( + attr=self._param_attr, + shape=self._size, + dtype=self._dtype, + is_bias=False) + + def parameters(self): + return [self._w] + + def forward(self, input): + out = self._helper.create_variable_for_type_inference(self._dtype) + self._helper.append_op( + type='lookup_table', + inputs={'Ids': input, + 'W': self._w}, + outputs={'Out': out}, + attrs={ + 'is_sparse': self._is_sparse, + 'is_distributed': self._is_distributed, + 'remote_prefetch': self._remote_prefetch, + 'padding_idx': self._padding_idx + }) + + return out diff --git a/python/paddle/fluid/initializer.py b/python/paddle/fluid/initializer.py index 4f434328e47df4363b304ff55f587018d3157c5e..5be21ff7f7270f6ce950c069f61418c922bcedc5 100644 --- a/python/paddle/fluid/initializer.py +++ b/python/paddle/fluid/initializer.py @@ -366,17 +366,40 @@ class TruncatedNormalInitializer(Initializer): # Initialization Ops should be prepended and not appended if self._seed == 0: self._seed = block.program.random_seed + + # to be compatible with fp16 initializers + if var.dtype == VarDesc.VarType.FP16: + out_dtype = VarDesc.VarType.FP32 + out_var = block.create_var( + name=unique_name.generate(".".join( + ['truncated_gaussian_random', 'tmp'])), + shape=var.shape, + dtype=out_dtype, + type=VarDesc.VarType.LOD_TENSOR, + persistable=False) + else: + out_dtype = var.dtype + out_var = var + op = block._prepend_op( type="truncated_gaussian_random", - outputs={"Out": var}, + outputs={"Out": out_var}, attrs={ "shape": var.shape, - "dtype": int(var.dtype), + "dtype": out_dtype, "mean": self._mean, "std": self._std_dev, "seed": self._seed }, stop_gradient=True) + + if var.dtype == VarDesc.VarType.FP16: + block.append_op( + type="cast", + inputs={"X": out_var}, + outputs={"Out": var}, + attrs={"in_dtype": out_var.dtype, + "out_dtype": var.dtype}) var.op = op return op diff --git a/python/paddle/fluid/layer_helper.py b/python/paddle/fluid/layer_helper.py index 972c51938f2b2282f8de4b090f9af3bc66f89155..a172141b3a0455769dc1ce74d098be057324e047 100644 --- a/python/paddle/fluid/layer_helper.py +++ b/python/paddle/fluid/layer_helper.py @@ -300,6 +300,17 @@ class LayerHelper(object): attr.name = unique_name.generate(".".join([self.name, suffix])) if default_initializer is None and attr.initializer is None: + if isinstance(dtype, core.VarDesc.VarType): + if dtype != core.VarDesc.VarType.FP32 and \ + dtype != core.VarDesc.VarType.FP64: + raise TypeError( + "Cannot create parameter with default initializer when dtype is not float type. Set default_initializer to fit the parameter dtype!" + ) + else: + if not (dtype.startswith("float") or dtype == "double"): + raise TypeError( + "Cannot create parameter with default initializer when dtype is not float type. Set default_initializer to fit the parameter dtype!" + ) if is_bias: attr._set_default_bias_initializer() else: diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py index cddc302d52e0a5aea802fd7e1464f1e220c8f769..bf2f587e67099699e0f987ca248cf682ab0c22ce 100644 --- a/python/paddle/fluid/layers/detection.py +++ b/python/paddle/fluid/layers/detection.py @@ -49,6 +49,7 @@ __all__ = [ 'box_coder', 'polygon_box_transform', 'yolov3_loss', + 'multiclass_nms', ] @@ -262,8 +263,10 @@ def detection_output(loc, number is N + 1, N is the batch size.
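(For readers unfamiliar with LoD offsets, the arithmetic this docstring relies on is just cumulative counts; a plain-Python illustration with invented numbers:)

.. code-block:: python

    # LoD[i + 1] - LoD[i] counts the detections belonging to image i.
    offsets = [0, 3, 3, 7]  # three images with 3, 0 and 4 detections
    counts = [offsets[i + 1] - offsets[i] for i in range(len(offsets) - 1)]
    assert counts == [3, 0, 4]
    # When nothing is detected for any image, the patch emits LoD {1} plus a
    # single sentinel value -1 instead of the old all-zero LoD.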
The i-th image has `LoD[i + 1] - LoD[i]` detected results; if it is 0, the i-th image has no detected results. If all images have no detected results, - all the elements in LoD are 0, and output tensor only contains one + LoD will be set to {1}, and output tensor only contains one value, which is -1. + (Since version 1.3, when no boxes are detected, the LoD is changed + from {0} to {1}.) Examples: .. code-block:: python @@ -343,19 +346,107 @@ def box_coder(prior_box, target_box, code_type="encode_center_size", box_normalized=True, - name=None): + name=None, + axis=0): """ - ${comment} + **Box Coder Layer** + + Encode/Decode the target bounding box with the priorbox information. + + The Encoding schema is described below: + + .. math:: + + ox = (tx - px) / pw / pxv + + oy = (ty - py) / ph / pyv + + ow = \log(\abs(tw / pw)) / pwv + + oh = \log(\abs(th / ph)) / phv + + The Decoding schema is described below: + + .. math:: + + ox = (pw * pxv * tx + px) - tw / 2 + + oy = (ph * pyv * ty + py) - th / 2 + + ow = \exp(pwv * tw) * pw + tw / 2 + + oh = \exp(phv * th) * ph + th / 2 + + where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates, + width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote + the priorbox's (anchor) center coordinates, width and height. `pxv`, + `pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`, + `ow`, `oh` denote the encoded/decoded coordinates, width and height. + + During Box Decoding, two modes for broadcast are supported. Say the target + box has shape [N, M, 4], and the shape of the prior box can be [N, 4] or + [M, 4]. Then the prior box will broadcast to the target box along the + assigned axis. Args: - prior_box(${prior_box_type}): ${prior_box_comment} - prior_box_var(${prior_box_var_type}): ${prior_box_var_comment} - target_box(${target_box_type}): ${target_box_comment} - code_type(${code_type_type}): ${code_type_comment} - box_normalized(${box_normalized_type}): ${box_normalized_comment} + prior_box(Variable): Box list prior_box is a 2-D Tensor with shape + [M, 4] that holds M boxes, each box is represented as + [xmin, ymin, xmax, ymax], [xmin, ymin] is the + left top coordinate of the anchor box; if the + input is an image feature map, they are close to + the origin of the coordinate system. [xmax, ymax] + is the right bottom coordinate of the anchor box. + prior_box_var(Variable|list): prior_box_var supports two types of input. + One is a variable with shape [M, 4] that holds M groups + of variances. The other is a list of 4 elements + shared by all boxes. + target_box(Variable): This input can be a 2-D LoDTensor with shape + [N, 4] when code_type is 'encode_center_size'. + This input can also be a 3-D Tensor with shape + [N, M, 4] when code_type is 'decode_center_size'. + Each box is represented as + [xmin, ymin, xmax, ymax]. This tensor can + contain LoD information to represent a batch + of inputs. + code_type(string): The code type used with the target box. It can be + encode_center_size or decode_center_size + box_normalized(int): Whether to treat the priorbox as a normalized box. + Set true by default. + name(string): The name of box coder. + axis(int): Which axis in PriorBox to broadcast for box decode, + for example, if axis is 0 and TargetBox has shape + [N, M, 4] and PriorBox has shape [M, 4], then PriorBox + will broadcast to [N, M, 4] for decoding. It is only valid + when code type is decode_center_size. Set 0 by default.
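A small numeric sanity check of the encode/decode schemas above (one prior box and one target box with invented values; a shared variance of 0.1 stands in for pxv and pwv):

.. code-block:: python

    import numpy as np

    px, pw = 2.0, 4.0  # prior box center x and width
    tx, tw = 3.0, 2.0  # target box center x and width
    v = 0.1            # pxv = pwv = 0.1

    # encode_center_size
    ex = (tx - px) / pw / v
    ew = np.log(np.abs(tw / pw)) / v

    # decode_center_size inverts the encoding back to center and width
    assert np.isclose(pw * v * ex + px, tx)
    assert np.isclose(np.exp(v * ew) * pw, tw)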
Returns: - output_box(${output_box_type}): ${output_box_comment} + output_box(Variable): When code_type is 'encode_center_size', the + output tensor of box_coder_op with shape + [N, M, 4] representing the result of N target + boxes encoded with M Prior boxes and variances. + When code_type is 'decode_center_size', + N represents the batch size and M represents + the number of decoded boxes. + + Examples: + + .. code-block:: python + + prior_box = fluid.layers.data(name='prior_box', + shape=[512, 4], + dtype='float32', + append_batch_size=False) + target_box = fluid.layers.data(name='target_box', + shape=[512,81,4], + dtype='float32', + append_batch_size=False) + output = fluid.layers.box_coder(prior_box=prior_box, + prior_box_var=[0.1,0.1,0.2,0.2], + target_box=target_box, + code_type="decode_center_size", + box_normalized=False, + axis=1) + """ helper = LayerHelper("box_coder", **locals()) @@ -366,15 +457,22 @@ def box_coder(prior_box, output_box = helper.create_variable( name=name, dtype=prior_box.dtype, persistable=False) + inputs = {"PriorBox": prior_box, "TargetBox": target_box} + attrs = { + "code_type": code_type, + "box_normalized": box_normalized, + "axis": axis + } + if isinstance(prior_box_var, Variable): + inputs['PriorBoxVar'] = prior_box_var + elif isinstance(prior_box_var, list): + attrs['variance'] = prior_box_var + else: + raise TypeError("Input variance of box_coder must be Variable or list") helper.append_op( type="box_coder", - inputs={ - "PriorBox": prior_box, - "PriorBoxVar": prior_box_var, - "TargetBox": target_box - }, - attrs={"code_type": code_type, - "box_normalized": box_normalized}, + inputs=inputs, + attrs=attrs, outputs={"OutputBox": output_box}) return output_box @@ -1960,3 +2058,119 @@ def generate_proposals(scores, rpn_roi_probs.stop_gradient = True return rpn_rois, rpn_roi_probs + + +def multiclass_nms(bboxes, + scores, + score_threshold, + nms_top_k, + keep_top_k, + nms_threshold=0.3, + normalized=True, + nms_eta=1., + background_label=0, + name=None): + """ + **Multiclass NMS** + + This operator performs multi-class non-maximum suppression (NMS) on + boxes and scores. + + In the NMS step, this operator greedily selects a subset of detection bounding + boxes whose scores are larger than score_threshold, if this threshold is + provided, and then keeps the nms_top_k highest-confidence boxes if nms_top_k + is larger than -1. Then this operator prunes away boxes that have a high IOU + (intersection over union) overlap with already selected boxes by adaptive + threshold NMS based on the parameters nms_threshold and nms_eta. + + After the NMS step, at most keep_top_k total bboxes are kept + per image if keep_top_k is larger than -1. + + Args: + bboxes (Variable): Two types of bboxes are supported: + 1. (Tensor) A 3-D Tensor with shape + [N, M, 4 or 8, 16, 24, 32] represents the + predicted locations of M bounding boxes, + N is the batch size. Each bounding box has four + coordinate values and the layout is + [xmin, ymin, xmax, ymax], when the box size equals 4. + 2. (LoDTensor) A 3-D Tensor with shape [M, C, 4] + M is the number of bounding boxes, C is the + class number + scores (Variable): Two types of scores are supported: + 1. (Tensor) A 3-D Tensor with shape [N, C, M] + represents the predicted confidence predictions. + N is the batch size, C is the class number, M is + the number of bounding boxes. For each category there + are in total M scores corresponding to the M bounding + boxes. Please note, M is equal to the 2nd dimension + of BBoxes. + 2.
(LoDTensor) A 2-D LoDTensor with shape [M, C].
+                           M is the number of bboxes and C is the class
+                           number. In this case, input BBoxes should be the
+                           second case with shape [M, C, 4].
+        background_label (int): The index of the background label; the
+                           background label will be ignored. If set to -1,
+                           then all categories will be considered. Default: 0
+        score_threshold (float): Threshold to filter out bounding boxes with
+                           low confidence score. If not provided,
+                           consider all boxes.
+        nms_top_k (int): Maximum number of detections to be kept according to
+                         the confidences after filtering detections based
+                         on score_threshold.
+        nms_threshold (float): The IOU threshold to be used in NMS. Default: 0.3
+        nms_eta (float): The parameter for adaptive-threshold NMS; when set
+                         below 1.0, the NMS threshold shrinks between
+                         selections. Default: 1.0
+        keep_top_k (int): Number of total bboxes to be kept per image after
+                          the NMS step. -1 means keeping all bboxes after the
+                          NMS step.
+        normalized (bool): Whether detections are normalized. Default: True
+        name(str): Name of the multiclass nms op. Default: None.
+
+    Returns:
+        Out: A 2-D LoDTensor with shape [No, 6] representing the detections.
+             Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax],
+             or a 2-D LoDTensor with shape [No, 10] representing the detections.
+             Each row has 10 values:
+             [label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
+             total number of detections. If there are no detected boxes for
+             any image, the LoD will be set to {1} and Out only contains one
+             value, which is -1.
+             (After version 1.3, when no boxes are detected, the LoD is
+             changed from {0} to {1}.)
+
+    Examples:
+        .. code-block:: python
+
+            boxes = fluid.layers.data(name='bboxes', shape=[81, 4],
+                                      dtype='float32', lod_level=1)
+            scores = fluid.layers.data(name='scores', shape=[81],
+                                      dtype='float32', lod_level=1)
+            out = fluid.layers.multiclass_nms(bboxes=boxes,
+                                              scores=scores,
+                                              background_label=0,
+                                              score_threshold=0.5,
+                                              nms_top_k=400,
+                                              nms_threshold=0.3,
+                                              keep_top_k=200,
+                                              normalized=False)
+    """
+    helper = LayerHelper('multiclass_nms', **locals())
+
+    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
+    helper.append_op(
+        type="multiclass_nms",
+        inputs={'BBoxes': bboxes,
+                'Scores': scores},
+        attrs={
+            'background_label': background_label,
+            'score_threshold': score_threshold,
+            'nms_top_k': nms_top_k,
+            'nms_threshold': nms_threshold,
+            'nms_eta': nms_eta,
+            'keep_top_k': keep_top_k,
+            'normalized': normalized
+        },
+        outputs={'Out': output})
+    output.stop_gradient = True
+
+    return output
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index beb5e31211c5f9aa6bddfcb1da7e63d6480e99e1..0e4b5aadc0b0d7e87ea1cfb8e18339fe211e1eef 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -932,7 +932,7 @@ def dynamic_gru(input,
            create ParamAttr as param_attr. If the Initializer of the param_attr
            is not set, the parameter is initialized with Xavier. Default: None.
        bias_attr (ParamAttr|bool|None): The parameter attribute for the bias
-            of GRU. Note that the bias with :math:`(1 \\times 3D)` concatenates
+            of GRU.Note that the bias with :math:`(1 \\times 3D)` concatenates
            the bias in the update gate, reset gate and candidate calculations.
            If it is set to False, no bias will be applied to the update gate,
            reset gate and candidate calculations. If it is set to None or one
@@ -1073,7 +1073,7 @@ def gru_unit(input,
            create ParamAttr as param_attr. If the Initializer of the param_attr
            is not set, the parameter is initialized with Xavier. Default: None.
        bias_attr (ParamAttr|bool|None): The parameter attribute for the bias
-            of GRU. Note that the bias with :math:`(1 \\times 3D)` concatenates
+            of GRU.Note that the bias with :math:`(1 \\times 3D)` concatenates
            the bias in the update gate, reset gate and candidate calculations.
            If it is set to False, no bias will be applied to the update gate,
            reset gate and candidate calculations. If it is set to None or one
@@ -3877,7 +3878,8 @@ def beam_search(pre_ids,
                end_id,
                level=0,
                is_accumulated=True,
-                name=None):
+                name=None,
+                return_parent_idx=False):
    """
    Beam search is a classical algorithm for selecting candidate words in a
    machine translation task.
@@ -3933,10 +3934,16 @@ def beam_search(pre_ids,
            accumulated scores.
        name(str|None): A name for this layer(optional). If set None, the layer
                        will be named automatically.
+        return_parent_idx(bool): Whether to return an extra Tensor variable
+                        in the output, preserving the selected_ids' parent
+                        indices in pre_ids, which can be used to gather cell
+                        states at the next time step.

    Returns:
-        Variable: The LodTensor pair containing the selected ids and the \
-            corresponding scores.
+        Variable: The LodTensor tuple containing the selected ids and the \
+            corresponding scores. If :attr:`return_parent_idx` is :attr:`True`, \
+            an extra Tensor variable preserving the selected_ids' parent \
+            indices is included.

    Examples:
        .. code-block:: python
@@ -3969,6 +3976,11 @@ def beam_search(pre_ids,
    selected_scores = helper.create_variable_for_type_inference(
        dtype=score_type)
    selected_ids = helper.create_variable_for_type_inference(dtype=id_type)
+    # parent_idx is a tensor used to gather cell states at the next time
+    # step. Though the lod in selected_ids can also be used to gather by
+    # sequence_expand, it is not efficient.
+    # gather_op's index input only supports int32 dtype currently.
+    parent_idx = helper.create_variable_for_type_inference(dtype="int32")

    helper.append_op(
        type='beam_search',
@@ -3976,6 +3988,7 @@ def beam_search(pre_ids,
        outputs={
            'selected_ids': selected_ids,
            'selected_scores': selected_scores,
+            'parent_idx': parent_idx
        },
        attrs={
            # TODO(ChunweiYan) to assure other value support
@@ -3984,8 +3997,10 @@ def beam_search(pre_ids,
            'end_id': end_id,
            'is_accumulated': is_accumulated,
        })
-
-    return selected_ids, selected_scores
+    if return_parent_idx:
+        return selected_ids, selected_scores, parent_idx
+    else:
+        return selected_ids, selected_scores


def beam_search_decode(ids, scores, beam_size, end_id, name=None):
@@ -5403,7 +5418,7 @@ def transpose(x, perm, name=None):
    Examples:
        .. code-block:: python

-            # use append_batch_size=False to avoid prepending extra
+            # use append_batch_size=False to avoid prepending extra
            # batch size in shape
            x = fluid.layers.data(name='x', shape=[5, 10, 15],
                          dtype='float32', append_batch_size=False)
@@ -5920,7 +5935,7 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
                       than :attr:`shape`.
        act (str): The non-linear activation to be applied to the reshaped tensor
                   variable.
-        inplace(bool): Must use :attr:`False` if :attr:`x` is used in multiple
+        inplace(bool): Must use :attr:`False` if :attr:`x` is used in multiple
                       operators.
If this flag is set :attr:`True`, reuse input
                       :attr:`x` to reshape, which will change the shape of
                       tensor variable :attr:`x` and might cause errors when
@@ -6581,7 +6596,9 @@ def image_resize(input,
                 scale=None,
                 name=None,
                 resample='BILINEAR',
-                 actual_shape=None):
+                 actual_shape=None,
+                 align_corners=True,
+                 align_mode=1):
    """
    **Resize a Batch of Images**
@@ -6594,6 +6611,80 @@ def image_resize(input,

        'NEAREST' : Nearest neighbor interpolation

+    Nearest neighbor interpolation performs nearest neighbor interpolation
+    in both the 3rd dimension (the height direction) and the 4th dimension
+    (the width direction) on the input tensor.
+
+    Bilinear interpolation is an extension of linear interpolation for
+    interpolating functions of two variables (e.g. H-direction and
+    W-direction in this op) on a rectilinear 2D grid. The key idea is
+    to perform linear interpolation first in one direction, and then
+    again in the other direction.
+
+    align_corners and align_mode are optional parameters; the interpolation
+    calculation method can be selected by them.
+
+    Example:
+
+      For scale:
+
+        if align_corners = True && out_size > 1 :
+
+          scale_factor = (in_size-1.0)/(out_size-1.0)
+
+        else:
+
+          scale_factor = float(in_size/out_size)
+
+
+      Nearest neighbor interpolation:
+
+          if:
+              align_corners = False
+
+              input : (N,C,H_in,W_in)
+              output: (N,C,H_out,W_out) where:
+
+              H_out = \left \lfloor H_{in} * scale_{factor} \right \rfloor
+              W_out = \left \lfloor W_{in} * scale_{factor} \right \rfloor
+
+          else:
+              align_corners = True
+
+              input : (N,C,H_in,W_in)
+              output: (N,C,H_out,W_out) where:
+
+              H_out = round(H_{in} * scale_{factor})
+              W_out = round(W_{in} * scale_{factor})
+
+      Bilinear interpolation:
+
+          if:
+              align_corners = False , align_mode = 0
+
+              input : (N,C,H_in,W_in)
+              output: (N,C,H_out,W_out) where:
+
+              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
+              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
+
+
+          else:
+
+              input : (N,C,H_in,W_in)
+              output: (N,C,H_out,W_out) where:
+
+              H_out = H_{in} * scale_{factor}
+              W_out = W_{in} * scale_{factor}
+
+    For details of nearest neighbor interpolation, please refer to Wikipedia:
+    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.
+
+    For details of bilinear interpolation, please refer to Wikipedia:
+    https://en.wikipedia.org/wiki/Bilinear_interpolation.
+
+
+
    Args:
        input (Variable): The input tensor of image resize layer,
                          This is a 4-D tensor of the shape
@@ -6623,6 +6714,13 @@ def image_resize(input,
                                set, otherwise errors would be occured in graph
                                constructing stage.
                                Default: None
+        align_corners(bool) :  An optional bool. If True, the centers of the 4
+                               corner pixels of the input and output tensors
+                               are aligned, preserving the values at the
+                               corner pixels.
+                               Default: True
+        align_mode(int)  :  An optional flag for bilinear interpolation. It can
+                            be 0 for src_idx = scale*(dst_index+0.5)-0.5, or 1
+                            for src_idx = scale*dst_index. Default: 1.

    Returns:
        Variable: The output is a 4-D tensor of the shape
@@ -6635,6 +6733,8 @@ def image_resize(input,
                    or 'NEAREST' currently.
        ValueError: One of out_shape and scale must not be None.
        ValueError: out_shape length should be 2.
+        TypeError: align_corners should be a bool value
+        ValueError: align_mode can only be '0' or '1'

    Examples:
        .. code-block:: python
@@ -6650,6 +6750,12 @@ def image_resize(input,
            "The 'resample' of image_resize can only be 'BILINEAR' or 'NEAREST' currently."
        )
    resample_type = resample_methods[resample]
+
+    if not isinstance(align_corners, bool):
+        raise TypeError("Attr align_corners should be a bool value")
+    if align_mode != 0 and align_mode != 1:
+        raise ValueError("align_mode can only be 0 or 1")
+
    if out_shape is None and scale is None:
        raise ValueError("One of out_shape and scale must not be None.")
    helper = LayerHelper('{}_interp'.format(resample_type), **locals())
@@ -6689,9 +6795,13 @@ def image_resize(input,
        type='{}_interp'.format(resample_type),
        inputs=inputs,
        outputs={"Out": out},
-        attrs={"out_h": out_h,
-               "out_w": out_w,
-               "interp_method": resample_type})
+        attrs={
+            "out_h": out_h,
+            "out_w": out_w,
+            "interp_method": resample_type,
+            "align_corners": align_corners,
+            "align_mode": align_mode
+        })
    return out
@@ -6700,7 +6810,9 @@ def resize_bilinear(input,
                    out_shape=None,
                    scale=None,
                    name=None,
-                    actual_shape=None):
+                    actual_shape=None,
+                    align_corners=True,
+                    align_mode=1):
    """
    Resize input by performing bilinear interpolation based on given
    output shape which specified by actual_shape, out_shape and scale
    in priority order.
@@ -6715,6 +6827,47 @@ def resize_bilinear(input,
    For details of bilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bilinear_interpolation

+    align_corners and align_mode are optional parameters; the interpolation
+    calculation method can be selected by them.
+
+    Example:
+
+      For scale:
+
+        if align_corners = True && out_size > 1 :
+
+          scale_factor = (in_size-1.0)/(out_size-1.0)
+
+        else:
+
+          scale_factor = float(in_size/out_size)
+
+      Bilinear interpolation:
+
+          if:
+              align_corners = False , align_mode = 0
+
+              input : (N,C,H_in,W_in)
+              output: (N,C,H_out,W_out) where:
+
+              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
+              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
+
+
+          else:
+
+              input : (N,C,H_in,W_in)
+              output: (N,C,H_out,W_out) where:
+
+              H_out = H_{in} * scale_{factor}
+              W_out = W_{in} * scale_{factor}
+
+
+
    Args:
        input(${x_type}): ${x_comment}.
@@ -6738,6 +6891,8 @@ def resize_bilinear(input,
                                set, otherwise errors would be occured in graph
                                constructing stage.
                                Default: None
+        align_corners(bool): ${align_corners_comment}
+        align_mode(int): ${align_mode_comment}

    Returns:
        ${out_comment}.
@@ -6748,7 +6903,8 @@ def resize_bilinear(input,

            out = fluid.layers.resize_bilinear(input, out_shape=[12, 12])
    """

-    return image_resize(input, out_shape, scale, name, 'BILINEAR', actual_shape)
+    return image_resize(input, out_shape, scale, name, 'BILINEAR', actual_shape,
+                        align_corners, align_mode)


@templatedoc(op_type="nearest_interp")
@@ -6756,13 +6912,48 @@ def resize_nearest(input,
                   out_shape=None,
                   scale=None,
                   name=None,
-                   actual_shape=None):
+                   actual_shape=None,
+                   align_corners=True):
    """
    Resize input by performing nearest neighbor interpolation in both the
    3rd dimention(in height direction) and the 4th dimention(in width
    direction) based on given output shape which specified by actual_shape,
    out_shape and scale in priority order.
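    The Example that follows spells out the same scale rules; as a cross-check,
    here is a minimal NumPy sketch of the nearest-neighbor mapping (an
    illustrative helper, not the operator's kernel):

    .. code-block:: python

        import numpy as np

        def nearest_resize(x, out_h, out_w, align_corners=True):
            # x: [N, C, H, W]; mirrors the documented scale_factor rules
            n, c, in_h, in_w = x.shape
            ratio_h = ((in_h - 1.0) / (out_h - 1.0)
                       if align_corners and out_h > 1 else float(in_h) / out_h)
            ratio_w = ((in_w - 1.0) / (out_w - 1.0)
                       if align_corners and out_w > 1 else float(in_w) / out_w)
            out = np.zeros((n, c, out_h, out_w), dtype=x.dtype)
            for i in range(out_h):
                for j in range(out_w):
                    # round when align_corners, floor otherwise
                    h = int(round(ratio_h * i)) if align_corners else int(ratio_h * i)
                    w = int(round(ratio_w * j)) if align_corners else int(ratio_w * j)
                    out[:, :, i, j] = x[:, :, min(h, in_h - 1), min(w, in_w - 1)]
            return out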
+    Example:
+
+      For scale:
+
+        if align_corners = True && out_size > 1 :
+
+          scale_factor = (in_size-1.0)/(out_size-1.0)
+
+        else:
+
+          scale_factor = float(in_size/out_size)
+
+
+      Nearest neighbor interpolation:
+
+          if:
+              align_corners = False
+
+              input : (N,C,H_in,W_in)
+              output: (N,C,H_out,W_out) where:
+
+              H_out = \left \lfloor H_{in} * scale_{factor} \right \rfloor
+              W_out = \left \lfloor W_{in} * scale_{factor} \right \rfloor
+
+          else:
+              align_corners = True
+
+              input : (N,C,H_in,W_in)
+              output: (N,C,H_out,W_out) where:
+
+              H_out = round(H_{in} * scale_{factor})
+              W_out = round(W_{in} * scale_{factor})
+
+
    For details of nearest neighbor interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation
@@ -6789,6 +6980,7 @@ def resize_nearest(input,
                                set, otherwise errors would be occured in graph
                                constructing stage.
                                Default: None
+        align_corners(bool): ${align_corners_comment}

    Returns:
        ${out_comment}.
@@ -6799,7 +6991,8 @@ def resize_nearest(input,

            out = fluid.layers.resize_nearest(input, out_shape=[12, 12])
    """

-    return image_resize(input, out_shape, scale, name, 'NEAREST', actual_shape)
+    return image_resize(input, out_shape, scale, name, 'NEAREST', actual_shape,
+                        align_corners)


def image_resize_short(input, out_short_len, resample='BILINEAR'):
diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py
index 6c18af7283e19bd431c8d543255d900dc89cba09..3dcf9dc06998be9c38a48f18075cbf99f3dccb1a 100644
--- a/python/paddle/fluid/layers/ops.py
+++ b/python/paddle/fluid/layers/ops.py
@@ -135,7 +135,7 @@ def thresholded_relu(x, threshold=None):
        if val is not None:
            kwargs[name] = val

-    _thresholded_relu_(**kwargs)
+    return _thresholded_relu_(**kwargs)


thresholded_relu.__doc__ = _thresholded_relu_.__doc__ + """
diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index 14f4276e2f4fc4a24d701ef05c94b88c4f0336da..e0e781a322b3eb68e3f54a66252a8d8b11a9a56f 100644
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -387,7 +387,7 @@ class Optimizer(object):

        params_grads = []
        for param in parameters:
-            if param.stop_gradient:
+            if param.stop_gradient or not param.trainable:
                continue
            # create gradient variable
            grad_var = Variable(
diff --git a/python/paddle/fluid/tests/test_detection.py b/python/paddle/fluid/tests/test_detection.py
index 2d9ed9f9c69a15af454bfec5918fd8bab27d6e4c..3e71584a5d1b1a42b2b916d2df33a55c34557046 100644
--- a/python/paddle/fluid/tests/test_detection.py
+++ b/python/paddle/fluid/tests/test_detection.py
@@ -50,6 +50,19 @@ class TestDetection(unittest.TestCase):
            self.assertEqual(out.shape[-1], 6)
        print(str(program))

+    def test_box_coder_api(self):
+        program = Program()
+        with program_guard(program):
+            x = layers.data(name='x', shape=[4], dtype='float32')
+            y = layers.data(name='z', shape=[4], dtype='float32', lod_level=1)
+            bcoder = layers.box_coder(
+                prior_box=x,
+                prior_box_var=[0.1, 0.2, 0.1, 0.2],
+                target_box=y,
+                code_type='encode_center_size')
+            self.assertIsNotNone(bcoder)
+        print(str(program))
+
    def test_detection_api(self):
        program = Program()
        with program_guard(program):
@@ -469,5 +482,16 @@ class TestYoloDetection(unittest.TestCase):
            self.assertIsNotNone(loss)


+class TestMulticlassNMS(unittest.TestCase):
+    def test_multiclass_nms(self):
+        program = Program()
+        with program_guard(program):
+            bboxes = layers.data(
+                name='bboxes', shape=[-1, 10, 4], dtype='float32')
+            scores = layers.data(name='scores', shape=[-1, 10], dtype='float32')
+            output =
layers.multiclass_nms(bboxes, scores, 0.3, 400, 200, 0.7) + self.assertIsNotNone(output) + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt index c23dfa01e76c21d0d162f2fed986e2eaf3a70a6d..699181d01da862dca72113e6c11630ae5693e41c 100644 --- a/python/paddle/fluid/tests/unittests/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt @@ -1,15 +1,6 @@ file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") -# The MKLDNN tests are skiped when the MKLDNN flag is OFF -if(NOT WITH_MKLDNN) - foreach(src ${TEST_OPS}) - if(${src} MATCHES ".*_mkldnn_op$") - list(REMOVE_ITEM TEST_OPS ${src}) - endif() - endforeach() -endif(NOT WITH_MKLDNN) - if(NOT WITH_DISTRIBUTE) list(REMOVE_ITEM TEST_OPS test_recv_op) list(REMOVE_ITEM TEST_OPS test_dist_transpiler) @@ -85,6 +76,7 @@ list(REMOVE_ITEM TEST_OPS test_image_classification_resnet) list(REMOVE_ITEM TEST_OPS test_bilinear_interp_op) list(REMOVE_ITEM TEST_OPS test_nearest_interp_op) list(REMOVE_ITEM TEST_OPS test_imperative_resnet) +list(REMOVE_ITEM TEST_OPS test_imperative_optimizer) foreach(TEST_OP ${TEST_OPS}) py_test_modules(${TEST_OP} MODULES ${TEST_OP}) endforeach(TEST_OP) @@ -94,6 +86,8 @@ py_test_modules(test_bilinear_interp_op MODULES test_bilinear_interp_op SERIAL) py_test_modules(test_nearest_interp_op MODULES test_nearest_interp_op SERIAL) py_test_modules(test_imperative_resnet MODULES test_imperative_resnet ENVS FLAGS_cudnn_deterministic=1) +py_test_modules(test_imperative_optimizer MODULES test_imperative_optimizer ENVS + FLAGS_cudnn_deterministic=1) if(WITH_DISTRIBUTE) py_test_modules(test_dist_train MODULES test_dist_train SERIAL) set_tests_properties(test_listen_and_serv_op PROPERTIES TIMEOUT 20) @@ -120,3 +114,7 @@ endif() if (WITH_NGRAPH) add_subdirectory(ngraph) endif() + +if (WITH_MKLDNN) + add_subdirectory(mkldnn) +endif() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/CMakeLists.txt b/python/paddle/fluid/tests/unittests/mkldnn/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..f71e04c09aa38b8cf7b3a167b84d4dc0e6cc3ec7 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/mkldnn/CMakeLists.txt @@ -0,0 +1,6 @@ +file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") +string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") + +foreach(TEST_OP ${TEST_OPS}) + py_test_modules(${TEST_OP} MODULES ${TEST_OP}) +endforeach(TEST_OP) diff --git a/python/paddle/fluid/tests/unittests/mkldnn/__init__.py b/python/paddle/fluid/tests/unittests/mkldnn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b94a21a7e406b833797f8f521c62a2351c2bc30a --- /dev/null +++ b/python/paddle/fluid/tests/unittests/mkldnn/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
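As a side note on the multiclass_nms test above: the greedy, adaptive-threshold
selection that the operator's docstring describes can be sketched for a single
class in plain NumPy. This is an illustrative reference (assuming NumPy arrays
of normalized [xmin, ymin, xmax, ymax] boxes), not the operator's implementation:

    import numpy as np

    def iou(a, b):
        # a, b: [xmin, ymin, xmax, ymax]
        iw = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
        ih = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
        inter = iw * ih
        area_a = (a[2] - a[0]) * (a[3] - a[1])
        area_b = (b[2] - b[0]) * (b[3] - b[1])
        return inter / (area_a + area_b - inter + 1e-10)

    def nms_single_class(boxes, scores, score_threshold, nms_top_k,
                         nms_threshold, nms_eta=1.0):
        # candidates above score_threshold, highest scores first
        cand = [i for i in np.argsort(-scores) if scores[i] > score_threshold]
        if nms_top_k > -1:
            cand = cand[:nms_top_k]
        keep, thresh = [], nms_threshold
        for i in cand:
            if all(iou(boxes[i], boxes[j]) <= thresh for j in keep):
                keep.append(i)
            # adaptive threshold: shrink when nms_eta < 1
            if nms_eta < 1.0 and thresh > 0.5:
                thresh *= nms_eta
        return keep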
diff --git a/python/paddle/fluid/tests/unittests/test_activation_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py similarity index 94% rename from python/paddle/fluid/tests/unittests/test_activation_mkldnn_op.py rename to python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py index 611d0dd076b827b0f528f2e3a31182cc4939d1f1..ad94a4b21c347c9a2782437948c20d3b3071c679 100644 --- a/python/paddle/fluid/tests/unittests/test_activation_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py @@ -17,9 +17,9 @@ from __future__ import print_function import unittest import numpy as np import paddle.fluid.core as core -from op_test import OpTest +from paddle.fluid.tests.unittests.op_test import OpTest from scipy.special import expit -from test_activation_op import TestRelu, TestTanh, TestSqrt, TestAbs +from paddle.fluid.tests.unittests.test_activation_op import TestRelu, TestTanh, TestSqrt, TestAbs class TestMKLDNNReluDim2(TestRelu): diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_batch_norm_mkldnn_op.py similarity index 92% rename from python/paddle/fluid/tests/unittests/test_batch_norm_mkldnn_op.py rename to python/paddle/fluid/tests/unittests/mkldnn/test_batch_norm_mkldnn_op.py index 1286cee8dc1855c1b1695da46ae0b5222c065114..5fce90372d9beda9b04ab68d0a8ac5ef5c124421 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_batch_norm_mkldnn_op.py @@ -19,9 +19,9 @@ import numpy as np import paddle.fluid.core as core from paddle.fluid.op import Operator import paddle.fluid as fluid -from op_test import OpTest +from paddle.fluid.tests.unittests.op_test import OpTest from paddle.fluid.framework import grad_var_name -from test_batch_norm_op import TestBatchNormOpInference, TestBatchNormOpTraining, _reference_training, _reference_grad +from paddle.fluid.tests.unittests.test_batch_norm_op import TestBatchNormOpInference, TestBatchNormOpTraining, _reference_training, _reference_grad class TestMKLDNNBatchNormOpTraining(TestBatchNormOpTraining): diff --git a/python/paddle/fluid/tests/unittests/test_concat_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_concat_mkldnn_op.py similarity index 94% rename from python/paddle/fluid/tests/unittests/test_concat_mkldnn_op.py rename to python/paddle/fluid/tests/unittests/mkldnn/test_concat_mkldnn_op.py index 0f2130f9049c7ee294444282e59c654551f76603..1a399740692eab8ccea0c984a1a4f2ac984eb045 100644 --- a/python/paddle/fluid/tests/unittests/test_concat_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_concat_mkldnn_op.py @@ -15,7 +15,7 @@ from __future__ import print_function import unittest -from test_concat_op import TestConcatOp, TestConcatOp2, TestConcatOp3 +from paddle.fluid.tests.unittests.test_concat_op import TestConcatOp, TestConcatOp2, TestConcatOp3 class TestMKLDNNConcatOp(TestConcatOp): diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_int8_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_int8_mkldnn_op.py similarity index 98% rename from python/paddle/fluid/tests/unittests/test_conv2d_int8_mkldnn_op.py rename to python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_int8_mkldnn_op.py index 5ad376cb08e488e85be6369a91d4e81031e9e9db..100a03cea0f740a615c4a08810d4ad9e8c974d7a 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_int8_mkldnn_op.py +++ 
b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_int8_mkldnn_op.py @@ -18,8 +18,8 @@ import unittest import numpy as np import paddle.fluid.core as core -from op_test import OpTest -from test_conv2d_op import conv2d_forward_naive, TestConv2dOp +from paddle.fluid.tests.unittests.op_test import OpTest +from paddle.fluid.tests.unittests.test_conv2d_op import conv2d_forward_naive, TestConv2dOp def conv2d_forward_refer(input, filter, group, conv_param): diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py similarity index 91% rename from python/paddle/fluid/tests/unittests/test_conv2d_mkldnn_op.py rename to python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py index 438d45b84033b697c3210acc44392b93bf436df0..0542eef80070cbf281ee013c28b7092a2dd17eaa 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py @@ -16,7 +16,7 @@ from __future__ import print_function import unittest -from test_conv2d_op import TestConv2dOp, TestWithPad, TestWithStride, TestWithGroup, TestWith1x1, TestWithInput1x1Filter1x1 +from paddle.fluid.tests.unittests.test_conv2d_op import TestConv2dOp, TestWithPad, TestWithStride, TestWithGroup, TestWith1x1, TestWithInput1x1Filter1x1 class TestMKLDNN(TestConv2dOp): diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_mkldnn_op.py similarity index 94% rename from python/paddle/fluid/tests/unittests/test_conv2d_transpose_mkldnn_op.py rename to python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_mkldnn_op.py index deefdd09abe6b9f9ca362654f21850f598337245..9bcdb7b2a975b648471714ab628caf91b6b6f3a9 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_mkldnn_op.py @@ -16,7 +16,7 @@ from __future__ import print_function import unittest -from test_conv2d_transpose_op import TestConv2dTransposeOp, TestWithPad, TestWithStride +from paddle.fluid.tests.unittests.test_conv2d_transpose_op import TestConv2dTransposeOp, TestWithPad, TestWithStride class TestMKLDNN(TestConv2dTransposeOp): diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv3d_mkldnn_op.py similarity index 91% rename from python/paddle/fluid/tests/unittests/test_conv3d_mkldnn_op.py rename to python/paddle/fluid/tests/unittests/mkldnn/test_conv3d_mkldnn_op.py index f0e1265e142b800587599783367eca2203033bf1..080b74502fbe83e97e88a65866e0d9b66b37033e 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv3d_mkldnn_op.py @@ -16,7 +16,7 @@ from __future__ import print_function import unittest -from test_conv3d_op import TestConv3dOp, TestCase1, TestWithGroup1, TestWithGroup2, TestWith1x1, TestWithInput1x1Filter1x1 +from paddle.fluid.tests.unittests.test_conv3d_op import TestConv3dOp, TestCase1, TestWithGroup1, TestWithGroup2, TestWith1x1, TestWithInput1x1Filter1x1 class TestMKLDNN(TestConv3dOp): diff --git a/python/paddle/fluid/tests/unittests/test_dequantize_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_dequantize_mkldnn_op.py similarity index 97% rename from python/paddle/fluid/tests/unittests/test_dequantize_mkldnn_op.py rename to 
python/paddle/fluid/tests/unittests/mkldnn/test_dequantize_mkldnn_op.py index 0c5e1abd7c8fb010357998c0ceaebaf21619fda9..9a54f927cbde648bbbb06d043bbc1391ee43c314 100644 --- a/python/paddle/fluid/tests/unittests/test_dequantize_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_dequantize_mkldnn_op.py @@ -16,7 +16,7 @@ from __future__ import print_function import unittest import numpy as np -from op_test import OpTest +from paddle.fluid.tests.unittests.op_test import OpTest class TestDeQuantizeOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_add_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py similarity index 97% rename from python/paddle/fluid/tests/unittests/test_elementwise_add_mkldnn_op.py rename to python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py index d85cc1f856df8eaa73cef318b48a292042488edf..c3a42656b71d09dbc22abf8ce2ddc243b43b422f 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_add_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py @@ -16,8 +16,8 @@ from __future__ import print_function import unittest import numpy as np import paddle.fluid.core as core -from op_test import OpTest -from test_elementwise_add_op import * +from paddle.fluid.tests.unittests.op_test import OpTest +from paddle.fluid.tests.unittests.test_elementwise_add_op import * ''' Some tests differ from the tests defined in test_elementwise_add_op.py because MKLDNN does not support tensors of number of dimensions 3. diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_mul_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py similarity index 98% rename from python/paddle/fluid/tests/unittests/test_elementwise_mul_mkldnn_op.py rename to python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py index 536e9a1c58ec4a8b1b5a7c1d3a5fe737b38d24ab..738715dd70181988028adff1c50be3a52199c312 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_mul_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py @@ -15,10 +15,10 @@ from __future__ import print_function import unittest import numpy as np -from op_test import OpTest +from paddle.fluid.tests.unittests.op_test import OpTest import paddle.fluid.core as core from paddle.fluid.op import Operator -from test_elementwise_mul_op import * +from paddle.fluid.tests.unittests.test_elementwise_mul_op import * class TestElementwiseMulMKLDNNOp_BroadcastNCHW16c(ElementwiseMulOp): diff --git a/python/paddle/fluid/tests/unittests/test_fc_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_fc_mkldnn_op.py similarity index 98% rename from python/paddle/fluid/tests/unittests/test_fc_mkldnn_op.py rename to python/paddle/fluid/tests/unittests/mkldnn/test_fc_mkldnn_op.py index 45951a34d6f61a242cb2dc004d6801a6c1c9dd92..84229a5cffbb466ef3c69cd997adacfb21f6aae2 100644 --- a/python/paddle/fluid/tests/unittests/test_fc_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_fc_mkldnn_op.py @@ -16,7 +16,7 @@ from __future__ import print_function import unittest import numpy as np -from op_test import OpTest +from paddle.fluid.tests.unittests.op_test import OpTest def fully_connected_naive(input, weights, bias_data=None): diff --git a/python/paddle/fluid/tests/unittests/test_gaussian_random_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_gaussian_random_mkldnn_op.py 
similarity index 90% rename from python/paddle/fluid/tests/unittests/test_gaussian_random_mkldnn_op.py rename to python/paddle/fluid/tests/unittests/mkldnn/test_gaussian_random_mkldnn_op.py index 9777ec390656d3f6166bf9f5de7bbad8b6bd786d..c18bd77bd3e6de08283f3ac3a31c73453f3c9129 100644 --- a/python/paddle/fluid/tests/unittests/test_gaussian_random_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_gaussian_random_mkldnn_op.py @@ -16,7 +16,7 @@ from __future__ import print_function import unittest -from test_gaussian_random_op import TestGaussianRandomOp +from paddle.fluid.tests.unittests.test_gaussian_random_op import TestGaussianRandomOp class TestMKLDNN(TestGaussianRandomOp): diff --git a/python/paddle/fluid/tests/unittests/test_lrn_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_lrn_mkldnn_op.py similarity index 96% rename from python/paddle/fluid/tests/unittests/test_lrn_mkldnn_op.py rename to python/paddle/fluid/tests/unittests/mkldnn/test_lrn_mkldnn_op.py index f6bb2ab7a696c40cb61dd5b38ca702b577fe7ea2..a5e6e116a5f1bc1e051ce3cfdac8cd1e5f3ed90e 100644 --- a/python/paddle/fluid/tests/unittests/test_lrn_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_lrn_mkldnn_op.py @@ -15,7 +15,7 @@ from __future__ import print_function import unittest -from test_lrn_op import TestLRNOp +from paddle.fluid.tests.unittests.test_lrn_op import TestLRNOp class TestLRNMKLDNNOp(TestLRNOp): diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_int8_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_int8_mkldnn_op.py similarity index 94% rename from python/paddle/fluid/tests/unittests/test_pool2d_int8_mkldnn_op.py rename to python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_int8_mkldnn_op.py index f4495d0bc8198189962d033ec18b8b67f1f47c84..fca906fecc5fe8d25b9251c886398f8df778043f 100644 --- a/python/paddle/fluid/tests/unittests/test_pool2d_int8_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_int8_mkldnn_op.py @@ -19,8 +19,8 @@ import unittest import numpy as np import paddle.fluid.core as core -from op_test import OpTest -from test_pool2d_op import TestPool2D_Op, avg_pool2D_forward_naive, max_pool2D_forward_naive +from paddle.fluid.tests.unittests.op_test import OpTest +from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op, avg_pool2D_forward_naive, max_pool2D_forward_naive class TestPool2dMKLDNNInt8_Op(TestPool2D_Op): diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_mkldnn_op.py similarity index 90% rename from python/paddle/fluid/tests/unittests/test_pool2d_mkldnn_op.py rename to python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_mkldnn_op.py index 7de5fefc148021d4109da2ac9f4b36c93a05a23f..6de43dd46e5d184ec934f2d85e0c87137e9702e0 100644 --- a/python/paddle/fluid/tests/unittests/test_pool2d_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_mkldnn_op.py @@ -15,7 +15,7 @@ from __future__ import print_function import unittest -from test_pool2d_op import TestPool2D_Op, TestCase1, TestCase2, TestCase3, TestCase4, TestCase5 +from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op, TestCase1, TestCase2, TestCase3, TestCase4, TestCase5 def create_test_mkldnn_class(parent): diff --git a/python/paddle/fluid/tests/unittests/test_quantize_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_quantize_mkldnn_op.py similarity index 97% rename from 
python/paddle/fluid/tests/unittests/test_quantize_mkldnn_op.py rename to python/paddle/fluid/tests/unittests/mkldnn/test_quantize_mkldnn_op.py index 99607928648be437b7f944f86a0c28b99d1775c4..132f7bd039f7797fb0fc332d6f7b8c242af46535 100644 --- a/python/paddle/fluid/tests/unittests/test_quantize_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_quantize_mkldnn_op.py @@ -16,7 +16,7 @@ from __future__ import print_function import unittest import numpy as np -from op_test import OpTest +from paddle.fluid.tests.unittests.op_test import OpTest class TestQuantizeOp(OpTest): diff --git a/python/paddle/fluid/tests/unittests/test_sum_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_sum_mkldnn_op.py similarity index 92% rename from python/paddle/fluid/tests/unittests/test_sum_mkldnn_op.py rename to python/paddle/fluid/tests/unittests/mkldnn/test_sum_mkldnn_op.py index 55820f31b81df9f3618d1004f6d21565564efa29..5928047b5171bcf33b024040ce79577b8aa0b53a 100644 --- a/python/paddle/fluid/tests/unittests/test_sum_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_sum_mkldnn_op.py @@ -16,7 +16,7 @@ from __future__ import print_function import unittest -from test_sum_op import TestSumOp +from paddle.fluid.tests.unittests.test_sum_op import TestSumOp class TestMKLDNN(TestSumOp): diff --git a/python/paddle/fluid/tests/unittests/test_transpose_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_mkldnn_op.py similarity index 95% rename from python/paddle/fluid/tests/unittests/test_transpose_mkldnn_op.py rename to python/paddle/fluid/tests/unittests/mkldnn/test_transpose_mkldnn_op.py index 0c201b9e4f48df94924a248d820ae2cf73367560..4845eefe367f1ad6a2eb6ffd1f9b0598b1b4fbbd 100644 --- a/python/paddle/fluid/tests/unittests/test_transpose_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_mkldnn_op.py @@ -16,7 +16,7 @@ from __future__ import print_function import unittest -from test_transpose_op import TestTransposeOp +from paddle.fluid.tests.unittests.test_transpose_op import TestTransposeOp class TestTransposeMKLDNN(TestTransposeOp): diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_pool2d_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_pool2d_ngraph_op.py new file mode 100644 index 0000000000000000000000000000000000000000..95e592e8ec036ad231ed57ddbc706683cb7aa153 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/ngraph/test_pool2d_ngraph_op.py @@ -0,0 +1,51 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function + +from paddle.fluid.tests.unittests.test_pool2d_op import * + + +class TestNGRAPHPool2D_Op(TestPool2D_Op): + def init_test_case(self): + super(TestNGRAPHPool2D_Op, self).init_test_case() + + +class TestNGRAPHCase1(TestCase1): + def init_test_case(self): + super(TestNGRAPHCase1, self).init_test_case() + + +class TestNGRAPHCase2(TestCase2): + def init_test_case(self): + super(TestNGRAPHCase2, self).init_test_case() + + +class TestNGRAPHCase3(TestCase3): + def init_pool_type(self): + super(TestNGRAPHCase3, self).init_pool_type() + + +class TestNGRAPHCase4(TestCase4): + def init_pool_type(self): + super(TestNGRAPHCase4, self).init_pool_type() + + +class TestNGRAPHCase5(TestCase5): + def init_pool_type(self): + super(TestNGRAPHCase5, self).init_pool_type() + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_beam_search_op.py b/python/paddle/fluid/tests/unittests/test_beam_search_op.py index c28dda4b53ce5d394ff11222e5df8d257b4e80da..1d9f4b78f30fefa21c189036c3731e0afe39ea9e 100644 --- a/python/paddle/fluid/tests/unittests/test_beam_search_op.py +++ b/python/paddle/fluid/tests/unittests/test_beam_search_op.py @@ -38,6 +38,7 @@ class BeamSearchOpTester(unittest.TestCase): self._create_pre_ids() self.scope.var('selected_ids') self.scope.var('selected_scores') + self.scope.var('parent_idx') def test_run(self): op = Operator( @@ -48,12 +49,14 @@ class BeamSearchOpTester(unittest.TestCase): scores='scores', selected_ids='selected_ids', selected_scores='selected_scores', + parent_idx='parent_idx', level=0, beam_size=2, end_id=0, ) op.run(self.scope, core.CPUPlace()) selected_ids = self.scope.find_var("selected_ids").get_tensor() selected_scores = self.scope.find_var("selected_scores").get_tensor() + parent_idx = self.scope.find_var("parent_idx").get_tensor() self.assertTrue( np.allclose( np.array(selected_ids), np.array([4, 2, 3, 8])[:, np.newaxis])) @@ -62,6 +65,8 @@ class BeamSearchOpTester(unittest.TestCase): np.array(selected_scores), np.array([0.5, 0.6, 0.9, 0.7])[:, np.newaxis])) self.assertEqual(selected_ids.lod(), [[0, 2, 4], [0, 1, 2, 3, 4]]) + self.assertTrue( + np.allclose(np.array(parent_idx), np.array([0, 1, 2, 3]))) def _create_pre_ids(self): np_data = np.array([[1, 2, 3, 4]], dtype='int64') diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py index c8a7063dc1cd3e5cc7cd3458b51f5e74981aa75c..f60ed1d79ae5778f751d6101fde386ae3a90c0f7 100644 --- a/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py +++ b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py @@ -20,7 +20,13 @@ from op_test import OpTest import paddle.fluid.core as core -def bilinear_interp_np(input, out_h, out_w, out_size=None, actual_shape=None): +def bilinear_interp_np(input, + out_h, + out_w, + out_size=None, + actual_shape=None, + align_corners=True, + align_mode=0): """bilinear interpolation implement in shape [N, C, H, W]""" if out_size is not None: out_h = out_size[0] @@ -29,25 +35,45 @@ def bilinear_interp_np(input, out_h, out_w, out_size=None, actual_shape=None): out_h = actual_shape[0] out_w = actual_shape[1] batch_size, channel, in_h, in_w = input.shape + + ratio_h = ratio_w = 0.0 if out_h > 1: - ratio_h = (in_h - 1.0) / (out_h - 1.0) - else: - ratio_h = 0.0 + if (align_corners): + ratio_h = (in_h - 1.0) / (out_h - 1.0) + else: + ratio_h = 1.0 * in_h / out_h if out_w > 1: - ratio_w = (in_w - 1.0) / (out_w - 1.0) - 
else: - ratio_w = 0.0 + if (align_corners): + ratio_w = (in_w - 1.0) / (out_w - 1.0) + else: + ratio_w = 1.0 * in_w / out_w out = np.zeros((batch_size, channel, out_h, out_w)) + for i in range(out_h): - h = int(ratio_h * i) + if (align_mode == 0 and not align_corners): + h = int(ratio_h * (i + 0.5) - 0.5) + else: + h = int(ratio_h * i) + + h = max(0, h) hid = 1 if h < in_h - 1 else 0 - h1lambda = ratio_h * i - h + if (align_mode == 0 and not align_corners): + h1lambda = ratio_h * (i + 0.5) - 0.5 - h + else: + h1lambda = ratio_h * i - h h2lambda = 1.0 - h1lambda for j in range(out_w): - w = int(ratio_w * j) + if (align_mode == 0 and not align_corners): + w = int(ratio_w * (j + 0.5) - 0.5) + else: + w = int(ratio_w * j) + w = max(0, w) wid = 1 if w < in_w - 1 else 0 - w1lambda = ratio_w * j - w + if (align_mode == 0 and not align_corners): + w1lambda = ratio_w * (j + 0.5) - 0.5 - w + else: + w1lambda = ratio_w * j - w w2lambda = 1.0 - w1lambda out[:, :, i, j] = h2lambda*(w2lambda*input[:, :, h, w] + @@ -66,7 +92,8 @@ class TestBilinearInterpOp(OpTest): input_np = np.random.random(self.input_shape).astype("float32") output_np = bilinear_interp_np(input_np, self.out_h, self.out_w, - self.out_size, self.actual_shape) + self.out_size, self.actual_shape, + self.align_corners, self.align_mode) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -75,7 +102,9 @@ class TestBilinearInterpOp(OpTest): self.attrs = { 'out_h': self.out_h, 'out_w': self.out_w, - 'interp_method': self.interp_method + 'interp_method': self.interp_method, + 'align_corners': self.align_corners, + 'align_mode': self.align_mode } self.outputs = {'Out': output_np} @@ -91,6 +120,8 @@ class TestBilinearInterpOp(OpTest): self.out_h = 2 self.out_w = 2 self.out_size = np.array([3, 3]).astype("int32") + self.align_corners = True + self.align_mode = 1 class TestBilinearInterpCase1(TestBilinearInterpOp): @@ -99,6 +130,8 @@ class TestBilinearInterpCase1(TestBilinearInterpOp): self.input_shape = [4, 1, 7, 8] self.out_h = 1 self.out_w = 1 + self.align_corners = True + self.align_mode = 1 class TestBilinearInterpCase2(TestBilinearInterpOp): @@ -107,6 +140,8 @@ class TestBilinearInterpCase2(TestBilinearInterpOp): self.input_shape = [3, 3, 9, 6] self.out_h = 12 self.out_w = 12 + self.align_corners = True + self.align_mode = 1 class TestBilinearInterpCase3(TestBilinearInterpOp): @@ -115,6 +150,8 @@ class TestBilinearInterpCase3(TestBilinearInterpOp): self.input_shape = [1, 1, 128, 64] self.out_h = 64 self.out_w = 128 + self.align_corners = True + self.align_mode = 1 class TestBilinearInterpCase4(TestBilinearInterpOp): @@ -124,6 +161,8 @@ class TestBilinearInterpCase4(TestBilinearInterpOp): self.out_h = 1 self.out_w = 1 self.out_size = np.array([2, 2]).astype("int32") + self.align_corners = True + self.align_mode = 1 class TestBilinearInterpCase5(TestBilinearInterpOp): @@ -133,6 +172,8 @@ class TestBilinearInterpCase5(TestBilinearInterpOp): self.out_h = 12 self.out_w = 12 self.out_size = np.array([11, 11]).astype("int32") + self.align_corners = True + self.align_mode = 1 class TestBilinearInterpCase6(TestBilinearInterpOp): @@ -142,6 +183,8 @@ class TestBilinearInterpCase6(TestBilinearInterpOp): self.out_h = 64 self.out_w = 128 self.out_size = np.array([65, 129]).astype("int32") + self.align_corners = True + self.align_mode = 1 class TestBilinearInterpActualShape(TestBilinearInterpOp): @@ -151,6 +194,8 @@ class TestBilinearInterpActualShape(TestBilinearInterpOp): self.out_h = 64 self.out_w = 32 
self.out_size = np.array([66, 40]).astype("int32") + self.align_corners = True + self.align_mode = 1 class TestBilinearInterpOpUint8(OpTest): @@ -162,14 +207,17 @@ class TestBilinearInterpOpUint8(OpTest): input_np = np.random.randint( low=0, high=256, size=self.input_shape).astype("uint8") output_np = bilinear_interp_np(input_np, self.out_h, self.out_w, - self.out_size, self.actual_shape) + self.out_size, self.actual_shape, + self.align_corners, self.align_mode) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size self.attrs = { 'out_h': self.out_h, 'out_w': self.out_w, - 'interp_method': self.interp_method + 'interp_method': self.interp_method, + 'align_corners': self.align_corners, + 'align_mode': self.align_mode } self.outputs = {'Out': output_np} @@ -181,6 +229,8 @@ class TestBilinearInterpOpUint8(OpTest): self.input_shape = [1, 3, 9, 6] self.out_h = 10 self.out_w = 9 + self.align_corners = True + self.align_mode = 1 class TestBilinearInterpCase1Uint8(TestBilinearInterpOpUint8): @@ -189,6 +239,8 @@ class TestBilinearInterpCase1Uint8(TestBilinearInterpOpUint8): self.input_shape = [2, 3, 128, 64] self.out_h = 120 self.out_w = 50 + self.align_corners = True + self.align_mode = 1 class TestBilinearInterpCase2Uint8(TestBilinearInterpOpUint8): @@ -198,6 +250,26 @@ class TestBilinearInterpCase2Uint8(TestBilinearInterpOpUint8): self.out_h = 5 self.out_w = 13 self.out_size = np.array([6, 15]).astype("int32") + self.align_corners = True + self.align_mode = 1 + + +class TestBilinearInterpOtherMethod1(TestBilinearInterpOp): + def set_align_mode(self): + self.align_corners = False + self.align_mode = 1 + + +class TestBilinearInterpWithMethod2(TestBilinearInterpOp): + def set_align_mode(self): + self.align_corners = False + self.align_mode = 0 + + +class TestBilinearInterpWithMethod3(TestBilinearInterpOp): + def set_align_mode(self): + self.align_corners = True + self.align_mode = 0 if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_box_coder_op.py b/python/paddle/fluid/tests/unittests/test_box_coder_op.py index 2511c5c22e012babdeb71a71d3546456ea2ceaf3..6156268bf25ada310a3d22242ecff4b9cdf1759a 100644 --- a/python/paddle/fluid/tests/unittests/test_box_coder_op.py +++ b/python/paddle/fluid/tests/unittests/test_box_coder_op.py @@ -21,80 +21,80 @@ import math from op_test import OpTest -def box_coder(target_box, prior_box, prior_box_var, output_box, code_type, - box_normalized): - prior_box_x = ( - (prior_box[:, 2] + prior_box[:, 0]) / 2).reshape(1, prior_box.shape[0]) - prior_box_y = ( - (prior_box[:, 3] + prior_box[:, 1]) / 2).reshape(1, prior_box.shape[0]) - prior_box_width = ( - (prior_box[:, 2] - prior_box[:, 0])).reshape(1, prior_box.shape[0]) - prior_box_height = ( - (prior_box[:, 3] - prior_box[:, 1])).reshape(1, prior_box.shape[0]) - prior_box_var = prior_box_var.reshape(1, prior_box_var.shape[0], - prior_box_var.shape[1]) - if not box_normalized: - prior_box_height = prior_box_height + 1 - prior_box_width = prior_box_width + 1 - - if (code_type == "EncodeCenterSize"): - target_box_x = ((target_box[:, 2] + target_box[:, 0]) / 2).reshape( - target_box.shape[0], 1) - target_box_y = ((target_box[:, 3] + target_box[:, 1]) / 2).reshape( - target_box.shape[0], 1) - target_box_width = ((target_box[:, 2] - target_box[:, 0])).reshape( - target_box.shape[0], 1) - target_box_height = ((target_box[:, 3] - target_box[:, 1])).reshape( - target_box.shape[0], 1) - if not box_normalized: - target_box_height = target_box_height + 1 - 
target_box_width = target_box_width + 1 - - output_box[:,:,0] = (target_box_x - prior_box_x) / prior_box_width / \ - prior_box_var[:,:,0] - output_box[:,:,1] = (target_box_y - prior_box_y) / prior_box_height / \ - prior_box_var[:,:,1] - output_box[:,:,2] = np.log(np.fabs(target_box_width / prior_box_width)) / \ - prior_box_var[:,:,2] - output_box[:,:,3] = np.log(np.fabs(target_box_height / prior_box_height)) / \ - prior_box_var[:,:,3] - - elif (code_type == "DecodeCenterSize"): - target_box_x = prior_box_var[:,:,0] * target_box[:,:,0] * \ - prior_box_width + prior_box_x - target_box_y = prior_box_var[:,:,1] * target_box[:,:,1] * \ - prior_box_height + prior_box_y - target_box_width = np.exp(prior_box_var[:,:,2] * target_box[:,:,2]) * \ - prior_box_width - target_box_height = np.exp(prior_box_var[:,:,3] * target_box[:,:,3]) * \ - prior_box_height - - output_box[:, :, 0] = target_box_x - target_box_width / 2 - output_box[:, :, 1] = target_box_y - target_box_height / 2 - output_box[:, :, 2] = target_box_x + target_box_width / 2 - output_box[:, :, 3] = target_box_y + target_box_height / 2 - if not box_normalized: - output_box[:, :, 2] = output_box[:, :, 2] - 1 - output_box[:, :, 3] = output_box[:, :, 3] - 1 - - -def batch_box_coder(prior_box, prior_box_var, target_box, lod, code_type, - box_normalized): - n = target_box.shape[0] - m = prior_box.shape[0] +def box_decoder(t_box, p_box, pb_v, output_box, norm, axis=0): + pb_w = p_box[:, 2] - p_box[:, 0] + (norm == False) + pb_h = p_box[:, 3] - p_box[:, 1] + (norm == False) + pb_x = pb_w * 0.5 + p_box[:, 0] + pb_y = pb_h * 0.5 + p_box[:, 1] + shape = (1, p_box.shape[0]) if axis == 0 else (p_box.shape[0], 1) + + pb_w = pb_w.reshape(shape) + pb_h = pb_h.reshape(shape) + pb_x = pb_x.reshape(shape) + pb_y = pb_y.reshape(shape) + + if pb_v.ndim == 2: + pb_v = pb_v.reshape(1, pb_v.shape[0], pb_v.shape[1]) + if pb_v.ndim == 1: + tb_x = pb_v[0] * t_box[:, :, 0] * pb_w + pb_x + tb_y = pb_v[1] * t_box[:, :, 1] * pb_h + pb_y + tb_w = np.exp(pb_v[2] * t_box[:, :, 2]) * pb_w + tb_h = np.exp(pb_v[3] * t_box[:, :, 3]) * pb_h + else: + tb_x = pb_v[:, :, 0] * t_box[:, :, 0] * pb_w + pb_x + tb_y = pb_v[:, :, 1] * t_box[:, :, 1] * pb_h + pb_y + tb_w = np.exp(pb_v[:, :, 2] * t_box[:, :, 2]) * pb_w + tb_h = np.exp(pb_v[:, :, 3] * t_box[:, :, 3]) * pb_h + output_box[:, :, 0] = tb_x - tb_w / 2 + output_box[:, :, 1] = tb_y - tb_h / 2 + output_box[:, :, 2] = tb_x + tb_w / 2 - (not norm) + output_box[:, :, 3] = tb_y + tb_h / 2 - (not norm) + + +def box_encoder(t_box, p_box, pb_v, output_box, norm): + pb_w = p_box[:, 2] - p_box[:, 0] + (norm == False) + pb_h = p_box[:, 3] - p_box[:, 1] + (norm == False) + pb_x = pb_w * 0.5 + p_box[:, 0] + pb_y = pb_h * 0.5 + p_box[:, 1] + shape = (1, p_box.shape[0]) + + pb_w = pb_w.reshape(shape) + pb_h = pb_h.reshape(shape) + pb_x = pb_x.reshape(shape) + pb_y = pb_y.reshape(shape) + + if pb_v.ndim == 2: + pb_v = pb_v.reshape(1, pb_v.shape[0], pb_v.shape[1]) + tb_x = ((t_box[:, 2] + t_box[:, 0]) / 2).reshape(t_box.shape[0], 1) + tb_y = ((t_box[:, 3] + t_box[:, 1]) / 2).reshape(t_box.shape[0], 1) + tb_w = (t_box[:, 2] - t_box[:, 0]).reshape(t_box.shape[0], 1) + (not norm) + tb_h = (t_box[:, 3] - t_box[:, 1]).reshape(t_box.shape[0], 1) + (not norm) + if pb_v.ndim == 1: + output_box[:, :, 0] = (tb_x - pb_x) / pb_w / pb_v[0] + output_box[:, :, 1] = (tb_y - pb_y) / pb_h / pb_v[1] + output_box[:, :, 2] = np.log(np.fabs(tb_w / pb_w)) / pb_v[2] + output_box[:, :, 3] = np.log(np.fabs(tb_h / pb_h)) / pb_v[3] + else: + output_box[:, :, 0] = (tb_x - 
pb_x) / pb_w / pb_v[:, :, 0] + output_box[:, :, 1] = (tb_y - pb_y) / pb_h / pb_v[:, :, 1] + output_box[:, :, 2] = np.log(np.fabs(tb_w / pb_w)) / pb_v[:, :, 2] + output_box[:, :, 3] = np.log(np.fabs(tb_h / pb_h)) / pb_v[:, :, 3] + + +def batch_box_coder(p_box, pb_v, t_box, lod, code_type, norm, axis=0): + n = t_box.shape[0] + m = p_box.shape[0] + if code_type == "DecodeCenterSize": + m = t_box.shape[1] output_box = np.zeros((n, m, 4), dtype=np.float32) cur_offset = 0 for i in range(len(lod)): if (code_type == "EncodeCenterSize"): - box_coder(target_box[cur_offset:(cur_offset + lod[i]), :], - prior_box, prior_box_var, - output_box[cur_offset:(cur_offset + lod[i]), :, :], - code_type, box_normalized) + box_encoder(t_box[cur_offset:(cur_offset + lod[i]), :], p_box, pb_v, + output_box[cur_offset:(cur_offset + lod[i]), :, :], + norm) elif (code_type == "DecodeCenterSize"): - box_coder(target_box[cur_offset:(cur_offset + lod[i]), :, :], - prior_box, prior_box_var, - output_box[cur_offset:(cur_offset + lod[i]), :, :], - code_type, box_normalized) + box_decoder(t_box, p_box, pb_v, output_box, norm, axis) cur_offset += lod[i] return output_box @@ -106,9 +106,35 @@ class TestBoxCoderOp(OpTest): def setUp(self): self.op_type = "box_coder" lod = [[1, 1, 1, 1, 1]] - prior_box = np.random.random((10, 4)).astype('float32') - prior_box_var = np.random.random((10, 4)).astype('float32') - target_box = np.random.random((5, 10, 4)).astype('float32') + prior_box = np.random.random((81, 4)).astype('float32') + prior_box_var = np.random.random((81, 4)).astype('float32') + target_box = np.random.random((20, 81, 4)).astype('float32') + code_type = "DecodeCenterSize" + box_normalized = False + output_box = batch_box_coder(prior_box, prior_box_var, target_box, + lod[0], code_type, box_normalized) + self.inputs = { + 'PriorBox': prior_box, + 'PriorBoxVar': prior_box_var, + 'TargetBox': target_box, + } + self.attrs = { + 'code_type': 'decode_center_size', + 'box_normalized': False + } + self.outputs = {'OutputBox': output_box} + + +class TestBoxCoderOpWithOneRankVar(OpTest): + def test_check_output(self): + self.check_output() + + def setUp(self): + self.op_type = "box_coder" + lod = [[1, 1, 1, 1, 1]] + prior_box = np.random.random((81, 4)).astype('float32') + prior_box_var = np.random.random((4)).astype('float32') + target_box = np.random.random((20, 81, 4)).astype('float32') code_type = "DecodeCenterSize" box_normalized = False output_box = batch_box_coder(prior_box, prior_box_var, target_box, @@ -133,9 +159,9 @@ class TestBoxCoderOpWithoutBoxVar(OpTest): def setUp(self): self.op_type = "box_coder" lod = [[0, 1, 2, 3, 4, 5]] - prior_box = np.random.random((10, 4)).astype('float32') - prior_box_var = np.ones((10, 4)).astype('float32') - target_box = np.random.random((5, 10, 4)).astype('float32') + prior_box = np.random.random((81, 4)).astype('float32') + prior_box_var = np.ones((81, 4)).astype('float32') + target_box = np.random.random((20, 81, 4)).astype('float32') code_type = "DecodeCenterSize" box_normalized = False output_box = batch_box_coder(prior_box, prior_box_var, target_box, @@ -158,10 +184,10 @@ class TestBoxCoderOpWithLoD(OpTest): def setUp(self): self.op_type = "box_coder" - lod = [[4, 8, 8]] - prior_box = np.random.random((10, 4)).astype('float32') - prior_box_var = np.random.random((10, 4)).astype('float32') - target_box = np.random.random((20, 4)).astype('float32') + lod = [[10, 20, 20]] + prior_box = np.random.random((20, 4)).astype('float32') + prior_box_var = np.random.random((20, 
4)).astype('float32') + target_box = np.random.random((50, 4)).astype('float32') code_type = "EncodeCenterSize" box_normalized = True output_box = batch_box_coder(prior_box, prior_box_var, target_box, @@ -176,5 +202,63 @@ class TestBoxCoderOpWithLoD(OpTest): self.outputs = {'OutputBox': output_box} +class TestBoxCoderOpWithAxis(OpTest): + def test_check_output(self): + self.check_output() + + def setUp(self): + self.op_type = "box_coder" + lod = [[1, 1, 1, 1, 1]] + prior_box = np.random.random((30, 4)).astype('float32') + prior_box_var = np.random.random((4)).astype('float32') + target_box = np.random.random((30, 81, 4)).astype('float32') + code_type = "DecodeCenterSize" + box_normalized = False + axis = 1 + output_box = batch_box_coder(prior_box, prior_box_var, target_box, + lod[0], code_type, box_normalized, axis) + + self.inputs = { + 'PriorBox': prior_box, + 'PriorBoxVar': prior_box_var, + 'TargetBox': target_box, + } + self.attrs = { + 'code_type': 'decode_center_size', + 'box_normalized': False, + 'axis': axis + } + self.outputs = {'OutputBox': output_box} + + +class TestBoxCoderOpWithVariance(OpTest): + def test_check_output(self): + self.check_output() + + def setUp(self): + self.op_type = "box_coder" + lod = [[1, 1, 1, 1, 1]] + prior_box = np.random.random((30, 4)).astype('float32') + prior_box_var = np.random.random((4)).astype('float32') + target_box = np.random.random((30, 81, 4)).astype('float32') + code_type = "DecodeCenterSize" + box_normalized = False + axis = 1 + output_box = batch_box_coder(prior_box, prior_box_var, target_box, + lod[0], code_type, box_normalized, axis) + + self.inputs = { + 'PriorBox': prior_box, + 'TargetBox': target_box, + } + self.attrs = { + 'code_type': 'decode_center_size', + 'box_normalized': False, + 'variance': prior_box_var.astype(np.float).flatten(), + 'axis': axis + } + self.outputs = {'OutputBox': output_box} + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_imperative.py b/python/paddle/fluid/tests/unittests/test_imperative.py index 7533ab9fdbff3d3f44b4eef88b7c805ff51cae95..baaddf9f2e5b123300f1d083b33ea644665348fd 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative.py +++ b/python/paddle/fluid/tests/unittests/test_imperative.py @@ -66,7 +66,141 @@ class MLP(fluid.imperative.Layer): return x +class SimpleRNNCell(fluid.imperative.Layer): + def __init__(self, step_input_size, hidden_size, output_size, param_attr): + super(SimpleRNNCell, self).__init__() + self.step_input_size = step_input_size + self.hidden_size = hidden_size + self.output_size = output_size + self._dype = core.VarDesc.VarType.FP32 + from paddle.fluid.layer_helper import LayerHelper + self._helper = LayerHelper( + 'SimpleRNNCell', act="tanh", param_attr=param_attr) + + def _build_once(self, inputs, pre_hidden): + i2h_param_shape = [self.step_input_size, self.hidden_size] + h2h_param_shape = [self.hidden_size, self.hidden_size] + h2o_param_shape = [self.output_size, self.hidden_size] + self._i2h_w = self._helper.create_parameter( + attr=self._helper.param_attr, + shape=i2h_param_shape, + dtype=self._dtype, + is_bias=False) + self._h2h_w = self._helper.create_parameter( + attr=self._helper.param_attr, + shape=h2h_param_shape, + dtype=self._dtype, + is_bias=False) + self._h2o_w = self._helper.create_parameter( + attr=self._helper.param_attr, + shape=h2o_param_shape, + dtype=self._dtype, + is_bias=False) + + def forward(self, input, pre_hidden): + + tmp_i2h = 
diff --git a/python/paddle/fluid/tests/unittests/test_imperative.py b/python/paddle/fluid/tests/unittests/test_imperative.py
index 7533ab9fdbff3d3f44b4eef88b7c805ff51cae95..baaddf9f2e5b123300f1d083b33ea644665348fd 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative.py
@@ -66,7 +66,141 @@ class MLP(fluid.imperative.Layer):
         return x
 
 
+class SimpleRNNCell(fluid.imperative.Layer):
+    def __init__(self, step_input_size, hidden_size, output_size, param_attr):
+        super(SimpleRNNCell, self).__init__()
+        self.step_input_size = step_input_size
+        self.hidden_size = hidden_size
+        self.output_size = output_size
+        self._dtype = core.VarDesc.VarType.FP32
+        from paddle.fluid.layer_helper import LayerHelper
+        self._helper = LayerHelper(
+            'SimpleRNNCell', act="tanh", param_attr=param_attr)
+
+    def _build_once(self, inputs, pre_hidden):
+        i2h_param_shape = [self.step_input_size, self.hidden_size]
+        h2h_param_shape = [self.hidden_size, self.hidden_size]
+        h2o_param_shape = [self.output_size, self.hidden_size]
+        self._i2h_w = self._helper.create_parameter(
+            attr=self._helper.param_attr,
+            shape=i2h_param_shape,
+            dtype=self._dtype,
+            is_bias=False)
+        self._h2h_w = self._helper.create_parameter(
+            attr=self._helper.param_attr,
+            shape=h2h_param_shape,
+            dtype=self._dtype,
+            is_bias=False)
+        self._h2o_w = self._helper.create_parameter(
+            attr=self._helper.param_attr,
+            shape=h2o_param_shape,
+            dtype=self._dtype,
+            is_bias=False)
+
+    def forward(self, input, pre_hidden):
+
+        tmp_i2h = self._helper.create_variable_for_type_inference(self._dtype)
+        tmp_h2h = self._helper.create_variable_for_type_inference(self._dtype)
+        hidden = self._helper.create_variable_for_type_inference(self._dtype)
+        out = self._helper.create_variable_for_type_inference(self._dtype)
+        softmax_out = self._helper.create_variable_for_type_inference(
+            self._dtype)
+        reduce_out = self._helper.create_variable_for_type_inference(
+            self._dtype)
+        self._helper.append_op(
+            type="mul",
+            inputs={"X": input,
+                    "Y": self._i2h_w},
+            outputs={"Out": tmp_i2h},
+            attrs={"x_num_col_dims": 1,
+                   "y_num_col_dims": 1})
+
+        self._helper.append_op(
+            type="mul",
+            inputs={"X": pre_hidden,
+                    "Y": self._h2h_w},
+            outputs={"Out": tmp_h2h},
+            attrs={"x_num_col_dims": 1,
+                   "y_num_col_dims": 1})
+
+        self._helper.append_op(
+            type="elementwise_add",
+            inputs={'X': tmp_h2h,
+                    'Y': tmp_i2h},
+            outputs={'Out': hidden},
+            attrs={'axis': -1,
+                   'use_mkldnn': False})
+        hidden = self._helper.append_activation(hidden)
+
+        self._helper.append_op(
+            type="mul",
+            inputs={"X": hidden,
+                    "Y": self._h2o_w},
+            outputs={"Out": out},
+            attrs={"x_num_col_dims": 1,
+                   "y_num_col_dims": 1})
+
+        self._helper.append_op(
+            type="softmax",
+            inputs={"X": out},
+            outputs={"Out": softmax_out},
+            attrs={"use_cudnn": False})
+
+        self._helper.append_op(
+            type='reduce_sum',
+            inputs={'X': softmax_out},
+            outputs={'Out': reduce_out},
+            attrs={'dim': None,
+                   'keep_dim': False,
+                   'reduce_all': True})
+
+        return reduce_out, hidden
+
+
+class SimpleRNN(fluid.imperative.Layer):
+    def __init__(self):
+        super(SimpleRNN, self).__init__()
+        self.seq_len = 4
+        self._cell = SimpleRNNCell(
+            3,
+            3,
+            3,
+            fluid.ParamAttr(initializer=fluid.initializer.Constant(value=0.1)))
+
+    def forward(self, inputs):
+        outs = list()
+        pre_hiddens = list()
+
+        init_hidden = fluid.layers.tensor.create_parameter(
+            attr=fluid.ParamAttr(
+                initializer=fluid.initializer.Constant(value=0.1)),
+            shape=[1, 3],
+            dtype='float32',
+            is_bias=False)
+        pre_hidden = init_hidden
+        for i in range(self.seq_len):
+            input = fluid.layers.slice(
+                inputs, axes=[1], starts=[i], ends=[i + 1])
+            input = fluid.layers.reshape(input, shape=[1, 3])
+            out_softmax, pre_hidden = self._cell(input, pre_hidden)
+            outs.append(out_softmax)
+
+        return outs, pre_hiddens
+
+
 class TestImperative(unittest.TestCase):
+    def test_sum_op(self):
+        x = np.ones([2, 2], np.float32)
+        with fluid.imperative.guard():
+            inputs = []
+            for _ in range(10):
+                inputs.append(fluid.imperative.base.to_variable(x))
+            ret = fluid.layers.sums(inputs)
+            loss = fluid.layers.reduce_sum(ret)
+            loss._backward()
+            self.assertTrue(np.allclose(ret._numpy(), x * 10))
+            self.assertTrue(np.allclose(inputs[0]._gradient(), x))
+
     def test_layer(self):
         with fluid.imperative.guard():
             cl = core.Layer()
@@ -199,6 +333,41 @@ class TestImperative(unittest.TestCase):
         self.assertTrue(np.allclose(dy_out, static_out))
         self.assertTrue(np.allclose(dy_grad, static_grad))
 
+    def test_rnn(self):
+        np_inp = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0],
+                           [10.0, 11.0, 12.0]])
+        np_inp = np_inp.reshape((1, 4, 3))
+        np_inp = np_inp.astype(np.float32)
+        with fluid.imperative.guard():
+            var_inp = fluid.imperative.base.to_variable(np_inp)
+            var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3])
+            simple_rnn = SimpleRNN()
+            outs, pre_hiddens = simple_rnn.forward(var_inp)
+            dy_out = outs[3]._numpy()
+            outs[3]._backward()
+            dy_grad_h2o = simple_rnn._cell._h2o_w._gradient()
+            dy_grad_h2h = simple_rnn._cell._h2h_w._gradient()
+            dy_grad_i2h = 
simple_rnn._cell._i2h_w._gradient() + + with new_program_scope(): + inp = fluid.layers.data( + name="inp", shape=[1, 4, 3], append_batch_size=False) + simple_rnn = SimpleRNN() + outs, pre_hiddens = simple_rnn(inp) + param_grads = fluid.backward.append_backward(outs[3]) + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + static_out, static_grad_h2o, static_grad_h2h, static_grad_i2h = exe.run( + feed={inp.name: np_inp}, + fetch_list=[ + outs[3].name, param_grads[0][1].name, + param_grads[1][1].name, param_grads[2][1].name + ]) + self.assertTrue(np.allclose(dy_out, static_out)) + self.assertTrue(np.allclose(dy_grad_h2o, static_grad_h2o)) + self.assertTrue(np.allclose(dy_grad_h2h, static_grad_h2h)) + self.assertTrue(np.allclose(dy_grad_i2h, static_grad_i2h)) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py index d0a5a883174cb33a035b344f9489b2ba02ba99f1..08b155acc657c3a4a73f5b1d72ac356fc7e83a58 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py @@ -82,13 +82,14 @@ class MNIST(fluid.imperative.Layer): self._simple_img_conv_pool_2 = SimpleImgConvPool( 20, 50, 5, 2, 2, act="relu") - pool_2_shape = 50 * 8 * 8 + pool_2_shape = 50 * 4 * 4 SIZE = 10 scale = (2.0 / (pool_2_shape**2 * SIZE))**0.5 self._fc = FC(10, param_attr=fluid.param_attr.ParamAttr( initializer=fluid.initializer.NormalInitializer( - loc=0.0, scale=scale))) + loc=0.0, scale=scale)), + act="softmax") def forward(self, inputs): x = self._simple_img_conv_pool_1(inputs) @@ -98,9 +99,9 @@ class MNIST(fluid.imperative.Layer): class TestImperativeMnist(unittest.TestCase): - def test_mnist_cpu_float32(self): + def test_mnist_float32(self): seed = 90 - + batch_num = 2 with fluid.imperative.guard(): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed @@ -112,15 +113,15 @@ class TestImperativeMnist(unittest.TestCase): dy_param_init_value = {} for batch_id, data in enumerate(train_reader()): - if batch_id >= 2: + if batch_id >= batch_num: break - x_data = np.array( + dy_x_data = np.array( [x[0].reshape(1, 28, 28) for x in data]).astype('float32') y_data = np.array([x[1] for x in data]).astype('int64').reshape( 128, 1) - img = to_variable(x_data) + img = to_variable(dy_x_data) label = to_variable(y_data) label._stop_gradient = True @@ -136,6 +137,7 @@ class TestImperativeMnist(unittest.TestCase): avg_loss._backward() sgd.minimize(avg_loss) + mnist.clear_gradients() dy_param_value = {} for param in fluid.default_main_program().global_block( ).all_parameters(): @@ -175,10 +177,10 @@ class TestImperativeMnist(unittest.TestCase): static_param_init_value[static_param_name_list[i]] = out[i] for batch_id, data in enumerate(train_reader()): - if batch_id >= 2: + if batch_id >= batch_num: break - x_data = np.array( + static_x_data = np.array( [x[0].reshape(1, 28, 28) for x in data]).astype('float32') y_data = np.array([x[1] for x in data]).astype('int64').reshape( [128, 1]) @@ -186,7 +188,7 @@ class TestImperativeMnist(unittest.TestCase): fetch_list = [avg_loss.name] fetch_list.extend(static_param_name_list) out = exe.run(fluid.default_main_program(), - feed={"pixel": x_data, + feed={"pixel": static_x_data, "label": y_data}, fetch_list=fetch_list) @@ -196,11 +198,12 @@ class TestImperativeMnist(unittest.TestCase): 
static_param_value[static_param_name_list[i - 1]] = out[i] for key, value in six.iteritems(static_param_init_value): - self.assertTrue( - np.allclose(value.all(), dy_param_init_value[key].all())) - self.assertTrue(np.allclose(static_out.all(), dy_out.all())) + self.assertTrue(np.allclose(value, dy_param_init_value[key])) + + self.assertTrue(np.allclose(static_out, dy_out)) + for key, value in six.iteritems(static_param_value): - self.assertTrue(np.allclose(value.all(), dy_param_value[key].all())) + self.assertTrue(np.allclose(value, dy_param_value[key])) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..afe990e74ff96dfbca4f335b561f9bbe7d295246 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py @@ -0,0 +1,350 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import unittest +import paddle.fluid as fluid +from paddle.fluid.imperative.nn import Embedding +import paddle.fluid.framework as framework +from paddle.fluid.optimizer import SGDOptimizer +from paddle.fluid.imperative.base import to_variable +from test_imperative_base import new_program_scope +import numpy as np +import six +from paddle.fluid.backward import append_backward + + +class SimpleLSTMRNN(fluid.imperative.Layer): + def __init__(self, + hidden_size, + num_steps, + num_layers=2, + init_scale=0.1, + dropout=None): + super(SimpleLSTMRNN, self).__init__() + self._hidden_size = hidden_size + self._num_layers = num_layers + self._init_scale = init_scale + self._dropout = dropout + self._input = None + self._num_steps = num_steps + + def _build_once(self, input_embedding, init_hidden=None, init_cell=None): + self.weight_1_arr = [] + self.weight_2_arr = [] + self.bias_arr = [] + self.hidden_array = [] + self.cell_array = [] + self.mask_array = [] + + for i in range(self._num_layers): + weight_1 = fluid.layers.create_parameter( + shape=[self._hidden_size * 2, self._hidden_size * 4], + dtype="float32", + name="fc_weight1_" + str(i), + default_initializer=fluid.initializer.UniformInitializer( + low=-self._init_scale, high=self._init_scale)) + self.weight_1_arr.append(weight_1) + bias_1 = fluid.layers.create_parameter( + [self._hidden_size * 4], + dtype="float32", + name="fc_bias1_" + str(i), + default_initializer=fluid.initializer.Constant(0.0)) + self.bias_arr.append(bias_1) + + pre_hidden = fluid.layers.slice( + init_hidden, axes=[0], starts=[i], ends=[i + 1]) + pre_cell = fluid.layers.slice( + init_cell, axes=[0], starts=[i], ends=[i + 1]) + pre_hidden = fluid.layers.reshape( + pre_hidden, shape=[-1, self._hidden_size]) + pre_cell = fluid.layers.reshape( + pre_cell, shape=[-1, self._hidden_size]) + self.hidden_array.append(pre_hidden) + self.cell_array.append(pre_cell) + + def parameters(self): + parameters = list() 
+ for param in self.weight_1_arr: + parameters.append(param) + for param in self.weight_2_arr: + parameters.append(param) + for bias in self.bias_arr: + parameters.append(bias) + return parameters + + def forward(self, input_embedding, init_hidden=None, init_cell=None): + res = [] + for index in range(self._num_steps): + self._input = fluid.layers.slice( + input_embedding, axes=[1], starts=[index], ends=[index + 1]) + self._input = fluid.layers.reshape( + self._input, shape=[-1, self._hidden_size]) + for k in range(self._num_layers): + pre_hidden = self.hidden_array[k] + pre_cell = self.cell_array[k] + weight_1 = self.weight_1_arr[k] + bias = self.bias_arr[k] + + nn = fluid.layers.concat([self._input, pre_hidden], 1) + gate_input = fluid.layers.matmul(x=nn, y=weight_1) + + gate_input = fluid.layers.elementwise_add(gate_input, bias) + i, j, f, o = fluid.layers.split( + gate_input, num_or_sections=4, dim=-1) + c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid( + i) * fluid.layers.tanh(j) + m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o) + self.hidden_array[k] = m + self.cell_array[k] = c + self._input = m + + if self._dropout is not None and self._dropout > 0.0: + self._input = fluid.layers.dropout( + self._input, + dropout_prob=self._dropout, + dropout_implementation='upscale_in_train') + res.append( + fluid.layers.reshape( + self._input, shape=[1, -1, self._hidden_size])) + real_res = fluid.layers.concat(res, 0) + real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2]) + last_hidden = fluid.layers.concat(self.hidden_array, 1) + last_hidden = fluid.layers.reshape( + last_hidden, shape=[-1, self._num_layers, self._hidden_size]) + last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2]) + last_cell = fluid.layers.concat(self.cell_array, 1) + last_cell = fluid.layers.reshape( + last_cell, shape=[-1, self._num_layers, self._hidden_size]) + last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2]) + return real_res, last_hidden, last_cell + + +class PtbModel(fluid.imperative.Layer): + def __init__(self, + hidden_size, + vocab_size, + num_layers=2, + num_steps=20, + init_scale=0.1, + dropout=None): + super(PtbModel, self).__init__() + self.hidden_size = hidden_size + self.vocab_size = vocab_size + self.init_scale = init_scale + self.num_layers = num_layers + self.num_steps = num_steps + self.dropout = dropout + self.simple_lstm_rnn = SimpleLSTMRNN( + hidden_size, + num_steps, + num_layers=num_layers, + init_scale=init_scale, + dropout=dropout) + self.embedding = Embedding( + size=[vocab_size, hidden_size], + dtype='float32', + is_sparse=False, + param_attr=fluid.ParamAttr( + name='embedding_para', + initializer=fluid.initializer.UniformInitializer( + low=-init_scale, high=init_scale))) + self.softmax_weight = fluid.layers.create_parameter( + [self.hidden_size, self.vocab_size], + dtype="float32", + name="softmax_weight", + default_initializer=fluid.initializer.UniformInitializer( + low=-self.init_scale, high=self.init_scale)) + self.softmax_bias = fluid.layers.create_parameter( + [self.vocab_size], + dtype="float32", + name='softmax_bias', + default_initializer=fluid.initializer.UniformInitializer( + low=-self.init_scale, high=self.init_scale)) + + def _build_once(self, input, label, init_hidden, init_cell): + pass + + def parameters(self): + parameters = self.simple_lstm_rnn.parameters() + [ + self.softmax_weight, self.softmax_bias + ] + self.embedding.parameters() + return parameters + + def forward(self, input, label, init_hidden, init_cell): + + init_h = 
fluid.layers.reshape(
+            init_hidden, shape=[self.num_layers, -1, self.hidden_size])
+
+        init_c = fluid.layers.reshape(
+            init_cell, shape=[self.num_layers, -1, self.hidden_size])
+
+        x_emb = self.embedding(input)
+        x_emb = fluid.layers.reshape(
+            x_emb, shape=[-1, self.num_steps, self.hidden_size])
+        if self.dropout is not None and self.dropout > 0.0:
+            x_emb = fluid.layers.dropout(
+                x_emb,
+                dropout_prob=self.dropout,
+                dropout_implementation='upscale_in_train')
+        rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(x_emb, init_h,
+                                                               init_c)
+        rnn_out = fluid.layers.reshape(
+            rnn_out, shape=[-1, self.num_steps, self.hidden_size])
+        projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
+        projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
+        projection = fluid.layers.reshape(
+            projection, shape=[-1, self.vocab_size])
+        loss = fluid.layers.softmax_with_cross_entropy(
+            logits=projection, label=label, soft_label=False)
+        loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps])
+        loss = fluid.layers.reduce_mean(loss, dim=[0])
+        loss = fluid.layers.reduce_sum(loss)
+        loss.permissions = True
+
+        return loss, last_hidden, last_cell
+
+
+class TestImperativePtbRnn(unittest.TestCase):
+    def test_ptb_rnn_cpu_float32(self):
+        seed = 90
+        hidden_size = 10
+        vocab_size = 1000
+        num_layers = 1
+        num_steps = 3
+        init_scale = 0.1
+        batch_size = 4
+
+        with fluid.imperative.guard():
+            fluid.default_startup_program().random_seed = seed
+            fluid.default_main_program().random_seed = seed
+            # TODO: marsyang1993 Change seed to
+            ptb_model = PtbModel(
+                hidden_size=hidden_size,
+                vocab_size=vocab_size,
+                num_layers=num_layers,
+                num_steps=num_steps,
+                init_scale=init_scale)
+
+            sgd = SGDOptimizer(learning_rate=1e-3)
+            dy_param_updated = dict()
+            dy_param_init = dict()
+            dy_loss = None
+            last_hidden = None
+            last_cell = None
+            for i in range(2):
+                x_data = np.arange(12).reshape(4, 3).astype('int64')
+                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
+                x_data = x_data.reshape((-1, num_steps, 1))
+                y_data = y_data.reshape((-1, 1))
+                init_hidden_data = np.zeros(
+                    (num_layers, batch_size, hidden_size), dtype='float32')
+                init_cell_data = np.zeros(
+                    (num_layers, batch_size, hidden_size), dtype='float32')
+                x = to_variable(x_data)
+                y = to_variable(y_data)
+                init_hidden = to_variable(init_hidden_data)
+                init_cell = to_variable(init_cell_data)
+                dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
+                                                            init_cell)
+                if i == 0:
+                    for param in ptb_model.parameters():
+                        dy_param_init[param.name] = param._numpy()
+                dy_loss._backward()
+                sgd.minimize(dy_loss)
+                for param in ptb_model.parameters():
+                    dy_param_updated[param.name] = param._numpy()
+
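Aside (illustration only, not part of the patch): before the static-graph half of this test, it may help to see the gate arithmetic that SimpleLSTMRNN above performs per step. A minimal numpy sketch, assuming the same fused layout (one weight matrix mapping [input, pre_hidden] to 4*hidden units) and the same i, j, f, o split order used by fluid.layers.split in the forward pass:

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def lstm_step(x, pre_h, pre_c, w, b):
    # Fused gate projection: [x, pre_h] -> 4 * hidden, then split into gates.
    gate = np.concatenate([x, pre_h], axis=1).dot(w) + b
    i, j, f, o = np.split(gate, 4, axis=1)
    # Same cell update as SimpleLSTMRNN: forget old state, write new candidate.
    c = pre_c * sigmoid(f) + sigmoid(i) * np.tanh(j)
    h = np.tanh(c) * sigmoid(o)
    return h, c

hidden = 10
x = np.random.rand(4, hidden).astype('float32')
h = np.zeros((4, hidden), 'float32')
c = np.zeros((4, hidden), 'float32')
w = np.random.uniform(-0.1, 0.1, (2 * hidden, 4 * hidden)).astype('float32')
b = np.zeros(4 * hidden, 'float32')
h, c = lstm_step(x, h, c, w, b)
```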
+        with new_program_scope():
+            fluid.default_startup_program().random_seed = seed
+            fluid.default_main_program().random_seed = seed
+            # TODO: marsyang1993 Change seed to
+            ptb_model = PtbModel(
+                hidden_size=hidden_size,
+                vocab_size=vocab_size,
+                num_layers=num_layers,
+                num_steps=num_steps,
+                init_scale=init_scale)
+
+            exe = fluid.Executor(fluid.CPUPlace())
+            sgd = SGDOptimizer(learning_rate=1e-3)
+            x = fluid.layers.data(name="x", shape=[-1, 3, 1], dtype='int64')
+            y = fluid.layers.data(name="y", shape=[-1, 1], dtype='int64')
+            init_hidden = fluid.layers.data(
+                name="init_hidden", shape=[1], dtype='float32')
+            init_cell = fluid.layers.data(
+                name="init_cell", shape=[1], dtype='float32')
+
+            static_loss, static_last_hidden, static_last_cell = ptb_model(
+                x, y, init_hidden, init_cell)
+            sgd.minimize(static_loss)
+            static_param_updated = dict()
+            static_param_init = dict()
+            static_param_name_list = list()
+            for param in ptb_model.parameters():
+                static_param_name_list.append(param.name)
+
+            out = exe.run(framework.default_startup_program(),
+                          fetch_list=static_param_name_list)
+            for i in range(len(static_param_name_list)):
+                static_param_init[static_param_name_list[i]] = out[i]
+            static_loss_value = None
+            static_last_cell_value = None
+            static_last_hidden_value = None
+            for i in range(2):
+                x_data = np.arange(12).reshape(4, 3).astype('int64')
+                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
+                x_data = x_data.reshape((-1, num_steps, 1))
+                y_data = y_data.reshape((-1, 1))
+                init_hidden_data = np.zeros(
+                    (num_layers, batch_size, hidden_size), dtype='float32')
+                init_cell_data = np.zeros(
+                    (num_layers, batch_size, hidden_size), dtype='float32')
+                fetch_list = [static_loss, static_last_hidden, static_last_cell]
+                fetch_list.extend(static_param_name_list)
+                out = exe.run(fluid.default_main_program(),
+                              feed={
+                                  "x": x_data,
+                                  "y": y_data,
+                                  "init_hidden": init_hidden_data,
+                                  "init_cell": init_cell_data
+                              },
+                              fetch_list=fetch_list)
+                static_loss_value = out[0]
+                static_last_cell_value = out[1]
+                static_last_hidden_value = out[2]
+                for k in range(3, len(out)):
+                    static_param_updated[static_param_name_list[k - 3]] = out[k]
+
+        self.assertTrue(
+            np.allclose(static_loss_value, dy_loss._numpy()))
+        self.assertTrue(
+            np.allclose(static_last_cell_value, last_cell._numpy()))
+        self.assertTrue(
+            np.allclose(static_last_hidden_value, last_hidden._numpy()))
+        for key, value in six.iteritems(static_param_init):
+            self.assertTrue(np.allclose(value, dy_param_init[key]))
+        for key, value in six.iteritems(static_param_updated):
+            self.assertTrue(np.allclose(value, dy_param_updated[key]))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py
index 87a72dd04e376cf9225e275d862b0cbbb9774e2c..c27fd0b8024a8fa3310a62de34299fb621e2902f 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py
@@ -264,6 +264,7 @@ class TestImperativeResnet(unittest.TestCase):
                 )] = np_array
 
                 optimizer.minimize(avg_loss)
+                resnet.clear_gradients()
 
                 dy_param_value = {}
                 for param in fluid.default_main_program().global_block(
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index c13f03e86f3e375026b04a31d51ac1a5223360ef..e7bc1601a54c8615e0e787d74145aa4987b6cb88 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -58,7 +58,8 @@ class TestBook(unittest.TestCase):
     def test_simple_conv2d(self):
         program = Program()
         with program_guard(program, startup_program=Program()):
-            images = layers.data(name='pixel', shape=[3, 48, 48], dtype='int32')
+            images = layers.data(
+                name='pixel', shape=[3, 48, 48], dtype='float32')
             layers.conv2d(input=images, num_filters=3, filter_size=[4, 4])
 
         print(str(program))
diff --git a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py
index 9778bd694de4b21f3ff723846c77a8ad0dceb57b..8fc391a1ff2529460b038979c0c7d0a9d905a7e0 100644
--- 
a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py +++ b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py @@ -19,7 +19,7 @@ import copy from op_test import OpTest -def iou(box_a, box_b): +def iou(box_a, box_b, norm): """Apply intersection-over-union overlap between box_a and box_b """ xmin_a = min(box_a[0], box_a[2]) @@ -32,8 +32,10 @@ def iou(box_a, box_b): xmax_b = max(box_b[0], box_b[2]) ymax_b = max(box_b[1], box_b[3]) - area_a = (ymax_a - ymin_a) * (xmax_a - xmin_a) - area_b = (ymax_b - ymin_b) * (xmax_b - xmin_b) + area_a = (ymax_a - ymin_a + (norm == False)) * (xmax_a - xmin_a + + (norm == False)) + area_b = (ymax_b - ymin_b + (norm == False)) * (xmax_b - xmin_b + + (norm == False)) if area_a <= 0 and area_b <= 0: return 0.0 @@ -42,17 +44,21 @@ def iou(box_a, box_b): xb = min(xmax_a, xmax_b) yb = min(ymax_a, ymax_b) - inter_area = max(xb - xa, 0.0) * max(yb - ya, 0.0) - - box_a_area = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1]) - box_b_area = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1]) + inter_area = max(xb - xa + (norm == False), + 0.0) * max(yb - ya + (norm == False), 0.0) iou_ratio = inter_area / (area_a + area_b - inter_area) return iou_ratio -def nms(boxes, scores, score_threshold, nms_threshold, top_k=200, eta=1.0): +def nms(boxes, + scores, + score_threshold, + nms_threshold, + top_k=200, + normalized=True, + eta=1.0): """Apply non-maximum suppression at test time to avoid detecting too many overlapping bounding boxes for a given object. Args: @@ -87,7 +93,7 @@ def nms(boxes, scores, score_threshold, nms_threshold, top_k=200, eta=1.0): for k in range(len(selected_indices)): if keep: kept_idx = selected_indices[k] - overlap = iou(boxes[idx], boxes[kept_idx]) + overlap = iou(boxes[idx], boxes[kept_idx], normalized) keep = True if overlap <= adaptive_threshold else False else: break @@ -99,16 +105,24 @@ def nms(boxes, scores, score_threshold, nms_threshold, top_k=200, eta=1.0): def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold, - nms_top_k, keep_top_k): - class_num = scores.shape[0] - priorbox_num = scores.shape[1] + nms_top_k, keep_top_k, normalized, shared): + if shared: + class_num = scores.shape[0] + priorbox_num = scores.shape[1] + else: + box_num = scores.shape[0] + class_num = scores.shape[1] selected_indices = {} num_det = 0 for c in range(class_num): if c == background: continue - indices = nms(boxes, scores[c], score_threshold, nms_threshold, - nms_top_k) + if shared: + indices = nms(boxes, scores[c], score_threshold, nms_threshold, + nms_top_k, normalized) + else: + indices = nms(boxes[:, c, :], scores[:, c], score_threshold, + nms_threshold, nms_top_k, normalized) selected_indices[c] = indices num_det += len(indices) @@ -116,7 +130,10 @@ def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold, score_index = [] for c, indices in selected_indices.items(): for idx in indices: - score_index.append((scores[c][idx], c, idx)) + if shared: + score_index.append((scores[c][idx], c, idx)) + else: + score_index.append((scores[idx][c], c, idx)) sorted_score_index = sorted( score_index, key=lambda tup: tup[0], reverse=True) @@ -127,24 +144,75 @@ def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold, selected_indices[c] = [] for s, c, idx in sorted_score_index: selected_indices[c].append(idx) + if not shared: + for labels in selected_indices: + selected_indices[labels].sort() num_det = keep_top_k return selected_indices, num_det -def batched_multiclass_nms(boxes, scores, background, 
score_threshold, - nms_threshold, nms_top_k, keep_top_k): +def lod_multiclass_nms(boxes, scores, background, score_threshold, + nms_threshold, nms_top_k, keep_top_k, box_lod, + normalized): + det_outs = [] + lod = [] + head = 0 + for n in range(len(box_lod[0])): + box = boxes[head:head + box_lod[0][n]] + score = scores[head:head + box_lod[0][n]] + head = head + box_lod[0][n] + nmsed_outs, nmsed_num = multiclass_nms( + box, + score, + background, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, + normalized, + shared=False) + if nmsed_num == 0: + #lod.append(1) + continue + lod.append(nmsed_num) + for c, indices in nmsed_outs.items(): + for idx in indices: + xmin, ymin, xmax, ymax = box[idx, c, :] + det_outs.append([c, score[idx][c], xmin, ymin, xmax, ymax]) + if len(lod) == 0: + lod.append(1) + + return det_outs, lod + + +def batched_multiclass_nms(boxes, + scores, + background, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, + normalized=True): batch_size = scores.shape[0] det_outs = [] lod = [] for n in range(batch_size): - nmsed_outs, nmsed_num = multiclass_nms(boxes[n], scores[n], background, - score_threshold, nms_threshold, - nms_top_k, keep_top_k) - lod.append(nmsed_num) - if nmsed_num == 0: continue + nmsed_outs, nmsed_num = multiclass_nms( + boxes[n], + scores[n], + background, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, + normalized, + shared=True) + if nmsed_num == 0: + continue + lod.append(nmsed_num) tmp_det_out = [] for c, indices in nmsed_outs.items(): for idx in indices: @@ -154,7 +222,8 @@ def batched_multiclass_nms(boxes, scores, background, score_threshold, sorted_det_out = sorted( tmp_det_out, key=lambda tup: tup[0], reverse=False) det_outs.extend(sorted_det_out) - + if len(lod) == 0: + lod += [1] return det_outs, lod @@ -168,7 +237,6 @@ class TestMulticlassNMSOp(OpTest): M = 1200 C = 21 BOX_SIZE = 4 - background = 0 nms_threshold = 0.3 nms_top_k = 400 @@ -206,6 +274,7 @@ class TestMulticlassNMSOp(OpTest): 'keep_top_k': keep_top_k, 'score_threshold': score_threshold, 'nms_eta': 1.0, + 'normalized': True, } def test_check_output(self): @@ -219,13 +288,70 @@ class TestMulticlassNMSOpNoOutput(TestMulticlassNMSOp): self.score_threshold = 2.0 +class TestMulticlassNMSLoDInput(OpTest): + def set_argument(self): + self.score_threshold = 0.01 + + def setUp(self): + self.set_argument() + M = 1200 + C = 21 + BOX_SIZE = 4 + box_lod = [[1200]] + background = 0 + nms_threshold = 0.3 + nms_top_k = 400 + keep_top_k = 200 + score_threshold = self.score_threshold + normalized = False + + scores = np.random.random((M, C)).astype('float32') + + def softmax(x): + shiftx = x - np.max(x).clip(-64.) 
+ exps = np.exp(shiftx) + return exps / np.sum(exps) + + scores = np.apply_along_axis(softmax, 1, scores) + + boxes = np.random.random((M, C, BOX_SIZE)).astype('float32') + boxes[:, :, 0] = boxes[:, :, 0] * 10 + boxes[:, :, 1] = boxes[:, :, 1] * 10 + boxes[:, :, 2] = boxes[:, :, 2] * 10 + 10 + boxes[:, :, 3] = boxes[:, :, 3] * 10 + 10 + + nmsed_outs, lod = lod_multiclass_nms( + boxes, scores, background, score_threshold, nms_threshold, + nms_top_k, keep_top_k, box_lod, normalized) + nmsed_outs = [-1] if not nmsed_outs else nmsed_outs + nmsed_outs = np.array(nmsed_outs).astype('float32') + self.op_type = 'multiclass_nms' + self.inputs = { + 'BBoxes': (boxes, box_lod), + 'Scores': (scores, box_lod), + } + self.outputs = {'Out': (nmsed_outs, [lod])} + self.attrs = { + 'background_label': 0, + 'nms_threshold': nms_threshold, + 'nms_top_k': nms_top_k, + 'keep_top_k': keep_top_k, + 'score_threshold': score_threshold, + 'nms_eta': 1.0, + 'normalized': normalized, + } + + def test_check_output(self): + self.check_output() + + class TestIOU(unittest.TestCase): def test_iou(self): box1 = np.array([4.0, 3.0, 7.0, 5.0]).astype('float32') box2 = np.array([3.0, 4.0, 6.0, 8.0]).astype('float32') expt_output = np.array([2.0 / 16.0]).astype('float32') - calc_output = np.array([iou(box1, box2)]).astype('float32') + calc_output = np.array([iou(box1, box2, True)]).astype('float32') self.assertTrue(np.allclose(calc_output, expt_output)) diff --git a/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py b/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py index 242709425f2d3f190d3c1ed795d30938fb8e23fe..5bb2260ef7a143670dd75fc88769603d1437173d 100644 --- a/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py +++ b/python/paddle/fluid/tests/unittests/test_nearest_interp_op.py @@ -24,7 +24,8 @@ def nearest_neighbor_interp_np(X, out_h, out_w, out_size=None, - actual_shape=None): + actual_shape=None, + align_corners=True): """nearest neighbor interpolation implement in shape [N, C, H, W]""" if out_size is not None: out_h = out_size[0] @@ -35,17 +36,31 @@ def nearest_neighbor_interp_np(X, n, c, in_h, in_w = X.shape ratio_h = ratio_w = 0.0 - if out_h > 1: - ratio_h = (in_h - 1.0) / (out_h - 1.0) - if out_w > 1: - ratio_w = (in_w - 1.0) / (out_w - 1.0) + if (out_h > 1): + if (align_corners): + ratio_h = (in_h - 1.0) / (out_h - 1.0) + else: + ratio_h = 1.0 * in_h / out_h + if (out_w > 1): + if (align_corners): + ratio_w = (in_w - 1.0) / (out_w - 1.0) + else: + ratio_w = 1.0 * in_w / out_w out = np.zeros((n, c, out_h, out_w)) - for i in range(out_h): - in_i = int(ratio_h * i + 0.5) - for j in range(out_w): - in_j = int(ratio_w * j + 0.5) - out[:, :, i, j] = X[:, :, in_i, in_j] + + if align_corners: + for i in range(out_h): + in_i = int(ratio_h * i + 0.5) + for j in range(out_w): + in_j = int(ratio_w * j + 0.5) + out[:, :, i, j] = X[:, :, in_i, in_j] + else: + for i in range(out_h): + in_i = int(ratio_h * i) + for j in range(out_w): + in_j = int(ratio_w * j) + out[:, :, i, j] = X[:, :, in_i, in_j] return out.astype(X.dtype) @@ -59,7 +74,8 @@ class TestNearestInterpOp(OpTest): input_np = np.random.random(self.input_shape).astype("float32") output_np = nearest_neighbor_interp_np(input_np, self.out_h, self.out_w, - self.out_size, self.actual_shape) + self.out_size, self.actual_shape, + self.align_corners) self.inputs = {'X': input_np} if self.out_size is not None: self.inputs['OutSize'] = self.out_size @@ -68,7 +84,8 @@ class TestNearestInterpOp(OpTest): self.attrs = { 'out_h': self.out_h, 
            'out_w': self.out_w,
-            'interp_method': self.interp_method
+            'interp_method': self.interp_method,
+            'align_corners': self.align_corners,
         }
         self.outputs = {'Out': output_np}
 
@@ -84,6 +101,7 @@ class TestNearestInterpOp(OpTest):
         self.out_h = 2
         self.out_w = 2
         self.out_size = np.array([3, 3]).astype("int32")
+        self.align_corners = True
 
 
 class TestNearestNeighborInterpCase1(TestNearestInterpOp):
@@ -92,6 +110,7 @@ class TestNearestNeighborInterpCase1(TestNearestInterpOp):
         self.input_shape = [4, 1, 7, 8]
         self.out_h = 1
         self.out_w = 1
+        self.align_corners = True
 
 
 class TestNearestNeighborInterpCase2(TestNearestInterpOp):
@@ -100,6 +119,7 @@ class TestNearestNeighborInterpCase2(TestNearestInterpOp):
         self.input_shape = [3, 3, 9, 6]
         self.out_h = 12
         self.out_w = 12
+        self.align_corners = True
 
 
 class TestNearestNeighborInterpCase3(TestNearestInterpOp):
@@ -108,6 +128,7 @@ class TestNearestNeighborInterpCase3(TestNearestInterpOp):
         self.input_shape = [1, 1, 128, 64]
         self.out_h = 64
         self.out_w = 128
+        self.align_corners = True
 
 
 class TestNearestNeighborInterpCase4(TestNearestInterpOp):
@@ -117,6 +138,7 @@ class TestNearestNeighborInterpCase4(TestNearestInterpOp):
         self.out_h = 1
         self.out_w = 1
         self.out_size = np.array([2, 2]).astype("int32")
+        self.align_corners = True
 
 
 class TestNearestNeighborInterpCase5(TestNearestInterpOp):
@@ -126,6 +148,7 @@ class TestNearestNeighborInterpCase5(TestNearestInterpOp):
         self.out_h = 12
         self.out_w = 12
         self.out_size = np.array([11, 11]).astype("int32")
+        self.align_corners = True
 
 
 class TestNearestNeighborInterpCase6(TestNearestInterpOp):
@@ -135,6 +158,7 @@ class TestNearestNeighborInterpCase6(TestNearestInterpOp):
         self.out_h = 64
         self.out_w = 128
         self.out_size = np.array([65, 129]).astype("int32")
+        self.align_corners = True
 
 
 class TestNearestNeighborInterpActualShape(TestNearestInterpOp):
@@ -144,6 +168,7 @@ class TestNearestNeighborInterpActualShape(TestNearestInterpOp):
         self.out_h = 64
         self.out_w = 32
         self.out_size = np.array([66, 40]).astype("int32")
+        self.align_corners = True
 
 
 class TestNearestInterpOpUint8(OpTest):
@@ -155,14 +180,16 @@ class TestNearestInterpOpUint8(OpTest):
         input_np = np.random.randint(
             low=0, high=256, size=self.input_shape).astype("uint8")
         output_np = nearest_neighbor_interp_np(input_np, self.out_h, self.out_w,
-                                               self.out_size, self.actual_shape)
+                                               self.out_size, self.actual_shape,
+                                               self.align_corners)
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
         self.attrs = {
             'out_h': self.out_h,
             'out_w': self.out_w,
-            'interp_method': self.interp_method
+            'interp_method': self.interp_method,
+            'align_corners': self.align_corners
         }
         self.outputs = {'Out': output_np}
 
@@ -174,6 +201,7 @@ class TestNearestInterpOpUint8(OpTest):
         self.input_shape = [1, 3, 9, 6]
         self.out_h = 10
         self.out_w = 9
+        self.align_corners = True
 
 
 class TestNearestNeighborInterpCase1Uint8(TestNearestInterpOpUint8):
@@ -182,6 +210,7 @@ class TestNearestNeighborInterpCase1Uint8(TestNearestInterpOpUint8):
         self.input_shape = [2, 3, 128, 64]
         self.out_h = 120
         self.out_w = 50
+        self.align_corners = True
 
 
 class TestNearestNeighborInterpCase2Uint8(TestNearestInterpOpUint8):
@@ -191,6 +220,13 @@ class TestNearestNeighborInterpCase2Uint8(TestNearestInterpOpUint8):
         self.out_h = 5
         self.out_w = 13
         self.out_size = np.array([6, 15]).astype("int32")
+        self.align_corners = True
+
+
+class TestNearestInterpWithoutCorners(TestNearestInterpOp):
+    def init_test_case(self):
+        super(TestNearestInterpWithoutCorners, self).init_test_case()
+        self.align_corners = False
 
 
 if __name__ == "__main__":
    unittest.main()
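Aside (illustration only, not part of the patch): the align_corners flag added above changes how each output pixel maps back to a source index. A small pure-Python sketch of the two mappings, mirroring nearest_neighbor_interp_np but reduced to one dimension:

```python
def src_indices(in_size, out_size, align_corners):
    # align_corners=True: ratio over (n-1) intervals, round to nearest.
    # align_corners=False: ratio over n bins, floor.
    if out_size > 1:
        ratio = ((in_size - 1.0) / (out_size - 1.0)
                 if align_corners else 1.0 * in_size / out_size)
    else:
        ratio = 0.0
    if align_corners:
        return [int(ratio * i + 0.5) for i in range(out_size)]
    return [int(ratio * i) for i in range(out_size)]

print(src_indices(3, 5, True))   # [0, 1, 1, 2, 2]: endpoints map to endpoints
print(src_indices(3, 5, False))  # [0, 0, 1, 1, 2]: uniform bins, no snapping
```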
diff --git a/python/paddle/fluid/transpiler/details/__init__.py b/python/paddle/fluid/transpiler/details/__init__.py
index f33c05ed2f48c2498b98fc486d6ff7471088d77e..82d0d336e523ec48c5ceca3b92ff0963c4499123 100644
--- a/python/paddle/fluid/transpiler/details/__init__.py
+++ b/python/paddle/fluid/transpiler/details/__init__.py
@@ -17,3 +17,4 @@ from __future__ import print_function
 from .program_utils import *
 from .ufind import *
 from .checkport import *
+from .vars_distributed import *
diff --git a/python/paddle/fluid/transpiler/details/vars_distributed.py b/python/paddle/fluid/transpiler/details/vars_distributed.py
new file mode 100644
index 0000000000000000000000000000000000000000..05e7f6e3e706376efc8af870a780d96c45642514
--- /dev/null
+++ b/python/paddle/fluid/transpiler/details/vars_distributed.py
@@ -0,0 +1,269 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import print_function
+from paddle.fluid.framework import Variable
+
+
+class VarStruct(object):
+    """
+    records a subset of the properties of a Variable in Python.
+    """
+
+    def __init__(self, name, shape, dtype, type, lod_level, persistable):
+        self.name = name
+        self.shape = shape
+        self.dtype = dtype
+        self.type = type
+        self.lod_level = lod_level
+        self.persistable = persistable
+
+
+class VarDistributed(object):
+    """
+    a class to record how a var is distributed on parameter servers.
+    it records the relationship between the origin var and its slice vars,
+    and the slice vars' properties such as type/shape/offset/endpoint.
+    """
+
+    def __init__(self,
+                 origin_var,
+                 slice_var,
+                 is_slice=None,
+                 block_id=None,
+                 offset=None,
+                 vtype=None,
+                 endpoint=None):
+        """
+        Args:
+            origin_var(Variable|VarStruct): origin var properties
+            slice_var(Variable|VarStruct): slice var properties
+            is_slice(bool|None): whether the var is sliced; vars are only split when the block size exceeds 8192.
+            block_id(int|None): the block index of the slice var.
+            offset(int|None): if the var is sliced, the number of elements before this slice.
+            vtype(str|None): a tag, such as Optimizer/Param/RemotePrefetch.
+            endpoint(str|None): which parameter server endpoint the slice var is placed on, such as "127.0.0.1:1001"
+        """
+
+        if isinstance(origin_var, Variable):
+            self.origin = self.__create_var_struct(origin_var)
+        else:
+            self.origin = origin_var
+
+        if isinstance(slice_var, Variable):
+            self.slice = self.__create_var_struct(slice_var)
+        else:
+            self.slice = slice_var
+
+        if self.equal(self.origin, self.slice):
+            self.is_slice = False
+            self.block_id = 0
+            self.offset = 0
+        else:
+            self.is_slice = True
+            self.block_id = 0
+            self.offset = 0
+
+        if is_slice is not None:
+            self.is_slice = is_slice
+        if block_id is not None:
+            self.block_id = block_id
+        if offset is not None:
+            self.offset = offset
+
+        self.vtype = vtype
+        self.endpoint = endpoint
+
+    @staticmethod
+    def __create_var_struct(var):
+        return VarStruct(var.name, var.shape, var.dtype, var.type,
+                         var.lod_level, var.persistable)
+
+    @staticmethod
+    def equal(var1, var2):
+        """
+        whether the two vars are equal.
+        Returns:
+            bool: True if equal, else False
+        """
+        assert isinstance(var1, VarStruct) and isinstance(var2, VarStruct)
+
+        return var1.name == var2.name and \
+            var1.type == var2.type and \
+            var1.shape == var2.shape and \
+            var1.dtype == var2.dtype and \
+            var1.lod_level == var2.lod_level and \
+            var1.persistable == var2.persistable
+
+    def __str__(self):
+        origin_var_str = "{name} : fluid.{type}.shape{shape}.astype({dtype})". \
+            format(i="{", e="}", name=self.origin.name, type=self.origin.type,
+                   shape=self.origin.shape, dtype=self.origin.dtype)
+
+        slice_var_str = "{name} : fluid.{type}.shape{shape}.astype({dtype})" \
+                        ".slice({is_slice}).block({block_id}).offset({offset})". \
+            format(i="{", e="}", name=self.slice.name, type=self.slice.type,
+                   shape=self.slice.shape, dtype=self.slice.dtype,
+                   is_slice=self.is_slice, block_id=self.block_id, offset=self.offset)
+
+        return "var owned: {}, origin var: ( {} ), slice var: ( {} ), endpoint: {} ".format(
+            self.vtype, origin_var_str, slice_var_str, self.endpoint)
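Aside (a hypothetical usage sketch, not part of the patch): the snippet below records one slice of a 10x10 parameter split across two parameter servers, using the two classes above. The names, shapes, and the plain-string type value are illustrative placeholders; the real `type` field holds a `core.VarDesc.VarType` value.

```python
# Hypothetical values for illustration only.
origin = VarStruct("w", (10, 10), "float32", "LOD_TENSOR", 0, True)
first_half = VarStruct("w.block0", (5, 10), "float32", "LOD_TENSOR", 0, True)

dist = VarDistributed(
    origin,
    first_half,
    is_slice=True,
    block_id=0,
    offset=0,  # numel before this slice: block0 starts at element 0
    vtype="Param",
    endpoint="127.0.0.1:1001")
print(str(dist))  # one line summarizing origin/slice shapes and placement
```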
+ """ + for dist_var in self.distributed_vars: + if dist_var.slice.name == var_name: + return dist_var + return None + + @staticmethod + def equal(var1, var2): + """ + the two var is equal or not. + Returns: + bool: equal will return True else False + """ + return var1.name == var2.name and \ + var1.type == var2.type and \ + var1.shape == var2.shape and \ + var1.dtype == var2.dtype and \ + var1.lod_level == var2.lod_level and \ + var1.persistable == var2.persistable + + def get_distributed_var_by_origin_and_ep(self, origin_var_name, endpoint): + """ + get distributed var by conditions. + + Args: + origin_var_name(str): + endpoint(str): the parameter endpoint, such as "127.0.0.1:1001" + Returns: + VarDistributed: distributed var. + """ + for dist_var in self.distributed_vars: + if dist_var.origin.name == origin_var_name and dist_var.endpoint == endpoint: + return dist_var + return None + + def get_distributed_vars_by_vtypes(self, vtypes, groupby=False): + """ + get distributed vars by conditions. + + Args: + vtype(str|None): distributed var's vtype, such as "Optimizer", "RemotePrefetch" + groupby(bool|False): group by origin var or not. + + Returns: + list: distributed var list. + dict: distributed var map when groupby=True + """ + vtype_vars = [] + for var in self.distributed_vars: + if var.vtype in vtypes: + vtype_vars.append(var) + if not groupby: + return vtype_vars + + params_map = {} + for var in vtype_vars: + origin_var_name = var.origin.name + + if origin_var_name in params_map.keys(): + optimizers = params_map.get(origin_var_name) + else: + optimizers = [] + optimizers.append(var) + params_map[origin_var_name] = optimizers + return params_map + + def get_distributed_vars_by_ep(self, endpoint, vtype=None): + """ + get distributed vars by conditions. + + Args: + endpoint(str): the parameter server endpoint, such as "127.0.0.1:2001" + vtype(str|None): distributed var's vtype, such as "Optimizer", "RemotePrefetch" + + Returns: + list: distributed var list. + """ + endpoint_vars = [] + for var in self.distributed_vars: + if var.endpoint == endpoint: + endpoint_vars.append(var) + if not vtype: + return endpoint_vars + + vtype_vars = [] + for var in endpoint_vars: + if var.vtype == vtype: + vtype_vars.append(var) + return vtype_vars + + def overview(self): + """ + get the overview string about all params on all parameter servers. + + Returns: + Str: overview string. + + """ + vars_str = [] + for var in self.distributed_vars: + vars_str.append(str(var)) + return "\n".join(vars_str) diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py index e58f34e3750803669149685003ea5858fa775ed7..a3293afbbd7cef8470c808e98ae88a05f2e492f4 100644 --- a/python/paddle/fluid/transpiler/distribute_transpiler.py +++ b/python/paddle/fluid/transpiler/distribute_transpiler.py @@ -30,19 +30,23 @@ Steps to transpile pserver: 5. add listen_and_serv op """ +import sys import math -import numpy as np +from functools import reduce + import collections +import six import logging +import numpy as np + from .ps_dispatcher import RoundRobin, PSDispatcher from .. 
import core, framework, unique_name from ..framework import Program, default_main_program, \ - default_startup_program, Block, \ - Parameter, Variable, grad_var_name -from .details import * + default_startup_program, Block, Parameter, grad_var_name +from .details import wait_server_ready, UnionFind, VarStruct, VarsDistributed +from .details import delete_ops, find_op_by_output_arg from ..distribute_lookup_table import find_distributed_lookup_table -from functools import reduce LOOKUP_TABLE_TYPE = "lookup_table" LOOKUP_TABLE_GRAD_TYPE = "lookup_table_grad" @@ -62,260 +66,6 @@ def log(*args): print(args) -class VarStruct(object): - """ - record part properties of a Variable in python. - """ - - def __init__(self, name, shape, dtype, type, lod_level, persistable): - self.name = name - self.shape = shape - self.dtype = dtype - self.type = type - self.lod_level = lod_level - self.persistable = persistable - - -class VarDistributed(object): - """ - a class to record the var distributed on parameter servers. - the class will record the relationship between origin var and slice var. - the slice var's properties, such as type/shape/offset/endpoint. - """ - - def __init__(self, - origin_var, - slice_var, - is_slice=None, - block_id=None, - offset=None, - vtype=None, - endpoint=None): - """ - Args: - origin_var(Variable|VarStruct): origin var properties - slice_var(Variable|VarStruct): slice var properties - is_slice(bool|None): slice or not, slice_var=True/False and its block size > 8192 are the judgement standard. - block_id(int|None): the number about the slice var. - offset(int|None): if the slice var is sliced, offset is the numel before the var. - vtype(str|None): a tag, such as Optimizer/Param/RemoteProfetch. - endpoint(str|None): which parameter the slice var on, such as "127.0.0.1:1001" - """ - - if isinstance(origin_var, Variable): - self.origin = self.__create_var_struct(origin_var) - else: - self.origin = origin_var - - if isinstance(slice_var, Variable): - self.slice = self.__create_var_struct(slice_var) - else: - self.slice = slice_var - - if self.equal(self.origin, self.slice): - self.is_slice = False - self.block_id = 0 - self.offset = 0 - else: - self.is_slice = True - self.block_id = 0 - self.offset = 0 - - if is_slice is not None: - self.is_slice = is_slice - if block_id is not None: - self.block_id = block_id - if offset is not None: - self.offset = offset - - self.vtype = vtype - self.endpoint = endpoint - - @staticmethod - def __create_var_struct(var): - return VarStruct(var.name, var.shape, var.dtype, var.type, - var.lod_level, var.persistable) - - @staticmethod - def equal(var1, var2): - """ - the two var is equal or not. - Returns: - bool: equal will return True else False - """ - assert isinstance(var1, VarStruct) and isinstance(var2, VarStruct) - - return var1.name == var2.name and \ - var1.type == var2.type and \ - var1.shape == var2.shape and \ - var1.dtype == var2.dtype and \ - var1.lod_level == var2.lod_level and \ - var1.persistable == var2.persistable - - def __str__(self): - origin_var_str = "{name} : fluid.{type}.shape{shape}.astype({dtype})". \ - format(i="{", e="}", name=self.origin.name, type=self.origin.type, - shape=self.origin.shape, dtype=self.origin.dtype) - - slice_var_str = "{name} : fluid.{type}.shape{shape}.astype({dtype})" \ - ".slice({is_slice}).block({block_id}).offset({offset})". 
\ - format(i="{", e="}", name=self.slice.name, type=self.slice.type, - shape=self.slice.shape, dtype=self.slice.dtype, - is_slice=self.is_slice, block_id=self.block_id, offset=self.offset) - - return "var owned: {}, origin var: ( {} ), slice var: ( {} ), endpoint: {} ".format( - self.vtype, origin_var_str, slice_var_str, self.endpoint) - - -class VarsDistributed(object): - """ - a gather about VarDistributed with many methods to find distributed vars. - through the class, we can get overview about the distributed parameters on parameter servers. - this class may centralized and convenient for developer to manage and get variable's distribute. - other module can also use this to find variables such io.py. - """ - - def __init__(self): - self.distributed_vars = [] - - def add_distributed_var(self, - origin_var, - slice_var, - is_slice=None, - block_id=None, - offset=None, - vtype=None, - endpoint=None): - """ - add distributed var in this. - - Args: - origin_var(Variable|VarStruct): origin var properties - slice_var(Variable|VarStruct): slice var properties - is_slice(bool|None): slice or not, slice_var=True/False and its block size > 8192 are the judgement standard. - block_id(int|None): the number about the slice var. - offset(int|None): if the slice var is sliced, offset is the numel before the var. - vtype(str|None): a tag, such as Optimizer/Param/RemoteProfetch. - endpoint(str|None): which parameter the slice var on, such as "127.0.0.1:1001" - Returns: - None - """ - self.distributed_vars.append( - VarDistributed(origin_var, slice_var, is_slice, block_id, offset, - vtype, endpoint)) - - def get_distributed_var_by_slice(self, var_name): - """ - get distributed var by conditions. - - Args: - var_name(str): slice var name, such as "w.traier0.block1" - Returns: - VarDistributed: distributed var. - """ - for dist_var in self.distributed_vars: - if dist_var.slice.name == var_name: - return dist_var - return None - - @staticmethod - def equal(var1, var2): - """ - the two var is equal or not. - Returns: - bool: equal will return True else False - """ - return var1.name == var2.name and \ - var1.type == var2.type and \ - var1.shape == var2.shape and \ - var1.dtype == var2.dtype and \ - var1.lod_level == var2.lod_level and \ - var1.persistable == var2.persistable - - def get_distributed_var_by_origin_and_ep(self, origin_var_name, endpoint): - """ - get distributed var by conditions. - - Args: - origin_var_name(str): - endpoint(str): the parameter endpoint, such as "127.0.0.1:1001" - Returns: - VarDistributed: distributed var. - """ - for dist_var in self.distributed_vars: - if dist_var.origin.name == origin_var_name and dist_var.endpoint == endpoint: - return dist_var - return None - - def get_distributed_vars_by_vtypes(self, vtypes, groupby=False): - """ - get distributed vars by conditions. - - Args: - vtype(str|None): distributed var's vtype, such as "Optimizer", "RemotePrefetch" - groupby(bool|False): group by origin var or not. - - Returns: - list: distributed var list. 
- dict: distributed var map when groupby=True - """ - vtype_vars = [] - for var in self.distributed_vars: - if var.vtype in vtypes: - vtype_vars.append(var) - if not groupby: - return vtype_vars - - params_map = {} - for var in vtype_vars: - origin_var_name = var.origin.name - - if origin_var_name in params_map.keys(): - optimizers = params_map.get(origin_var_name) - else: - optimizers = [] - optimizers.append(var) - params_map[origin_var_name] = optimizers - return params_map - - def get_distributed_vars_by_ep(self, endpoint, vtype=None): - """ - get distributed vars by conditions. - - Args: - endpoint(str): the parameter server endpoint, such as "127.0.0.1:2001" - vtype(str|None): distributed var's vtype, such as "Optimizer", "RemotePrefetch" - - Returns: - list: distributed var list. - """ - endpoint_vars = [] - for var in self.distributed_vars: - if var.endpoint == endpoint: - endpoint_vars.append(var) - if not vtype: - return endpoint_vars - - vtype_vars = [] - for var in endpoint_vars: - if var.vtype == vtype: - vtype_vars.append(var) - return vtype_vars - - def overview(self): - """ - get the overview string about all params on all parameter servers. - - Returns: - Str: overview string. - - """ - vars_str = [] - for var in self.distributed_vars: - vars_str.append(str(var)) - return "\n".join(vars_str) - - class VarBlock: def __init__(self, varname, offset, size): self.varname = varname