diff --git a/cmake/external/gflags.cmake b/cmake/external/gflags.cmake index 4e98e4bf889bc13938931be7f6cb204c83250a5c..95ca16f57f2704eaded85aa5f5c0546310fba3a7 100644 --- a/cmake/external/gflags.cmake +++ b/cmake/external/gflags.cmake @@ -63,6 +63,15 @@ ADD_DEPENDENCIES(gflags extern_gflags) LIST(APPEND external_project_dependencies gflags) +# On Windows (including MinGW), the Shlwapi library is used by gflags if available. +if (WIN32) + include(CheckIncludeFileCXX) + check_include_file_cxx("shlwapi.h" HAVE_SHLWAPI) + if (HAVE_SHLWAPI) + set_property(GLOBAL PROPERTY OS_DEPENDENCY_MODULES shlwapi.lib) + endif(HAVE_SHLWAPI) +endif (WIN32) + IF(WITH_C_API) INSTALL(DIRECTORY ${GFLAGS_INCLUDE_DIR} DESTINATION third_party/gflags) IF(ANDROID) diff --git a/cmake/external/ngraph.cmake b/cmake/external/ngraph.cmake index 799d9c309f329f5f10364d794a7964ec3b02eeb4..14af98b2d74d4aa955aac27727e05567788a84c9 100644 --- a/cmake/external/ngraph.cmake +++ b/cmake/external/ngraph.cmake @@ -37,14 +37,18 @@ INCLUDE(GNUInstallDirs) INCLUDE(ExternalProject) SET(NGRAPH_PROJECT "extern_ngraph") -SET(NGRAPH_GIT_TAG "08851c2c45fcf9fa9c74871dd3dbc3fe38f37cc9") +SET(NGRAPH_GIT_TAG "20bd8bbc79ae3a81c57313846a2be7313e5d1dab") SET(NGRAPH_SOURCES_DIR ${THIRD_PARTY_PATH}/ngraph) SET(NGRAPH_INSTALL_DIR ${THIRD_PARTY_PATH}/install/ngraph) SET(NGRAPH_INC_DIR ${NGRAPH_INSTALL_DIR}/include) SET(NGRAPH_LIB_DIR ${NGRAPH_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}) SET(NGRAPH_SHARED_LIB_NAME libngraph.so) SET(NGRAPH_CPU_LIB_NAME libcpu_backend.so) -SET(NGRAPH_TBB_LIB_NAME libtbb.so.2) +if(CMAKE_BUILD_TYPE STREQUAL "Debug") + SET(NGRAPH_TBB_LIB_NAME libtbb_debug.so.2) +else() + SET(NGRAPH_TBB_LIB_NAME libtbb.so.2) +endif() SET(NGRAPH_GIT_REPO "https://github.com/NervanaSystems/ngraph.git") SET(NGRAPH_SHARED_LIB ${NGRAPH_LIB_DIR}/${NGRAPH_SHARED_LIB_NAME}) SET(NGRAPH_CPU_LIB ${NGRAPH_LIB_DIR}/${NGRAPH_CPU_LIB_NAME}) @@ -66,16 +70,7 @@ ExternalProject_Add( CMAKE_ARGS -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} CMAKE_ARGS -DMKLDNN_INCLUDE_DIR=${MKLDNN_INC_DIR} CMAKE_ARGS -DMKLDNN_LIB_DIR=${MKLDNN_INSTALL_DIR}/lib -) - -# Workaround for nGraph expecting mklml to be in mkldnn install directory. 
-ExternalProject_Add_Step( - ${NGRAPH_PROJECT} - PrepareMKL - COMMAND ${CMAKE_COMMAND} -E create_symlink ${MKLML_LIB} ${MKLDNN_INSTALL_DIR}/lib/libmklml_intel.so - COMMAND ${CMAKE_COMMAND} -E create_symlink ${MKLML_IOMP_LIB} ${MKLDNN_INSTALL_DIR}/lib/libiomp5.so - DEPENDEES download - DEPENDERS configure + CMAKE_ARGS -DMKLML_LIB_DIR=${MKLML_INSTALL_DIR}/lib ) add_dependencies(ngraph ${NGRAPH_PROJECT}) diff --git a/cmake/generic.cmake b/cmake/generic.cmake index 05293b8b06b55bb0b83a30c7eb059efe0b61e57e..63820fd4f0ad1718beda71048e4333596de80dbe 100644 --- a/cmake/generic.cmake +++ b/cmake/generic.cmake @@ -359,6 +359,8 @@ function(cc_binary TARGET_NAME) add_dependencies(${TARGET_NAME} ${cc_binary_DEPS}) common_link(${TARGET_NAME}) endif() + get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES) + target_link_libraries(${TARGET_NAME} ${os_dependency_modules}) endfunction(cc_binary) function(cc_test TARGET_NAME) @@ -367,18 +369,15 @@ function(cc_test TARGET_NAME) set(oneValueArgs "") set(multiValueArgs SRCS DEPS ARGS) cmake_parse_arguments(cc_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + add_executable(${TARGET_NAME} ${cc_test_SRCS}) if(WIN32) - list(APPEND win32_deps shlwapi) if("${cc_test_DEPS};" MATCHES "python;") list(REMOVE_ITEM cc_test_DEPS python) - list(APPEND win32_deps ${PYTHON_LIBRARIES}) + target_link_libraries(${TARGET_NAME} ${PYTHON_LIBRARIES}) endif() endif(WIN32) - add_executable(${TARGET_NAME} ${cc_test_SRCS}) - target_link_libraries(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog) - if(WIN32) - target_link_libraries(${TARGET_NAME} ${win32_deps}) - endif(WIN32) + get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES) + target_link_libraries(${TARGET_NAME} ${cc_test_DEPS} ${os_dependency_modules} paddle_gtest_main lod_tensor memory gtest gflags glog) add_dependencies(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog) common_link(${TARGET_NAME}) add_test(NAME ${TARGET_NAME} @@ -451,7 +450,8 @@ function(nv_test TARGET_NAME) set(multiValueArgs SRCS DEPS) cmake_parse_arguments(nv_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) cuda_add_executable(${TARGET_NAME} ${nv_test_SRCS}) - target_link_libraries(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog) + get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES) + target_link_libraries(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog ${os_dependency_modules}) add_dependencies(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog) common_link(${TARGET_NAME}) add_test(${TARGET_NAME} ${TARGET_NAME}) @@ -538,7 +538,8 @@ function(hip_test TARGET_NAME) endif() add_executable(${TARGET_NAME} ${_cmake_options} ${_generated_files} ${_sources}) set_target_properties(${TARGET_NAME} PROPERTIES LINKER_LANGUAGE HIP) - target_link_libraries(${TARGET_NAME} ${hip_test_DEPS} paddle_gtest_main memory gtest gflags) + get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES) + target_link_libraries(${TARGET_NAME} ${hip_test_DEPS} paddle_gtest_main memory gtest gflags ${os_dependency_modules}) add_dependencies(${TARGET_NAME} ${hip_test_DEPS} paddle_gtest_main memory gtest gflags) common_link(${TARGET_NAME}) add_test(${TARGET_NAME} ${TARGET_NAME}) diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index 
dff760174388b0f3441d9778dd094742c8338942..340bad0b9a2515e80a4149770ab092779b0f8e5b 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -88,6 +88,7 @@ paddle.fluid.layers.pool3d ArgSpec(args=['input', 'pool_size', 'pool_type', 'poo paddle.fluid.layers.adaptive_pool2d ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None)) paddle.fluid.layers.adaptive_pool3d ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None)) paddle.fluid.layers.batch_norm ArgSpec(args=['input', 'act', 'is_test', 'momentum', 'epsilon', 'param_attr', 'bias_attr', 'data_layout', 'in_place', 'name', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var', 'fuse_with_relu', 'use_global_stats'], varargs=None, keywords=None, defaults=(None, False, 0.9, 1e-05, None, None, 'NCHW', False, None, None, None, False, False, False)) +paddle.fluid.layers.data_norm ArgSpec(args=['input', 'act', 'epsilon', 'param_attr', 'data_layout', 'in_place', 'use_mkldnn', 'name', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var'], varargs=None, keywords=None, defaults=(None, 1e-05, None, 'NCHW', False, False, None, None, None, False)) paddle.fluid.layers.beam_search_decode ArgSpec(args=['ids', 'scores', 'beam_size', 'end_id', 'name'], varargs=None, keywords=None, defaults=(None,)) paddle.fluid.layers.conv2d_transpose ArgSpec(args=['input', 'num_filters', 'output_size', 'filter_size', 'padding', 'stride', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, None, 0, 1, 1, None, None, None, True, None, None)) paddle.fluid.layers.conv3d_transpose ArgSpec(args=['input', 'num_filters', 'output_size', 'filter_size', 'padding', 'stride', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, None, 0, 1, 1, None, None, None, True, None, None)) @@ -210,6 +211,7 @@ paddle.fluid.layers.get_tensor_from_selected_rows ArgSpec(args=['x', 'name'], va paddle.fluid.layers.lstm ArgSpec(args=['input', 'init_h', 'init_c', 'max_len', 'hidden_size', 'num_layers', 'dropout_prob', 'is_bidirec', 'is_test', 'name', 'default_initializer', 'seed'], varargs=None, keywords=None, defaults=(0.0, False, False, None, None, -1)) paddle.fluid.layers.py_func ArgSpec(args=['func', 'x', 'out', 'backward_func', 'skip_vars_in_backward_input'], varargs=None, keywords=None, defaults=(None, None)) paddle.fluid.layers.psroi_pool ArgSpec(args=['input', 'rois', 'output_channels', 'spatial_scale', 'pooled_height', 'pooled_width', 'name'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.teacher_student_sigmoid_loss ArgSpec(args=['input', 'label', 'soft_max_up_bound', 'soft_max_lower_bound'], varargs=None, keywords=None, defaults=(15.0, -15.0)) paddle.fluid.layers.huber_loss ArgSpec(args=['input', 'label', 'delta'], varargs=None, keywords=None, defaults=None) paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True)) paddle.fluid.layers.open_files ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None)) @@ -406,28 +408,50 @@ paddle.fluid.nets.glu 
ArgSpec(args=['input', 'dim'], varargs=None, keywords=None paddle.fluid.nets.scaled_dot_product_attention ArgSpec(args=['queries', 'keys', 'values', 'num_heads', 'dropout_rate'], varargs=None, keywords=None, defaults=(1, 0.0)) paddle.fluid.nets.img_conv_group ArgSpec(args=['input', 'conv_num_filter', 'pool_size', 'conv_padding', 'conv_filter_size', 'conv_act', 'param_attr', 'conv_with_batchnorm', 'conv_batchnorm_drop_rate', 'pool_stride', 'pool_type', 'use_cudnn'], varargs=None, keywords=None, defaults=(1, 3, None, None, False, 0.0, 1, 'max', True)) paddle.fluid.optimizer.SGDOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'regularization', 'name'], varargs=None, keywords=None, defaults=(None, None)) +paddle.fluid.optimizer.SGDOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None) +paddle.fluid.optimizer.SGDOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)) paddle.fluid.optimizer.SGDOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) paddle.fluid.optimizer.MomentumOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'momentum', 'use_nesterov', 'regularization', 'name'], varargs=None, keywords=None, defaults=(False, None, None)) +paddle.fluid.optimizer.MomentumOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None) +paddle.fluid.optimizer.MomentumOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)) paddle.fluid.optimizer.MomentumOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) paddle.fluid.optimizer.AdagradOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'epsilon', 'regularization', 'name'], varargs=None, keywords=None, defaults=(1e-06, None, None)) +paddle.fluid.optimizer.AdagradOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None) +paddle.fluid.optimizer.AdagradOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)) paddle.fluid.optimizer.AdagradOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) paddle.fluid.optimizer.AdamOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'beta1', 'beta2', 'epsilon', 'regularization', 'name', 'lazy_mode'], varargs=None, keywords=None, defaults=(0.001, 0.9, 0.999, 1e-08, None, None, False)) +paddle.fluid.optimizer.AdamOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None) +paddle.fluid.optimizer.AdamOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)) paddle.fluid.optimizer.AdamOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) paddle.fluid.optimizer.AdamaxOptimizer.__init__ 
ArgSpec(args=['self', 'learning_rate', 'beta1', 'beta2', 'epsilon', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.001, 0.9, 0.999, 1e-08, None, None)) +paddle.fluid.optimizer.AdamaxOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None) +paddle.fluid.optimizer.AdamaxOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)) paddle.fluid.optimizer.AdamaxOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) paddle.fluid.optimizer.DecayedAdagradOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'decay', 'epsilon', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.95, 1e-06, None, None)) +paddle.fluid.optimizer.DecayedAdagradOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None) +paddle.fluid.optimizer.DecayedAdagradOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)) paddle.fluid.optimizer.DecayedAdagradOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) paddle.fluid.optimizer.FtrlOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'l1', 'l2', 'lr_power', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.0, 0.0, -0.5, None, None)) +paddle.fluid.optimizer.FtrlOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None) +paddle.fluid.optimizer.FtrlOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)) paddle.fluid.optimizer.FtrlOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) paddle.fluid.optimizer.RMSPropOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'rho', 'epsilon', 'momentum', 'centered', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.95, 1e-06, 0.0, False, None, None)) +paddle.fluid.optimizer.RMSPropOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None) +paddle.fluid.optimizer.RMSPropOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)) paddle.fluid.optimizer.RMSPropOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) paddle.fluid.optimizer.AdadeltaOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'epsilon', 'rho', 'regularization', 'name'], varargs=None, keywords=None, defaults=(1e-06, 0.95, None, None)) +paddle.fluid.optimizer.AdadeltaOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None) +paddle.fluid.optimizer.AdadeltaOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)) 
paddle.fluid.optimizer.AdadeltaOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) paddle.fluid.optimizer.ModelAverage.__init__ ArgSpec(args=['self', 'average_window_rate', 'min_average_window', 'max_average_window', 'regularization', 'name'], varargs=None, keywords=None, defaults=(10000, 10000, None, None)) paddle.fluid.optimizer.ModelAverage.apply ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None) +paddle.fluid.optimizer.ModelAverage.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None) +paddle.fluid.optimizer.ModelAverage.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)) paddle.fluid.optimizer.ModelAverage.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) paddle.fluid.optimizer.ModelAverage.restore ArgSpec(args=['self', 'executor'], varargs=None, keywords=None, defaults=None) paddle.fluid.optimizer.LarsMomentumOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'momentum', 'lars_coeff', 'lars_weight_decay', 'regularization', 'name'], varargs=None, keywords=None, defaults=(0.001, 0.0005, None, None)) +paddle.fluid.optimizer.LarsMomentumOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None) +paddle.fluid.optimizer.LarsMomentumOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)) paddle.fluid.optimizer.LarsMomentumOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) paddle.fluid.backward.append_backward ArgSpec(args=['loss', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None)) paddle.fluid.regularizer.L1DecayRegularizer.__init__ ArgSpec(args=['self', 'regularization_coeff'], varargs=None, keywords=None, defaults=(0.0,)) diff --git a/paddle/fluid/framework/details/multi_devices_graph_pass.cc b/paddle/fluid/framework/details/multi_devices_graph_pass.cc index d91993bd4f8c04539cd189a4145350498911c513..75f922d2cca6855a67be7284ae407e549a1a1afb 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_pass.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_pass.cc @@ -226,7 +226,7 @@ std::unique_ptr MultiDevSSAGraphBuilderBase::ApplyImpl( * Only variables should be the leaves of graph. 
*/ AddOutputToLeafOps(&result); - result.Erase(kGraphOps); + result.Erase(kGraphOps); return graph; } diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt index 6e6db3d3efbc9fbb17e7ee45402dd4cb7f4f7a34..42fb6a1aa5375bfbb266454cfbc7f0fb756f779c 100644 --- a/paddle/fluid/framework/ir/CMakeLists.txt +++ b/paddle/fluid/framework/ir/CMakeLists.txt @@ -42,11 +42,23 @@ pass_library(seq_concat_fc_fuse_pass inference) pass_library(multi_batch_merge_pass base) pass_library(conv_bn_fuse_pass inference) pass_library(seqconv_eltadd_relu_fuse_pass inference) +pass_library(seqpool_concat_fuse_pass inference) pass_library(is_test_pass base) pass_library(conv_elementwise_add_act_fuse_pass inference) pass_library(conv_elementwise_add2_act_fuse_pass inference) pass_library(conv_elementwise_add_fuse_pass inference) pass_library(conv_affine_channel_fuse_pass inference) +pass_library(transpose_flatten_concat_fuse_pass inference) + +# There may be many transpose-flatten structures in a model, and the output of +# these structures will be used as inputs to the concat Op. This pattern will +# be detected by our pass. The index here represents the number of structures in the +# pattern. We use index 3 ~ 6, because these quantities of structures are +# common in the models. +foreach (index RANGE 3 6) + file(APPEND ${pass_file} "USE_PASS(transpose_flatten${index}_concat_fuse_pass);\n") +endforeach() + if(WITH_MKLDNN) pass_library(mkldnn_placement_pass base) pass_library(depthwise_conv_mkldnn_pass base) @@ -68,6 +80,7 @@ cc_test(graph_helper_test SRCS graph_helper_test.cc DEPS graph graph_helper op_r cc_test(graph_to_program_pass_test SRCS graph_to_program_pass_test.cc DEPS graph_to_program_pass) cc_test(test_graph_pattern_detector SRCS graph_pattern_detector_tester.cc DEPS graph_pattern_detector) cc_test(test_fc_fuse_pass SRCS fc_fuse_pass_tester.cc DEPS fc_fuse_pass framework_proto) +cc_test(test_seqpool_concat_fuse_pass SRCS seqpool_concat_fuse_pass_tester.cc DEPS seqpool_concat_fuse_pass framework_proto) cc_test(test_is_test_pass SRCS is_test_pass_tester.cc DEPS is_test_pass) if (WITH_MKLDNN) cc_test(test_depthwise_conv_mkldnn_pass SRCS depthwise_conv_mkldnn_pass_tester.cc DEPS depthwise_conv_mkldnn_pass) diff --git a/paddle/fluid/framework/ir/graph.h b/paddle/fluid/framework/ir/graph.h index 47fcf96a3f92b1f915e5254fff36feb8b2870730..8bb3c27bdd32d07d58913db043569f6a3bf69aeb 100644 --- a/paddle/fluid/framework/ir/graph.h +++ b/paddle/fluid/framework/ir/graph.h @@ -109,7 +109,6 @@ class Graph { attr_dels_[attr_name] = []() {}; } - template void Erase(const std::string &attr_name) { PADDLE_ENFORCE(attrs_.count(attr_name) != 0, "%s not set in the graph", attr_name); diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc index c513fe2dd8f5733c87802f6fa9980ad885dfd865..6282ced1e47329915bb3626b410e55ad8251071d 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.cc +++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc @@ -1306,6 +1306,69 @@ PDNode *patterns::ConvAffineChannel::operator()( return ac_out_var; } +// a -> transpose_op(1) -> transpose_out_a -> flatten_op(1) -> flatten_out_a +// b -> transpose_op(2) -> transpose_out_b -> flatten_op(2) -> flatten_out_b +// ... +// z -> transpose_op(n) -> transpose_out_z -> flatten_op(n) -> flatten_out_z +// flatten_out_a -> concat_op flatten_out_b -> concat_op ... 
flatten_out_z -> +// concat_op +PDNode *patterns::TransposeFlattenConcat::operator()( + std::vector conv_in, int times) { + // The times represents the repeat times of the + // {trans, trans_out, flatten, flatten_out} + const int kNumFields = 4; + const int kTransOutOffset = 1; + const int kFlattenOffset = 2; + const int kFlattenOutOffset = 3; + + std::vector nodes; + + for (int i = 0; i < times; i++) { + nodes.push_back( + pattern->NewNode(GetNodeName("transpose" + std::to_string(i))) + ->assert_is_op("transpose2")); + nodes.push_back( + pattern->NewNode(GetNodeName("transpose_out" + std::to_string(i))) + ->assert_is_op_output("transpose2") + ->assert_is_op_input("flatten2", "X") + ->AsIntermediate()); + nodes.push_back(pattern->NewNode(GetNodeName("flatten" + std::to_string(i))) + ->assert_is_op("flatten2")); + + nodes.push_back( + pattern->NewNode(GetNodeName("flatten_out" + std::to_string(i))) + ->assert_is_op_output("flatten2") + ->assert_is_op_nth_input("concat", "X", i) + ->AsIntermediate()); + } + + auto concat_op = pattern->NewNode(GetNodeName("concat")) + ->assert_is_op("concat") + ->assert_op_has_n_inputs("concat", times); + auto concat_out = pattern->NewNode(GetNodeName("concat_out")) + ->assert_is_op_output("concat") + ->AsOutput(); + + std::vector flatten_outs; + for (int i = 0; i < times; i++) { + conv_in[i]->AsInput(); + // trans + nodes[i * kNumFields]->LinksFrom({conv_in[i]}); + // trans_out + nodes[i * kNumFields + kTransOutOffset]->LinksFrom({nodes[i * kNumFields]}); + // flatten + nodes[i * kNumFields + kFlattenOffset]->LinksFrom( + {nodes[i * kNumFields + kTransOutOffset]}); + // flatten_out + nodes[i * kNumFields + kFlattenOutOffset]->LinksFrom( + {nodes[i * kNumFields + kFlattenOffset]}); + flatten_outs.push_back(nodes[i * kNumFields + kFlattenOutOffset]); + } + + concat_op->LinksFrom(flatten_outs).LinksTo({concat_out}); + return concat_out; +} + } // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.h b/paddle/fluid/framework/ir/graph_pattern_detector.h index 61a53003449710da2a52c90197c9f2f3ac56c7bb..c8be586f546dc604375401b13a801841efbf08d2 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.h +++ b/paddle/fluid/framework/ir/graph_pattern_detector.h @@ -766,6 +766,21 @@ struct ConvAffineChannel : public PatternBase { PATTERN_DECL_NODE(ac_out); // Out }; +struct TransposeFlattenConcat : public PatternBase { + TransposeFlattenConcat(PDPattern* pattern, const std::string& name_scope) + : PatternBase(pattern, name_scope, "transpose_flatten_concat") {} + + PDNode* operator()(std::vector conv_inputs, int times); + + std::string GetNodeName(const std::string& op_type) { + return PDNodeName(name_scope_, repr_, id_, op_type); + } + + PDNode* GetPDNode(const std::string& op_type) { + return pattern->RetrieveNode(GetNodeName(op_type)); + } +}; + } // namespace patterns // Link two ir::Nodes from each other. diff --git a/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.cc b/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.cc new file mode 100644 index 0000000000000000000000000000000000000000..fa75e3b4aa7feb7ff856dc26338d089f90efa2e2 --- /dev/null +++ b/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.cc @@ -0,0 +1,214 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ + +#include "paddle/fluid/framework/ir/seqpool_concat_fuse_pass.h" +#include +#include +#include "paddle/fluid/framework/lod_tensor.h" + +#define MAX_CONCAT_INPUTS 200 + +namespace paddle { +namespace framework { +namespace ir { + +PDNode* BuildSeqPoolConcatPattern(PDPattern* pattern, + const std::string& name_scope, + int num_inputs) { + auto is_concat_op_with_inputs = [](Node* x, int num) -> bool { + return x && x->IsOp() && x->Op()->Type() == "concat" && + x->Op()->Input("X").size() == static_cast(num); + }; + + auto is_nth_input_var_of_concat = [=](Node* x, int idx) -> bool { + return x && x->IsVar() && VarLinksToOp(x, "concat") && + x->outputs.size() == 1 && IsNthInput(x, x->outputs[0], "X", idx) && + is_concat_op_with_inputs(x->outputs[0], num_inputs); + }; + + auto is_seqpool_op_with_pootype_of_nth_input_of_concat = [=]( + Node* x, const std::string& type, int idx) -> bool { + bool this_is_seqpool_op = + x && x->IsOp() && x->Op()->Type() == "sequence_pool" && + x->Op()->HasAttr("pooltype") && + boost::get(x->Op()->GetAttr("pooltype")) == type && + x->outputs.size() == 2; // seqpool should only have 2 outputs + bool satisfied_all = this_is_seqpool_op; + if (this_is_seqpool_op) { + // Only one output of seqpool_op is nth_input_var of concat, + // the other one should be unused empty var. + if (is_nth_input_var_of_concat(x->outputs[0], idx)) { + satisfied_all = satisfied_all && x->outputs[1]->IsVar() && + x->outputs[1]->outputs.empty(); + } else { + satisfied_all = + satisfied_all && is_nth_input_var_of_concat(x->outputs[1], idx) && + x->outputs[0]->IsVar() && x->outputs[0]->outputs.size() == 0; + } + } + return satisfied_all; + }; + + auto* concat_op = pattern->NewNode( + [=](Node* x) { return is_concat_op_with_inputs(x, num_inputs); }, + name_scope + "/concat_op"); + concat_op->assert_op_attr("axis", 1); + + auto* concat_out_var = pattern->NewNode( + [=](Node* x) { + return x && x->IsVar() && VarLinksFromOp(x, "concat") && + x->inputs.size() == 1 && + is_concat_op_with_inputs(x->inputs[0], num_inputs); + }, + name_scope + "/concat_out_var"); + concat_out_var->assert_is_only_output_of_op("concat"); + + std::vector seqpool_ops_input_var(num_inputs); + std::vector seqpool_ops_output_var(num_inputs); + std::vector seqpool_ops_output_unused_var(num_inputs); + std::vector seqpool_ops(num_inputs); + + for (int i = 0; i < num_inputs; ++i) { + seqpool_ops_output_var[i] = pattern->NewNode( + [=](Node* x) { + return x && x->IsVar() && is_nth_input_var_of_concat(x, i) && + x->inputs.size() == 1 && + is_seqpool_op_with_pootype_of_nth_input_of_concat(x->inputs[0], + "SUM", i); + }, + name_scope + "/sequence_pool_out_" + std::to_string(i)); + + seqpool_ops_output_unused_var[i] = pattern->NewNode( + [=](Node* x) { + return x && x->IsVar() && x->inputs.size() == 1 && + x->outputs.size() == 0 && + is_seqpool_op_with_pootype_of_nth_input_of_concat(x->inputs[0], + "SUM", i); + }, + name_scope + "/sequence_pool_unused_out_" + std::to_string(i)); + + seqpool_ops[i] = pattern->NewNode( + [=](Node* x) { + return x && x->IsOp() && + is_seqpool_op_with_pootype_of_nth_input_of_concat(x, 
"SUM", i); + }, + name_scope + "/sequence_pool_op_" + std::to_string(i)); + + seqpool_ops_input_var[i] = pattern->NewNode( + [=](Node* x) { + bool basic = x && x->IsVar() && x->outputs.size() >= 1; + bool next_is_fine = false; + for (auto* o : x->outputs) { + if (is_seqpool_op_with_pootype_of_nth_input_of_concat(o, "SUM", + i)) { + next_is_fine = true; + break; + } + } + return basic && next_is_fine; + }, + name_scope + "/sequence_pool_in_" + std::to_string(i)); + + // Links + seqpool_ops[i] + ->LinksFrom({seqpool_ops_input_var[i]}) + .LinksTo({seqpool_ops_output_var[i], seqpool_ops_output_unused_var[i]}); + } + concat_op->LinksFrom(seqpool_ops_output_var).LinksTo({concat_out_var}); + return concat_out_var; +} + +int BuildFusion(Graph* graph, const std::string& name_scope, int num_inputs) { + GraphPatternDetector gpd; + auto* pattern = gpd.mutable_pattern(); + BuildSeqPoolConcatPattern(pattern, name_scope, num_inputs); + + auto retrieve_node = [](const std::string& name, + const GraphPatternDetector::subgraph_t& subgraph, + const PDPattern& pat) -> Node* { + PADDLE_ENFORCE(subgraph.count(pat.RetrieveNode(name)), + "pattern has no Node called %s", name.c_str()); + Node* p = subgraph.at(pat.RetrieveNode(name)); + PADDLE_ENFORCE_NOT_NULL(p, "subgraph has no node %s", name.c_str()); + return p; + }; + + int fusion_count{0}; + auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph, + Graph* g) { + VLOG(4) << "handle SeqPool Concat fuse"; + std::vector input_names(num_inputs); + std::vector input_vars(num_inputs); + auto& fused_pattern = gpd.pattern(); + for (int i = 0; i < num_inputs; ++i) { + input_vars[i] = + retrieve_node(name_scope + "/sequence_pool_in_" + std::to_string(i), + subgraph, fused_pattern); + input_names[i] = input_vars[i]->Name(); + } + auto* concat_op = + retrieve_node(name_scope + "/concat_op", subgraph, fused_pattern); + auto* concat_out_var = + retrieve_node(name_scope + "/concat_out_var", subgraph, fused_pattern); + auto* seqpool_op0 = retrieve_node(name_scope + "/sequence_pool_op_0", + subgraph, fused_pattern); + + // Create New OpDesc + OpDesc op_desc; + op_desc.SetType("fusion_seqpool_concat"); + op_desc.SetInput("X", input_names); + op_desc.SetAttr("pooltype", seqpool_op0->Op()->GetAttr("pooltype")); + op_desc.SetAttr("axis", concat_op->Op()->GetAttr("axis")); + op_desc.SetOutput("Out", {concat_out_var->Name()}); + auto* op = graph->CreateOpNode(&op_desc); + for (size_t i = 0; i < input_vars.size(); ++i) { + IR_NODE_LINK_TO(input_vars[i], op); + } + IR_NODE_LINK_TO(op, concat_out_var); + + std::unordered_set marked_nodes; + for (auto& item : subgraph) { + marked_nodes.insert(item.second); + } + for (size_t i = 0; i < input_vars.size(); ++i) { + marked_nodes.erase(input_vars[i]); + } + marked_nodes.erase(concat_out_var); + GraphSafeRemoveNodes(graph, marked_nodes); + ++fusion_count; + }; + + gpd(graph, handler); + return fusion_count; +} + +std::unique_ptr SeqPoolConcatFusePass::ApplyImpl( + std::unique_ptr graph) const { + FusePassBase::Init(name_scope_, graph.get()); + int fusion_count = 0; + for (int i = MAX_CONCAT_INPUTS; i > 0; --i) { + fusion_count += + BuildFusion(graph.get(), name_scope_ + "/" + std::to_string(i), i); + } + AddStatis(fusion_count); + + return graph; +} + +} // namespace ir +} // namespace framework +} // namespace paddle + +REGISTER_PASS(seqpool_concat_fuse_pass, + paddle::framework::ir::SeqPoolConcatFusePass); diff --git a/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.h b/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.h 
new file mode 100644 index 0000000000000000000000000000000000000000..ba2154045e62c687173565c5ad30ea4d45d3c8f4 --- /dev/null +++ b/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.h @@ -0,0 +1,52 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ + +#pragma once + +#include +#include "paddle/fluid/framework/ir/fuse_pass_base.h" +#include "paddle/fluid/framework/ir/graph.h" +#include "paddle/fluid/framework/ir/graph_pattern_detector.h" + +namespace paddle { +namespace framework { +namespace ir { + +/** + * Fuse SequencePool(with sum pooltype yet) and Concat; + * + * Before fuse: + * | | | + * seq_pool, seq_pool, ... seq_pool + * \ | ... / + * concat + * | + * After fuse: + * \ | / + * FusionSeqPoolConcat + * | + */ +class SeqPoolConcatFusePass : public FusePassBase { + public: + virtual ~SeqPoolConcatFusePass() {} + + protected: + std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + + const std::string name_scope_{"seqpool_concat_fuse"}; +}; + +} // namespace ir +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/ir/seqpool_concat_fuse_pass_tester.cc b/paddle/fluid/framework/ir/seqpool_concat_fuse_pass_tester.cc new file mode 100644 index 0000000000000000000000000000000000000000..456a03192cc4e4a9d0dbe2dcb649b6c1b4d9cd5a --- /dev/null +++ b/paddle/fluid/framework/ir/seqpool_concat_fuse_pass_tester.cc @@ -0,0 +1,198 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
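+
+// Tests for seqpool_concat_fuse_pass: each case below builds a small
+// ProgramDesc in which sequence_pool ops (pooltype "SUM") feed a concat,
+// applies the pass, and checks the node-count delta and that exactly one
+// fusion_seqpool_concat op remains.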
+ +#include "paddle/fluid/framework/ir/seqpool_concat_fuse_pass.h" +#include +#include "paddle/fluid/framework/op_proto_maker.h" + +namespace paddle { +namespace framework { +namespace ir { + +void SetOp(ProgramDesc* prog, const std::string& type, + const std::vector& inputs, + const std::vector& outputs) { + auto* op = prog->MutableBlock(0)->AppendOp(); + op->SetType(type); + if (type == "sequence_pool") { + op->SetInput("X", {inputs[0]}); + std::string pooltype = "SUM"; + op->SetAttr("pooltype", pooltype); + op->SetOutput("MaxIndex", {outputs[0]}); + op->SetOutput("Out", {outputs[1]}); + } else if (type == "concat") { + op->SetInput("X", inputs); + op->SetAttr("axis", 1); + op->SetOutput("Out", {outputs[0]}); + } else { + op->SetInput("X", inputs); + op->SetOutput("Out", outputs); + } + op->SetAttr(OpProtoAndCheckerMaker::OpRoleAttrName(), + static_cast(OpRole::kForward)); +} + +int CountOpType(const ir::Graph* graph, + const std::string& op_type = "fusion_seqpool_concat") { + int count = 0; + for (auto* node : graph->Nodes()) { + if (node->IsOp() && node->Op()->Type() == op_type) { + ++count; + } + } + return count; +} + +std::unique_ptr GetNumNodesOfBeforeAfter( + std::unique_ptr graph, int* before, int* after, + const std::string& pass_type = "seqpool_concat_fuse_pass") { + auto pass = PassRegistry::Instance().Get(pass_type); + *before = graph->Nodes().size(); + graph = pass->Apply(std::move(graph)); + *after = graph->Nodes().size(); + return graph; +} + +/* + * Before fuse: + * a b c + * | | | + * op1 op2 op3 + * / \ / \ / \ + * d e f g h i + * \ | / + * concat + * | + * j + * Type of op1, op2 and op3 are sequence_pool, with "SUM" pooltype attr + * + * After fuse: + * a b c + * \ | / + * fusion_seqpool_concat + * | + * j + */ +TEST(SeqPoolConcatFusePass, basic) { + ProgramDesc prog; + for (auto& v : std::vector( + {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"})) { + auto* var = prog.MutableBlock(0)->Var(v); + var->SetType(proto::VarType::LOD_TENSOR); + } + + SetOp(&prog, "sequence_pool", std::vector({"a"}), + std::vector({"d", "e"})); + SetOp(&prog, "sequence_pool", std::vector({"b"}), + std::vector({"f", "g"})); + SetOp(&prog, "sequence_pool", std::vector({"c"}), + std::vector({"h", "i"})); + SetOp(&prog, "concat", std::vector({"e", "g", "i"}), + std::vector({"j"})); + + std::unique_ptr graph(new ir::Graph(prog)); + int before, after; + graph = GetNumNodesOfBeforeAfter(std::move(graph), &before, &after); + // Remove 10 Nodes: op1, op2, op3, d, e, f, g, h, i, concat_op + // Add 1 Node: fusion_seqpool_concat + EXPECT_EQ(after, before - 9); + EXPECT_EQ(CountOpType(graph.get()), 1); +} + +/* + * Before fuse: + * a b + * | / \ + * op1 op2 op3 + * / \ / \ \ + * c d e f g + * \ / + * concat + * | + * h + * Type of op1 and op2 are sequence_pool, with "SUM" pooltype attr + * + * After fuse: + * a b + * \ / \ + * fusion_seqpool_concat op3 + * | | + * h g + */ +TEST(SeqPoolConcatFusePass, advanced) { + ProgramDesc prog; + for (auto& v : + std::vector({"a", "b", "c", "d", "e", "f", "g", "h"})) { + auto* var = prog.MutableBlock(0)->Var(v); + var->SetType(proto::VarType::LOD_TENSOR); + } + + SetOp(&prog, "sequence_pool", std::vector({"a"}), + std::vector({"c", "d"})); + SetOp(&prog, "sequence_pool", std::vector({"b"}), + std::vector({"e", "f"})); + SetOp(&prog, "op3", std::vector({"b"}), + std::vector({"g"})); + SetOp(&prog, "concat", std::vector({"d", "f"}), + std::vector({"h"})); + + std::unique_ptr graph(new ir::Graph(prog)); + int before, after; + graph = 
GetNumNodesOfBeforeAfter(std::move(graph), &before, &after); + // Remove 7 Nodes: op1, op2, c, d, e, f concat_op + // Add 1 Node: fusion_seqpool_concat + EXPECT_EQ(after, before - 6); + EXPECT_EQ(CountOpType(graph.get()), 1); +} + +ProgramDesc BuildProgramDesc(int num_inputs_of_concat) { + ProgramDesc prog; + auto new_var = [&](const std::string& name) { + auto* var = prog.MutableBlock(0)->Var(name); + var->SetType(proto::VarType::LOD_TENSOR); + }; + std::vector concat_inputs; + for (int i = 0; i < num_inputs_of_concat; ++i) { + std::string prefix = "seqpool_op_" + i; + new_var(prefix + "in"); + new_var(prefix + "out"); + new_var(prefix + "out_unused"); + SetOp(&prog, "sequence_pool", std::vector({prefix + "in"}), + std::vector({prefix + "out", prefix + "out_unused"})); + concat_inputs.push_back(prefix + "out"); + } + SetOp(&prog, "concat", concat_inputs, + std::vector({"concat_out"})); + return prog; +} + +// test more inputs of concat +TEST(SeqPoolConcatFusePass, more_inputs) { + for (int num : {1, 2, 10}) { + ProgramDesc prog = BuildProgramDesc(num); + std::unique_ptr graph(new ir::Graph(prog)); + int before, after; + graph = GetNumNodesOfBeforeAfter(std::move(graph), &before, &after); + // Remove Nodes: n * (seqpool_op, out, out_unused), and concat_op + // Add Node: fusion_seqpool_concat op + EXPECT_EQ(after, before - num * 3); + EXPECT_EQ(CountOpType(graph.get()), 1); + } +} + +} // namespace ir +} // namespace framework +} // namespace paddle + +USE_PASS(seqpool_concat_fuse_pass); diff --git a/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.cc b/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.cc new file mode 100644 index 0000000000000000000000000000000000000000..fda43948d567689103815e3ad7ba285719dae80f --- /dev/null +++ b/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.cc @@ -0,0 +1,148 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
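+
+// Fuses `times` parallel transpose2 -> flatten2 branches that feed a single
+// concat into one fusion_transpose_flatten_concat op; the pattern itself is
+// defined in graph_pattern_detector.cc.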
+ +#include +#include + +#include "paddle/fluid/framework/ir/graph_viz_pass.h" +#include "paddle/fluid/framework/ir/node.h" +#include "paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.h" + +namespace paddle { +namespace framework { +namespace ir { + +template +std::unique_ptr TransposeFlattenConcatFusePass::ApplyImpl( + std::unique_ptr graph) const { + const std::string pattern_name = + "transpose_flatten" + std::to_string(times) + "_concat_fuse"; + FusePassBase::Init(pattern_name, graph.get()); + + GraphPatternDetector gpd; + std::vector input_nodes; + for (int i = 0; i < times; i++) { + input_nodes.push_back(gpd.mutable_pattern() + ->NewNode("x" + std::to_string(i)) + ->assert_is_op_input("transpose2", "X") + ->AsInput()); + } + + patterns::TransposeFlattenConcat pattern(gpd.mutable_pattern(), pattern_name); + pattern(input_nodes, times); + + auto handler = [&](const GraphPatternDetector::subgraph_t &subgraph, + Graph *g) { + const int kNumFields = 5; + const int kTransOffset = 1; + const int kTransOutOffset = 2; + const int kFlattenOffset = 3; + const int kFlattenOutOffset = 4; + std::vector nodes; + + for (int i = 0; i < times; i++) { + PADDLE_ENFORCE( + subgraph.at(pattern.GetPDNode("transpose" + std::to_string(i)))); + PADDLE_ENFORCE( + subgraph.at(pattern.GetPDNode("transpose_out" + std::to_string(i)))); + PADDLE_ENFORCE( + subgraph.at(pattern.GetPDNode("flatten" + std::to_string(i)))); + PADDLE_ENFORCE( + subgraph.at(pattern.GetPDNode("flatten_out" + std::to_string(i)))); + PADDLE_ENFORCE(subgraph.at(input_nodes[i])); + + nodes.push_back(subgraph.at(input_nodes[i])); + nodes.push_back( + subgraph.at(pattern.GetPDNode("transpose" + std::to_string(i)))); + nodes.push_back( + subgraph.at(pattern.GetPDNode("transpose_out" + std::to_string(i)))); + nodes.push_back( + subgraph.at(pattern.GetPDNode("flatten" + std::to_string(i)))); + nodes.push_back( + subgraph.at(pattern.GetPDNode("flatten_out" + std::to_string(i)))); + } + + Node *concat_op = subgraph.at(pattern.GetPDNode("concat")); + Node *concat_out = subgraph.at(pattern.GetPDNode("concat_out")); + std::vector input_names; + std::vector trans_axis = boost::get>( + nodes[kTransOffset]->Op()->GetAttr("axis")); + int flatten_axis = + boost::get(nodes[kFlattenOffset]->Op()->GetAttr("axis")); + int concat_axis = boost::get(concat_op->Op()->GetAttr("axis")); + std::string output_name = concat_out->Name(); + + for (int i = 0; i < times; i++) { + input_names.push_back(nodes[i * kNumFields]->Name()); + } + + framework::OpDesc new_op_desc; + new_op_desc.SetType("fusion_transpose_flatten_concat"); + new_op_desc.SetInput("X", input_names); + new_op_desc.SetAttr("trans_axis", trans_axis); + new_op_desc.SetAttr("flatten_axis", flatten_axis); + new_op_desc.SetAttr("concat_axis", concat_axis); + new_op_desc.SetOutput("Out", {output_name}); + new_op_desc.Flush(); + + // Create a new node for the fused op. 
+ auto *new_conv_op = graph->CreateOpNode(&new_op_desc); + + std::unordered_set delete_nodes; + + for (int i = 0; i < times; i++) { + nodes[i * kNumFields]->outputs.push_back(new_conv_op); + new_conv_op->inputs.push_back(nodes[i * kNumFields]); + delete_nodes.insert(nodes[i * kNumFields + kTransOffset]); + delete_nodes.insert(nodes[i * kNumFields + kTransOutOffset]); + delete_nodes.insert(nodes[i * kNumFields + kFlattenOffset]); + delete_nodes.insert(nodes[i * kNumFields + kFlattenOutOffset]); + } + delete_nodes.insert(concat_op); + + new_conv_op->outputs.push_back(concat_out); + concat_out->inputs.push_back(new_conv_op); + + // Delete the unneeded nodes. + GraphSafeRemoveNodes(graph.get(), delete_nodes); + }; + + gpd(graph.get(), handler); + return graph; +} + +template class TransposeFlattenConcatFusePass<1>; +template class TransposeFlattenConcatFusePass<3>; +template class TransposeFlattenConcatFusePass<4>; +template class TransposeFlattenConcatFusePass<5>; +template class TransposeFlattenConcatFusePass<6>; + +} // namespace ir +} // namespace framework +} // namespace paddle + +REGISTER_PASS(transpose_flatten_concat_fuse_pass, + paddle::framework::ir::TransposeFlattenConcatFusePass<1>); + +REGISTER_PASS(transpose_flatten3_concat_fuse_pass, + paddle::framework::ir::TransposeFlattenConcatFusePass<3>); + +REGISTER_PASS(transpose_flatten4_concat_fuse_pass, + paddle::framework::ir::TransposeFlattenConcatFusePass<4>); + +REGISTER_PASS(transpose_flatten5_concat_fuse_pass, + paddle::framework::ir::TransposeFlattenConcatFusePass<5>); + +REGISTER_PASS(transpose_flatten6_concat_fuse_pass, + paddle::framework::ir::TransposeFlattenConcatFusePass<6>); diff --git a/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.h b/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.h new file mode 100644 index 0000000000000000000000000000000000000000..fb0f0ae9efdc5a25a799d6123fa658a99860cd86 --- /dev/null +++ b/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.h @@ -0,0 +1,38 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "paddle/fluid/framework/ir/fuse_pass_base.h" +#include "paddle/fluid/framework/ir/graph_pattern_detector.h" + +namespace paddle { +namespace framework { +namespace ir { + +// There may be many transpose-flatten structures in a model, and the output of +// these structures will be used as inputs to the concat Op. This pattern will +// be detected by our pass. The times here represents the repeat times of this +// structure. 
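+// For example, TransposeFlattenConcatFusePass<3> matches three parallel
+// transpose2 -> flatten2 branches whose outputs feed one concat op.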
+template +class TransposeFlattenConcatFusePass : public FusePassBase { + public: + virtual ~TransposeFlattenConcatFusePass() {} + + protected: + std::unique_ptr ApplyImpl(std::unique_ptr graph) const; +}; + +} // namespace ir +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/op_proto_maker.cc b/paddle/fluid/framework/op_proto_maker.cc index ca31303f77c4a30eb64c43404e214779ea78aeaf..2311614c335a56501ac777d787f6653659294765 100644 --- a/paddle/fluid/framework/op_proto_maker.cc +++ b/paddle/fluid/framework/op_proto_maker.cc @@ -82,6 +82,10 @@ void OpProtoAndCheckerMaker::operator()(proto::OpProto* proto, AddAttr(OpNamescopeAttrName(), "Operator name with namesope.") .SetDefault(""); + AddAttr>(OpCreationCallstackAttrName(), + "Callstack for Op Creatation.") + .SetDefault({}); + Validate(); } diff --git a/paddle/fluid/framework/op_proto_maker.h b/paddle/fluid/framework/op_proto_maker.h index 4c59c73d8779eceb267ad532aabccabbd54b0df2..0a0f8f4655bc34cdb25205ff6eaec9f96c801ebd 100644 --- a/paddle/fluid/framework/op_proto_maker.h +++ b/paddle/fluid/framework/op_proto_maker.h @@ -47,6 +47,7 @@ class OpProtoAndCheckerMaker { static const char *OpRoleAttrName() { return "op_role"; } static const char *OpRoleVarAttrName() { return "op_role_var"; } static const char *OpNamescopeAttrName() { return "op_namescope"; } + static const char *OpCreationCallstackAttrName() { return "op_callstack"; } void operator()(proto::OpProto *proto, OpAttrChecker *attr_checker); diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index f10da22aec1fc03b769e61ac1be750e7df69127d..6e71339e5a1733aa35cbf5c2ee79fd7c61f8a9e8 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -16,9 +16,15 @@ limitations under the License. */ #include #include +#include +#include +#include +#include "gflags/gflags.h" +#include "glog/logging.h" #include "paddle/fluid/framework/data_transform.h" #include "paddle/fluid/framework/executor.h" #include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_proto_maker.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/shape_inference.h" #include "paddle/fluid/framework/transfer_scope_cache.h" @@ -156,27 +162,55 @@ RuntimeContext::RuntimeContext(const VariableNameMap& innames, } void OperatorBase::Run(const Scope& scope, const platform::Place& place) { - VLOG(4) << place << " " << DebugStringEx(&scope); - if (platform::is_gpu_place(place)) { + try { + VLOG(4) << place << " " << DebugStringEx(&scope); + if (platform::is_gpu_place(place)) { #ifndef PADDLE_WITH_CUDA - PADDLE_THROW("Cannot run operator on place %s", place); + PADDLE_THROW("Cannot run operator on place %s", place); #else - auto dev_id = boost::get(place).device; - platform::SetDeviceId(dev_id); + auto dev_id = boost::get(place).device; + platform::SetDeviceId(dev_id); #endif - } + } - // The profile has a process-wide mutex, results in serious performance issue - // in concurrency scenerio. Here use an `if` to fix this issue. - // Please not remove the `if`, ask @Superjomn if there are any concern. - if (platform::IsProfileEnabled()) { - platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); - platform::RecordEvent record_event(Type(), pool.Get(place)); - RunImpl(scope, place); - } else { - RunImpl(scope, place); + // The profile has a process-wide mutex, results in serious performance + // issue + // in concurrency scenerio. Here use an `if` to fix this issue. 
+ // Please not remove the `if`, ask @Superjomn if there are any concern. + if (platform::IsProfileEnabled()) { + platform::DeviceContextPool& pool = + platform::DeviceContextPool::Instance(); + platform::RecordEvent record_event(Type(), pool.Get(place)); + RunImpl(scope, place); + } else { + RunImpl(scope, place); + } + + VLOG(3) << place << " " << DebugStringEx(&scope); + } catch (platform::EnforceNotMet exception) { + if (Attrs().count("sub_block") != 0) { + throw exception; + } + + auto& callstack = Attr>( + OpProtoAndCheckerMaker::OpCreationCallstackAttrName()); + + if (callstack.empty()) { + throw exception; + } + std::ostringstream sout; + sout << "Invoke operator " << Type() << " error.\n"; + sout << "Python Callstacks: \n"; + for (auto& line : callstack) { + sout << line; + } + sout << "C++ Callstacks: \n"; + sout << exception.err_str_; + exception.err_str_ = sout.str(); + throw exception; + } catch (...) { + std::rethrow_exception(std::current_exception()); } - VLOG(3) << place << " " << DebugStringEx(&scope); } bool OperatorBase::HasInputs(const std::string& name) const { diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index 4d29564aeed74558b7f0ec580568f70dad0b40cc..041187665af6ad0d75a7c55fe6ed451fe6f45b73 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -391,7 +391,7 @@ class ExecutionContext { PADDLE_ENFORCE( dynamic_cast(allocation_ptr) != nullptr, "The AllocationPtr must be TemporaryAllocation."); - PADDLE_ENFORCE_EQ(allocation_ptr->size(), + PADDLE_ENFORCE_GE(allocation_ptr->size(), framework::product(dim) * sizeof(T)); paddle::framework::Tensor temp_tensor( diff --git a/paddle/fluid/framework/python_headers.h b/paddle/fluid/framework/python_headers.h new file mode 100644 index 0000000000000000000000000000000000000000..422af19a13683dc9ae6377cac1b1ab2c2ac8f96b --- /dev/null +++ b/paddle/fluid/framework/python_headers.h @@ -0,0 +1,26 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +// workaround for Python 2 issue: https://bugs.python.org/issue17120 +#pragma push_macro("_XOPEN_SOURCE") +#pragma push_macro("_POSIX_C_SOURCE") +#undef _XOPEN_SOURCE +#undef _POSIX_C_SOURCE + +#include "pybind11/pybind11.h" + +#pragma pop_macro("_XOPEN_SOURCE") +#pragma pop_macro("_POSIX_C_SOURCE") diff --git a/paddle/fluid/imperative/CMakeLists.txt b/paddle/fluid/imperative/CMakeLists.txt index 373d292b443b7651b785a52a6986b0a0be58ad61..a730b84a916ea2c3e17dd4becaf939cc28160457 100644 --- a/paddle/fluid/imperative/CMakeLists.txt +++ b/paddle/fluid/imperative/CMakeLists.txt @@ -1,3 +1,5 @@ +if(WITH_PYTHON) cc_library(layer SRCS layer.cc DEPS proto_desc operator) cc_library(tracer SRCS tracer.cc DEPS proto_desc) cc_library(engine SRCS engine.cc) +endif() diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc index 9813149865489f7c10c0f4942172d46e1bccf81f..7594670cd2608802bdf41682ef5724a7a965d754 100644 --- a/paddle/fluid/imperative/layer.cc +++ b/paddle/fluid/imperative/layer.cc @@ -27,6 +27,8 @@ namespace paddle { namespace imperative { +std::map py_funcs_; + using framework::Variable; void AddTo(Variable* src, Variable* dst) { @@ -42,7 +44,7 @@ void AddTo(Variable* src, Variable* dst) { src_tensor->numel()); float* dst_data = dst_tensor->mutable_data(platform::CPUPlace()); const float* src_data = src_tensor->data(); - for (size_t i = 0; i < src_tensor->numel(); ++i) { + for (int64_t i = 0; i < src_tensor->numel(); ++i) { dst_data[i] += src_data[i]; } } @@ -55,6 +57,7 @@ class Autograd { if (var->stop_gradient_) { return; } + VLOG(3) << "start autograd"; std::deque ready; ready.push_back(var->pre_op_); @@ -114,57 +117,63 @@ class Autograd { } }; -framework::LoDTensor& VarBase::Grad() { +framework::LoDTensor& VarBase::GradValue() { VLOG(3) << "get var grad " << var_desc_->Name(); - return *grads_->GetMutable(); + return *(grads_->var_->GetMutable()); } std::map> OpBase::ApplyGrad() { - if (!grad_op_desc_) { + if (!grad_op_desc_ && backward_id_ <= 0) { LOG(WARNING) << "op with no grad: " << op_desc_->Type(); return {}; } - VLOG(3) << "op grad " << grad_op_desc_->Type(); - std::vector> tmp_vars; std::map> grad_outputs; - for (auto it : grad_output_vars_) { - auto& outputs = grad_outputs[it.first]; - for (size_t i = 0; i < it.second.size(); ++i) { - // Allocate a new variable - Variable* tmp_var = new framework::Variable(); - tmp_var->GetMutable(); - - tmp_vars.emplace_back(tmp_var); - outputs.push_back(tmp_var); + if (backward_id_ > 0) { + VLOG(3) << "py_layer_grad"; + grad_outputs["Out@GRAD"] = + PyLayer::ApplyGrad(backward_id_, grad_input_vars_["X@GRAD"]); + } else { + VLOG(3) << "op grad " << grad_op_desc_->Type(); + for (auto it : grad_output_vars_) { + auto& outputs = grad_outputs[it.first]; + for (size_t i = 0; i < it.second.size(); ++i) { + // Allocate a new variable + Variable* tmp_var = new framework::Variable(); + tmp_var->GetMutable(); + outputs.push_back(tmp_var); + } } - } - framework::RuntimeContext ctx(grad_input_vars_, grad_outputs); + framework::RuntimeContext ctx(grad_input_vars_, grad_outputs); - // No need to do compile time infer shape here. - // grad_op_desc_->InferShape(*block_); - grad_op_desc_->InferVarType(block_); + // No need to do compile time infer shape here. 
+ // grad_op_desc_->InferShape(*block_); + grad_op_desc_->InferVarType(block_); - std::unique_ptr opbase = - framework::OpRegistry::CreateOp(*grad_op_desc_); - framework::OperatorWithKernel* op_kernel = - dynamic_cast(opbase.get()); - PADDLE_ENFORCE_NOT_NULL(op_kernel, "only support op with kernel"); + std::unique_ptr opbase = + framework::OpRegistry::CreateOp(*grad_op_desc_); + framework::OperatorWithKernel* op_kernel = + dynamic_cast(opbase.get()); + PADDLE_ENFORCE_NOT_NULL(op_kernel, "only support op with kernel"); - framework::Scope scope; - platform::CPUPlace place; - PreparedOp p = PreparedOp::Prepare(ctx, *op_kernel, place); - p.op.RuntimeInferShape(scope, place, ctx); - p.func(framework::ExecutionContext(p.op, scope, *p.dev_ctx, p.ctx)); + framework::Scope scope; + platform::CPUPlace place; + PreparedOp p = PreparedOp::Prepare(ctx, *op_kernel, place); + p.op.RuntimeInferShape(scope, place, ctx); + p.func(framework::ExecutionContext(p.op, scope, *p.dev_ctx, p.ctx)); + } for (auto it : grad_output_vars_) { auto& outputs = grad_outputs[it.first]; auto& origin_outputs = it.second; + PADDLE_ENFORCE_EQ(outputs.size(), origin_outputs.size()); for (size_t i = 0; i < outputs.size(); ++i) { + framework::Variable* grad = outputs[i]; framework::Variable* orig_grad = origin_outputs[i]; - AddTo(outputs[i], orig_grad); + AddTo(grad, orig_grad); + delete grad; } } return input_vars_; @@ -173,7 +182,8 @@ std::map> OpBase::ApplyGrad() { void VarBase::RunBackward() { if (!pre_op_) return; - auto grads_t = grads_->GetMutable(); + VLOG(3) << "start backward"; + auto grads_t = grads_->var_->GetMutable(); float* data = grads_t->mutable_data(platform::CPUPlace()); std::fill(data, data + grads_t->numel(), 1.0); @@ -183,5 +193,65 @@ void VarBase::RunBackward() { Autograd().RunBackward(this); } +void PyLayer::RegisterFunc(int func_id, const py::object& py_func) { + py_funcs_[func_id] = py_func; +} + +int PyLayer::NumFuncs() { return py_funcs_.size(); } + +std::vector PyLayer::Apply(int func_id, + const std::vector& inputs) { + std::vector invars; + for (const VarBase* in : inputs) { + invars.push_back(in->var_); + } + PADDLE_ENFORCE(py_funcs_.find(func_id) != py_funcs_.end()); + std::vector outvars = CallPythonFunc(py_funcs_[func_id], invars); + std::vector ret; + for (Variable* v : outvars) { + ret.push_back(new VarBase(v, new VarBase(true))); + } + return ret; +} + +std::vector PyLayer::ApplyGrad( + int func_id, const std::vector& inputs) { + PADDLE_ENFORCE(py_funcs_.find(func_id) != py_funcs_.end()); + return CallPythonFunc(py_funcs_[func_id], inputs); +} + +std::vector PyLayer::CallPythonFunc( + const py::object& callable, const std::vector& ins) { + py::gil_scoped_acquire guard; + py::tuple in_args(ins.size()); + for (size_t i = 0; i < ins.size(); ++i) { + const framework::LoDTensor& t = ins[i]->Get(); + in_args[i] = t.IsInitialized() ? py::cast(t) : py::cast(nullptr); + } + VLOG(3) << "pyfunc in " << py::len(in_args); + + // TODO(panyx0718): Who owns the returned LoDTensor. 
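Every call back into Python has to hold the GIL and marshal arguments through pybind11 objects. A minimal sketch of the call pattern used by CallPythonFunc, with plain py::object arguments in place of tensors (it assumes, as the code here does, that the callable takes a single tuple argument and returns a tuple):

#include <vector>

#include "pybind11/pybind11.h"

namespace py = pybind11;

py::tuple CallWithGIL(const py::object& callable,
                      const std::vector<py::object>& args) {
  py::gil_scoped_acquire guard;        // required before touching any Python API
  py::tuple in_args(args.size());
  for (size_t i = 0; i < args.size(); ++i) {
    in_args[i] = args[i];
  }
  py::object ret = callable(in_args);  // the callable receives one tuple argument
  return ret.cast<py::tuple>();        // throws py::cast_error if it is not a tuple
}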
+ auto ret = callable(in_args); + auto ret_tuple = py::cast(ret); + size_t ret_num = py::len(ret_tuple); + std::vector outs; + VLOG(3) << "pyfunc out " << ret_num; + for (size_t i = 0; i < ret_num; ++i) { + try { + auto* py_out_tensor = py::cast(ret_tuple[i]); + PADDLE_ENFORCE_NOT_NULL(py_out_tensor, + "Output tensor %d should not be nullptr", i); + auto* var = new framework::Variable(); + auto* tensor = var->GetMutable(); + tensor->ShareDataWith(*py_out_tensor); + tensor->set_lod(py_out_tensor->lod()); + outs.push_back(var); + } catch (py::cast_error&) { + PADDLE_THROW("The %d-th output must be LoDTensor", i); + } + } + return outs; +} + } // namespace imperative } // namespace paddle diff --git a/paddle/fluid/imperative/layer.h b/paddle/fluid/imperative/layer.h index 34cffd1aa3f8c3df9d4f67db37cbe0985862118f..daf56a521085b63926194b958094a7d170873830 100644 --- a/paddle/fluid/imperative/layer.h +++ b/paddle/fluid/imperative/layer.h @@ -14,17 +14,26 @@ #pragma once -#include -#include -#include +// clang-format off +#include "paddle/fluid/framework/python_headers.h" +// clang-format on + +#include // NOLINT +#include // NOLINT +#include // NOLINT + #include "paddle/fluid/framework/op_desc.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/var_desc.h" #include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/imperative/type_defs.h" + namespace paddle { namespace imperative { +namespace py = ::pybind11; + class PreparedOp { public: PreparedOp(const framework::OperatorBase& op, @@ -80,29 +89,47 @@ class PreparedOp { class OpBase; +/* The wrapper for Variable which holds a Variable and a VarBase of its + * gradient. This object should be managed totally by Python intepreter. + * + * Nearly all interface should be implemented in C++. + */ class VarBase { public: - VarBase() + VarBase() : VarBase(new framework::Variable(), new VarBase(true)) {} + + // Owns `var` and `grad` + VarBase(framework::Variable* var, VarBase* grad) : pre_op_(nullptr), + pre_op_out_name_(), pre_op_out_idx_(-1), var_desc_(nullptr), - var_(new framework::Variable()), - grads_(new framework::Variable()), + var_(var), + grads_(grad), stop_gradient_(false) {} explicit VarBase(bool stop_gradient) : pre_op_(nullptr), + pre_op_out_name_(), pre_op_out_idx_(-1), var_desc_(nullptr), var_(new framework::Variable()), - grads_(new framework::Variable()), + grads_(stop_gradient ? nullptr : new VarBase(true)), stop_gradient_(stop_gradient) {} - virtual ~VarBase() {} + virtual ~VarBase() { + if (var_) { + delete var_; + } + + if (grads_) { + delete grads_; + } + } void RunBackward(); - framework::LoDTensor& Grad(); + framework::LoDTensor& GradValue(); inline std::string GradName() const { PADDLE_ENFORCE( @@ -116,15 +143,23 @@ class VarBase { int pre_op_out_idx_; framework::VarDesc* var_desc_; + framework::Variable* var_; - framework::Variable* grads_; + VarBase* grads_; bool stop_gradient_; }; +/* The wrapper for OpDesc which holds a OpDesc and a OpDesc of its + * gradient. This object should be managed totally by Python intepreter. + */ class OpBase { public: - OpBase() : op_desc_(nullptr), grad_op_desc_(nullptr) {} + OpBase() + : op_desc_(nullptr), + forward_id_(-1), + grad_op_desc_(nullptr), + backward_id_(-1) {} virtual ~OpBase() { if (grad_op_desc_) delete grad_op_desc_; @@ -132,16 +167,22 @@ class OpBase { std::map> ApplyGrad(); + // One of `op_desc_` or `forward_id_` is set, not both. + // For pure python PyLayer, use `forward_id_`, otherwise, use op_desc_. 
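The comment above states an invariant (exactly one of op_desc_ and forward_id_ identifies the forward computation) without enforcing it. A hypothetical debug helper in the same spirit, not part of this patch, could look like:

#include "paddle/fluid/framework/op_desc.h"
#include "paddle/fluid/platform/enforce.h"

// Hypothetical sanity check for the OpBase invariant described above.
inline void CheckForwardSource(const paddle::framework::OpDesc* op_desc,
                               int forward_id) {
  const bool has_desc = (op_desc != nullptr);
  const bool has_py_forward = (forward_id > 0);
  PADDLE_ENFORCE(has_desc != has_py_forward,
                 "exactly one of op_desc_ and forward_id_ must be set");
}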
framework::OpDesc* op_desc_; + int forward_id_; + // When has backward, one of `grad_op_desc_` or `backward_id_` is set, + // not both. framework::OpDesc* grad_op_desc_; + int backward_id_; - std::map> input_vars_; - std::map> output_vars_; - std::map> pre_ops_; + VarBasePtrMap input_vars_; + VarBasePtrMap output_vars_; + OpBasePtrMap pre_ops_; std::map> pre_ops_out_idx_; - std::map> grad_input_vars_; - std::map> grad_output_vars_; + framework::VariableValueMap grad_input_vars_; + framework::VariableValueMap grad_output_vars_; framework::BlockDesc* block_; }; @@ -153,8 +194,25 @@ class Layer { std::vector vars; return vars; } +}; + +class PyLayer { + public: + virtual ~PyLayer() {} + + static void RegisterFunc(int func_id, const py::object& py_func); + + static int NumFuncs(); + + static std::vector Apply(int func_id, + const std::vector& inputs); + + static std::vector ApplyGrad( + int func_id, const std::vector& inputs); - virtual void Backward() { LOG(ERROR) << "To support customize"; } + private: + static std::vector CallPythonFunc( + const py::object& callable, const std::vector& ins); }; } // namespace imperative diff --git a/paddle/fluid/imperative/tracer.cc b/paddle/fluid/imperative/tracer.cc index f64f9e72c4a23528948183b909d65e90783a4463..a01225ccee4a82f77ec2a23df75d1cf7b719bdb7 100644 --- a/paddle/fluid/imperative/tracer.cc +++ b/paddle/fluid/imperative/tracer.cc @@ -15,5 +15,199 @@ #include "paddle/fluid/imperative/tracer.h" namespace paddle { -namespace imperative {} // namespace imperative +namespace imperative { + +void CreateGradOp(const framework::OpDesc& op_desc, + const std::unordered_set& no_grad_set, + const std::vector& grad_sub_block, + framework::OpDesc** grad_op_desc, + std::unordered_map* grad_to_var) { + std::vector> grad_op_descs = + framework::OpInfoMap::Instance() + .Get(op_desc.Type()) + .GradOpMaker()(op_desc, no_grad_set, grad_to_var, grad_sub_block); + PADDLE_ENFORCE(grad_op_descs.size() == 1, "Only support 1 grad op now."); + // TODO(panyx0718): Leak? 
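The TODO above is about ownership of the pointer released just below: release() detaches the OpDesc from its unique_ptr, and the receiving OpBase deletes grad_op_desc_ in its destructor (see layer.h), so the hand-off is not a leak as long as every traced op is eventually destroyed. A reduced sketch of that hand-off with placeholder types:

#include <memory>

struct GradDesc {};  // stands in for framework::OpDesc

struct TracedOp {
  GradDesc* grad_desc = nullptr;
  ~TracedOp() { delete grad_desc; }  // the op owns the raw pointer it received
};

void AttachGrad(TracedOp* op) {
  std::unique_ptr<GradDesc> built(new GradDesc());
  op->grad_desc = built.release();   // ownership moves out of the unique_ptr
}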
+ *grad_op_desc = grad_op_descs[0].release(); +} + +void InitVar(framework::Variable* var, framework::Variable* grad_var) { + auto& var_t = var->Get(); + float* data = + grad_var->GetMutable()->mutable_data( + var_t.dims(), platform::CPUPlace()); + std::fill(data, data + var_t.numel(), 0.0); +} + +void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs, + const VarBasePtrMap& outputs, framework::BlockDesc* block, + const bool stop_gradient) { + std::map vars; + + framework::OpDesc* op_desc = op->op_desc_; + VLOG(3) << "tracer tracing " << op_desc->Type(); + op_desc->InferShape(*block); + op_desc->InferVarType(block); + std::unique_ptr op_base = + framework::OpRegistry::CreateOp(*op_desc); + + framework::VariableValueMap invars_map; + framework::VariableValueMap outvars_map; + + op->input_vars_ = inputs; + for (auto it : op->input_vars_) { + auto& invars = invars_map[it.first]; + for (VarBase* inp : it.second) { + PADDLE_ENFORCE_NOT_NULL(inp->var_, "op %s input %s nullptr", + op->op_desc_->Type(), inp->var_desc_->Name()); + + invars.push_back(inp->var_); + vars[inp->var_desc_->Name()] = inp; + if (inp->pre_op_) { + op->pre_ops_[it.first].push_back(inp->pre_op_); + op->pre_ops_out_idx_[it.first].push_back(inp->pre_op_out_idx_); + } else { + op->pre_ops_[it.first].push_back(nullptr); + } + VLOG(3) << "input vname " << inp->var_desc_->Name() << " " + << inp->var_->IsInitialized(); + } + } + + op->output_vars_ = outputs; + for (auto it : op->output_vars_) { + auto& outvars = outvars_map[it.first]; + const std::vector& outputs = it.second; + for (size_t i = 0; i < outputs.size(); ++i) { + VarBase* out = outputs[i]; + outvars.push_back(out->var_); + vars[out->var_desc_->Name()] = out; + + framework::VarDesc* var_desc = block->FindVar(out->var_desc_->Name()); + if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) { + out->var_->GetMutable(); + } else { + LOG(ERROR) << "tracer doesn't support yet"; + } + out->stop_gradient_ = stop_gradient; + out->pre_op_ = op; + out->pre_op_out_name_ = it.first; + out->pre_op_out_idx_ = i; + + VLOG(3) << "output vname " << out->var_desc_->Name() << " " + << out->var_->IsInitialized(); + } + } + + VLOG(3) << "tracer running " << op_desc->Type(); + framework::RuntimeContext ctx(invars_map, outvars_map); + + // TODO(panyx0718): Cache p. + framework::OperatorWithKernel* op_kernel = + dynamic_cast(op_base.get()); + PADDLE_ENFORCE_NOT_NULL(op_kernel, "only support op with kernel"); + + framework::Scope scope; + platform::CPUPlace place; + PreparedOp p = PreparedOp::Prepare(ctx, *op_kernel, place); + p.op.RuntimeInferShape(scope, place, ctx); + p.func(framework::ExecutionContext(p.op, scope, *p.dev_ctx, p.ctx)); + + if (!stop_gradient) { + framework::OpDesc* grad_op_desc; + // TODO(panyx): Is this leaked? + std::unique_ptr> grad_to_var( + new std::unordered_map()); + CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var.get()); + op->grad_op_desc_ = grad_op_desc; + + for (auto it : grad_op_desc->Inputs()) { + auto& grad_in_vars = op->grad_input_vars_[it.first]; + for (const std::string& grad_invar : it.second) { + block->FindRecursiveOrCreateVar(grad_invar); + auto var_it = grad_to_var->find(grad_invar); + if (var_it == grad_to_var->end()) { + auto fwd_var_it = vars.find(grad_invar); + PADDLE_ENFORCE(fwd_var_it != vars.end()); + // Forward inputs or outputs. 
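Gradient variables are created lazily and zero-filled (InitVar above) because ApplyGrad in layer.cc accumulates into them with AddTo rather than assigning; a non-zero starting value would corrupt the sum. A stripped-down sketch of that convention, using plain vectors in place of LoDTensor:

#include <vector>

// Zero-fill the gradient to the forward value's element count on first touch;
// later contributions are added on top of it.
void EnsureZeroGrad(const std::vector<float>& forward_value,
                    std::vector<float>* grad) {
  if (grad->empty()) {
    grad->assign(forward_value.size(), 0.0f);
  }
}

void AccumulateGrad(const std::vector<float>& contribution,
                    std::vector<float>* grad) {
  for (size_t i = 0; i < contribution.size(); ++i) {
    (*grad)[i] += contribution[i];  // mirrors AddTo in layer.cc
  }
}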
+ grad_in_vars.push_back(fwd_var_it->second->var_); + } else { + VarBase* var = vars[var_it->second]; + if (!var->grads_->var_->IsInitialized()) { + InitVar(var->var_, var->grads_->var_); + } + // Douts. + grad_in_vars.push_back(var->grads_->var_); + } + } + } + + for (auto it : grad_op_desc->Outputs()) { + auto& grad_out_vars = op->grad_output_vars_[it.first]; + for (const std::string& grad_outvar : it.second) { + block->FindRecursiveOrCreateVar(grad_outvar); + auto var_it = grad_to_var->find(grad_outvar); + PADDLE_ENFORCE(var_it != grad_to_var->end()); + VarBase* var = vars[var_it->second]; + if (!var->grads_->var_->IsInitialized()) { + InitVar(var->var_, var->grads_->var_); + } + grad_out_vars.push_back(var->grads_->var_); + } + } + } + + op->block_ = block; +} + +std::vector Tracer::PyTrace(OpBase* op, + const std::vector& inputs, + bool stop_gradient) { + VLOG(3) << "py_trace"; + op->input_vars_["X"] = inputs; + op->output_vars_["Out"] = PyLayer::Apply(op->forward_id_, inputs); + for (VarBase* inp : inputs) { + if (inp->pre_op_) { + op->pre_ops_["X"].push_back(inp->pre_op_); + op->pre_ops_out_idx_["X"].push_back(inp->pre_op_out_idx_); + } else { + op->pre_ops_["X"].push_back(nullptr); + } + } + + auto& outputs = op->output_vars_["Out"]; + for (size_t i = 0; i < outputs.size(); ++i) { + VarBase* out = outputs[i]; + out->stop_gradient_ = stop_gradient; + out->pre_op_ = op; + out->pre_op_out_name_ = "Out"; + out->pre_op_out_idx_ = i; + } + if (!stop_gradient) { + auto& grad_input_vars = op->grad_input_vars_["X@GRAD"]; + auto& grad_output_vars = op->grad_output_vars_["Out@GRAD"]; + + for (const VarBase* inp : inputs) { + grad_input_vars.push_back(inp->var_); + } + for (VarBase* out : outputs) { + grad_input_vars.push_back(out->var_); + } + for (VarBase* out : outputs) { + grad_input_vars.push_back(out->grads_->var_); + if (!grad_input_vars.back()->IsInitialized()) { + InitVar(out->var_, grad_input_vars.back()); + } + } + for (const VarBase* inp : inputs) { + grad_output_vars.push_back(inp->grads_->var_); + if (!grad_output_vars.back()->IsInitialized()) { + InitVar(inp->var_, grad_output_vars.back()); + } + } + } + return outputs; +} + +} // namespace imperative } // namespace paddle diff --git a/paddle/fluid/imperative/tracer.h b/paddle/fluid/imperative/tracer.h index c6eff86fac7be04821e1f8718ad06356fd6adb48..f225d8abe6c0635d2bdd8dba0b12c7fc3a4110db 100644 --- a/paddle/fluid/imperative/tracer.h +++ b/paddle/fluid/imperative/tracer.h @@ -30,23 +30,9 @@ void CreateGradOp(const framework::OpDesc& op_desc, const std::unordered_set& no_grad_set, const std::vector& grad_sub_block, framework::OpDesc** grad_op_desc, - std::unordered_map* grad_to_var) { - std::vector> grad_op_descs = - framework::OpInfoMap::Instance() - .Get(op_desc.Type()) - .GradOpMaker()(op_desc, no_grad_set, grad_to_var, grad_sub_block); - PADDLE_ENFORCE(grad_op_descs.size() == 1, "Only support 1 grad op now."); - // TODO(panyx0718): Leak? 
- *grad_op_desc = grad_op_descs[0].release(); -} + std::unordered_map* grad_to_var); -void InitVar(framework::Variable* var, framework::Variable* grad_var) { - auto& var_t = var->Get(); - float* data = - grad_var->GetMutable()->mutable_data( - var_t.dims(), platform::CPUPlace()); - std::fill(data, data + var_t.numel(), 0.0); -} +void InitVar(framework::Variable* var, framework::Variable* grad_var); class Tracer { public: @@ -57,120 +43,10 @@ class Tracer { void Trace(OpBase* op, const std::map>& inputs, const std::map>& outputs, - framework::BlockDesc* block, const bool stop_gradient = false) { - std::map vars; + framework::BlockDesc* block, const bool stop_gradient = false); - framework::OpDesc* op_desc = op->op_desc_; - VLOG(3) << "tracer tracing " << op_desc->Type(); - op_desc->InferShape(*block); - op_desc->InferVarType(block); - std::unique_ptr op_base = - framework::OpRegistry::CreateOp(*op_desc); - - framework::VariableValueMap invars_map; - framework::VariableValueMap outvars_map; - - op->input_vars_ = inputs; - for (auto it : op->input_vars_) { - auto& invars = invars_map[it.first]; - for (VarBase* inp : it.second) { - PADDLE_ENFORCE_NOT_NULL(inp->var_, "op %s input %s nullptr", - op->op_desc_->Type(), inp->var_desc_->Name()); - - invars.push_back(inp->var_); - vars[inp->var_desc_->Name()] = inp; - if (inp->pre_op_) { - op->pre_ops_[it.first].push_back(inp->pre_op_); - op->pre_ops_out_idx_[it.first].push_back(inp->pre_op_out_idx_); - } else { - op->pre_ops_[it.first].push_back(nullptr); - } - VLOG(3) << "input vname " << inp->var_desc_->Name() << " " - << inp->var_->IsInitialized(); - } - } - - op->output_vars_ = outputs; - for (auto it : op->output_vars_) { - auto& outvars = outvars_map[it.first]; - const std::vector& outputs = it.second; - for (size_t i = 0; i < outputs.size(); ++i) { - VarBase* out = outputs[i]; - outvars.push_back(out->var_); - vars[out->var_desc_->Name()] = out; - - framework::VarDesc* var_desc = block->FindVar(out->var_desc_->Name()); - if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) { - out->var_->GetMutable(); - } else { - LOG(ERROR) << "tracer doesn't support yet"; - } - out->stop_gradient_ = stop_gradient; - out->pre_op_ = op; - out->pre_op_out_name_ = it.first; - out->pre_op_out_idx_ = i; - - VLOG(3) << "output vname " << out->var_desc_->Name() << " " - << out->var_->IsInitialized(); - } - } - - VLOG(3) << "tracer running " << op_desc->Type(); - framework::RuntimeContext ctx(invars_map, outvars_map); - - // TODO(panyx0718): Cache p. 
- framework::OperatorWithKernel* op_kernel = - dynamic_cast(op_base.get()); - PADDLE_ENFORCE_NOT_NULL(op_kernel, "only support op with kernel"); - - framework::Scope scope; - platform::CPUPlace place; - PreparedOp p = PreparedOp::Prepare(ctx, *op_kernel, place); - p.op.RuntimeInferShape(scope, place, ctx); - p.func(framework::ExecutionContext(p.op, scope, *p.dev_ctx, p.ctx)); - - if (!stop_gradient) { - framework::OpDesc* grad_op_desc; - auto grad_to_var = new std::unordered_map(); - CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var); - op->grad_op_desc_ = grad_op_desc; - - for (auto it : grad_op_desc->Inputs()) { - auto& grad_in_vars = op->grad_input_vars_[it.first]; - for (const std::string& grad_invar : it.second) { - block->FindRecursiveOrCreateVar(grad_invar); - auto var_it = grad_to_var->find(grad_invar); - if (var_it == grad_to_var->end()) { - auto fwd_var_it = vars.find(grad_invar); - PADDLE_ENFORCE(fwd_var_it != vars.end()); - grad_in_vars.push_back(fwd_var_it->second->var_); - } else { - VarBase* var = vars[var_it->second]; - if (!var->grads_->IsInitialized()) { - InitVar(var->var_, var->grads_); - } - grad_in_vars.push_back(var->grads_); - } - } - } - - for (auto it : grad_op_desc->Outputs()) { - auto& grad_out_vars = op->grad_output_vars_[it.first]; - for (const std::string& grad_outvar : it.second) { - block->FindRecursiveOrCreateVar(grad_outvar); - auto var_it = grad_to_var->find(grad_outvar); - PADDLE_ENFORCE(var_it != grad_to_var->end()); - VarBase* var = vars[var_it->second]; - if (!var->grads_->IsInitialized()) { - InitVar(var->var_, var->grads_); - } - grad_out_vars.push_back(var->grads_); - } - } - } - - op->block_ = block; - } + std::vector PyTrace(OpBase* op, const std::vector& inputs, + bool stop_gradient = false); private: framework::BlockDesc* root_block_; diff --git a/paddle/fluid/imperative/type_defs.h b/paddle/fluid/imperative/type_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..fc9e42f8d0e9996176a5cbab7d8c7cf08ddce1af --- /dev/null +++ b/paddle/fluid/imperative/type_defs.h @@ -0,0 +1,31 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include +#include +#include + +namespace paddle { +namespace imperative { + +class VarBase; +class OpBase; + +typedef std::map> VarBasePtrMap; +typedef std::map> OpBasePtrMap; + +} // namespace imperative +} // namespace paddle diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc index 211c691504de2c0bd8ff50f34b92cbc01397d5c9..336ab426c21d9de93693c44d8fc6bc5b37b58864 100644 --- a/paddle/fluid/inference/api/analysis_config.cc +++ b/paddle/fluid/inference/api/analysis_config.cc @@ -127,6 +127,7 @@ void contrib::AnalysisConfig::EnableTensorRtEngine(int workspace_size, use_tensorrt_ = true; tensorrt_workspace_size_ = workspace_size; tensorrt_max_batchsize_ = max_batch_size; + Update(); } void contrib::AnalysisConfig::Update() { diff --git a/paddle/fluid/inference/api/demo_ci/CMakeLists.txt b/paddle/fluid/inference/api/demo_ci/CMakeLists.txt index f42ee9a697bfb4b8fefd4d3ba65afea4e74f0a85..19ef402d6fd78d6a65bdb0bbd22198f36b872a27 100644 --- a/paddle/fluid/inference/api/demo_ci/CMakeLists.txt +++ b/paddle/fluid/inference/api/demo_ci/CMakeLists.txt @@ -92,10 +92,10 @@ if(WITH_MKL) if(NOT WIN32) set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX}) - else(WIN32) + else() set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml${CMAKE_SHARED_LIBRARY_SUFFIX} ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5md${CMAKE_SHARED_LIBRARY_SUFFIX}) - endif(WIN32) + endif() set(MKLDNN_PATH "${PADDLE_LIB}/third_party/install/mkldnn") if(EXISTS ${MKLDNN_PATH}) include_directories("${MKLDNN_PATH}/include") @@ -128,8 +128,8 @@ else() ${CMAKE_STATIC_LIBRARY_PREFIX}glog ${CMAKE_STATIC_LIBRARY_PREFIX}gflags ${CMAKE_STATIC_LIBRARY_PREFIX}protobuf ${CMAKE_STATIC_LIBRARY_PREFIX}snappy ${CMAKE_STATIC_LIBRARY_PREFIX}z ${CMAKE_STATIC_LIBRARY_PREFIX}xxhash snappystream ${EXTERNAL_LIB}) - # NOTE(dzhwinter) shlwapi is deprecated. - set(DEPS ${DEPS} libcmt shlwapi) + get_property(os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES) + set(DEPS ${DEPS} libcmt ${os_dependency_modules}) endif(NOT WIN32) if(WITH_GPU) diff --git a/paddle/fluid/inference/api/demo_ci/run.sh b/paddle/fluid/inference/api/demo_ci/run.sh index a94ccfa92439a735e101c7e5709909abea062ff8..9811fe2cd0621708bf896b10a036049dfc4d3898 100755 --- a/paddle/fluid/inference/api/demo_ci/run.sh +++ b/paddle/fluid/inference/api/demo_ci/run.sh @@ -116,6 +116,10 @@ D --modeldir=$DATA_DIR/mobilenet/model \ --data=$DATA_DIR/mobilenet/data.txt \ --refer=$DATA_DIR/mobilenet/result.txt + if [ $? -ne 0 ]; then + echo "trt demo trt_mobilenet_demo runs fail." 
+ exit 1 + fi fi done set +x diff --git a/paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc b/paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc index 30215e480f908f353f00cbc9077e6c057222423a..338a0cec161f352781f132aea71dd56f68840c62 100644 --- a/paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc +++ b/paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc @@ -38,8 +38,8 @@ void Main() { std::unique_ptr predictor; paddle::contrib::AnalysisConfig config; config.EnableUseGpu(100, 0); - config.SetModel(FLAGS_modeldir + "/__params__", - FLAGS_modeldir + "/__model__"); + config.SetModel(FLAGS_modeldir + "/__model__", + FLAGS_modeldir + "/__params__"); config.EnableTensorRtEngine(); predictor = CreatePaddlePredictor(config); diff --git a/paddle/fluid/inference/api/helper.h b/paddle/fluid/inference/api/helper.h index 7830e859567747e6c05686335919e8346f76320d..cdd01cb9f06424b39d17e192f9a924451ad1daaf 100644 --- a/paddle/fluid/inference/api/helper.h +++ b/paddle/fluid/inference/api/helper.h @@ -204,11 +204,14 @@ static std::string DescribeTensor(const PaddleTensor &tensor) { os << to_string(l) << "; "; } os << "\n"; - os << " - data: "; + os << " - memory length: " << tensor.data.length(); + os << "\n"; + os << " - data: "; int dim = VecReduceToInt(tensor.shape); + float *pdata = static_cast(tensor.data.data()); for (int i = 0; i < dim; i++) { - os << static_cast(tensor.data.data())[i] << " "; + os << pdata[i] << " "; } os << '\n'; return os.str(); @@ -224,10 +227,12 @@ static std::string DescribeZeroCopyTensor(const ZeroCopyTensor &tensor) { os << to_string(l) << "; "; } os << "\n"; - os << " - data: "; PaddlePlace place; int size; const auto *data = tensor.data(&place, &size); + os << " - numel: " << size; + os << "\n"; + os << " - data: "; for (int i = 0; i < size; i++) { os << data[i] << " "; } diff --git a/paddle/fluid/inference/api/paddle_api.h b/paddle/fluid/inference/api/paddle_api.h index 3642f36127f1f8df30858e34bc0e1a8d09603775..832c8cdf2849279c4c32a81e9f81ef522c401b86 100644 --- a/paddle/fluid/inference/api/paddle_api.h +++ b/paddle/fluid/inference/api/paddle_api.h @@ -123,7 +123,8 @@ class ZeroCopyTensor { */ template T* mutable_data(PaddlePlace place); - /** Get the memory directly, will return the place and memory size by pointer. + /** Get the memory directly, will return the place and element size by + * pointer. * This is for reading the output tensor. 
*/ template diff --git a/paddle/fluid/inference/api/paddle_pass_builder.h b/paddle/fluid/inference/api/paddle_pass_builder.h index 9337ae55b76fe4726149f69b07a189dc0cf769c1..de9650735adfe158e72213d4f6d5d3569aa90d55 100644 --- a/paddle/fluid/inference/api/paddle_pass_builder.h +++ b/paddle/fluid/inference/api/paddle_pass_builder.h @@ -89,6 +89,7 @@ class CpuPassStrategy : public PassStrategy { passes_.assign({ "infer_clean_graph_pass", // "attention_lstm_fuse_pass", // + "seqpool_concat_fuse_pass", // "seqconv_eltadd_relu_fuse_pass", // // "embedding_fc_lstm_fuse_pass", // "fc_lstm_fuse_pass", // @@ -140,6 +141,10 @@ class GpuPassStrategy : public PassStrategy { "conv_elementwise_add_fuse_pass", // }); + for (int i = 6; i >= 3; i--) { + passes_.push_back("transpose_flatten" + std::to_string(i) + + "_concat_fuse_pass"); + } use_gpu_ = true; } diff --git a/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc b/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc index 6975086193d991dc9f53b2d9d988f960c8ad118d..79362f9677010247dffa4fbaa155a7a56eed6f85 100644 --- a/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc @@ -39,6 +39,7 @@ class ElementwiseWeightOpConverter : public OpConverter { const framework::Scope& scope, bool test_mode) override { // Here the two nullptr looks strange, that's because the // framework::OpDesc's constructor is strange. + nvinfer1::ILayer* layer = nullptr; framework::OpDesc op_desc(op, nullptr); VLOG(3) << "Convert a fluid elementwise op to TensorRT IScaleLayer"; @@ -98,13 +99,21 @@ class ElementwiseWeightOpConverter : public OpConverter { 0}; TensorRTEngine::Weight power_weights{nvinfer1::DataType::kFLOAT, nullptr, 0}; + if (op_type_ == "add") { + nvinfer1::IScaleLayer* scale_layer = TRT_ENGINE_ADD_LAYER( + engine_, Scale, *X, scale_mode, shift_weights.get(), + scale_weights.get(), power_weights.get()); + layer = scale_layer; + } else if (op_type_ == "mul") { + nvinfer1::IScaleLayer* scale_layer = TRT_ENGINE_ADD_LAYER( + engine_, Scale, *X, scale_mode, scale_weights.get(), + shift_weights.get(), power_weights.get()); + layer = scale_layer; + } - nvinfer1::IScaleLayer* layer = TRT_ENGINE_ADD_LAYER( - engine_, Scale, *const_cast(X), scale_mode, - shift_weights.get(), scale_weights.get(), power_weights.get()); auto output_name = op_desc.Output("Out")[0]; - - layer->setName(("elementwise_add (Output: " + output_name + ")").c_str()); + layer->setName( + ("elementwise_" + op_type_ + "(Output: " + output_name + ")").c_str()); layer->getOutput(0)->setName(output_name.c_str()); engine_->weight_map[op_desc.Input("Y").front()] = std::move(weight_tensor); engine_->SetITensor(output_name, layer->getOutput(0)); @@ -113,6 +122,9 @@ class ElementwiseWeightOpConverter : public OpConverter { engine_->DeclareOutput(output_name); } } + + protected: + std::string op_type_; }; class ElementwiseTensorOpConverter : public OpConverter { @@ -188,6 +200,16 @@ const std::unordered_map {"max", nvinfer1::ElementWiseOperation::kMAX}, }; +class ElementwiseWeightAddOpConverter : public ElementwiseWeightOpConverter { + public: + ElementwiseWeightAddOpConverter() { op_type_ = "add"; } +}; + +class ElementwiseWeightMulOpConverter : public ElementwiseWeightOpConverter { + public: + ElementwiseWeightMulOpConverter() { op_type_ = "mul"; } +}; + class ElementwiseTensorAddOpConverter : public ElementwiseTensorOpConverter { public: ElementwiseTensorAddOpConverter() { op_type_ = "add"; } @@ -227,7 +249,10 @@ class 
ElementwiseTensorPowOpConverter : public ElementwiseTensorOpConverter { } // namespace inference } // namespace paddle -REGISTER_TRT_OP_CONVERTER(elementwise_add_weight, ElementwiseWeightOpConverter); +REGISTER_TRT_OP_CONVERTER(elementwise_add_weight, + ElementwiseWeightAddOpConverter); +REGISTER_TRT_OP_CONVERTER(elementwise_mul_weight, + ElementwiseWeightMulOpConverter); REGISTER_TRT_OP_CONVERTER(elementwise_add_tensor, ElementwiseTensorAddOpConverter); diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt index 131712ca88370aa977184fcb00d09f2283db110c..6854282a164773ad32a105c254b12a3bb4731e7f 100644 --- a/paddle/fluid/inference/tests/api/CMakeLists.txt +++ b/paddle/fluid/inference/tests/api/CMakeLists.txt @@ -100,14 +100,14 @@ set(OCR_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/ocr") if (NOT EXISTS ${OCR_INSTALL_DIR}) inference_download_and_uncompress(${OCR_INSTALL_DIR} "http://paddlemodels.cdn.bcebos.com/" "inference-vis-demos%2Focr.tar.gz") endif() -inference_analysis_api_test_with_refer_result(test_analyzer_ocr ${OCR_INSTALL_DIR} analyzer_vis_tester.cc) +inference_analysis_api_test_with_refer_result(test_analyzer_ocr ${OCR_INSTALL_DIR} analyzer_vis_tester.cc SERIAL) # mobilenet with transpose op set(MOBILENET_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/mobilenet") if (NOT EXISTS ${MOBILENET_INSTALL_DIR}) inference_download_and_uncompress(${MOBILENET_INSTALL_DIR} "http://paddlemodels.cdn.bcebos.com/" "inference-vis-demos%2Fmobilenet.tar.gz") endif() -inference_analysis_api_test_with_refer_result(test_analyzer_mobilenet_transpose ${MOBILENET_INSTALL_DIR} analyzer_vis_tester.cc) +inference_analysis_api_test_with_refer_result(test_analyzer_mobilenet_transpose ${MOBILENET_INSTALL_DIR} analyzer_vis_tester.cc SERIAL) # resnet50 inference_analysis_api_test_with_fake_data(test_analyzer_resnet50 diff --git a/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc b/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc index 3c52afbfb8f60b1e9389d416a5640c9685d8e764..22e6366fb5cba6c7a0cde9c0c5f50f56c2e23b05 100644 --- a/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc @@ -283,7 +283,7 @@ TEST(Analyzer_rnn1, multi_thread) { std::vector> input_slots_all; SetInput(&input_slots_all); TestPrediction(reinterpret_cast(&cfg), - input_slots_all, &outputs, 4 /* multi_thread */); + input_slots_all, &outputs, 2 /* multi_thread */); } // Validate that the AnalysisPredictor + ZeroCopyTensor really works by testing @@ -351,10 +351,10 @@ TEST(Analyzer_rnn1, ZeroCopy) { ASSERT_TRUE(native_predictor->Run(native_inputs.front(), &native_outputs)); LOG(INFO) << "native output " << DescribeTensor(native_outputs.front()); - int output_size{0}; + int output_size{0}; // this is the number of elements not memory size auto *zero_copy_data = output_tensor->data(&place, &output_size); auto *native_data = static_cast(native_outputs.front().data.data()); - for (size_t i = 0; i < output_size / sizeof(float); i++) { + for (int i = 0; i < output_size; i++) { EXPECT_NEAR(zero_copy_data[i], native_data[i], 1e-3); } } @@ -370,15 +370,12 @@ TEST(Analyzer_rnn1, ZeroCopyMultiThread) { auto base_predictor = CreatePaddlePredictor(config); double total_time_of_threads{0}; std::vector threads; - std::vector> predictors; - for (int tid = 0; tid < FLAGS_num_threads; tid++) { - predictors.emplace_back(CreatePaddlePredictor(config)); - } for (int tid = 0; tid < FLAGS_num_threads; tid++) { - 
threads.emplace_back([config, &total_time_of_threads, &predictors, tid] { - // auto predictor = base_predictor->Clone(); - auto &predictor = predictors[tid]; + threads.emplace_back([&, tid] { + // To ensure the thread binding correctly, + // please clone inside the threadpool. + auto predictor = base_predictor->Clone(); NEW_TENSOR(data_lod_attention); NEW_TENSOR(cell_init); NEW_TENSOR(data); diff --git a/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc b/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc index a1742f606819334e7b15e644f8b9e330795bf16e..c137090879e67d5314b94709586c5292dc208745 100644 --- a/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc @@ -121,14 +121,6 @@ void PrepareInputs(std::vector *input_slots, DataRecord *data) { } } -void SetConfig(AnalysisConfig *cfg) { - cfg->SetModel(FLAGS_infer_model + "/model", FLAGS_infer_model + "/params"); - cfg->DisableGpu(); - cfg->SwitchSpecifyInputNames(); - cfg->pass_builder()->TurnOnDebug(); - cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads); -} - void SetInput(std::vector> *inputs) { DataRecord data(FLAGS_infer_data, FLAGS_batch_size); std::vector input_slots; @@ -141,15 +133,22 @@ void SetInput(std::vector> *inputs) { } } +void SetConfig(AnalysisConfig *cfg, bool use_mkldnn = false) { + cfg->SetModel(FLAGS_infer_model + "/model", FLAGS_infer_model + "/params"); + cfg->DisableGpu(); + cfg->SwitchSpecifyInputNames(); + cfg->pass_builder()->TurnOnDebug(); + cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads); + if (use_mkldnn) { + cfg->EnableMKLDNN(); + } +} + void profile(bool use_mkldnn = false) { AnalysisConfig cfg; - SetConfig(&cfg); + SetConfig(&cfg, use_mkldnn); - if (use_mkldnn) { - cfg.EnableMKLDNN(); - } std::vector outputs; - std::vector> input_slots_all; SetInput(&input_slots_all); TestPrediction(reinterpret_cast(&cfg), @@ -169,16 +168,162 @@ TEST(Analyzer_seq_pool1, compare) { reinterpret_cast(&cfg), input_slots_all); } -// Check the fuse status -TEST(Analyzer_seq_pool1, fuse_statis) { +// Compare Deterministic result +TEST(Analyzer_seq_pool1, compare_determine) { + AnalysisConfig cfg; + SetConfig(&cfg); + + std::vector> input_slots_all; + SetInput(&input_slots_all); + CompareDeterministic(reinterpret_cast(&cfg), + input_slots_all); +} + +void analysis_fuse_statis(bool use_zerocopy) { AnalysisConfig cfg; SetConfig(&cfg); + cfg.SwitchUseFeedFetchOps(!use_zerocopy); int num_ops; auto predictor = CreatePaddlePredictor(cfg); - auto fuse_statis = GetFuseStatis( - static_cast(predictor.get()), &num_ops); + auto fuse_statis = GetFuseStatis(predictor.get(), &num_ops); + ASSERT_TRUE(fuse_statis.count("fc_fuse")); + ASSERT_EQ(fuse_statis.at("fc_fuse"), 10); + ASSERT_TRUE(fuse_statis.count("seqpool_concat_fuse")); + EXPECT_EQ(fuse_statis.at("seqpool_concat_fuse"), 2); LOG(INFO) << "num_ops: " << num_ops; - EXPECT_EQ(num_ops, 349); + EXPECT_EQ(num_ops, 195); +} + +// Check the fuse status +TEST(Analyzer_seq_pool1, fuse_statis) { analysis_fuse_statis(false); } + +void PrepareZeroCopyInputs( + const std::unique_ptr &predictor, + std::vector> *inputs) { + DataRecord data(FLAGS_infer_data, FLAGS_batch_size); + // only feed one batch + const auto &one_batch = data.NextBatch(); + inputs->clear(); + for (size_t i = 0; i < one_batch.size(); ++i) { + auto &slot = one_batch[i]; + auto tensor = predictor->GetInputTensor(slot.name + "_embed"); + tensor->Reshape(slot.shape); + tensor->SetLoD({slot.lod}); + 
ZeroCopyTensorAssignData(tensor.get(), slot.data); + inputs->emplace_back(std::move(tensor)); + } +} + +// diff: similarity_norm.tmp_0, // speed: fc_4.tmp_1 +static const char out_var_name[] = "reduce_sum_0.tmp_0"; + +// return the output values +std::vector zerocopy_profile(int repeat_times) { + AnalysisConfig config; + SetConfig(&config); + config.SwitchUseFeedFetchOps(false); + auto predictor = CreatePaddlePredictor(config); + std::vector> inputs; + PrepareZeroCopyInputs(predictor, &inputs); + auto output_tensor = predictor->GetOutputTensor(out_var_name); + Timer timer; + LOG(INFO) << "Warm up run..."; + timer.tic(); + predictor->ZeroCopyRun(); + PrintTime(FLAGS_batch_size, 1, 1, 0, timer.toc(), 1); + if (FLAGS_profile) { + paddle::platform::ResetProfiler(); + } + LOG(INFO) << "Run " << repeat_times << " times..."; + timer.tic(); + for (int i = 0; i < repeat_times; i++) { + predictor->ZeroCopyRun(); + } + PrintTime(FLAGS_batch_size, repeat_times, 1, 0, timer.toc() / repeat_times, + 1); + + LOG(INFO) << "ZeroCopy output: " << DescribeZeroCopyTensor(*output_tensor); + PaddlePlace place; + int output_size{0}; + auto *pdata = output_tensor->data(&place, &output_size); + std::vector res(output_size); + for (int i = 0; i < output_size; ++i) { + res[i] = pdata[i]; + } + return res; +} + +TEST(Analyzer_seq_pool1, zerocopy_profile) { zerocopy_profile(FLAGS_repeat); } + +TEST(Analyzer_seq_pool1, zerocopy_profile_threads) { + AnalysisConfig config; + SetConfig(&config); + config.SwitchUseFeedFetchOps(false); + + auto base_predictor = CreatePaddlePredictor(config); + double total_time_of_threads{0}; + std::vector threads; + + for (int tid = 0; tid < FLAGS_num_threads; tid++) { + threads.emplace_back([&, tid] { + // To ensure the thread binding correctly, + // please clone inside the threadpool. 
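Cloning inside each worker, rather than sharing one predictor or cloning them all up front on the main thread as the removed rnn1 code did, keeps whatever per-thread state Clone() sets up on the thread that will actually run inference. A minimal sketch of the pattern, assuming only that the predictor type exposes Clone():

#include <memory>
#include <thread>
#include <vector>

template <typename Predictor>
void RunCloned(Predictor* base, int num_threads) {
  std::vector<std::thread> workers;
  for (int tid = 0; tid < num_threads; ++tid) {
    workers.emplace_back([base, tid] {
      // Clone on the worker thread itself so scratch memory and device
      // bindings created by Clone() belong to this thread.
      auto predictor = base->Clone();
      (void)tid;
      // ... prepare inputs and run predictor here ...
    });
  }
  for (auto& t : workers) {
    t.join();
  }
}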
+ auto predictor = base_predictor->Clone(); + std::vector> inputs; + PrepareZeroCopyInputs(predictor, &inputs); + auto output_tensor = predictor->GetOutputTensor(out_var_name); + Timer timer; + double total_time{0}; + + LOG(INFO) << "Warm up run..."; + timer.tic(); + predictor->ZeroCopyRun(); + PrintTime(FLAGS_batch_size, 1, FLAGS_num_threads, tid, timer.toc(), 1); + if (FLAGS_profile) { + paddle::platform::ResetProfiler(); + } + int repeat_times = FLAGS_repeat; + LOG(INFO) << "Run " << repeat_times << " times..."; + timer.tic(); + + for (int i = 0; i < repeat_times; i++) { + predictor->ZeroCopyRun(); + } + total_time += timer.toc(); + total_time_of_threads += total_time; + + LOG(INFO) << "thread time: " << total_time / repeat_times; + }); + } + + for (auto &t : threads) { + t.join(); + } + + LOG(INFO) << "average time: " + << total_time_of_threads / FLAGS_num_threads / FLAGS_repeat; +} + +TEST(Analyzer_seq_pool1, zerocopy_fuse_statis) { analysis_fuse_statis(true); } + +TEST(Analyzer_seq_pool1, zerocopy_compare_native) { + AnalysisConfig config; + SetConfig(&config); + config.SwitchUseFeedFetchOps(true); + auto predictor = CreatePaddlePredictor(config.ToNativeConfig()); + std::vector native_outputs; + std::vector> input_slots_all; + SetInput(&input_slots_all); + ASSERT_TRUE(predictor->Run(input_slots_all[0], &native_outputs)); + EXPECT_EQ(native_outputs.size(), 1UL); + + auto zerocopy_output = zerocopy_profile(1); + EXPECT_EQ(zerocopy_output.size() * sizeof(float), + native_outputs.front().data.length()); + auto *native_data = static_cast(native_outputs.front().data.data()); + for (size_t i = 0; i < zerocopy_output.size(); ++i) { + EXPECT_NEAR(zerocopy_output[i], native_data[i], 1e-3); + } } } // namespace analysis diff --git a/paddle/fluid/inference/tests/api/config_printer.h b/paddle/fluid/inference/tests/api/config_printer.h index cf0f1d5c18c79e34c96d4301dbf13c924ae2a3f0..ecc10bafd650e52dfb73e8dd4329c697ff4f4ccc 100644 --- a/paddle/fluid/inference/tests/api/config_printer.h +++ b/paddle/fluid/inference/tests/api/config_printer.h @@ -62,7 +62,7 @@ std::ostream &operator<<(std::ostream &os, const contrib::AnalysisConfig &config) { os << GenSpaces(num_spaces) << "contrib::AnalysisConfig {\n"; num_spaces++; - os << *reinterpret_cast(&config); + os << config.ToNativeConfig(); if (!config.model_from_memory()) { os << GenSpaces(num_spaces) << "prog_file: " << config.prog_file() << "\n"; os << GenSpaces(num_spaces) << "param_file: " << config.params_file() diff --git a/paddle/fluid/inference/tests/api/tester_helper.h b/paddle/fluid/inference/tests/api/tester_helper.h index 41d033df85811a4730cab8b3275aaffd1ba338e5..7572468e323e08c3538b92715395c787026eb071 100644 --- a/paddle/fluid/inference/tests/api/tester_helper.h +++ b/paddle/fluid/inference/tests/api/tester_helper.h @@ -54,11 +54,13 @@ namespace paddle { namespace inference { void PrintConfig(const PaddlePredictor::Config *config, bool use_analysis) { + const auto *analysis_config = + reinterpret_cast(config); if (use_analysis) { - LOG(INFO) << *reinterpret_cast(config); + LOG(INFO) << *analysis_config; return; } - LOG(INFO) << *reinterpret_cast(config); + LOG(INFO) << analysis_config->ToNativeConfig(); } void CompareResult(const std::vector &outputs, @@ -96,12 +98,13 @@ void CompareResult(const std::vector &outputs, std::unique_ptr CreateTestPredictor( const PaddlePredictor::Config *config, bool use_analysis = true) { + const auto *analysis_config = + reinterpret_cast(config); if (use_analysis) { - return CreatePaddlePredictor( - 
*(reinterpret_cast(config))); + return CreatePaddlePredictor(*analysis_config); } - return CreatePaddlePredictor( - *(reinterpret_cast(config))); + auto native_config = analysis_config->ToNativeConfig(); + return CreatePaddlePredictor(native_config); } size_t GetSize(const PaddleTensor &out) { return VecReduceToInt(out.shape); } @@ -310,13 +313,12 @@ void CompareDeterministic( int num_times = FLAGS_repeat; auto predictor = CreateTestPredictor(config, FLAGS_use_analysis); - // warmup run std::vector warmup_outputs, outputs; - predictor->Run(inputs[0], &warmup_outputs, batch_size); - // run num_times to Compare Deterministic Result. - for (int i = 0; i < num_times; i++) { - for (size_t j = 0; j < inputs.size(); j++) { + for (size_t j = 0; j < inputs.size(); j++) { + // warmup run + predictor->Run(inputs[j], &warmup_outputs, batch_size); + for (int i = 0; i < num_times; i++) { predictor->Run(inputs[j], &outputs, batch_size); CompareResult(outputs, warmup_outputs); } @@ -328,10 +330,7 @@ void CompareNativeAndAnalysis( const std::vector> &inputs) { PrintConfig(config, true); std::vector native_outputs, analysis_outputs; - const auto *analysis_config = - reinterpret_cast(config); - auto native_config = analysis_config->ToNativeConfig(); - TestOneThreadPrediction(&native_config, inputs, &native_outputs, false); + TestOneThreadPrediction(config, inputs, &native_outputs, false); TestOneThreadPrediction(config, inputs, &analysis_outputs, true); CompareResult(analysis_outputs, native_outputs); } diff --git a/paddle/fluid/inference/tests/api/trt_models_tester.cc b/paddle/fluid/inference/tests/api/trt_models_tester.cc index 21df6eab814dad5e2f654bf6d9558a2f9859d5ae..9725c1903293b2ce4f6c4baeb1e6d63af28c1c3b 100644 --- a/paddle/fluid/inference/tests/api/trt_models_tester.cc +++ b/paddle/fluid/inference/tests/api/trt_models_tester.cc @@ -99,24 +99,12 @@ void compare(std::string model_dir, bool use_tensorrt) { SetFakeImageInput(&inputs_all, model_dir, false, "__model__", ""); } - std::vector native_outputs; - NativeConfig native_config; - SetConfig(&native_config, model_dir, true, false, - FLAGS_batch_size); - TestOneThreadPrediction( - reinterpret_cast(&native_config), inputs_all, - &native_outputs, false); - - std::vector analysis_outputs; contrib::AnalysisConfig analysis_config; - analysis_config.EnableUseGpu(50, 0); SetConfig(&analysis_config, model_dir, true, use_tensorrt, FLAGS_batch_size); - TestOneThreadPrediction( - reinterpret_cast(&analysis_config), inputs_all, - &analysis_outputs, true); - - CompareResult(native_outputs, analysis_outputs); + CompareNativeAndAnalysis( + reinterpret_cast(&analysis_config), + inputs_all); } TEST(TensorRT_mobilenet, compare) { diff --git a/paddle/fluid/inference/utils/CMakeLists.txt b/paddle/fluid/inference/utils/CMakeLists.txt index cfb80fe6ec11a55a887c7552ec4e6a8a0c6a2fce..c43eaf7f9849ee4a88ed95bdb8b6966da8760435 100644 --- a/paddle/fluid/inference/utils/CMakeLists.txt +++ b/paddle/fluid/inference/utils/CMakeLists.txt @@ -2,6 +2,3 @@ cc_library(benchmark SRCS benchmark.cc DEPS enforce) cc_test(test_benchmark SRCS benchmark_tester.cc DEPS benchmark) cc_binary(visualizer SRCS visualizer.cc DEPS analysis paddle_pass_builder ir_pass_manager pass graph_viz_pass analysis_passes) -if(WIN32) - target_link_libraries(visualizer shlwapi) -endif(WIN32) diff --git a/paddle/fluid/operators/conv_cudnn_op.cu.cc b/paddle/fluid/operators/conv_cudnn_op.cu.cc index dbb6ffd5e29d73ca16766fd5b843c9590f4db3e1..f5208e7a601f4dd33b486e5840178022f66431e5 100644 --- 
a/paddle/fluid/operators/conv_cudnn_op.cu.cc +++ b/paddle/fluid/operators/conv_cudnn_op.cu.cc @@ -137,7 +137,6 @@ class CUDNNConvOpKernel : public framework::OpKernel { // ------------------- cudnn conv algorithm --------------------- cudnnConvolutionFwdAlgo_t algo; auto handle = dev_ctx.cudnn_handle(); - auto workspace_handle = dev_ctx.cudnn_workspace_handle(); bool half_float = false; #if CUDA_VERSION >= 9000 && CUDNN_VERSION_MIN(7, 0, 1) @@ -158,6 +157,8 @@ class CUDNNConvOpKernel : public framework::OpKernel { VLOG(5) << "NOT use cudnn_tensor_op_math"; } #endif + Tensor cudnn_workspace; + void* cudnn_workspace_ptr = nullptr; auto x_dims = framework::vectorize(input->dims()); auto f_dims = framework::vectorize(filter->dims()); @@ -180,21 +181,26 @@ class CUDNNConvOpKernel : public framework::OpKernel { .Var(kCUDNNFwdAlgoCache) ->GetMutable>(); } + cudnn_workspace = + ctx.AllocateTmpTensor( + framework::make_ddim( + {static_cast(workspace_size_limit)}), + dev_ctx); + cudnn_workspace_ptr = static_cast(cudnn_workspace.data()); + algo = algo_cache->GetAlgorithm( x_dims, f_dims, strides, paddings, dilations, 0, [&]() { int returned_algo_count; std::array fwd_perf_stat; - auto cudnn_find_func = [&](void* cudnn_workspace) { - CUDNN_ENFORCE( - platform::dynload::cudnnFindConvolutionForwardAlgorithmEx( - handle, cudnn_input_desc, input_data, cudnn_filter_desc, - filter_data, cudnn_conv_desc, cudnn_output_desc, - output_data, kNUM_CUDNN_FWD_ALGS, &returned_algo_count, - fwd_perf_stat.data(), cudnn_workspace, - workspace_size_limit)); - }; - workspace_handle.RunFunc(cudnn_find_func, workspace_size_limit); + + CUDNN_ENFORCE( + platform::dynload::cudnnFindConvolutionForwardAlgorithmEx( + handle, cudnn_input_desc, input_data, cudnn_filter_desc, + filter_data, cudnn_conv_desc, cudnn_output_desc, + output_data, kNUM_CUDNN_FWD_ALGS, &returned_algo_count, + fwd_perf_stat.data(), cudnn_workspace_ptr, + workspace_size_limit)); VLOG(3) << "Perf result: (algo: stat, time, memory)"; for (int i = 0; i < returned_algo_count; ++i) { @@ -219,17 +225,23 @@ class CUDNNConvOpKernel : public framework::OpKernel { PADDLE_ENFORCE_LE(workspace_size_in_bytes, workspace_size_limit, "workspace_size to be allocated exceeds the limit"); + // Allocate on GPU memory + if (!cudnn_workspace_ptr) { + cudnn_workspace = + ctx.AllocateTmpTensor( + framework::make_ddim( + {static_cast(workspace_size_in_bytes)}), + dev_ctx); + cudnn_workspace_ptr = static_cast(cudnn_workspace.data()); + } // ------------------- cudnn conv forward --------------------- ScalingParamType alpha = 1.0f, beta = 0.0f; for (int i = 0; i < groups; i++) { - auto cudnn_func = [&](void* cudnn_workspace) { - CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward( - handle, &alpha, cudnn_input_desc, input_data + i * group_offset_in, - cudnn_filter_desc, filter_data + i * group_offset_filter, - cudnn_conv_desc, algo, cudnn_workspace, workspace_size_in_bytes, - &beta, cudnn_output_desc, output_data + i * group_offset_out)); - }; - workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes); + CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward( + handle, &alpha, cudnn_input_desc, input_data + i * group_offset_in, + cudnn_filter_desc, filter_data + i * group_offset_filter, + cudnn_conv_desc, algo, cudnn_workspace_ptr, workspace_size_in_bytes, + &beta, cudnn_output_desc, output_data + i * group_offset_out)); } } }; @@ -297,6 +309,21 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor( 
layout, framework::vectorize2int(filter->dims()), groups); +#if CUDA_VERSION >= 9000 && CUDNN_VERSION_MIN(7, 0, 1) + // Enable Tensor Core for cudnn backward + if (dev_ctx.GetComputeCapability() >= 70 && + std::type_index(typeid(T)) == + std::type_index(typeid(platform::float16))) { + CUDNN_ENFORCE(platform::dynload::cudnnSetConvolutionMathType( + cudnn_conv_desc, CUDNN_TENSOR_OP_MATH)); + VLOG(5) << "use cudnn_tensor_op_math for backward"; + } else { + CUDNN_ENFORCE(platform::dynload::cudnnSetConvolutionMathType( + cudnn_conv_desc, CUDNN_DEFAULT_MATH)); + VLOG(5) << "NOT use cudnn_tensor_op_math for backward"; + } +#endif + int input_channels = input->dims()[1]; int input_height, input_width, input_depth; if (input->dims().size() == 5) { @@ -338,10 +365,20 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { workspace_size_limit = max_user_size * 1024 * 1024; } + Tensor cudnn_workspace; + void* cudnn_workspace_ptr = nullptr; + if ((input_data || filter_data) && exhaustive_search) { + cudnn_workspace = + ctx.AllocateTmpTensor( + framework::make_ddim( + {static_cast(workspace_size_limit)}), + dev_ctx); + cudnn_workspace_ptr = static_cast(cudnn_workspace.data()); + } + auto x_dims = framework::vectorize(input->dims()); auto f_dims = framework::vectorize(filter->dims()); auto handle = dev_ctx.cudnn_handle(); - auto workspace_handle = dev_ctx.cudnn_workspace_handle(); if (input_grad) { T* input_grad_data = input_grad->mutable_data(ctx.GetPlace()); if (exhaustive_search) { @@ -359,25 +396,22 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { ->GetMutable< AlgorithmsCache>(); } + data_algo = data_algo_cache->GetAlgorithm( x_dims, f_dims, strides, paddings, dilations, 0, [&]() { int returned_algo_count; std::array data_perf_stat; - auto cudnn_find_bd_data_func = [&](void* cudnn_workspace) { - CUDNN_ENFORCE( - platform::dynload:: - cudnnFindConvolutionBackwardDataAlgorithmEx( - handle, cudnn_filter_desc, filter_data, - cudnn_output_grad_desc, output_grad_data, - cudnn_conv_desc, cudnn_input_desc, input_grad_data, - kNUM_CUDNN_BWD_DATA_ALGS, &returned_algo_count, - data_perf_stat.data(), cudnn_workspace, - workspace_size_limit)); - }; - workspace_handle.RunFunc(cudnn_find_bd_data_func, - workspace_size_limit); + + CUDNN_ENFORCE(platform::dynload:: + cudnnFindConvolutionBackwardDataAlgorithmEx( + handle, cudnn_filter_desc, filter_data, + cudnn_output_grad_desc, output_grad_data, + cudnn_conv_desc, cudnn_input_desc, + input_grad_data, kNUM_CUDNN_BWD_DATA_ALGS, + &returned_algo_count, data_perf_stat.data(), + cudnn_workspace_ptr, workspace_size_limit)); VLOG(3) << "Perf result: (algo: stat, time, memory)"; for (int i = 0; i < returned_algo_count; ++i) { @@ -428,25 +462,23 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { ->GetMutable< AlgorithmsCache>(); } + filter_algo = f_algo_cache->GetAlgorithm( x_dims, f_dims, strides, paddings, dilations, 0, [&]() { int returned_algo_count; std::array filter_perf_stat; - auto cudnn_find_bd_f_func = [&](void* cudnn_workspace) { - CUDNN_ENFORCE( - platform::dynload:: - cudnnFindConvolutionBackwardFilterAlgorithmEx( - handle, cudnn_input_desc, input_data, - cudnn_output_grad_desc, output_grad_data, - cudnn_conv_desc, cudnn_filter_desc, - filter_grad_data, kNUM_CUDNN_BWD_FILTER_ALGS, - &returned_algo_count, filter_perf_stat.data(), - cudnn_workspace, workspace_size_limit)); - }; - workspace_handle.RunFunc(cudnn_find_bd_f_func, - workspace_size_limit); + + CUDNN_ENFORCE( + platform::dynload:: + 
cudnnFindConvolutionBackwardFilterAlgorithmEx( + handle, cudnn_input_desc, input_data, + cudnn_output_grad_desc, output_grad_data, + cudnn_conv_desc, cudnn_filter_desc, filter_grad_data, + kNUM_CUDNN_BWD_FILTER_ALGS, &returned_algo_count, + filter_perf_stat.data(), cudnn_workspace_ptr, + workspace_size_limit)); return filter_perf_stat[0].algo; }); VLOG(3) << "cuDNN backward filter algo " << filter_algo; @@ -467,6 +499,16 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { workspace_size_in_bytes = std::max(workspace_size_in_bytes, tmp_size); } + // ------------------- cudnn conv workspace --------------------- + if (!cudnn_workspace_ptr) { + cudnn_workspace = + ctx.AllocateTmpTensor( + framework::make_ddim( + {static_cast(workspace_size_in_bytes)}), + dev_ctx); + cudnn_workspace_ptr = static_cast(cudnn_workspace.data()); + } + // ------------------- cudnn conv backward data --------------------- ScalingParamType alpha = 1.0f, beta = 0.0f; if (input_grad) { @@ -474,15 +516,12 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { // Because beta is zero, it is unnecessary to reset input_grad. for (int i = 0; i < groups; i++) { - auto cudnn_func = [&](void* cudnn_workspace) { - CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData( - handle, &alpha, cudnn_filter_desc, - filter_data + i * group_offset_filter, cudnn_output_grad_desc, - output_grad_data + i * group_offset_out, cudnn_conv_desc, - data_algo, cudnn_workspace, workspace_size_in_bytes, &beta, - cudnn_input_desc, input_grad_data + i * group_offset_in)); - }; - workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes); + CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData( + handle, &alpha, cudnn_filter_desc, + filter_data + i * group_offset_filter, cudnn_output_grad_desc, + output_grad_data + i * group_offset_out, cudnn_conv_desc, data_algo, + cudnn_workspace_ptr, workspace_size_in_bytes, &beta, + cudnn_input_desc, input_grad_data + i * group_offset_in)); } } // ------------------- cudnn conv backward filter --------------------- @@ -490,15 +529,12 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { T* filter_grad_data = filter_grad->mutable_data(ctx.GetPlace()); // Because beta is zero, it is unnecessary to reset filter_grad. 
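Throughout this kernel the patch drops the per-call workspace callback (workspace_handle.RunFunc) in favor of one temporary buffer allocated up front whose raw pointer is handed to every cuDNN call, including the per-group backward-filter loop below. A schematic of that shape, with a plain host buffer standing in for the framework's temporary GPU tensor:

#include <cstddef>
#include <cstdint>
#include <vector>

// Placeholder for a device workspace; the real code allocates a temporary
// Tensor through the execution context and takes its raw pointer.
struct Workspace {
  explicit Workspace(size_t bytes) : storage(bytes) {}
  void* ptr() { return storage.data(); }
  std::vector<int8_t> storage;
};

void RunGroupedConvBackward(size_t workspace_bytes, int groups) {
  Workspace workspace(workspace_bytes);  // allocated once for the whole kernel
  void* workspace_ptr = workspace.ptr();
  for (int g = 0; g < groups; ++g) {
    // Each per-group cuDNN call receives the same workspace_ptr and
    // workspace_bytes, e.g.
    //   cudnnConvolutionBackwardFilter(..., workspace_ptr, workspace_bytes, ...);
    (void)workspace_ptr;
  }
}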
for (int i = 0; i < groups; i++) { - auto cudnn_func = [&](void* cudnn_workspace) { - CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter( - handle, &alpha, cudnn_input_desc, - input_data + i * group_offset_in, cudnn_output_grad_desc, - output_grad_data + i * group_offset_out, cudnn_conv_desc, - filter_algo, cudnn_workspace, workspace_size_in_bytes, &beta, - cudnn_filter_desc, filter_grad_data + i * group_offset_filter)); - }; - workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes); + CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter( + handle, &alpha, cudnn_input_desc, input_data + i * group_offset_in, + cudnn_output_grad_desc, output_grad_data + i * group_offset_out, + cudnn_conv_desc, filter_algo, cudnn_workspace_ptr, + workspace_size_in_bytes, &beta, cudnn_filter_desc, + filter_grad_data + i * group_offset_filter)); } } } diff --git a/paddle/fluid/operators/conv_mkldnn_op.cc b/paddle/fluid/operators/conv_mkldnn_op.cc index 03d9d466c3238c6c853bca75f5b9791a0841ff78..16ffc11419f667e4d1c7f6c9a5be355478a48fab 100644 --- a/paddle/fluid/operators/conv_mkldnn_op.cc +++ b/paddle/fluid/operators/conv_mkldnn_op.cc @@ -318,10 +318,14 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { std::vector paddings = ctx.Attr>("paddings"); std::vector dilations = ctx.Attr>("dilations"); int groups = ctx.Attr("groups"); - bool fuse_relu = ctx.Attr("fuse_relu"); + bool fuse_residual_conn = ctx.Attr("fuse_residual_connection"); bool force_fp32_output = ctx.Attr("force_fp32_output"); + if (fuse_residual_conn) { + PADDLE_ENFORCE(force_fp32_output != true, + "residual fusion does not support force output with fp32"); + } bool is_conv3d = strides.size() == 3U; // TODO(tpatejko): add support for dilation @@ -355,14 +359,23 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { framework::DataTypeTrait::DataType); } + if (fuse_residual_conn) { + auto residual = ctx.Input("ResidualData"); + auto residual_dt = paddle::framework::ToMKLDNNDataType(residual->type()); + if (dst_dt != residual_dt) dst_dt = residual_dt; + } + // Get unique name for storing MKLDNN primitives std::string key; key.reserve(MaxKeyLength); platform::ConvMKLDNNHandler::AppendKey( &key, src_tz, weights_tz, strides, paddings, dilations, groups, src_dt, - input->format(), dst_dt, ctx.op().Output("Output")); + input->format(), fuse_relu, fuse_residual_conn, + ctx.op().Output("Output")); const std::string key_conv_pd = key + "@conv_pd"; + bool need_s8_to_u8 = false; + std::shared_ptr conv_p = nullptr; std::shared_ptr src_memory_p = nullptr; std::shared_ptr user_src_memory_p = nullptr; @@ -377,14 +390,20 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { auto src_key = key + "@src_mem_p"; auto user_src_key = key + "@user_src_mem_p"; auto src_reorder_key = key + "@src_mem_preorder_p"; + auto residual_reorder_key = key + "@residual_data_mem_preorder_p"; + conv_p = std::static_pointer_cast( dev_ctx.GetBlob(prim_key)); + if (conv_p == nullptr || !is_test) { const K* filter_data = filter->data(); auto scale_in_data = ctx.Attr("Scale_in"); + auto scale_in_eltwise_data = ctx.Attr("Scale_in_eltwise"); auto scale_weights_data = ctx.Attr>("Scale_weights"); auto scale_out_data = force_fp32_output ? 1.0f : ctx.Attr("Scale_out"); + float sum_scale = + fuse_residual_conn ? 
scale_out_data / scale_in_eltwise_data : 1.0f; bool is_multi_channel = scale_weights_data.size() > 1; @@ -427,6 +446,7 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { weights_tz, memory::data_type::s8, chosen_memory_format); auto dst_md = platform::MKLDNNMemDesc(dst_tz, dst_dt, chosen_memory_format); + // create a conv primitive descriptor and save it for usage in backward if (bias) { bias_tz = paddle::framework::vectorize2int(bias->dims()); @@ -434,11 +454,13 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { memory::format::x); conv_pd = ConvFwdPrimitiveDesc(src_md, weights_md, bias_md, dst_md, strides, paddings, mkldnn_engine, - fuse_relu, output_shift_scale, is_test); + fuse_relu, fuse_residual_conn, + output_shift_scale, sum_scale, is_test); } else { - conv_pd = ConvFwdPrimitiveDesc(src_md, weights_md, dst_md, strides, - paddings, mkldnn_engine, fuse_relu, - output_shift_scale, is_test); + conv_pd = + ConvFwdPrimitiveDesc(src_md, weights_md, dst_md, strides, paddings, + mkldnn_engine, fuse_relu, fuse_residual_conn, + output_shift_scale, sum_scale, is_test); } // Save conv_pd/src_memory/weights_memory for backward pass dev_ctx.SetBlob(key_conv_pd, conv_pd); @@ -463,7 +485,41 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { user_weights_memory_p, pipeline, is_test, true, scale_weights_data, mask_reorder); - if (!force_fp32_output) { + if (fuse_residual_conn) { + auto residual_param = ctx.Input("ResidualData"); + PADDLE_ENFORCE_EQ(output->dims(), residual_param->dims(), + "Output and elementwise parameter need to have the " + "same dimension sizes"); + auto residual_dt = + paddle::framework::ToMKLDNNDataType(residual_param->type()); + if (residual_param->format() != handler->GetDstFormat()) { + auto residual_data_tz = + paddle::framework::vectorize2int(residual_param->dims()); + + auto user_residual_md = platform::MKLDNNMemDesc( + residual_data_tz, residual_dt, residual_param->format()); + + if (residual_dt == mkldnn::memory::data_type::u8) { + dst_memory_p = platform::SetDstMemory( + ctx, output, residual_param, user_residual_md, handler, + &pipeline); + } else { + need_s8_to_u8 = fuse_relu; + dst_memory_p = platform::SetDstMemory( + ctx, output, residual_param, user_residual_md, handler, + &pipeline); + } + } else { + output->ShareDataWith(*residual_param); + if (residual_dt == mkldnn::memory::data_type::u8) { + dst_memory_p = + platform::SetDstMemory(ctx, output, handler); + } else { + need_s8_to_u8 = fuse_relu; + dst_memory_p = platform::SetDstMemory(ctx, output, handler); + } + } + } else if (!force_fp32_output) { if (fuse_relu) { dst_memory_p = platform::SetDstMemory(ctx, output, handler); } else { @@ -476,11 +532,11 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { // create convolution op primitive auto scale_bias_key = key + "@scale_bias"; if (bias) { - const float* bias_data = bias->data(); + const K* bias_data = bias->data(); auto user_bias_md = platform::MKLDNNMemDesc( - {bias_tz}, platform::MKLDNNGetDataType(), memory::format::x); + {bias_tz}, platform::MKLDNNGetDataType(), memory::format::x); auto user_bias_memory_p = handler->AcquireBiasMemory( - user_bias_md, to_void_cast(bias_data)); + user_bias_md, to_void_cast(bias_data)); std::shared_ptr bias_memory_p; int mask_reorder = is_multi_channel ? 
1 << 0 : 1; int count = @@ -526,26 +582,51 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { handler.reset(new platform::ConvMKLDNNHandler(conv_pd, dev_ctx, mkldnn_engine, key)); } - if (!force_fp32_output) { + + if (fuse_residual_conn) { + auto residual_param = ctx.Input("ResidualData"); + auto residual_dt = + paddle::framework::ToMKLDNNDataType(residual_param->type()); + output->ShareDataWith(*residual_param); + if (residual_dt == mkldnn::memory::data_type::u8) { + platform::SetDstMemoryHandler(ctx, output, handler, + &dst_memory_p); + } else { + platform::SetDstMemoryHandler(ctx, output, handler, + &dst_memory_p); + } + } else if (!force_fp32_output) { if (fuse_relu) { - dst_memory_p = - platform::SetDstMemoryHandler(ctx, output, handler); + platform::SetDstMemoryHandler(ctx, output, handler, + &dst_memory_p); } else { - dst_memory_p = - platform::SetDstMemoryHandler(ctx, output, handler); + platform::SetDstMemoryHandler(ctx, output, handler, + &dst_memory_p); } } else { - dst_memory_p = - platform::SetDstMemoryHandler(ctx, output, handler); + platform::SetDstMemoryHandler(ctx, output, handler, + &dst_memory_p); } + if (src_memory_reorder_p) { pipeline.push_back(*src_memory_reorder_p); } + + auto residual_reorder_p = std::static_pointer_cast( + dev_ctx.GetBlob(residual_reorder_key)); + if (residual_reorder_p) { + pipeline.push_back(*residual_reorder_p); + } + pipeline.push_back(*conv_p); } // push primitive to stream and wait until it's executed stream(stream::kind::eager).submit(pipeline).wait(); + if (need_s8_to_u8) { + output->mutable_data(ctx.GetPlace()); + } + output->set_layout(DataLayout::kMKLDNN); output->set_format(GetMKLDNNFormat(*dst_memory_p)); } @@ -577,11 +658,15 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { } mkldnn::primitive_attr CreatePostOps( - bool fuse_relu, const std::vector output_shift_scale) const { + bool fuse_relu, bool fuse_residual_conn, + const std::vector output_shift_scale, float sum_scale) const { mkldnn::primitive_attr conv_attr; mkldnn::post_ops post_operations; int mask = output_shift_scale.size() > 1 ? 
1 << 1 : 0; conv_attr.set_output_scales(mask, output_shift_scale); + if (fuse_residual_conn) { + post_operations.append_sum(sum_scale); + } if (fuse_relu) { constexpr float scale = 1.0f; constexpr float negative_slope = 0.0f; @@ -622,8 +707,9 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { const memory::desc& dst, const std::vector& strides, const std::vector& paddings, const mkldnn::engine& engine, const bool fuse_relu, + const bool fuse_residual_conn, const std::vector output_shift_scale, - bool is_test) const { + const float sum_scale, bool is_test) const { memory::dims stride_dims = {strides[0], strides[1]}; memory::dims padding_dims = {paddings[0], paddings[1]}; @@ -634,8 +720,8 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { propagation, mkldnn::convolution_direct, src, weights, dst, stride_dims, padding_dims, padding_dims, mkldnn::padding_kind::zero); - mkldnn::primitive_attr conv_attr = - CreatePostOps(fuse_relu, output_shift_scale); + mkldnn::primitive_attr conv_attr = CreatePostOps( + fuse_relu, fuse_residual_conn, output_shift_scale, sum_scale); auto p_conv_pd = new mkldnn::convolution_forward::primitive_desc( conv_desc, conv_attr, engine); @@ -675,8 +761,9 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { const std::vector& strides, const std::vector& paddings, const mkldnn::engine& engine, const bool fuse_relu, + const bool fuse_residual_conn, const std::vector output_shift_scale, - bool is_test) const { + const float sum_scale, bool is_test) const { memory::dims stride_dims = {strides[0], strides[1]}; memory::dims padding_dims = {paddings[0], paddings[1]}; @@ -687,8 +774,8 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { propagation, mkldnn::convolution_direct, src, weights, bias, dst, stride_dims, padding_dims, padding_dims, mkldnn::padding_kind::zero); - mkldnn::primitive_attr conv_attr = - CreatePostOps(fuse_relu, output_shift_scale); + mkldnn::primitive_attr conv_attr = CreatePostOps( + fuse_relu, fuse_residual_conn, output_shift_scale, sum_scale); auto p_conv_pd = new mkldnn::convolution_forward::primitive_desc( conv_desc, conv_attr, engine); @@ -891,7 +978,7 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel { input_grad->set_format(GetMKLDNNFormat(*diff_src_memory_p)); } stream(stream::kind::eager).submit(pipeline).wait(); - } // Compute() + } }; } // namespace operators diff --git a/paddle/fluid/operators/data_norm_op.cc b/paddle/fluid/operators/data_norm_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..d5bc25d19cba4de6f059612e3e8c4a65b2edd0f9 --- /dev/null +++ b/paddle/fluid/operators/data_norm_op.cc @@ -0,0 +1,409 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
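For the residual fusion added above, the convolution result is accumulated into the existing ResidualData buffer through an MKL-DNN sum post-op whose scale re-quantizes the residual into the convolution's output scale (sum_scale = Scale_out / Scale_in_eltwise), optionally followed by a ReLU post-op. A sketch of how such an attribute could be assembled with the MKL-DNN 0.x C++ API; this is illustrative only, the kernel builds it inside CreatePostOps:

    #include <vector>
    #include "mkldnn.hpp"

    // Illustrative scales; in the kernel they come from the op attributes.
    mkldnn::primitive_attr MakeConvAttr(bool fuse_residual, bool fuse_relu,
                                        const std::vector<float>& output_shift_scale,
                                        float scale_out, float scale_in_eltwise) {
      mkldnn::primitive_attr attr;
      mkldnn::post_ops ops;
      // Per-channel output scales use mask 1 << 1 (channel axis); a single
      // common scale uses mask 0.
      int mask = output_shift_scale.size() > 1 ? 1 << 1 : 0;
      attr.set_output_scales(mask, output_shift_scale);
      if (fuse_residual) {
        // Re-quantize the residual tensor into the output's scale before summing.
        float sum_scale = scale_out / scale_in_eltwise;
        ops.append_sum(sum_scale);
      }
      if (fuse_relu) {
        ops.append_eltwise(1.0f, mkldnn::algorithm::eltwise_relu,
                           0.0f /*negative_slope*/, 0.0f);
      }
      attr.set_post_ops(ops);
      return attr;
    }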
*/ + +#include "paddle/fluid/operators/data_norm_op.h" +#include +#include "paddle/fluid/framework/data_layout.h" +#ifdef PADDLE_WITH_MKLDNN +#include "paddle/fluid/platform/mkldnn_helper.h" +#endif + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +using LoDTensor = framework::LoDTensor; +using DataLayout = framework::DataLayout; + +template +using EigenArrayMap = + Eigen::Map>; +template +using ConstEigenArrayMap = + Eigen::Map>; +template +using EigenVectorArrayMap = Eigen::Map>; +template +using ConstEigenVectorArrayMap = + Eigen::Map>; + +class DataNormOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), ""); + PADDLE_ENFORCE(ctx->HasInput("BatchSize"), ""); + PADDLE_ENFORCE(ctx->HasInput("BatchSum"), ""); + PADDLE_ENFORCE(ctx->HasInput("BatchSquareSum"), ""); + PADDLE_ENFORCE(ctx->HasOutput("Means"), ""); + PADDLE_ENFORCE(ctx->HasOutput("Scales"), ""); + PADDLE_ENFORCE(ctx->HasOutput("Y"), ""); + + const auto x_dims = ctx->GetInputDim("X"); + const DataLayout data_layout = framework::StringToDataLayout( + ctx->Attrs().Get("data_layout")); + + PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5, + "Input X must have 2 to 5 dimensions."); + + const int64_t C = + (data_layout == DataLayout::kNCHW ? x_dims[1] + : x_dims[x_dims.size() - 1]); + + PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSize").size(), 1UL); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSum").size(), 1UL); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSquareSum").size(), 1UL); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSize")[0], C); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSum")[0], C); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("BatchSquareSum")[0], C); + + ctx->SetOutputDim("Y", x_dims); + ctx->SetOutputDim("Means", {C}); + ctx->SetOutputDim("Scales", {C}); + ctx->ShareLoD("X", "Y"); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + auto input_data_type = ctx.Input("X")->type(); + // By default, the type of the scale, bias, mean, + // and var tensors should both be float. (For float or float16 input tensor) + // or double (For double input tensor). 
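The forward kernel that follows normalizes each feature channel with running batch statistics rather than per-minibatch statistics: means = BatchSum / BatchSize, scales = sqrt(BatchSize / BatchSquareSum), and Y = (X - means) * scales broadcast over the batch dimension. A small standalone check of that arithmetic for a single channel, with made-up numbers:

    #include <cmath>
    #include <cstdio>

    int main() {
      // Hypothetical running statistics for one channel.
      double batch_size = 4.0;         // accumulated count
      double batch_sum = 10.0;         // accumulated sum of x
      double batch_square_sum = 40.0;  // accumulated squared deviations

      double mean = batch_sum / batch_size;                      // 2.5
      double scale = std::sqrt(batch_size / batch_square_sum);   // ~0.316

      double x = 5.0;
      double y = (x - mean) * scale;  // what the kernel writes to Y
      std::printf("mean=%g scale=%g y=%g\n", mean, scale, y);
      return 0;
    }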
+ auto dn_param_type = framework::proto::VarType::FP32; + if (input_data_type == framework::proto::VarType::FP64) { + dn_param_type = framework::proto::VarType::FP64; + } + PADDLE_ENFORCE_EQ(dn_param_type, ctx.Input("BatchSize")->type(), + "BatchSize input should be of float type"); + PADDLE_ENFORCE_EQ(dn_param_type, ctx.Input("BatchSum")->type(), + "BatchSum input should be of float type"); + PADDLE_ENFORCE_EQ(dn_param_type, + ctx.Input("BatchSquareSum")->type(), + "BatchSquareSum input should be of float type"); + + // TODO(pzelazko-intel): enable MKLDNN layout when it's ready + framework::LibraryType library = framework::LibraryType::kPlain; + framework::DataLayout layout = framework::DataLayout::kAnyLayout; +#ifdef PADDLE_WITH_MKLDNN + if (library == framework::LibraryType::kPlain && + platform::CanMKLDNNBeUsed(ctx)) { + library = framework::LibraryType::kMKLDNN; + layout = framework::DataLayout::kMKLDNN; + } +#endif + + return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout, + library); + } +}; + +class DataNormOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + // AddAttr("is_test", "").SetDefault(false); + AddAttr("epsilon", "") + .SetDefault(1e-4) + .AddCustomChecker([](const float &epsilon) { + PADDLE_ENFORCE(epsilon >= 0.0f && epsilon <= 0.001f, + "'epsilon' should be between 0.0 and 0.001."); + }); + AddAttr("data_layout", "").SetDefault("NCHW"); + AddInput("X", "The input tensor"); + AddInput("BatchSize", + "BatchSize is a 1-dimensional tensor of size C " + "that is applied to the output"); + AddInput("BatchSum", + "BatchSum is a 1-dimensional tensor of size C " + "that is applied to the output"); + AddInput("BatchSquareSum", + "The global BatchSquareSum (for training) or " + "estimated BatchSquareSum (for testing)"); + AddOutput("Y", "result after normalization"); + AddOutput("Means", + "Mean of the history data batch, " + "will apply to output when training") + .AsIntermediate(); + AddOutput("Scales", + "Scales of the history data batch, " + "will apply to output when training") + .AsIntermediate(); + AddAttr("use_mkldnn", + "(bool, default false) Only used in mkldnn kernel") + .SetDefault(false); + AddComment(R"DOC( +Data Normalization. + +Can be used as a normalizer function for data +The required data format for this layer is one of the following: +1. NHWC `[batch, in_height, in_width, in_channels]` +2. NCHW `[batch, in_channels, in_height, in_width]` + +)DOC"); + } +}; + +template +class DataNormKernel + : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &ctx) const override { + // const bool is_test = ctx.Attr("is_test"); + const std::string data_layout_str = ctx.Attr("data_layout"); + const DataLayout data_layout = + framework::StringToDataLayout(data_layout_str); + + const auto *x = ctx.Input("X"); + const auto &x_dims = x->dims(); + PADDLE_ENFORCE(x_dims.size() == 2, "The Input dim size should be 2"); + const int N = x_dims[0]; + const int C = + (data_layout == DataLayout::kNCHW ? 
x_dims[1] + : x_dims[x_dims.size() - 1]); + auto *y = ctx.Output("Y"); + auto *mean_out = ctx.Output("Means"); + auto *scales = ctx.Output("Scales"); + + // alloc memory + y->mutable_data(ctx.GetPlace()); + + Eigen::Array inv_std(C); + ConstEigenVectorArrayMap b_size_arr( + ctx.Input("BatchSize")->data(), C); + ConstEigenVectorArrayMap b_sum_arr( + ctx.Input("BatchSum")->data(), C); + ConstEigenVectorArrayMap b_square_sum_arr( + ctx.Input("BatchSquareSum")->data(), C); + EigenVectorArrayMap means_arr(mean_out->mutable_data(ctx.GetPlace()), + C); + EigenVectorArrayMap scales_arr(scales->mutable_data(ctx.GetPlace()), + C); + means_arr = b_sum_arr / b_size_arr; + scales_arr = (b_size_arr / b_square_sum_arr).sqrt(); + + switch (data_layout) { + case DataLayout::kNCHW: // because it's two dimensions, so make no + // difference + case DataLayout::kNHWC: { + EigenArrayMap(y->mutable_data(ctx.GetPlace()), C, N) = + (ConstEigenArrayMap(x->data(), C, N).colwise() - means_arr) + .colwise() * + scales_arr; + break; + } + default: + PADDLE_THROW("Unknown storage order: %d", data_layout); + } + } +}; + +class DataNormGradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext *ctx) const override { + // check input + PADDLE_ENFORCE(ctx->HasInput("X")); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")), ""); + PADDLE_ENFORCE(ctx->HasInput("BatchSize"), ""); + PADDLE_ENFORCE(ctx->HasInput("BatchSum"), ""); + PADDLE_ENFORCE(ctx->HasInput("BatchSquareSum"), ""); + PADDLE_ENFORCE(ctx->HasInput("Means"), ""); + PADDLE_ENFORCE(ctx->HasInput("Scales"), ""); + + // check output + PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), ""); + PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("BatchSize")), ""); + PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("BatchSum")), ""); + PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("BatchSquareSum")), + ""); + + const auto x_dims = ctx->GetInputDim("X"); + const DataLayout data_layout = framework::StringToDataLayout( + ctx->Attrs().Get("data_layout")); + const int C = + (data_layout == DataLayout::kNCHW ? 
x_dims[1] + : x_dims[x_dims.size() - 1]); + + ctx->SetOutputDim(framework::GradVarName("X"), x_dims); + ctx->SetOutputDim(framework::GradVarName("BatchSize"), {C}); + ctx->SetOutputDim(framework::GradVarName("BatchSum"), {C}); + ctx->SetOutputDim(framework::GradVarName("BatchSquareSum"), {C}); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + const auto *var = ctx.InputVar(framework::GradVarName("Y")); + if (var == nullptr) { + PADDLE_THROW("can't find Y@GRAD"); + } + const Tensor *t = nullptr; + if (var->IsType()) { + t = &var->Get(); + } else if (var->IsType()) { + t = &var->Get(); + } + if (t == nullptr) { + PADDLE_THROW("can't find Y@GRAD"); + } + + // TODO(pzelazko-intel): enable MKLDNN layout when it's ready + framework::LibraryType library = framework::LibraryType::kPlain; + framework::DataLayout layout = framework::DataLayout::kAnyLayout; + +#ifdef PADDLE_WITH_MKLDNN + if (library == framework::LibraryType::kPlain && + platform::CanMKLDNNBeUsed(ctx)) { + library = framework::LibraryType::kMKLDNN; + layout = framework::DataLayout::kMKLDNN; + } +#endif + + return framework::OpKernelType(ctx.Input("X")->type(), + ctx.GetPlace(), layout, library); + } +}; + +template +class DataNormGradKernel + : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &ctx) const override { + const auto *x = ctx.Input("X"); + const auto *d_y = ctx.Input(framework::GradVarName("Y")); + const auto *batch_size = ctx.Input("BatchSize"); + const auto *batch_sum = ctx.Input("BatchSum"); + const auto *batch_square_sum = ctx.Input("BatchSquareSum"); + const auto *scales = ctx.Input("Scales"); + const auto *means = ctx.Input("Means"); + + const std::string data_layout_str = ctx.Attr("data_layout"); + const DataLayout data_layout = + framework::StringToDataLayout(data_layout_str); + + // Get the size for each dimension. + // NCHW [batch_size, in_channels, in_height, in_width] + const auto &x_dims = x->dims(); + PADDLE_ENFORCE(x_dims.size() == 2, "The Input dim size should be 2"); + const int N = x_dims[0]; + const int C = + (data_layout == DataLayout::kNCHW ? 
x_dims[1] + : x_dims[x_dims.size() - 1]); + + // init output + auto *d_x = ctx.Output(framework::GradVarName("X")); + auto *d_batch_size = + ctx.Output(framework::GradVarName("BatchSize")); + auto *d_batch_sum = ctx.Output(framework::GradVarName("BatchSum")); + auto *d_batch_square_sum = + ctx.Output(framework::GradVarName("BatchSquareSum")); + + EigenVectorArrayMap d_batch_size_arr( + d_batch_size->mutable_data(ctx.GetPlace()), C); + EigenVectorArrayMap d_batch_sum_arr( + d_batch_sum->mutable_data(ctx.GetPlace()), C); + EigenVectorArrayMap d_batch_square_sum_arr( + d_batch_square_sum->mutable_data(ctx.GetPlace()), C); + + d_batch_size_arr.setZero(); + d_batch_sum_arr.setZero(); + d_batch_square_sum_arr.setZero(); + + const float epsilon = ctx.Attr("epsilon"); + switch ( + data_layout) { // because it's two dimensions, so make no difference + case DataLayout::kNCHW: + case DataLayout::kNHWC: { + ConstEigenVectorArrayMap scales_arr(scales->data(), C); + ConstEigenVectorArrayMap means_arr(means->data(), C); + ConstEigenArrayMap x_arr(x->data(), C, N); + ConstEigenArrayMap d_y_arr(d_y->data(), C, N); + EigenArrayMap d_x_arr(d_x->mutable_data(ctx.GetPlace()), C, N); + d_x_arr.setZero(); + for (int nc = 0; nc < N; ++nc) { + d_x_arr.col(nc) = d_y_arr.col(nc) * scales_arr; + } + + // calculate data sum and squre sum + ConstEigenVectorArrayMap batch_size_arr(batch_size->data(), C); + ConstEigenVectorArrayMap batch_sum_arr(batch_sum->data(), C); + ConstEigenVectorArrayMap batch_square_sum_arr( + batch_square_sum->data(), C); + Eigen::Array sample_sum(C); + Eigen::Array sample_square_sum(C); + // calculate data sample sum and square sum + sample_sum.setZero(); + sample_square_sum.setZero(); + for (int nc = 0; nc < N; ++nc) { + sample_sum += x_arr.col(nc); + sample_square_sum += (x_arr.col(nc) - means_arr).square(); + } + // calculate gradient + d_batch_size_arr.setConstant(N); + d_batch_sum_arr = sample_sum; + d_batch_square_sum_arr = sample_square_sum + d_batch_size_arr * epsilon; + break; + } + default: + PADDLE_THROW("Unknown storage order: %s", data_layout_str); + } + } +}; + +class DataNormGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto *op = new framework::OpDesc(); + op->SetType("data_norm_grad"); + op->SetInput("X", Input("X")); + op->SetInput(framework::GradVarName("Y"), OutputGrad("Y")); + + op->SetInput("BatchSize", Input("BatchSize")); + op->SetInput("BatchSum", Input("BatchSum")); + op->SetInput("BatchSquareSum", Input("BatchSquareSum")); + op->SetInput("Scales", Output("Scales")); + op->SetInput("Means", Output("Means")); + + op->SetAttrMap(Attrs()); + + op->SetOutput(framework::GradVarName("X"), InputGrad("X")); + op->SetOutput(framework::GradVarName("BatchSize"), InputGrad("BatchSize")); + op->SetOutput(framework::GradVarName("BatchSum"), InputGrad("BatchSum")); + op->SetOutput(framework::GradVarName("BatchSquareSum"), + InputGrad("BatchSquareSum")); + + return std::unique_ptr(op); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(data_norm, ops::DataNormOp, ops::DataNormOpMaker, + ops::DataNormGradMaker); +REGISTER_OPERATOR(data_norm_grad, ops::DataNormGradOp); + +REGISTER_OP_CPU_KERNEL( + data_norm, ops::DataNormKernel, + ops::DataNormKernel); +REGISTER_OP_CPU_KERNEL( + data_norm_grad, + ops::DataNormGradKernel, + ops::DataNormGradKernel); diff --git 
a/paddle/fluid/operators/data_norm_op.h b/paddle/fluid/operators/data_norm_op.h new file mode 100644 index 0000000000000000000000000000000000000000..63451214bcf649d0a7a949f391db9b651d237d22 --- /dev/null +++ b/paddle/fluid/operators/data_norm_op.h @@ -0,0 +1,35 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { + +template +class DataNormKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override; +}; + +template +class DataNormGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override; +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/elementwise/elementwise_sub_op.cu b/paddle/fluid/operators/elementwise/elementwise_sub_op.cu index 6f17d3292f307b009c640738109d5a4f4ca4caa9..f2adf1c83730c317cd4f4d2a4039c0f94da9df7b 100644 --- a/paddle/fluid/operators/elementwise/elementwise_sub_op.cu +++ b/paddle/fluid/operators/elementwise/elementwise_sub_op.cu @@ -12,18 +12,23 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/elementwise/elementwise_sub_op.h" +#include "paddle/fluid/platform/float16.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( elementwise_sub, ops::ElementwiseSubKernel, + ops::ElementwiseSubKernel, ops::ElementwiseSubKernel, ops::ElementwiseSubKernel, ops::ElementwiseSubKernel); REGISTER_OP_CUDA_KERNEL( elementwise_sub_grad, ops::ElementwiseSubGradKernel, + ops::ElementwiseSubGradKernel, ops::ElementwiseSubGradKernel, ops::ElementwiseSubGradKernel, ops::ElementwiseSubGradKernelIsRuntime()) { + if (!ctx->IsRuntime() && x_dims[0] < 0) { out_shape[0] = x_dims[0]; } @@ -115,7 +115,7 @@ class ExpandGradOp : public framework::OperatorWithKernel { auto out_dims = ctx->GetInputDim(framework::GradVarName("Out")); size_t start_pos = 0u; - if (!ctx->IsRuntime()) { + if (!ctx->IsRuntime() && x_dims[0] < 0) { PADDLE_ENFORCE_EQ( x_dims[0], out_dims[0], "The first dimension size of Input(Out@GRAD) should be " diff --git a/paddle/fluid/operators/fused/fusion_seqpool_concat_op.cc b/paddle/fluid/operators/fused/fusion_seqpool_concat_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..b181140db750a8d1b74c0b6cc93259a208fe5b06 --- /dev/null +++ b/paddle/fluid/operators/fused/fusion_seqpool_concat_op.cc @@ -0,0 +1,134 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
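For reference, the gradients computed by DataNormGradKernel above reduce to: dX = dY * scales per channel, d(BatchSize) = N, d(BatchSum) = sum over the batch of x, and d(BatchSquareSum) = sum over the batch of (x - means)^2 plus N * epsilon. A single-channel check of those formulas with made-up numbers:

    #include <cstdio>

    int main() {
      // Hypothetical single-channel batch of N = 3 samples.
      const int N = 3;
      double x[N] = {1.0, 2.0, 3.0};
      double mean = 2.0, scale = 0.5, epsilon = 1e-4;

      double d_batch_size = N;          // gradient w.r.t. BatchSize
      double d_batch_sum = 0.0;         // gradient w.r.t. BatchSum
      double d_batch_square_sum = 0.0;  // gradient w.r.t. BatchSquareSum
      for (int n = 0; n < N; ++n) {
        d_batch_sum += x[n];
        d_batch_square_sum += (x[n] - mean) * (x[n] - mean);
        // dX simply rescales dY per channel: dx = dy * scale.
      }
      d_batch_square_sum += d_batch_size * epsilon;
      std::printf("dSize=%g dSum=%g dSquareSum=%g (dx = dy * %g)\n",
                  d_batch_size, d_batch_sum, d_batch_square_sum, scale);
      return 0;
    }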
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ + +#include "paddle/fluid/operators/fused/fusion_seqpool_concat_op.h" +#include +#include +#include "paddle/fluid/operators/jit/kernels.h" + +namespace paddle { +namespace operators { + +void FusionSeqPoolConcatOp::InferShape( + framework::InferShapeContext* ctx) const { + PADDLE_ENFORCE_GE(ctx->Inputs("X").size(), 1UL, + "Inputs(X) of FusionSeqPoolConcatOp should not be empty."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of FusionSeqPoolConcatOp should not be null."); + int axis = ctx->Attrs().Get("axis"); + PADDLE_ENFORCE_EQ(axis, 1, + "FusionSeqPoolConcatOp only supports concat axis=1 yet."); + + auto ins_dims = ctx->GetInputsDim("X"); + const size_t n = ins_dims.size(); + PADDLE_ENFORCE_GT(n, 0UL, "Input tensors count should > 0."); + if (n == 1) { + LOG(WARNING) << "Only have one input, may waste memory"; + } + + // The output height should be confirmed in Compute, + // since input lod is not accessible here. + PADDLE_ENFORCE_EQ(ins_dims[0].size(), 2UL, + "The dims size of first input should be 2."); + ctx->SetOutputDim("Out", {-1, ins_dims[0][axis] * static_cast(n)}); +} + +framework::OpKernelType FusionSeqPoolConcatOp::GetExpectedKernelType( + const framework::ExecutionContext& ctx) const { + return framework::OpKernelType( + framework::GetDataTypeOfVar(ctx.MultiInputVar("X")[0]), ctx.GetPlace()); +} + +void FusionSeqPoolConcatOpMaker::Make() { + AddInput("X", "(LoDTensor) Input tensors of this operator.").AsDuplicable(); + AddOutput("Out", "(LoDTensor) Output tensor of concat operator."); + AddAttr("pooltype", + "(string, default 'SUM') some of the pooling " + "pooltype of SequencePoolOp.") + .SetDefault("SUM") + .InEnum({"AVERAGE", "SUM", "SQRT"}); + AddAttr("axis", + "The axis along which the input tensors will be concatenated. " + "Only supports concat axis=1 yet.") + .SetDefault(1); + AddComment(R"DOC( +Fusion Sequence Pool of pooltype(sum, average and sqrt) and Concat Operator. 
+)DOC"); +} + +template +class FusionSeqPoolConcatKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto ins = ctx.MultiInput("X"); + auto* out = ctx.Output("Out"); + std::string pooltype = ctx.Attr("pooltype"); + auto x0_lod = ins[0]->lod(); + auto x0_dims = ins[0]->dims(); + auto y_dims = out->dims(); + size_t bs = x0_lod[0].size() - 1; + out->Resize({static_cast(bs), y_dims[1]}); + framework::LoD y_lod(1); + y_lod[0].resize(bs + 1); + for (size_t i = 0; i <= bs; ++i) { + y_lod[0][i] = i; + } + out->set_lod(y_lod); + auto place = ctx.GetPlace(); + T* y_data = out->mutable_data(place); + + int w = ins[0]->numel() / x0_dims[0]; + PADDLE_ENFORCE_EQ(y_dims[1] % w, 0, + "The output of dims[1] should be dividable of w"); + jit::seq_pool_attr_t attr(w, jit::SeqPoolType::kSum); + if (pooltype == "AVERAGE") { + attr.type = jit::SeqPoolType::kAvg; + } else if (pooltype == "SQRT") { + attr.type = jit::SeqPoolType::kSqrt; + } + auto seqpool = + jit::Get, platform::CPUPlace>( + attr); + size_t n = ins.size(); + size_t dst_step_size = n * w; + for (size_t i = 0; i < n; ++i) { + auto x_dims = ins[i]->dims(); + auto x_lod = ins[i]->lod()[0]; + const T* src = ins[i]->data(); + T* dst = y_data + i * w; + PADDLE_ENFORCE_EQ(static_cast(ins[i]->numel() / x_dims[0]), w, + "Width of all inputs should be equal."); + PADDLE_ENFORCE_EQ(x_lod.size(), bs + 1, + "Batchsize of all inputs should be equal."); + for (size_t j = 0; j < bs; ++j) { + attr.h = static_cast(x_lod[j + 1] - x_lod[j]); + seqpool(src, dst, &attr); + dst += dst_step_size; + src += attr.h * attr.w; + } + } + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(fusion_seqpool_concat, ops::FusionSeqPoolConcatOp, + ops::FusionSeqPoolConcatOpMaker, + paddle::framework::DefaultGradOpDescMaker); + +REGISTER_OP_CPU_KERNEL(fusion_seqpool_concat, + ops::FusionSeqPoolConcatKernel, + ops::FusionSeqPoolConcatKernel); diff --git a/paddle/fluid/operators/fused/fusion_seqpool_concat_op.h b/paddle/fluid/operators/fused/fusion_seqpool_concat_op.h new file mode 100644 index 0000000000000000000000000000000000000000..9f882a59d351cdb360203f3212543bfca295fc65 --- /dev/null +++ b/paddle/fluid/operators/fused/fusion_seqpool_concat_op.h @@ -0,0 +1,41 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ + +#pragma once +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using LoDTensor = framework::LoDTensor; +using Tensor = framework::Tensor; + +class FusionSeqPoolConcatOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override; + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override; +}; + +class FusionSeqPoolConcatOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override; +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/jit/benchmark.cc b/paddle/fluid/operators/jit/benchmark.cc index bde2791add4075be6949703dfbea634966d25c1c..4b4ce07fa78b97e636173566fa104cb8a18c914e 100644 --- a/paddle/fluid/operators/jit/benchmark.cc +++ b/paddle/fluid/operators/jit/benchmark.cc @@ -52,11 +52,11 @@ struct BenchFunc { for (int i = 0; i < FLAGS_burning; ++i) { tgt(args...); } - auto start = paddle::platform::PosixInNsec() / 1e-3; + auto start = paddle::platform::PosixInNsec() * 1e-3; for (int i = 0; i < FLAGS_repeat; ++i) { tgt(args...); } - auto end = paddle::platform::PosixInNsec() / 1e-3; + auto end = paddle::platform::PosixInNsec() * 1e-3; return static_cast(end - start) / FLAGS_repeat; } }; diff --git a/paddle/fluid/operators/math/selected_rows_functor.cc b/paddle/fluid/operators/math/selected_rows_functor.cc index 1a11b584e2bab7eeb395bf391da080ec0ba62ae4..b99115e44b31536f0fd0a9078b40d07949be86f0 100644 --- a/paddle/fluid/operators/math/selected_rows_functor.cc +++ b/paddle/fluid/operators/math/selected_rows_functor.cc @@ -195,6 +195,10 @@ struct SelectedRowsAddToTensor { void operator()(const platform::CPUDeviceContext& context, const framework::SelectedRows& input1, framework::Tensor* input2) { + if (UNLIKELY(input1.rows().size() == 0)) { + LOG(WARNING) << "input selected rows is empty!"; + return; + } auto in1_height = input1.height(); auto in2_dims = input2->dims(); PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]); diff --git a/paddle/fluid/operators/math/softmax.h b/paddle/fluid/operators/math/softmax.h index 089458e957dfaac1cbc3bf1bc2b4be4877e702c9..81beef56d9424b968932fdc4ca723099632c183a 100644 --- a/paddle/fluid/operators/math/softmax.h +++ b/paddle/fluid/operators/math/softmax.h @@ -49,6 +49,7 @@ class SoftmaxGradCUDNNFunctor { const framework::Tensor* Y, const framework::Tensor* y_grad, framework::Tensor* x_grad); }; + #endif } // namespace math diff --git a/paddle/fluid/operators/py_func_op.cc b/paddle/fluid/operators/py_func_op.cc index a6b1c738af1daff5e3e4b1ac8e537de5adc93b76..53eff2de3e3864b0f3d61f95ab5758b65f9eecb5 100644 --- a/paddle/fluid/operators/py_func_op.cc +++ b/paddle/fluid/operators/py_func_op.cc @@ -13,10 +13,10 @@ // limitations under the License. #include "paddle/fluid/operators/py_func_op.h" + #include #include #include -#include "Python.h" #include "paddle/fluid/framework/op_registry.h" namespace paddle { diff --git a/paddle/fluid/operators/py_func_op.h b/paddle/fluid/operators/py_func_op.h index 4ba06bf59857d8cc4f1c56a52627d8e768ccbf7a..5cebcd8dc0d2dcd537c7d2ce48508a6a5ee6a77d 100644 --- a/paddle/fluid/operators/py_func_op.h +++ b/paddle/fluid/operators/py_func_op.h @@ -13,8 +13,7 @@ // limitations under the License. 
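The benchmark fix above corrects a unit bug: dividing the nanosecond timestamp by 1e-3 multiplies it by 1000, while multiplying by 1e-3 converts it to microseconds as the benchmark intends. An equivalent conversion written with std::chrono, for comparison:

    #include <chrono>
    #include <cstdio>

    int main() {
      using clock = std::chrono::steady_clock;
      auto t0 = clock::now();
      // ... the workload under measurement would run here ...
      auto t1 = clock::now();

      auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0).count();
      double us = ns * 1e-3;  // nanoseconds -> microseconds (not ns / 1e-3)
      std::printf("%lld ns = %g us\n", static_cast<long long>(ns), us);
      return 0;
    }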
#pragma once - -#include "pybind11/pybind11.h" +#include "paddle/fluid/framework/python_headers.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu index cee3e87037e0f1439a08b7b275eedefe357a4b13..52b8dcc681b1f97d5ba03697257509cae1e6b484 100644 --- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu +++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu @@ -1,11 +1,8 @@ /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -58,12 +55,24 @@ __global__ void SoftCrossEntropyGradientKernel(T* logit_grad, } // namespace -static __device__ __forceinline__ float real_exp(float x) { return expf(x); } -static __device__ __forceinline__ double real_exp(double x) { return exp(x); } -static __device__ __forceinline__ float real_log(float x) { +static __device__ __forceinline__ platform::float16 exp_on_device( + platform::float16 x) { + return ::Eigen::numext::exp(x); +} +static __device__ __forceinline__ float exp_on_device(float x) { + return expf(x); +} +static __device__ __forceinline__ double exp_on_device(double x) { + return exp(x); +} +static __device__ __forceinline__ platform::float16 log_on_device( + platform::float16 x) { + return math::TolerableValue()(::Eigen::numext::log(x)); +} +static __device__ __forceinline__ float log_on_device(float x) { return math::TolerableValue()(logf(x)); } -static __device__ __forceinline__ double real_log(double x) { +static __device__ __forceinline__ double log_on_device(double x) { return math::TolerableValue()(log(x)); } @@ -72,25 +81,20 @@ static __device__ __forceinline__ double real_log(double x) { /* Supposing the x is `logits` and y is `labels`, the equations are as followings: - cross\_entropy_i = \sum_{j}[- y_i_j * log({e^{x_i_j}/\sum_{j}e^{x_i_j}})] = \sum_{j}[- y_i_j * log({e^{x_i_j - max_i}/\sum_{j}e^{x_i_j-max_i}})] = \sum_{j}[-y_i_j * (x_i_j - max_i - log\sum_{j}e^{x_i_j - max_i})] = \sum_{j}[-y_i_j * (x_i_j - max_i - logDiffMaxSum_i)] = \sum_{j}(-y_i_j * tmp_i_j) - softmax_i_j = e^{tmp_i_j} - where: max_i = \max_{j}{x_i_j} logDiffMaxSum_i = log\sum_{j}e^{x_i_j - max_i} tmp_i_j = x_i_j - max_i - logDiffMaxSum_i - Therefore, the calculation can be separated into 3 steps: Step 1: row-wise operation to calculate max_i Step 2: row-wise operation to calculate logDiffMaxSum_i Step 3: caculate tmp_i_j, and finally get softmax_i_j and cross\_entropy_i - To save memory, we can share memory among max_i, logDiffMaxSum_i and cross\_entropy_i. In this way, the 3 steps should be changed to: @@ -134,7 +138,8 @@ static __global__ void RowReductionForMax(const T* logits_data, T* max_data, cur_max = BlockReduce(temp_storage).Reduce(cur_max, cub::Max()); if (threadIdx.x == 0) { - max_data[blockIdx.x] = cur_max < -64 ? -64 : cur_max; + max_data[blockIdx.x] = + cur_max < static_cast(-64) ? 
static_cast(-64) : cur_max; } } @@ -151,17 +156,17 @@ static __global__ void RowReductionForDiffMaxSum(const T* logits_data, auto block_max = max_data[blockIdx.x]; softmax[beg_idx] = logits_data[beg_idx] - block_max; - T diff_max_sum = real_exp(softmax[beg_idx]); + T diff_max_sum = exp_on_device(softmax[beg_idx]); auto idx = beg_idx + BlockDim; while (idx < end_idx) { softmax[idx] = logits_data[idx] - block_max; - diff_max_sum += real_exp(softmax[idx]); + diff_max_sum += exp_on_device(softmax[idx]); idx += BlockDim; } diff_max_sum = BlockReduce(temp_storage).Reduce(diff_max_sum, cub::Sum()); - if (threadIdx.x == 0) max_data[blockIdx.x] = real_log(diff_max_sum); + if (threadIdx.x == 0) max_data[blockIdx.x] = log_on_device(diff_max_sum); if (!CalculateLogSoftmax) return; __syncthreads(); @@ -188,12 +193,12 @@ static __global__ void RowReductionForSoftmaxAndCrossEntropy( // log_diff_max_sum shares memory with loss auto block_log_diff_max_sum = loss_data[blockIdx.x]; auto tmp = softmax[beg_idx] - block_log_diff_max_sum; - softmax[beg_idx] = real_exp(tmp); + softmax[beg_idx] = exp_on_device(tmp); auto loss = -labels_data[beg_idx] * tmp; beg_idx += BlockDim; while (beg_idx < end_idx) { tmp = softmax[beg_idx] - block_log_diff_max_sum; - softmax[beg_idx] = real_exp(tmp); + softmax[beg_idx] = exp_on_device(tmp); loss -= (labels_data[beg_idx] * tmp); beg_idx += BlockDim; } @@ -218,10 +223,10 @@ struct HardLabelSoftmaxWithCrossEntropyFunctor { auto row_idx = idx / feature_size_; auto col_idx = idx % feature_size_; if (col_idx != labels_[row_idx]) { - log_softmax_[idx] = real_exp(log_softmax_[idx]); + log_softmax_[idx] = exp_on_device(log_softmax_[idx]); } else { auto softmax = log_softmax_[idx]; - log_softmax_[idx] = real_exp(softmax); + log_softmax_[idx] = exp_on_device(softmax); loss_[row_idx] = -softmax; } } @@ -253,10 +258,10 @@ struct HardLabelSoftmaxWithCrossEntropyFunctorWithIgnoreIdx { auto row_idx = idx / feature_size_; auto col_idx = idx % feature_size_; if (col_idx != labels_[row_idx] || col_idx == ignore_idx_) { - log_softmax_[idx] = real_exp(log_softmax_[idx]); + log_softmax_[idx] = exp_on_device(log_softmax_[idx]); } else { auto softmax = log_softmax_[idx]; - log_softmax_[idx] = real_exp(softmax); + log_softmax_[idx] = exp_on_device(softmax); loss_[row_idx] = -softmax; } } @@ -464,9 +469,12 @@ class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL(softmax_with_cross_entropy, - ops::SoftmaxWithCrossEntropyCUDAKernel, - ops::SoftmaxWithCrossEntropyCUDAKernel); -REGISTER_OP_CUDA_KERNEL(softmax_with_cross_entropy_grad, - ops::SoftmaxWithCrossEntropyGradCUDAKernel, - ops::SoftmaxWithCrossEntropyGradCUDAKernel); +REGISTER_OP_CUDA_KERNEL( + softmax_with_cross_entropy, ops::SoftmaxWithCrossEntropyCUDAKernel, + ops::SoftmaxWithCrossEntropyCUDAKernel, + ops::SoftmaxWithCrossEntropyCUDAKernel); +REGISTER_OP_CUDA_KERNEL( + softmax_with_cross_entropy_grad, + ops::SoftmaxWithCrossEntropyGradCUDAKernel, + ops::SoftmaxWithCrossEntropyGradCUDAKernel, + ops::SoftmaxWithCrossEntropyGradCUDAKernel); diff --git a/paddle/fluid/operators/sum_op.cc b/paddle/fluid/operators/sum_op.cc index 01996e6bf975227270914aa6bec26aacfc814c94..7abfbbd3cb5e5374441c511d82663788c39c04c6 100644 --- a/paddle/fluid/operators/sum_op.cc +++ b/paddle/fluid/operators/sum_op.cc @@ -41,7 +41,9 @@ class SumOp : public framework::OperatorWithKernel { return; // skip runtime infershape when is tensor array; } + auto x_var_types = 
ctx->GetInputsVarType("X"); auto x_dims = ctx->GetInputsDim("X"); + size_t N = x_dims.size(); PADDLE_ENFORCE_GT(N, 0, "Input tensors count should > 0."); if (N == 1) { @@ -49,7 +51,13 @@ class SumOp : public framework::OperatorWithKernel { } framework::DDim in_dim({0}); - for (auto& x_dim : x_dims) { + for (size_t i = 0; i < x_dims.size(); ++i) { + auto& x_dim = x_dims[i]; + // x_dim.size() == 1 means the real dim of selected rows is [0] + if (x_var_types[i] == framework::proto::VarType::SELECTED_ROWS && + x_dim.size() == 1) { + continue; + } if (framework::product(x_dim) == 0) { continue; } diff --git a/paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc b/paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..c8ee13875c5ae772de3c09f97fded8f70c5698e6 --- /dev/null +++ b/paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc @@ -0,0 +1,162 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/teacher_student_sigmoid_loss_op.h" +#include "paddle/fluid/operators/math/math_function.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +class TeacherStudentSigmoidLossOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); + PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null."); + PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) should be not null."); + + auto x_dims = ctx->GetInputDim("X"); + auto label_dims = ctx->GetInputDim("Label"); + PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, "Input(X)'s rank should be 2."); + PADDLE_ENFORCE_EQ(label_dims.size(), 2UL, + "Input(Label)'s rank should be 2."); + PADDLE_ENFORCE_EQ(x_dims[0], label_dims[0], + "The 1st dimension of Input(X) and Input(Label) should " + "be equal."); + PADDLE_ENFORCE_EQ(label_dims[1], 1UL, + "The 2nd dimension of " + "Input(Label) should be 1."); + ctx->SetOutputDim("Y", {x_dims[0], 1}); + ctx->ShareLoD("X", /*->*/ "Y"); + } + + protected: + // Explicitly set that the data type of computation kernel of + // teacher_student_sigmoid_loss + // is determined by its input "X". 
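The sum_op change above consults the input variable types so that an uninitialized SelectedRows input, whose compile-time dim shows up as [0] (rank 1), no longer takes part in output-shape inference. A minimal sketch of that filtering decision, with hypothetical stand-in types:

    #include <cstdio>
    #include <vector>

    enum class VarType { LOD_TENSOR, SELECTED_ROWS };

    int main() {
      // Hypothetical inputs: a real dense tensor and an empty SelectedRows.
      std::vector<VarType> types = {VarType::LOD_TENSOR, VarType::SELECTED_ROWS};
      std::vector<std::vector<long>> dims = {{4, 8}, {0}};

      for (size_t i = 0; i < dims.size(); ++i) {
        // A rank-1 dim on a SELECTED_ROWS var means "really [0]": skip it.
        bool empty_selected_rows =
            types[i] == VarType::SELECTED_ROWS && dims[i].size() == 1;
        std::printf("input %zu %s\n", i,
                    empty_selected_rows ? "skipped" : "used for shape inference");
      }
      return 0;
    }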
+ framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType(ctx.Input("X")->type(), + ctx.device_context()); + } +}; + +class TeacherStudentSigmoidLossGradientOp + : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); + PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null."); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")), + "Input(Y@GRAD) should be not null."); + PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), + "Output(X@GRAD) should be not null."); + + auto x_dims = ctx->GetInputDim("X"); + auto label_dims = ctx->GetInputDim("Label"); + auto dy_dims = ctx->GetInputDim(framework::GradVarName("Y")); + PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank should be 2."); + PADDLE_ENFORCE_EQ(dy_dims.size(), 2, "Input(Y@Grad)'s rank should be 2."); + PADDLE_ENFORCE_EQ(label_dims.size(), 2, "Input(Label)'s rank should be 2."); + PADDLE_ENFORCE_EQ(x_dims[0], label_dims[0], + "The 1st dimension of Input(X) and Input(Label) should " + "be equal."); + PADDLE_ENFORCE_EQ(x_dims[0], dy_dims[0], + "The 1st dimension of Input(X) and Input(Y@Grad) should " + "be equal."); + PADDLE_ENFORCE_EQ(dy_dims[1], 1, + "The 2nd dimension of Input(Y@Grad) should be 1."); + PADDLE_ENFORCE_EQ(label_dims[1], 1, + "When Attr(soft_label) == false, the 2nd dimension of " + "Input(Label) should be 1."); + ctx->SetOutputDim(framework::GradVarName("X"), x_dims); + ctx->ShareLoD("X", framework::GradVarName("X")); + } + + protected: + // Explicitly set that the data type of computation kernel of + // teacher_student_sigmoid_loss + // is determined by its input "X". + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType(ctx.Input("X")->type(), + ctx.device_context()); + } +}; + +class TeacherStudentSigmoidLossOpMaker + : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", + "(Tensor, default Tensor), a 2-D tensor with shape [N x 1]," + " where N is the batch size and D is the output. " + "This input is a probability computed by the previous operator, " + "which is almost always the result of a softmax operator."); + AddInput("Label", + "(Tensor), the ground truth which is a 2-D tensor. " + "Label is a Tensor with shape [N x 1]. "); + AddOutput("Y", + "(Tensor, default Tensor), a 2-D tensor with shape " + "[N x 1]. The teacher student sigmoid loss."); + AddAttr( + "soft_max_up_bound", + "fp32, if input > soft_max_up_bound, will be bound, default 15.0") + .SetDefault(15.0); + AddAttr( + "soft_max_lower_bound", + "fp32, if input < soft_max_lower_bound, will be bound, default -15.0") + .SetDefault(-15.0); + AddComment(R"DOC( +TeacherStudentSigmoidLoss Operator. + +It's similarity to SigmoidCrossEntropyWithLogits Operator. The difference is that +we add another label(z') to original. 
+ loss = max(x, 0) - x * z + log(1 + exp(-abs(x))) + max(x, 0) - x * z' + log(1 + exp(-abs(x))) + z is click or not + z' is teacher value + label = {-2, -1, [0, 2]} + when z' is not exist, clk = 0 : label = -2; + when z' is not exist, clk = 1 : label = -1; + when z' is exist , clk = 0 : label = 0 + z'; + when z' is exist , clk = 1 : label = 1 + z'; + +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(teacher_student_sigmoid_loss, + ops::TeacherStudentSigmoidLossOp, + ops::TeacherStudentSigmoidLossOpMaker, + paddle::framework::DefaultGradOpDescMaker); + +REGISTER_OPERATOR(teacher_student_sigmoid_loss_grad, + ops::TeacherStudentSigmoidLossGradientOp); + +REGISTER_OP_CPU_KERNEL(teacher_student_sigmoid_loss, + ops::TeacherStudentSigmoidLossOpKernel, + ops::TeacherStudentSigmoidLossOpKernel); + +REGISTER_OP_CPU_KERNEL(teacher_student_sigmoid_loss_grad, + ops::TeacherStudentSigmoidLossGradOpKernel, + ops::TeacherStudentSigmoidLossGradOpKernel); diff --git a/paddle/fluid/operators/teacher_student_sigmoid_loss_op.h b/paddle/fluid/operators/teacher_student_sigmoid_loss_op.h new file mode 100644 index 0000000000000000000000000000000000000000..41d2662ae2a4d37222323d6a536ed3af1ab7e056 --- /dev/null +++ b/paddle/fluid/operators/teacher_student_sigmoid_loss_op.h @@ -0,0 +1,118 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +template +class TeacherStudentSigmoidLossOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + Tensor* y = context.Output("Y"); + const Tensor* x = context.Input("X"); + const Tensor* labels = context.Input("Label"); + T* y_data = y->mutable_data(context.GetPlace()); + const T* x_data = x->data(); + const T* label_data = labels->data(); + int64_t batch_size = x->dims()[0]; + // loss = max(x, 0) - x * z + log(1 + exp(-abs(x))) + max(x, 0) - x * z' + + // log(1 + exp(-abs(x))) + // z is click or not + // z' is value q of feed_fine + // label = {-2, -1, [0, 2]} + // when z' is not exist, clk = 0 : label = -2; + // when z' is not exist, clk = 1 : label = -1; + // when z' is exist , clk = 0 : label = 0 + z'; + // when z' is exist , clk = 1 : label = 1 + z'; + for (int i = 0; i < batch_size; ++i) { + if (label_data[i] < -1.0) { + y_data[i] = (x_data[i] > 0 ? x_data[i] : 0.0) + + log(1.0 + exp(-fabs(x_data[i]))); + } else if (label_data[i] < 0.0) { + y_data[i] = (x_data[i] > 0 ? x_data[i] : 0.0) - x_data[i] + + log(1.0 + exp(-fabs(x_data[i]))); + } else if (label_data[i] < 1.0) { + y_data[i] = (x_data[i] > 0 ? x_data[i] : 0.0) + + log(1.0 + exp(-fabs(x_data[i]))) + + (x_data[i] > 0 ? x_data[i] : 0.0) - + x_data[i] * label_data[i] + + log(1.0 + exp(-fabs(x_data[i]))); + } else { + y_data[i] = (x_data[i] > 0 ? 
x_data[i] : 0.0) - x_data[i] + + log(1.0 + exp(-fabs(x_data[i]))) + + (x_data[i] > 0 ? x_data[i] : 0.0) - + x_data[i] * (label_data[i] - 1.0) + + log(1.0 + exp(-fabs(x_data[i]))); + } + } + } +}; + +template +class TeacherStudentSigmoidLossGradOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + const Tensor* x = context.Input("X"); + const T* x_data = x->data(); + + Tensor* dx = context.Output(framework::GradVarName("X")); + T* dx_data = dx->mutable_data(context.GetPlace()); + + const Tensor* labels = context.Input("Label"); + const T* label_data = labels->data(); + + T soft_max_up_bound = + static_cast(context.Attr("soft_max_up_bound")); + T soft_max_lower_bound = + static_cast(context.Attr("soft_max_lower_bound")); + + int64_t batch_size = x->dims()[0]; + + const framework::Tensor* dOut = + context.Input(framework::GradVarName("Y")); + + const T* dout_data = dOut->data(); + + for (int i = 0; i < batch_size; ++i) { + T sum_val = x_data[i]; + if (sum_val > soft_max_up_bound) { + sum_val = soft_max_up_bound; + } else { + if (sum_val < soft_max_lower_bound) { + sum_val = soft_max_lower_bound; + } + } + + T pred = 1.0 / (1.0 + exp(-sum_val)); + if (label_data[i] < -1.0) { + dx_data[i] = 0.0 - pred; + } else if (label_data[i] < 0.0) { + dx_data[i] = 1.0 - pred; + } else { + dx_data[i] = label_data[i] - 2.0 * pred; + } + if (sum_val >= soft_max_up_bound || sum_val <= soft_max_lower_bound) { + dx_data[i] = 0; + } + dx_data[i] *= dout_data[i] * -1; + } + } +}; +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/platform/cpu_info.cc b/paddle/fluid/platform/cpu_info.cc index 9d5ae813de0f5861d7bc97c9dc2885d91b7240fb..bdfe260793b638881a46a8d663876eeda4ed932f 100644 --- a/paddle/fluid/platform/cpu_info.cc +++ b/paddle/fluid/platform/cpu_info.cc @@ -35,20 +35,8 @@ limitations under the License. 
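The forward kernel above evaluates a numerically stable sigmoid cross-entropy term, max(x, 0) - x*z + log(1 + exp(-|x|)), once for the click label and, when a teacher score is present, a second time for that score, choosing the branch from the encoded label (-2, -1, or a value in [0, 2]). A compact standalone restatement of the per-sample loss, same encoding, made-up inputs:

    #include <cmath>
    #include <cstdio>

    // Stable sigmoid cross-entropy term: max(x, 0) - x*z + log(1 + exp(-|x|)).
    static double SigmoidXent(double x, double z) {
      return (x > 0 ? x : 0.0) - x * z + std::log(1.0 + std::exp(-std::fabs(x)));
    }

    // Label encoding: -2 -> clk=0, no teacher; -1 -> clk=1, no teacher;
    // [0, 1) -> clk=0, teacher=label; [1, 2] -> clk=1, teacher=label-1.
    static double TeacherStudentLoss(double x, double label) {
      if (label < -1.0) return SigmoidXent(x, 0.0);
      if (label < 0.0)  return SigmoidXent(x, 1.0);
      if (label < 1.0)  return SigmoidXent(x, 0.0) + SigmoidXent(x, label);
      return SigmoidXent(x, 1.0) + SigmoidXent(x, label - 1.0);
    }

    int main() {
      std::printf("%g %g %g %g\n",
                  TeacherStudentLoss(0.3, -2.0), TeacherStudentLoss(0.3, -1.0),
                  TeacherStudentLoss(0.3, 0.7),  TeacherStudentLoss(0.3, 1.7));
      return 0;
    }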
*/ DEFINE_double(fraction_of_cpu_memory_to_use, 1, "Default use 100% of CPU memory for PaddlePaddle," "reserve the rest for page tables, etc"); -#if !defined(_WIN32) -DEFINE_uint64(initial_cpu_memory_in_mb, -#ifdef PADDLE_WITH_MKLDNN - /* Aligned with mozga-intel, MKLDNN need at least 5000 MB - * to obtain the best performance*/ - 5000ul, -#else - 500ul, -#endif - "Initial CPU memory for PaddlePaddle, in MD unit."); -#else DEFINE_uint64(initial_cpu_memory_in_mb, 500ul, "Initial CPU memory for PaddlePaddle, in MD unit."); -#endif // !defined(_WIN32) DEFINE_double( fraction_of_cuda_pinned_memory_to_use, 0.5, diff --git a/paddle/fluid/platform/cuda_helper_test.cu b/paddle/fluid/platform/cuda_helper_test.cu index 466bf90c63c1496883995819cdcb19f846e4a302..9e3025bf30b8849472e33a71228eb16814157b21 100644 --- a/paddle/fluid/platform/cuda_helper_test.cu +++ b/paddle/fluid/platform/cuda_helper_test.cu @@ -15,6 +15,9 @@ #include #include #include +#ifdef _WIN32 +#include +#endif #include #define PADDLE_CUDA_FP16 diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc index 09f3d3de54e4388f7090621a0fead96b3043d918..8f80a2d7822f1dc16cee2514a991b7341f5d1cfd 100644 --- a/paddle/fluid/platform/device_context.cc +++ b/paddle/fluid/platform/device_context.cc @@ -92,26 +92,24 @@ platform::TemporaryAllocator& DeviceTemporaryAllocator::Get( const platform::Place& place, const cudaStream_t& stream) { PADDLE_ENFORCE(platform::is_gpu_place(place)); auto place_stream = std::make_pair(place, stream); - { - std::unique_lock lock(mtx_); - if (!device_allocator_.count(place_stream)) { - device_allocator_[place_stream].reset(new TemporaryAllocator(place)); - device_allocator_[place_stream]->SetCallback([stream]() { - PADDLE_ENFORCE(cudaStreamSynchronize(stream)); - PADDLE_ENFORCE(cudaGetLastError()); - }); - } + std::unique_lock lock(mtx_); + auto it = device_allocator_.find(place_stream); + if (it == device_allocator_.end()) { + auto tmp_allocator = new TemporaryAllocator(place); + tmp_allocator->SetCallback([stream]() { + PADDLE_ENFORCE(cudaStreamSynchronize(stream)); + PADDLE_ENFORCE(cudaGetLastError()); + }); + device_allocator_[place_stream].reset(tmp_allocator); + return *tmp_allocator; + } else { + return *it->second; } - return *device_allocator_.at(place_stream); } template <> platform::TemporaryAllocator& DeviceTemporaryAllocator::Get( const platform::CUDADeviceContext& dev_ctx) { - auto place_stream = std::make_pair(dev_ctx.GetPlace(), dev_ctx.stream()); - if (device_allocator_.count(place_stream)) { - return *device_allocator_.at(place_stream); - } return Get(dev_ctx.GetPlace(), dev_ctx.stream()); } #endif @@ -325,7 +323,7 @@ Place CUDADeviceContext::GetPlace() const { return place_; } void CUDADeviceContext::Wait() const { auto& allocator = DeviceTemporaryAllocator::Instance().Get(*this); - allocator.Release([=]() { + allocator.Release([this]() { PADDLE_ENFORCE(cudaStreamSynchronize(stream_)); PADDLE_ENFORCE(cudaGetLastError()); }); diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h index c81d17380cf894631d06588c007c2e11ce5c7836..d376f90ad5754d70f3b9f30957eb2e2f584f8da9 100644 --- a/paddle/fluid/platform/device_context.h +++ b/paddle/fluid/platform/device_context.h @@ -61,7 +61,7 @@ namespace platform { * the allocations of temp_allocation_queue: * - when the Stream calls cudaStreamSynchronize; * - when the allocation size of opportunities exceeds a certain threshold - * (defined by FLAGS_limit_of_temporary_allocation). 
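The DeviceTemporaryAllocator::Get change above performs the per-(place, stream) lookup and creation under a single lock and returns the cached allocator's reference directly, instead of checking the map once, unlocking, and reading it again. The general find-or-create-under-one-lock pattern, in a minimal generic form with hypothetical key and value types:

    #include <map>
    #include <memory>
    #include <mutex>
    #include <string>

    class Cache {
     public:
      // Look the key up and create the entry if missing, all under one lock,
      // so no other thread can observe a half-constructed entry.
      std::string& Get(int key) {
        std::unique_lock<std::mutex> lock(mtx_);
        auto it = items_.find(key);
        if (it == items_.end()) {
          auto* value = new std::string("value-" + std::to_string(key));
          items_[key].reset(value);
          return *value;
        }
        return *it->second;
      }

     private:
      std::mutex mtx_;
      std::map<int, std::unique_ptr<std::string>> items_;
    };

    int main() {
      Cache cache;
      return cache.Get(7) == cache.Get(7) ? 0 : 1;
    }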
+ * (defined by FLAGS_limit_of_tmp_allocation). * * */ class DeviceTemporaryAllocator { diff --git a/paddle/fluid/platform/enforce.h b/paddle/fluid/platform/enforce.h index 06680539507886becb751d02cb02abbf702f1948..15413785bab3c0fd77244141e8f1840ca0cc1356 100644 --- a/paddle/fluid/platform/enforce.h +++ b/paddle/fluid/platform/enforce.h @@ -263,6 +263,10 @@ inline void throw_on_error(T e) { #define __THROW_ON_ERROR_ONE_ARG(COND, ARG) \ ::paddle::platform::throw_on_error(COND, ::paddle::string::Sprintf(ARG)); +#ifdef _WIN32 +#define __PADDLE_THROW_ON_ERROR(COND, ...) \ + __THROW_ON_ERROR_ONE_ARG(COND, __VA_ARGS__) +#else // _WIN32 #define __PADDLE_THROW_ON_ERROR(COND, ...) \ __PADDLE_THROW_ERROR_I( \ __VA_ARGS__, ::paddle::platform::throw_on_error(COND, __VA_ARGS__), \ @@ -274,6 +278,7 @@ inline void throw_on_error(T e) { ::paddle::platform::throw_on_error(COND, __VA_ARGS__), \ ::paddle::platform::throw_on_error(COND, __VA_ARGS__), \ __THROW_ON_ERROR_ONE_ARG(COND, __VA_ARGS__)) +#endif // _WIN32 #define __PADDLE_UNARY_COMPARE(COND, ...) \ do { \ diff --git a/paddle/fluid/platform/float16.h b/paddle/fluid/platform/float16.h index 98afe843c0035ec14ad874508dc02b8d1d3d359c..c203f4e04a28452807a42bbdaf75e89977772a04 100644 --- a/paddle/fluid/platform/float16.h +++ b/paddle/fluid/platform/float16.h @@ -59,7 +59,7 @@ limitations under the License. */ #if !defined(_WIN32) #define PADDLE_ALIGN(x) __attribute__((aligned(x))) #else -#define PADDLE_ALIGN(x) /*do nothing*/ +#define PADDLE_ALIGN(x) __declspec(align(x)) #endif namespace paddle { diff --git a/paddle/fluid/platform/float16_test.cu b/paddle/fluid/platform/float16_test.cu index b1b51d804e02f233bcd16149005092dc80e9c79d..14cad927f06551ebbfbf1d166ae250c18591dd6b 100644 --- a/paddle/fluid/platform/float16_test.cu +++ b/paddle/fluid/platform/float16_test.cu @@ -271,11 +271,13 @@ TEST(float16, isinf) { float16 b = float16(INFINITY); // underflow to 0 float16 native_a(5e-40f); - // overflow to inf - float16 native_b(5e40f); EXPECT_EQ(std::isinf(a), true); EXPECT_EQ(std::isinf(b), true); +#ifndef _WIN32 + // overflow to inf + float16 native_b(5e40f); EXPECT_EQ(std::isinf(native_b), true); +#endif EXPECT_EQ(native_a, float16(0)); } diff --git a/paddle/fluid/platform/mkldnn_reuse.h b/paddle/fluid/platform/mkldnn_reuse.h index b3d20736a8e70d2f57ee5d6dc97cb490b5cfee44..faac6a12c66378d090b642312df4538aeeb3d8cd 100644 --- a/paddle/fluid/platform/mkldnn_reuse.h +++ b/paddle/fluid/platform/mkldnn_reuse.h @@ -210,13 +210,15 @@ class MKLDNNHandler { dst_memory.reset(new mkldnn::memory(*dst_pd, to_void_cast(output_data))); } - static void AppendKey( - std::string* key, const mkldnn::memory::dims& input_dims, - const mkldnn::memory::dims& weights_dims, const std::vector& strides, - const std::vector& paddings, const std::vector& dilations, - const int& groups, const mkldnn::memory::data_type& srcdt, - const mkldnn::memory::format& format, - const mkldnn::memory::data_type& dstdt, const std::string& suffix) { + static void AppendKey(std::string* key, + const mkldnn::memory::dims& input_dims, + const mkldnn::memory::dims& weights_dims, + const std::vector& strides, + const std::vector& paddings, + const std::vector& dilations, const int& groups, + const mkldnn::memory::data_type& srcdt, + const mkldnn::memory::format& format, const bool& relu, + const bool& residual, const std::string& suffix) { AppendKeyDims(key, input_dims); AppendKeyDims(key, weights_dims); AppendKeyVec(key, strides); @@ -225,7 +227,8 @@ class MKLDNNHandler { AppendKey(key, 
std::to_string(groups)); AppendKey(key, std::to_string(srcdt)); AppendKey(key, std::to_string(format)); - AppendKey(key, std::to_string(dstdt)); + AppendKey(key, std::to_string(relu)); + AppendKey(key, std::to_string(residual)); AppendKey(key, suffix); } @@ -664,15 +667,35 @@ static std::shared_ptr SetDstMemory( } template -static std::shared_ptr SetDstMemoryHandler( +static std::shared_ptr SetDstMemory( const framework::ExecutionContext& ctx, framework::Tensor* output, - const std::shared_ptr& handler) { + const framework::Tensor* residual_param, + const mkldnn::memory::desc& user_residual_md, + const std::shared_ptr& handler, + std::vector* pipeline) { + const T* residual_param_data = residual_param->data(); + PADDLE_ENFORCE(residual_param_data != nullptr, + "Provide data if you want MKLDNN conv+elementwise_add fusion"); + std::shared_ptr user_residual_memory_p = + handler->AcquireResidualDataMemory(user_residual_md, + to_void_cast(residual_param_data)); + T* output_data = output->mutable_data(ctx.GetPlace()); + std::shared_ptr dst_memory_p = + handler->AcquireDstMemoryFromResidualDataMemory( + user_residual_memory_p, to_void_cast(output_data), *pipeline); + return dst_memory_p; +} + +template +static void SetDstMemoryHandler( + const framework::ExecutionContext& ctx, framework::Tensor* output, + const std::shared_ptr& handler, + std::shared_ptr* dst_memory_p) { T* output_data = output->mutable_data( ctx.GetPlace(), ::paddle::memory::Allocator::kDefault, handler->GetDstMemorySize()); - std::shared_ptr dst_memory_p; - dst_memory_p->set_data_handle(to_void_cast(output_data)); - return dst_memory_p; + (*dst_memory_p)->set_data_handle(to_void_cast(output_data)); } + } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/temporary_allocator.cc b/paddle/fluid/platform/temporary_allocator.cc index 0be017f75bcc8aff5073ebb2c5179cf7250be8b9..9cbdfe46e78dc84e58eae6929c887221d9562c69 100644 --- a/paddle/fluid/platform/temporary_allocator.cc +++ b/paddle/fluid/platform/temporary_allocator.cc @@ -15,8 +15,15 @@ #include "paddle/fluid/platform/temporary_allocator.h" #include "paddle/fluid/memory/allocation/allocator_facade.h" -DEFINE_double(limit_of_temporary_allocation, -1, - "The up limit of temporary_allocation size."); +DEFINE_int64(limit_of_tmp_allocation, -1, + "The up limit of temporary_allocation size."); +DEFINE_double(times_excess_than_required_tmp_allocation, 2, + "times_excess_than_required_tmp_allocation indicates the " + "max size the TemporaryAllocator can return. 
For example, " + "if the required memory size is N, and " + "times_excess_than_required_tmp_allocation is 2.0, " + "the TemporaryAllocator will return the available allocation " + "that the range of size is N ~ 2*N."); namespace paddle { namespace platform { @@ -29,24 +36,25 @@ TemporaryAllocation::TemporaryAllocation( underlying_allocation_(std::move(underlying_allocation)) {} TemporaryAllocator::TemporaryAllocator(platform::Place place) : place_(place) { - temp_mem_queue_.reset(new std::deque()); + temp_mem_map_.reset(new std::multimap()); } bool TemporaryAllocator::IsAllocThreadSafe() const { return true; } void TemporaryAllocator::Release(const std::function &callback) { - std::shared_ptr> t_allocations; + std::unique_ptr> t_allocations; { std::unique_lock lock(mtx_); callback(); - t_allocations = temp_mem_queue_; - temp_mem_queue_.reset(new std::deque()); + t_allocations.swap(temp_mem_map_); + temp_mem_map_.reset(new std::multimap()); wait_delete_mem_ = 0; } + for (auto tmp : *t_allocations) { - VLOG(10) << "Delete temporary allocation " << tmp->ptr() - << " size: " << tmp->size(); - delete tmp; + VLOG(10) << "Delete temporary allocation " << tmp.second->ptr() + << " size: " << tmp.second->size(); + delete tmp.second; } } @@ -54,28 +62,34 @@ void TemporaryAllocator::Free(alloc::Allocation *allocation) { auto *temp_allocation = dynamic_cast(allocation); PADDLE_ENFORCE_NOT_NULL(temp_allocation); if (platform::is_gpu_place(temp_allocation->place())) { + PADDLE_ENFORCE(platform::is_same_place(temp_allocation->place(), place_), + "The place should be the same."); size_t wait_delete_mem = 0; { std::unique_lock lock(mtx_); - temp_mem_queue_->emplace_back(temp_allocation); + temp_mem_map_->emplace(temp_allocation->size(), temp_allocation); wait_delete_mem_ += temp_allocation->size(); wait_delete_mem = wait_delete_mem_; VLOG(10) << "Move temporary allocation: " << temp_allocation->ptr() << " to delete queue: " << temp_allocation->size() << "; " - << "wait_delete_mem: " << wait_delete_mem_; + << "wait_delete_mem: " << wait_delete_mem; } - if (FLAGS_limit_of_temporary_allocation > 0 && - wait_delete_mem > FLAGS_limit_of_temporary_allocation) { + + if (FLAGS_limit_of_tmp_allocation > 0 && + wait_delete_mem > static_cast(FLAGS_limit_of_tmp_allocation)) { + PADDLE_ENFORCE(callback_ != nullptr, "The callback is non-initialized."); Release(callback_); } return; } + VLOG(10) << "Delete temporary allocation " << temp_allocation->ptr() + << " size: " << temp_allocation->size(); delete temp_allocation; } size_t TemporaryAllocator::TemporaryAllocationQueueSize() { std::unique_lock lock(mtx_); - return temp_mem_queue_ ? temp_mem_queue_->size() : 0; + return temp_mem_map_ ? temp_mem_map_->size() : 0; } void TemporaryAllocator::SetCallback(const std::function &callback) { @@ -84,6 +98,27 @@ void TemporaryAllocator::SetCallback(const std::function &callback) { alloc::Allocation *TemporaryAllocator::AllocateImpl( size_t size, alloc::Allocator::Attr attr) { + { + // Find available allocation in temp_mem_map. + std::unique_lock lock(mtx_); + if (temp_mem_map_->size()) { + auto it = temp_mem_map_->lower_bound(size); + // FIXME(zcd): Not sure the best value of excess fraction. 
+ if (it != temp_mem_map_->end() && + it->first < + static_cast( + size * FLAGS_times_excess_than_required_tmp_allocation)) { + auto tmp_ptr = it->second; + temp_mem_map_->erase(it); + wait_delete_mem_ -= tmp_ptr->size(); + VLOG(10) << "Reuse temporary allocation: " << tmp_ptr->ptr() << ": " + << tmp_ptr->size(); + return tmp_ptr; + } + } + } + // If not find the the available allocation, get allocation from + // AllocatorFacadeInstance. auto raw_allocation = alloc::AllocatorFacade::Instance().Alloc(place_, size, attr); auto temp_mem = new TemporaryAllocation(std::move(raw_allocation)); diff --git a/paddle/fluid/platform/temporary_allocator.h b/paddle/fluid/platform/temporary_allocator.h index 812c4a333189d8c432be398ca0ebbce11f957561..d657a14223326aa1e2cb5b154a10a56ae742f95c 100644 --- a/paddle/fluid/platform/temporary_allocator.h +++ b/paddle/fluid/platform/temporary_allocator.h @@ -15,6 +15,7 @@ #pragma once #include // NOLINT #include +#include #include // NOLINT #include "paddle/fluid/memory/allocation/allocator.h" #include "paddle/fluid/platform/lock_guard_ptr.h" @@ -39,7 +40,7 @@ class TemporaryAllocation : public memory::allocation::Allocation { * * There is one opportunity to free the allocations of temp_allocation_queue: * - when the allocation size of opportunities exceeds a certain threshold - * (defined by FLAGS_limit_of_temporary_allocation). + * (defined by FLAGS_limit_of_tmp_allocation). * * */ class TemporaryAllocator : public memory::allocation::Allocator { @@ -62,11 +63,10 @@ class TemporaryAllocator : public memory::allocation::Allocator { private: platform::Place place_; - // When the allocation is not held by any variable, it should be placed - // to temp_mem_queue immediately. - std::shared_ptr> temp_mem_queue_{nullptr}; - + // to temp_mem_map immediately. 
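Note on the reuse policy introduced here: freed temporary allocations are parked in a size-keyed multimap, and a new request is served from the cache only when the smallest cached entry of sufficient size is still below size * FLAGS_times_excess_than_required_tmp_allocation. A minimal Python sketch of that lookup (a toy model, not the C++ implementation; the ReusePool name is made up), using a sorted list in place of std::multimap::lower_bound:

import bisect

class ReusePool(object):
    """Toy model of the size-keyed temporary-allocation cache."""

    def __init__(self, excess_factor=2.0):
        self.excess_factor = excess_factor   # FLAGS_times_excess_than_required_tmp_allocation
        self.free_sizes = []                 # sorted sizes of cached allocations

    def release(self, size):
        # A freed temporary allocation is cached instead of being deleted.
        bisect.insort(self.free_sizes, size)

    def allocate(self, size):
        # lower_bound: first cached allocation whose size >= the requested size.
        i = bisect.bisect_left(self.free_sizes, size)
        if i < len(self.free_sizes) and \
                self.free_sizes[i] < size * self.excess_factor:
            return ('reused', self.free_sizes.pop(i))
        # Otherwise fall back to a fresh allocation of the requested size.
        return ('fresh', size)

pool = ReusePool(excess_factor=2.0)
pool.release(199)            # cache a freed allocation of 199 bytes
print(pool.allocate(100))    # ('reused', 199) because 199 < 100 * 2.0
print(pool.allocate(100))    # ('fresh', 100) because the cache is now empty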
+ std::unique_ptr> temp_mem_map_{ + nullptr}; std::mutex mtx_; size_t wait_delete_mem_{0}; std::function callback_; diff --git a/paddle/fluid/platform/temporary_allocator_test.cc b/paddle/fluid/platform/temporary_allocator_test.cc index 35d1d929819c41b213bc51ec24ac725021a76c88..3879cd540017ea22b0cf4eee794a172e56716b74 100644 --- a/paddle/fluid/platform/temporary_allocator_test.cc +++ b/paddle/fluid/platform/temporary_allocator_test.cc @@ -18,7 +18,8 @@ #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/tensor_util.h" -DECLARE_double(limit_of_temporary_allocation); +DECLARE_int64(limit_of_tmp_allocation); +DECLARE_double(times_excess_than_required_tmp_allocation); namespace paddle { namespace platform { @@ -35,7 +36,7 @@ class DummyOp : public framework::OperatorBase { const platform::Place& place) const override {} }; -TEST(temporary_allocator, temporary_allocator) { +TEST(temporary_allocator, test_base_function) { platform::CPUPlace cpu_place; TemporaryAllocator alloc(cpu_place); alloc.Allocate(100); @@ -59,10 +60,10 @@ TEST(temporary_allocator, temporary_allocator) { #endif } -TEST(temporary_allocator, add_callback) { +TEST(temporary_allocator, test_flags_function) { #ifdef PADDLE_WITH_CUDA - const double limit = FLAGS_limit_of_temporary_allocation; - FLAGS_limit_of_temporary_allocation = 10; + const int64_t limit = FLAGS_limit_of_tmp_allocation; + FLAGS_limit_of_tmp_allocation = 10; platform::CUDAPlace gpu_place(0); TemporaryAllocator gpu_alloc(gpu_place); @@ -78,7 +79,52 @@ TEST(temporary_allocator, add_callback) { }); { gpu_alloc.Allocate(100); } PADDLE_ENFORCE(deleted); - FLAGS_limit_of_temporary_allocation = limit; + FLAGS_limit_of_tmp_allocation = limit; +#endif +} + +TEST(temporary_allocator, test_reuse_tmp_allocation) { +#ifdef PADDLE_WITH_CUDA + platform::CUDAPlace gpu_place(0); + TemporaryAllocator gpu_alloc(gpu_place); + gpu_alloc.SetCallback([]() {}); + + void* tmp_allocation_ptr1 = nullptr; + { + PADDLE_ENFORCE_EQ(gpu_alloc.TemporaryAllocationQueueSize(), 0); + auto tmp_allocation1 = gpu_alloc.Allocate(100); + tmp_allocation_ptr1 = tmp_allocation1->ptr(); + } + PADDLE_ENFORCE_EQ(gpu_alloc.TemporaryAllocationQueueSize(), 1); + auto tmp_allocation2 = gpu_alloc.Allocate(100); + void* tmp_allocation_ptr2 = tmp_allocation2->ptr(); + PADDLE_ENFORCE_EQ(gpu_alloc.TemporaryAllocationQueueSize(), 0); + PADDLE_ENFORCE_EQ(tmp_allocation_ptr1, tmp_allocation_ptr2); + + auto tmp_allocation3 = gpu_alloc.Allocate(100); + void* tmp_allocation_ptr3 = tmp_allocation2->ptr(); + PADDLE_ENFORCE_EQ(tmp_allocation_ptr1, tmp_allocation_ptr3); +#endif +} + +TEST(temporary_allocator, test_times_excess_than_required_tmp_allocation) { +#ifdef PADDLE_WITH_CUDA + platform::CUDAPlace gpu_place(0); + TemporaryAllocator gpu_alloc(gpu_place); + gpu_alloc.SetCallback([]() {}); + double excess_fraction = FLAGS_times_excess_than_required_tmp_allocation; + void* tmp_allocation_ptr1 = nullptr; + { + PADDLE_ENFORCE_EQ(gpu_alloc.TemporaryAllocationQueueSize(), 0); + auto tmp_allocation1 = + gpu_alloc.Allocate(static_cast(100 * excess_fraction - 1)); + tmp_allocation_ptr1 = tmp_allocation1->ptr(); + } + PADDLE_ENFORCE_EQ(gpu_alloc.TemporaryAllocationQueueSize(), 1); + auto tmp_allocation2 = gpu_alloc.Allocate(100); + void* tmp_allocation_ptr2 = tmp_allocation2->ptr(); + PADDLE_ENFORCE_EQ(gpu_alloc.TemporaryAllocationQueueSize(), 0); + PADDLE_ENFORCE_EQ(tmp_allocation_ptr1, tmp_allocation_ptr2); #endif } diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt 
index 72b0f216d3aafbb95931590935b0bf967a8d5be8..9a91ea38caef50b9a7ad970a3d08ca28c497e419 100644 --- a/paddle/fluid/pybind/CMakeLists.txt +++ b/paddle/fluid/pybind/CMakeLists.txt @@ -1,9 +1,10 @@ - -set(PYBIND_DEPS pybind python proto_desc memory executor async_executor prune feed_fetch_method pass_builder parallel_executor profiler layer scope_pool) +set(PYBIND_DEPS pybind python proto_desc memory executor async_executor prune + feed_fetch_method pass_builder parallel_executor profiler layer scope_pool + tracer) if(WITH_PYTHON) list(APPEND PYBIND_DEPS py_func_op) endif() -set(PYBIND_SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc async_executor_py.cc imperative.cc) +set(PYBIND_SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc async_executor_py.cc imperative.cc ir.cc) if(WITH_PYTHON) if(WITH_AMD_GPU) @@ -21,9 +22,8 @@ if(WITH_PYTHON) endif(NOT APPLE AND NOT ANDROID AND NOT WIN32) endif(WITH_AMD_GPU) - if(WIN32) - target_link_libraries(paddle_pybind shlwapi) - endif(WIN32) + get_property (os_dependency_modules GLOBAL PROPERTY OS_DEPENDENCY_MODULES) + target_link_libraries(paddle_pybind ${os_dependency_modules}) cc_test(tensor_py_test SRCS tensor_py_test.cc DEPS python) endif(WITH_PYTHON) diff --git a/paddle/fluid/pybind/const_value.cc b/paddle/fluid/pybind/const_value.cc index 06d8b65fb1480d9f621ca937c1d66ab7e910f010..f8ded9f94ecaf3df1e14aead60ae12abcf8c34a9 100644 --- a/paddle/fluid/pybind/const_value.cc +++ b/paddle/fluid/pybind/const_value.cc @@ -49,6 +49,9 @@ void BindConstValue(pybind11::module* m) { op_proto_and_checker_maker.def( "kOpNameScopeAttrName", framework::OpProtoAndCheckerMaker::OpNamescopeAttrName); + op_proto_and_checker_maker.def( + "kOpCreationCallstackAttrName", + framework::OpProtoAndCheckerMaker::OpCreationCallstackAttrName); } } // namespace pybind diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc index 5c1c7478f4dbe4c78f5ac2c19f4eae09abbf1c8b..dbc7843caa0c0a39a32cda6050fa99a3ab4c3e22 100644 --- a/paddle/fluid/pybind/imperative.cc +++ b/paddle/fluid/pybind/imperative.cc @@ -26,7 +26,9 @@ void BindTracer(pybind11::module *m) { [](imperative::Tracer &self, framework::BlockDesc *root_block) { new (&self) imperative::Tracer(root_block); }) - .def("trace", &imperative::Tracer::Trace); + .def("trace", &imperative::Tracer::Trace) + .def("py_trace", &imperative::Tracer::PyTrace, + pybind11::return_value_policy::take_ownership); } } // namespace pybind diff --git a/paddle/fluid/pybind/imperative.h b/paddle/fluid/pybind/imperative.h index 7a9d3a01ea81f11ac85000c3d0153f20e108789a..f947b743f99d5d4994b1a87f89fd6815357d8125 100644 --- a/paddle/fluid/pybind/imperative.h +++ b/paddle/fluid/pybind/imperative.h @@ -22,7 +22,7 @@ limitations under the License. */ namespace paddle { namespace pybind { -class PyLayer : public imperative::Layer { +class Layer : public imperative::Layer { public: using imperative::Layer::Layer; // Inherit constructors @@ -31,10 +31,6 @@ class PyLayer : public imperative::Layer { PYBIND11_OVERLOAD(std::vector, Layer, Forward, inputs); // NOLINT } - - void Backward() override { - PYBIND11_OVERLOAD(void, Layer, Backward, ); // NOLINT - } }; class PyOpBase : public imperative::OpBase { diff --git a/paddle/fluid/pybind/ir.cc b/paddle/fluid/pybind/ir.cc new file mode 100644 index 0000000000000000000000000000000000000000..d32fe58f8695a5c14f276ef038416f5c47f3400f --- /dev/null +++ b/paddle/fluid/pybind/ir.cc @@ -0,0 +1,103 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/pybind/ir.h" +#include +#include +#include "paddle/fluid/framework/ir/graph.h" +#include "paddle/fluid/framework/ir/node.h" +#include "paddle/fluid/framework/op_desc.h" +#include "paddle/fluid/framework/var_desc.h" +#include "pybind11/stl.h" + +namespace py = pybind11; +using paddle::framework::ir::Graph; +using paddle::framework::ir::Node; +using paddle::framework::OpDesc; +using paddle::framework::ProgramDesc; +using paddle::framework::VarDesc; +using pybind11::return_value_policy; + +namespace paddle { +namespace pybind { +void BindGraph(py::module *m) { + py::class_>( + *m, "Graph", + "The graph is a Directed Acyclic Single Static Assignment Graph, see " + "`paddle::ir::Graph` for details.") + .def(py::init()) + .def("has", &Graph::Has) + .def("get_int", &Graph::Get) + .def("get_float", &Graph::Get) + .def("get_double", &Graph::Get) + .def("get_string", &Graph::Get) + .def("set", [](Graph &self, const std::string &attr_name, + int attr) { return self.Set(attr_name, new int(attr)); }) + .def("set", + [](Graph &self, const std::string &attr_name, + const std::string &attr) { + return self.Set(attr_name, new std::string(attr)); + }) + .def("set", + [](Graph &self, const std::string &attr_name, float attr) { + return self.Set(attr_name, new float(attr)); + }) + .def("set", + [](Graph &self, const std::string &attr_name, double attr) { + return self.Set(attr_name, new double(attr)); + }) + .def("erase", &Graph::Erase) + .def("nodes", &Graph::Nodes, return_value_policy::reference) + .def("create_var_node", + [](Graph &self, VarDesc &var_desc) { + return self.CreateVarNode(&var_desc); + }, + return_value_policy::reference) + .def("create_op_node", + [](Graph &self, OpDesc &op_desc) { + return self.CreateOpNode(&op_desc); + }, + return_value_policy::reference) + .def("create_control_dep_var", &Graph::CreateControlDepVar, + return_value_policy::reference) + .def("create_empty_node", &Graph::CreateEmptyNode, + return_value_policy::reference) + .def("release_nodes", &Graph::ReleaseNodes) + .def("remove_node", + [](Graph &self, Node &node) { return self.RemoveNode(&node); }) + .def("retrieve_node", &Graph::RetrieveNode, + return_value_policy::reference) + .def("resolve_hazard", &Graph::ResolveHazard); +} + +void BindNode(py::module *m) { + py::class_ node(*m, "Node"); + node.def("name", &Node::Name) + .def("node_type", &Node::NodeType) + .def("var", &Node::Var) + .def("op", &Node::Op) + .def("id", &Node::id) + .def("is_op", &Node::IsOp) + .def("is_var", &Node::IsVar) + .def("is_ctrl_var", &Node::IsCtrlVar) + .def_readwrite("inputs", &Node::inputs) + .def_readwrite("outputs", &Node::outputs); + + py::enum_(node, "Type") + .value("Operation", Node::Type::kOperation) + .value("Variable", Node::Type::kVariable) + .export_values(); +} +} // namespace pybind +} // namespace paddle diff --git a/paddle/fluid/pybind/ir.h b/paddle/fluid/pybind/ir.h new file mode 100644 index 
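For orientation, these bindings surface the IR graph to Python as fluid.core.Graph and fluid.core.Node. The sketch below only uses methods registered in BindGraph/BindNode and assumes the Graph constructor takes a ProgramDesc, as bound above; treat it as an illustration rather than a tested recipe.

import paddle.fluid as fluid
from paddle.fluid import core

# Build a tiny program, then wrap its ProgramDesc in an IR graph.
prog = fluid.Program()
with fluid.program_guard(prog):
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.fc(input=x, size=1)

graph = core.Graph(prog.desc)      # assumes init(ProgramDesc) as registered above
graph.set('my_attr', 42)           # the int overload of Graph.set
print(graph.has('my_attr'), graph.get_int('my_attr'))

# Walk the nodes; Node exposes is_op()/is_var()/name() as bound above.
for node in graph.nodes():
    print('op ' if node.is_op() else 'var', node.name())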
0000000000000000000000000000000000000000..5bee70eba695b6d71c4df03e7ffe5d8d11384172 --- /dev/null +++ b/paddle/fluid/pybind/ir.h @@ -0,0 +1,25 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include "paddle/fluid/framework/ir/graph.h" + +namespace paddle { +namespace pybind { +void BindGraph(pybind11::module *m); +void BindNode(pybind11::module *m); +} // namespace pybind +} // namespace paddle diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index a38643064ace8c1ae1d651dcd2ba329df692da30..7e342a3ba0c4aab8f3643f5bb6a44a9d61b1595c 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -49,6 +49,7 @@ limitations under the License. */ #include "paddle/fluid/pybind/const_value.h" #include "paddle/fluid/pybind/exception.h" #include "paddle/fluid/pybind/imperative.h" +#include "paddle/fluid/pybind/ir.h" #include "paddle/fluid/pybind/protobuf.h" #include "paddle/fluid/pybind/pybind.h" // NOLINT #include "paddle/fluid/pybind/recordio.h" @@ -125,26 +126,18 @@ PYBIND11_MODULE(core, m) { m.add_object("_cleanup", py::capsule([]() { ScopePool::Instance().Clear(); })); - py::class_>( - m, "VarBase", R"DOC()DOC") + py::class_(m, "VarBase", R"DOC()DOC") // .def(py::init<>()) .def(py::init(), py::arg("stop_gradient") = false) .def("_run_backward", [](imperative::VarBase &self) { self.RunBackward(); }) .def("_grad_name", &imperative::VarBase::GradName) - .def("_grad", &imperative::VarBase::Grad) - .def_property("grad_value", - [](const imperative::VarBase &self) { return self.grads_; }, - [](imperative::VarBase &self, framework::Variable *grad) { - self.grads_ = grad; - }, - py::return_value_policy::reference) - .def_property("value", - [](const imperative::VarBase &self) { return self.var_; }, - [](imperative::VarBase &self, framework::Variable *var) { - self.var_ = var; - }, - py::return_value_policy::reference) + .def("_grad_value", &imperative::VarBase::GradValue) + .def("_grad_ivar", + [](const imperative::VarBase &self) { return self.grads_; }, + py::return_value_policy::reference) + .def("value", [](const imperative::VarBase &self) { return self.var_; }, + py::return_value_policy::reference) .def_property( "desc", [](const imperative::VarBase &self) { return self.var_desc_; }, @@ -168,16 +161,44 @@ PYBIND11_MODULE(core, m) { self.op_desc_ = op_desc; } }, + py::return_value_policy::reference) + .def_property( + "forward_id", + [](const imperative::OpBase &self) { return self.forward_id_; }, + [](imperative::OpBase &self, int forward_id) { + self.forward_id_ = forward_id; + }, + py::return_value_policy::reference) + .def_property( + "backward_id", + [](const imperative::OpBase &self) { return self.backward_id_; }, + [](imperative::OpBase &self, int backward_id) { + self.backward_id_ = backward_id; + }, py::return_value_policy::reference); - py::class_ layer(m, "Layer"); + py::class_ layer(m, "Layer"); layer.def(py::init<>()) - .def("forward", - 
[](imperative::Layer &self, - const std::vector &inputs) { - return self.Forward(inputs); - }) - .def("backward", &imperative::Layer::Backward); + .def("forward", [](imperative::Layer &self, + const std::vector &inputs) { + return self.Forward(inputs); + }); + + py::class_(m, "PyLayer") + .def(py::init<>()) + .def_static( + "apply", + [](int func_id, const std::vector &inputs) + -> std::vector { + return imperative::PyLayer::Apply(func_id, inputs); + }, + py::return_value_policy::take_ownership) + .def_static("register_func", + [](int func_id, const py::object &callable) { + imperative::PyLayer::RegisterFunc(func_id, callable); + }) + .def_static("num_funcs", &imperative::PyLayer::NumFuncs); + BindTracer(&m); py::class_(m, "Tensor", py::buffer_protocol()) @@ -769,7 +790,12 @@ All parameter, weight, gradient are variables in Paddle. }) .def("set_int", [](ir::Pass &self, const std::string &name, int val) { self.Set(name, new int(val)); }) - .def("type", &ir::Pass::Type); + .def("type", &ir::Pass::Type) + .def("apply", [](ir::Pass &self, std::shared_ptr graph) { + std::unique_ptr origin_graph(graph.get()); + auto optim_graph = self.Apply(std::move(origin_graph)); + graph.reset(optim_graph.release()); + }); py::class_> pb( m, "PassBuilder"); @@ -1036,6 +1062,9 @@ All parameter, weight, gradient are variables in Paddle. BindRecordIOWriter(&m); BindAsyncExecutor(&m); + + BindGraph(&m); + BindNode(&m); } } // namespace pybind } // namespace paddle diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index 50b7a631297b150ac9d25c036d21b0bdf2854b79..0fb29d4b3d4c7fd579823075bfeb89e3977409f9 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -490,7 +490,8 @@ function assert_api_spec_approvals() { BRANCH="develop" fi - API_FILES=("paddle/fluid/API.spec" + API_FILES=("cmake/external" + "paddle/fluid/API.spec" "paddle/fluid/framework/operator.h" "paddle/fluid/framework/tensor.h" "paddle/fluid/framework/lod_tensor.h" diff --git a/python/paddle/dataset/mnist.py b/python/paddle/dataset/mnist.py index 38addd0cfd9bd0afde7eefc57f2111b717b7e636..847ca187206f8932e5454ddad881a94910efb55f 100644 --- a/python/paddle/dataset/mnist.py +++ b/python/paddle/dataset/mnist.py @@ -21,10 +21,9 @@ parse training set and test set into paddle reader creators. from __future__ import print_function import paddle.dataset.common -import subprocess +import gzip import numpy -import platform -import tempfile +import struct from six.moves import range __all__ = ['train', 'test', 'convert'] @@ -41,51 +40,47 @@ TRAIN_LABEL_MD5 = 'd53e105ee54ea40749a09fcbcd1e9432' def reader_creator(image_filename, label_filename, buffer_size): def reader(): - if platform.system() == 'Darwin': - zcat_cmd = 'gzcat' - elif platform.system() == 'Linux': - zcat_cmd = 'zcat' - else: - raise NotImplementedError() - - # According to http://stackoverflow.com/a/38061619/724872, we - # cannot use standard package gzip here. - tmp_image_file = tempfile.TemporaryFile(prefix='paddle_dataset') - m = subprocess.Popen( - [zcat_cmd, image_filename], stdout=tmp_image_file).communicate() - tmp_image_file.seek(16) # skip some magic bytes - - # Python3 will not take stdout as file - tmp_label_file = tempfile.TemporaryFile(prefix='paddle_dataset') - l = subprocess.Popen( - [zcat_cmd, label_filename], stdout=tmp_label_file).communicate() - tmp_label_file.seek(8) # skip some magic bytes - - try: # reader could be break. 
- while True: - labels = numpy.fromfile( - tmp_label_file, 'ubyte', count=buffer_size).astype("int") - - if labels.size != buffer_size: - break # numpy.fromfile returns empty slice after EOF. - - images = numpy.fromfile( - tmp_image_file, 'ubyte', count=buffer_size * 28 * - 28).reshape((buffer_size, 28 * 28)).astype('float32') - - images = images / 255.0 * 2.0 - 1.0 - - for i in range(buffer_size): - yield images[i, :], int(labels[i]) - finally: - try: - m.terminate() - except: - pass - try: - l.terminate() - except: - pass + with gzip.GzipFile(image_filename, 'rb') as image_file: + img_buf = image_file.read() + with gzip.GzipFile(label_filename, 'rb') as label_file: + lab_buf = label_file.read() + + step_label = 0 + + offset_img = 0 + # read from Big-endian + # get file info from magic byte + # image file : 16B + magic_byte_img = '>IIII' + magic_img, image_num, rows, cols = struct.unpack_from( + magic_byte_img, img_buf, offset_img) + offset_img += struct.calcsize(magic_byte_img) + + offset_lab = 0 + # label file : 8B + magic_byte_lab = '>II' + magic_lab, label_num = struct.unpack_from(magic_byte_lab, + lab_buf, offset_lab) + offset_lab += struct.calcsize(magic_byte_lab) + + while True: + if step_label >= label_num: + break + fmt_label = '>' + str(buffer_size) + 'B' + labels = struct.unpack_from(fmt_label, lab_buf, offset_lab) + offset_lab += struct.calcsize(fmt_label) + step_label += buffer_size + + fmt_images = '>' + str(buffer_size * rows * cols) + 'B' + images_temp = struct.unpack_from(fmt_images, img_buf, + offset_img) + images = numpy.reshape(images_temp, ( + buffer_size, rows * cols)).astype('float32') + offset_img += struct.calcsize(fmt_images) + + images = images / 255.0 * 2.0 - 1.0 + for i in range(buffer_size): + yield images[i, :], int(labels[i]) return reader diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py index 2c17716500ababfab3216a5ec47fecca30065ff1..686550a3c8d7d55f06b03132124621c5d0db342f 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/fluid/__init__.py @@ -155,7 +155,8 @@ def __bootstrap__(): 'fraction_of_gpu_memory_to_use', 'cudnn_deterministic', 'enable_cublas_tensor_op_math', 'conv_workspace_size_limit', 'cudnn_exhaustive_search', 'memory_optimize_debug', 'selected_gpus', - 'sync_nccl_allreduce' + 'sync_nccl_allreduce', 'limit_of_tmp_allocation', + 'times_excess_than_required_tmp_allocation' ] core.init_gflags([sys.argv[0]] + diff --git a/python/paddle/fluid/data_feeder.py b/python/paddle/fluid/data_feeder.py index c280ff21eec8d1a90b8be9102d7eae119f38f2b1..7b70d19de5ca309441bdc1404e6e601af3c5b892 100644 --- a/python/paddle/fluid/data_feeder.py +++ b/python/paddle/fluid/data_feeder.py @@ -71,10 +71,25 @@ class DataToLoDTensorConverter(object): for each_data in data: self._feed_impl_(each_data, lod[1:], lod_level - 1) + def _check_shape(self, shape): + for s1, s2 in zip(self.shape, shape): + if s1 != s2 and s1 >= 0 and s2 >= 0: + raise ValueError( + "Shape not match. What is defined in data layer is {}, but receive {}". + format(self.shape, shape)) + def done(self): arr = numpy.array(self.data, dtype=self.dtype) - if self.shape and len(arr.shape) != len(self.shape): - arr = arr.reshape(self.shape) + if self.shape: + if len(arr.shape) != len(self.shape): + try: + arr = arr.reshape(self.shape) + except ValueError: + raise ValueError( + "Reshape error. 
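Since limit_of_tmp_allocation and times_excess_than_required_tmp_allocation are now in the whitelist that __bootstrap__ reads, they can be tuned through the usual FLAGS_* environment variables before paddle.fluid is first imported. A small sketch (the values are arbitrary examples, not recommendations):

import os

# Must run before the first `import paddle.fluid`; the flags are read only once,
# at import time, via the FLAGS_* environment mechanism.
os.environ['FLAGS_limit_of_tmp_allocation'] = str(64 * 1024 * 1024)        # bytes
os.environ['FLAGS_times_excess_than_required_tmp_allocation'] = '1.5'

import paddle.fluid as fluid  # noqa: E402  (import deliberately after the flags)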
What is defined in data layer is {}, but receive {}" + .format(self.shape, arr.shape)) + else: + self._check_shape(arr.shape) t = core.LoDTensor() t.set(arr, self.place) if self.lod_level > 0: @@ -152,17 +167,8 @@ class DataFeeder(object): raise TypeError("Feed list should contain a list of variable") self.feed_dtypes.append(each_var.dtype) self.feed_names.append(each_var.name) - shape = each_var.shape - batch_size_dim = -1 - for i, s in enumerate(shape): - if s < 0: - batch_size_dim = i - break - if batch_size_dim == -1: - raise ValueError("Variable {0} must has a batch size dimension", - each_var.name) self.feed_lod_level.append(each_var.lod_level) - self.feed_shapes.append(shape) + self.feed_shapes.append(each_var.shape) self.place = place diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py index 1a940b30c1564c7622f646b4697375179c607f91..0d06d0f2c9529f7f5dea58bc9710a24e559d8e00 100644 --- a/python/paddle/fluid/executor.py +++ b/python/paddle/fluid/executor.py @@ -382,9 +382,11 @@ class Executor(object): """ Close this executor. - You can no long use this executor after calling this method. + You can no longer use this executor after calling this method. For the distributed training, this method would free the resource on PServers related to the current Trainer. + TODO(typhoonzero): Define "no longer use" meaning? Can user create + a new Executor for the same program and run? TODO(panyx0718): Why ParallelExecutor doesn't have close? Example: @@ -397,7 +399,7 @@ class Executor(object): self.executor.close() self._closed = True - def _run_parallel(self, scope, feed, fetch_list, fetch_var_name, + def _run_parallel(self, program, scope, feed, fetch_list, fetch_var_name, return_numpy): if isinstance(feed, dict): feed_tensor_dict = dict() @@ -413,7 +415,7 @@ class Executor(object): self.executor.feed_and_split_tensor_into_local_scopes( feed_tensor_dict) elif isinstance(feed, list) or isinstance(feed, tuple): - if len(feed) != len(self._places): + if len(feed) != len(program._places): raise ValueError( "Feed a list of tensor, the list should be the same size as places" ) @@ -428,7 +430,7 @@ class Executor(object): tensor = each[feed_name] if not isinstance(tensor, core.LoDTensor): tmp = core.LoDTensor() - tmp.set(tensor, self._places[i]) + tmp.set(tensor, program._places[i]) tensor = tmp res_dict[feed_name] = tensor res.append(res_dict) @@ -462,7 +464,7 @@ class Executor(object): Args: program(Program|CompiledProgram): the program that need to run, - if not provided, then default_main_program will be used. + if not provided, then default_main_program (not compiled) will be used. feed(dict): feed variable map, e.g. {"image": ImageData, "label": LabelData} fetch_list(list): a list of variable or variable names that user want to get, run will return them according to this list. feed_var_name(str): the name for the input variable of feed Operator. 
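With the stricter conversion above, a feed whose per-sample shape disagrees with the data layer now raises ValueError instead of being silently reshaped or misread. A minimal sketch of the new behavior (CPU place, sizes chosen only for illustration):

import paddle.fluid as fluid

image = fluid.layers.data(name='image', shape=[784], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
feeder = fluid.DataFeeder(feed_list=[image, label], place=fluid.CPUPlace())

feeder.feed([([0.0] * 784, [7])])        # OK: matches the declared shape
try:
    feeder.feed([([0.0] * 783, [7])])    # one element short -> shape mismatch
except ValueError as e:
    print('rejected:', e)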
@@ -525,6 +527,7 @@ class Executor(object): self.executor = program._executor if program._is_data_parallel: return self._run_parallel( + program, scope=scope, feed=feed, fetch_list=fetch_list, diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 70767c962f551bdf3afea2237000a4cf93feb120..e9a9265931f432ebf7b2351cd6c74ece8b0f863a 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -19,6 +19,7 @@ from collections import defaultdict import contextlib import os import re +import traceback import six import numpy as np @@ -372,27 +373,21 @@ class Variable(object): self.stop_gradient = stop_gradient self.is_data = is_data if _in_imperative_mode(): - self._ivar = core.VarBase() + self._ivar = kwargs.get("ivar", None) + if not self._ivar: + self._ivar = core.VarBase() self._ivar.desc = self.desc self._ivar.stop_gradient = stop_gradient def _numpy(self): - tensor = self._ivar.value.get_tensor() + tensor = self._ivar.value().get_tensor() return np.array(tensor) def _backward(self): self._ivar._run_backward() def _gradient(self): - return np.array(self._ivar._grad()) - - @property - def _value(self): - return self._ivar.value - - @_value.setter - def _value(self, v): - self._ivar.value = v + return np.array(self._ivar._grad_value()) def __str__(self): return self.to_string(True) @@ -631,6 +626,11 @@ class Operator(object): if type is None: raise ValueError( "`type` to initilized an Operator can not be None.") + else: + callstack_var_name = op_maker.kOpCreationCallstackAttrName() + op_attrs[callstack_var_name] = list( + reversed(traceback.format_stack()))[1:] + self.desc.set_type(type) proto = OpProtoHolder.instance().get_op_proto(type) diff --git a/python/paddle/fluid/imperative/base.py b/python/paddle/fluid/imperative/base.py index c04dcc7e39be9946b561fd725647e87d7712d8b9..5d3ebb25a935cea6ec376e6bc044281dcba37337 100644 --- a/python/paddle/fluid/imperative/base.py +++ b/python/paddle/fluid/imperative/base.py @@ -45,7 +45,7 @@ def to_variable(value, block=None): name=None, shape=value.shape, dtype=value.dtype) - var = py_var._ivar.value + var = py_var._ivar.value() tensor = var.get_tensor() tensor.set(value, core.CPUPlace()) return py_var diff --git a/python/paddle/fluid/imperative/layers.py b/python/paddle/fluid/imperative/layers.py index d78d61eb3f02c27ec44806ae52e134068c2cb9be..6d3987c9d5437463960910834a2202be9fb32cfe 100644 --- a/python/paddle/fluid/imperative/layers.py +++ b/python/paddle/fluid/imperative/layers.py @@ -20,10 +20,12 @@ from paddle.fluid import core from paddle.fluid import framework from paddle.fluid.imperative import base -__all__ = ['PyLayer'] +__all__ = ['Layer', 'PyLayer'] -class PyLayer(core.Layer): +class Layer(core.Layer): + """Layers composed of operators.""" + def __init__(self, dtype=core.VarDesc.VarType.FP32, name=None): self._once_built = False self._dtype = dtype @@ -37,8 +39,56 @@ class PyLayer(core.Layer): self._once_built = True outputs = self.forward(*inputs) - return outputs def forward(self, *inputs): raise NotImplementedError + + def backward(self, *inputs): + raise ValueError("Layer shouldn't implement backward") + + +class PyLayer(core.PyLayer): + """Layers composed of user-defined python codes.""" + + def __init__(self): + super(PyLayer, self).__init__() + + @staticmethod + def forward(*inputs): + raise NotImplementedError + + @staticmethod + def backward(*douts): + raise NotImplementedError + + @classmethod + def __call__(cls, *inputs): + tracer = framework._imperative_tracer() + block = 
framework.default_main_program().current_block() + ivar_inputs = [x._ivar for x in inputs] + + if not hasattr(cls, 'forward_id'): + cls.forward_id = core.PyLayer.num_funcs() + 1 + PyLayer.register_func(cls.forward_id, cls.forward) + cls.backward_id = core.PyLayer.num_funcs() + 1 + PyLayer.register_func(cls.backward_id, cls.backward) + + iop = core.OpBase() + iop.forward_id = cls.forward_id + iop.backward_id = cls.backward_id + block.ops.append(iop) + ivars = tracer.py_trace(iop, ivar_inputs, False) + # ivars = core.PyLayer.apply(cls.forward, inputs) + ret = [] + for ivar in ivars: + tensor = ivar.value().get_tensor() + py_var = framework.Variable( + block, + type=core.VarDesc.VarType.LOD_TENSOR, + name=None, + shape=tensor.shape(), + dtype=tensor._dtype(), + ivar=ivar) + ret.append(py_var) + return ret diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py index 4f30417e99d21bcb66dacaab0257816c4d77f932..8754e5d4d0c8c829303f1fe9cd39ead36619ac3b 100644 --- a/python/paddle/fluid/imperative/nn.py +++ b/python/paddle/fluid/imperative/nn.py @@ -30,7 +30,7 @@ __all__ = [ ] -class Conv2D(layers.PyLayer): +class Conv2D(layers.Layer): def __init__(self, num_channels, num_filters, @@ -143,7 +143,7 @@ class Conv2D(layers.PyLayer): return self._helper.append_activation(pre_act) -class Pool2D(layers.PyLayer): +class Pool2D(layers.Layer): def __init__(self, pool_size=-1, pool_type="max", @@ -205,7 +205,7 @@ class Pool2D(layers.PyLayer): return pool_out -class FC(layers.PyLayer): +class FC(layers.Layer): def __init__(self, size, param_attr=None, diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 615a35ba916f813399dc21a87646884b3d01081e..56971cff43b919f946091cebf909731a23d60736 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -58,6 +58,7 @@ __all__ = [ 'adaptive_pool2d', 'adaptive_pool3d', 'batch_norm', + 'data_norm', 'beam_search_decode', 'conv2d_transpose', 'conv3d_transpose', @@ -180,6 +181,7 @@ __all__ = [ 'lstm', 'py_func', 'psroi_pool', + 'teacher_student_sigmoid_loss', 'huber_loss', ] @@ -2896,6 +2898,133 @@ def batch_norm(input, return helper.append_activation(batch_norm_out) +def data_norm(input, + act=None, + epsilon=1e-05, + param_attr=None, + data_layout='NCHW', + in_place=False, + use_mkldnn=False, + name=None, + moving_mean_name=None, + moving_variance_name=None, + do_model_average_for_mean_and_var=False): + """ + **Data Normalization Layer** + + Can be used as a normalizer function for conv2d and fully_connected operations. + The required data format for this layer is one of the following: + + 1. NHWC `[batch, in_height, in_width, in_channels]` + + 2. NCHW `[batch, in_channels, in_height, in_width]` + + :math:`input` is the input features over a mini-batch. + + .. math:: + + \\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\ + \ mini-batch\ mean \\\\ + \\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\ + \\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\ + \\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\ + \\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\ + y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift + + Args: + input(variable): The input variable which is a LoDTensor. + act(string, Default None): Activation type, linear|relu|prelu|... + epsilon(float, Default 1e-05): + param_attr(ParamAttr): The parameter attribute for Parameter `scale`. 
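As a quick illustration of the new imperative PyLayer, the sketch below defines a layer whose forward and backward are plain numpy functions and invokes it under imperative mode. It assumes fluid.imperative.guard() and fluid.imperative.base.to_variable() as the imperative entry points, and assumes forward/backward receive a list of numpy arrays; the MyTanh name is made up.

import numpy as np
import paddle.fluid as fluid

class MyTanh(fluid.imperative.PyLayer):
    """Example layer whose forward and backward are written in numpy."""

    def __init__(self):
        super(MyTanh, self).__init__()

    @staticmethod
    def forward(inputs):
        # `inputs` is assumed to arrive as a list of numpy arrays.
        return np.tanh(inputs[0])

    @staticmethod
    def backward(inputs):
        inp, out, dout = inputs
        return np.array(dout) * (1 - np.square(np.array(out)))

with fluid.imperative.guard():                       # assumed imperative scope
    x = fluid.imperative.base.to_variable(np.ones([2, 2], dtype=np.float32))
    outs = MyTanh()(x)                               # dispatched through Tracer.py_trace
    print(outs[0]._numpy())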
+ data_layout(string, default NCHW): NCHW|NHWC + in_place(bool, Default False): Make the input and output of batch norm reuse memory. + use_mkldnn(bool, Default false): ${use_mkldnn_comment} + name(string, Default None): A name for this layer(optional). If set None, the layer + will be named automatically. + moving_mean_name(string, Default None): The name of moving_mean which store the global Mean. + moving_variance_name(string, Default None): The name of the moving_variance which store the global Variance. + do_model_average_for_mean_and_var(bool, Default False): Do model average for mean and variance or not. + + Returns: + Variable: A tensor variable which is the result after applying data normalization on the input. + + Examples: + + .. code-block:: python + + data = fluid.layers.data(input=x, size=200, param_attr='fc1.w') + hidden2 = fluid.layers.data_norm(input=hidden1) + """ + helper = LayerHelper('data_norm', **locals()) + dtype = helper.input_dtype() + + input_shape = input.shape + if data_layout == 'NCHW': + channel_num = input_shape[1] + else: + if data_layout == 'NHWC': + channel_num = input_shape[-1] + else: + raise ValueError("unsupported data layout:" + data_layout) + + param_shape = [channel_num] + + batch_size_default = 1e4 + batch_sum_default = 0.0 + batch_square_sum_default = 1e4 + + if param_attr and isinstance(param_attr, dict): + batch_size_default = param_attr.get("batch_size", 1e4) + batch_sum_default = param_attr.get("batch_sum", 0.0) + batch_square_sum_default = param_attr.get("batch_square", 1e4) + + # create parameter + batch_size = helper.create_parameter( + attr=ParamAttr( + name=name + '.batch_size', + initializer=Constant(value=float(batch_size_default)), + trainable=True), + shape=param_shape, + dtype=input.dtype) + + batch_sum = helper.create_parameter( + attr=ParamAttr( + name=name + '.batch_sum', + initializer=Constant(value=float(batch_sum_default)), + trainable=True), + shape=param_shape, + dtype=input.dtype) + + batch_square_sum = helper.create_parameter( + attr=ParamAttr( + name=name + '.batch_square_sum', + initializer=Constant(value=float(batch_square_sum_default)), + trainable=True), + shape=param_shape, + dtype=input.dtype) + + means = helper.create_variable(dtype=dtype, stop_gradient=True) + scales = helper.create_variable(dtype=dtype, stop_gradient=True) + + data_norm_out = input if in_place else helper.create_variable(dtype=dtype) + + helper.append_op( + type="data_norm", + inputs={ + "X": input, + "BatchSize": batch_size, + "BatchSum": batch_sum, + "BatchSquareSum": batch_square_sum + }, + outputs={"Y": data_norm_out, + "Means": means, + "Scales": scales}, + attrs={"epsilon": epsilon, + "use_mkldnn": use_mkldnn}) + + return helper.append_activation(data_norm_out) + + @templatedoc() def layer_norm(input, scale=True, @@ -3064,9 +3193,9 @@ def group_norm(input, inputs['Bias'] = bias # create output - mean_out = helper.create_tmp_variable(dtype=dtype, stop_gradient=True) - variance_out = helper.create_tmp_variable(dtype=dtype, stop_gradient=True) - group_norm_out = helper.create_tmp_variable(dtype) + mean_out = helper.create_variable(dtype=dtype, stop_gradient=True) + variance_out = helper.create_variable(dtype=dtype, stop_gradient=True) + group_norm_out = helper.create_variable(dtype) helper.append_op( type="group_norm", @@ -8350,8 +8479,7 @@ def shape(input): """ helper = LayerHelper('shape', **locals()) - out = helper.create_variable_for_type_inference( - dtype=helper.input_dtype('input')) + out = 
helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='shape', inputs={'Input': input}, outputs={'Out': out}) @@ -9264,6 +9392,47 @@ def log_loss(input, label, epsilon=1e-4, name=None): return loss +def teacher_student_sigmoid_loss(input, + label, + soft_max_up_bound=15.0, + soft_max_lower_bound=-15.0): + """ + **Teacher Student Log Loss Layer** + + This layer accepts input predictions and target label and returns the + teacher_student loss. + + .. math:: + loss = max(x, 0) - x * z + log(1 + exp(-abs(x))) + max(x, 0) - x * z' + log(1 + exp(-abs(x))) + + Args: + input (Variable|list): a 2-D tensor with shape [N x 1], where N is the + batch size. This input is a probability computed + by the previous operator. + label (Variable|list): the ground truth which is a 2-D tensor with + shape [N x 1], where N is the batch size. + soft_max_up_bound (float): if input > soft_max_up_bound, will be bound + soft_max_lower_bound (float): if input < soft_max_lower_bound, will be bound + + Returns: + Variable: A 2-D tensor with shape [N x 1], the teacher_student_sigmoid_loss. + + Examples: + .. code-block:: python + cost = fluid.layers.teacher_student_sigmoid_loss(input=similarity, label=label) + """ + helper = LayerHelper('teacher_student_sigmoid_loss', **locals()) + out = helper.create_variable(dtype=input.dtype) + helper.append_op( + type='teacher_student_sigmoid_loss', + inputs={'X': [input], + 'Label': [label]}, + outputs={'Y': [out]}, + attrs={"soft_max_lower_bound": float(soft_max_lower_bound), \ + "soft_max_up_bound": float(soft_max_up_bound)}) + return out + + def add_position_encoding(input, alpha, beta, name=None): """ **Add Position Encoding Layer** diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 779cb5f961639aa919827a1c1726e974fdf1cbe1..f01a0eda9a711abb3265fe5bb86ecb702a6ac6aa 100644 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -195,22 +195,18 @@ class Optimizer(object): format(name, param.name)) return self._accumulators[name][param.name] - def _create_optimization_pass(self, - parameters_and_grads, - loss, - startup_program=None): + def _create_optimization_pass(self, parameters_and_grads): """Add optimization operators to update gradients to variables. Args: - loss(Variable): the target that this optimization is for. parameters_and_grads(list(tuple(Variable, Variable))): - a list of (variable, gradient) pair to update. + a list of (variable, gradient) pair to update. Returns: return_op_list: a list of operators that will complete one step of - optimization. This will include parameter update ops, global step - update ops and any other custom ops required by subclasses to manage - their internal state. + optimization. This will include parameter update ops, global step + update ops and any other custom ops required by subclasses to manage + their internal state. """ # This is a default implementation of create_optimization_pass that # can be shared by most optimizers. This implementation assumes that @@ -219,37 +215,33 @@ class Optimizer(object): # _create_accumulators method if it needs to create accumulators # for parameters and extend _finish_update method to add custom ops. 
- # Create any accumulators - program = loss.block.program - self._dtype = loss.dtype - with program_guard(program, startup_program): - global_block = framework.default_main_program().global_block() - start = len(global_block.ops) - self.helper = LayerHelper(self.__class__.__name__) - self._create_accumulators(loss.block, - [p[0] for p in parameters_and_grads]) - self._create_global_learning_rate() - - optimize_ops = [] - for param_and_grad in parameters_and_grads: - if param_and_grad[1] is None: - continue - with param_and_grad[0].block.program._optimized_guard( - param_and_grad), name_scope("optimizer"): - if param_and_grad[0].trainable is True: - optimize_op = self._append_optimize_op(loss.block, - param_and_grad) - optimize_ops.append(optimize_op) - - # Get custom finish ops for subclasses - # FIXME: Need to fix this once we figure out how to handle dependencies - self._finish_update(loss.block, parameters_and_grads) - - end = len(global_block.ops) - return global_block._slice_ops(start, end) - - def _process_distribute_lookuptable(self, param_grads, loss, - startup_program): + # Allways called under program_guard use global block as loss block + global_block = framework.default_main_program().global_block() + start = len(global_block.ops) + self.helper = LayerHelper(self.__class__.__name__) + self._create_accumulators(global_block, + [p[0] for p in parameters_and_grads]) + self._create_global_learning_rate() + + optimize_ops = [] + for param_and_grad in parameters_and_grads: + if param_and_grad[1] is None: + continue + with param_and_grad[0].block.program._optimized_guard( + param_and_grad), name_scope("optimizer"): + if param_and_grad[0].trainable is True: + optimize_op = self._append_optimize_op(global_block, + param_and_grad) + optimize_ops.append(optimize_op) + + # Get custom finish ops for subclasses + # FIXME: Need to fix this once we figure out how to handle dependencies + self._finish_update(global_block, parameters_and_grads) + + end = len(global_block.ops) + return global_block._slice_ops(start, end) + + def _process_distribute_lookuptable(self, param_grads): """ Because distribute lookup table only support SGD optimizer for now, not support other optimizer and regularization, so we should find the table parameter out, @@ -259,7 +251,8 @@ class Optimizer(object): :param loss: the loss variable. 
:param startup_program: the startup program """ - program = loss.block.program + program = framework.default_main_program() + global_block = framework.default_main_program().global_block() table_name = find_distributed_lookup_table(program) table_param = None table_grad = None @@ -275,38 +268,121 @@ class Optimizer(object): new_param_grads.append((p, g)) sgd_op = None if table_param is not None: - with program_guard(program, startup_program): - param_and_grad = [table_param, table_grad] - with table_param.block.program._optimized_guard(param_and_grad), \ - framework.name_scope("optimizer"): - self._create_global_learning_rate() - # create the optimize op - sgd_op = loss.block.append_op( - type='sgd', - inputs={ - "Param": table_param, - "Grad": table_grad, - "LearningRate": - self._create_param_lr(param_and_grad) - }, - outputs={"ParamOut": param_and_grad[0]}) + param_and_grad = [table_param, table_grad] + with table_param.block.program._optimized_guard(param_and_grad), \ + framework.name_scope("optimizer"): + self._create_global_learning_rate() + # create the optimize op + sgd_op = global_block.append_op( + type='sgd', + inputs={ + "Param": table_param, + "Grad": table_grad, + "LearningRate": self._create_param_lr(param_and_grad) + }, + outputs={"ParamOut": param_and_grad[0]}) return new_param_grads, (table_param, table_grad), sgd_op + def backward(self, + loss, + startup_program=None, + parameter_list=None, + no_grad_set=None, + callbacks=None): + """ + First part of `minimize`, do auto-diff to append backward ops for + the current program. + + Args: + loss (Variable): loss variable to run optimizations. + startup_program (Program): startup_program for initializing parameters + in `parameter_list`. + parameter_list (list): list of Variables to update. + no_grad_set (set|None): set of Variables should be ignored. + callbacks (list|None): list of callables to run when appending backward + operator for one parameter. + + Return: + list: list of (param, grad) pair, grad is the output of backward. + + Examples: + See examples in `apply_gradients`. + """ + if callbacks is None: + callbacks = [error_clip_callback] + else: + assert (isinstance(callbacks, list)) + callbacks.append(error_clip_callback) + return append_backward(loss, parameter_list, no_grad_set, callbacks) + + def apply_gradients(self, params_grads): + """ + Second part of `minimize`, appending optimization operators for + given `params_grads` pairs. + + Args: + params_grads (list): list of (param, grad) pair to do optimization. + + Returns: + list: A list of operators appended to the current program. + + Examples: + .. code-block:: python + + loss = network() + optimizer = fluid.optimizer.SGD(learning_rate=0.1) + params_grads = optimizer.backward(loss) + # you may append operations for params_grads here + # ... 
+ optimizer.apply_gradients(params_grads) + """ + params_grads = sorted(params_grads, key=lambda x: x[0].name) + + params_grads, table_param_and_grad, table_optimize_op = \ + self._process_distribute_lookuptable(params_grads) + + params_grads = append_gradient_clip_ops(params_grads) + + # Add regularization if any + params_grads = append_regularization_ops(params_grads, + self.regularization) + + optimize_ops = self._create_optimization_pass(params_grads) + if table_optimize_op is not None: + optimize_ops.append(table_optimize_op) + params_grads.append(table_param_and_grad) + + return optimize_ops + def minimize(self, loss, startup_program=None, parameter_list=None, no_grad_set=None): - """Add operations to minimize `loss` by updating `parameter_list`. + """ + Add operations to minimize `loss` by updating `parameter_list`. - This method combines interface `append_backward()` and - `create_optimization_pass()` into one. + This method combines interface `backward()` and + `apply_gradients()` into one. + + Args: + loss (Variable): loss variable to run optimizations. + startup_program (Program): startup_program for initializing parameters + in `parameter_list`. + parameter_list (list): list of Variables to update. + no_grad_set (set|None): set of Variables should be ignored. + + Returns: + tuple: (optimize_ops, params_grads) which are, list of operators appended; + and list of (param, grad) Variables pair for optimization. """ + self._dtype = loss.dtype + program = loss.block.program + optimize_ops = [] if imperative_base.enabled(): if parameter_list is not None: params_grads = parameter_list else: - program = loss.block.program parameters = program.global_block().all_parameters() params_grads = [] for param in parameters: @@ -314,32 +390,16 @@ class Optimizer(object): grad_var = Variable( block=loss.block, name=param._ivar._grad_name(), - stop_gradient=True) - grad_var._value = param._ivar.grad_value + stop_gradient=True, + ivar=param._ivar._grad_ivar()) params_grads.append((param, grad_var)) - - optimize_ops = self._create_optimization_pass(params_grads, loss, - startup_program) + with program_guard(program, startup_program): + optimize_ops = self._create_optimization_pass(params_grads) else: - params_grads = append_backward(loss, parameter_list, no_grad_set, - [error_clip_callback]) - - params_grads = sorted(params_grads, key=lambda x: x[0].name) - - params_grads, table_param_and_grad, table_optimize_op = \ - self._process_distribute_lookuptable(params_grads, loss, startup_program) - - params_grads = append_gradient_clip_ops(params_grads) - - # Add regularization if any - params_grads = append_regularization_ops(params_grads, - self.regularization) - - optimize_ops = self._create_optimization_pass(params_grads, loss, - startup_program) - if table_optimize_op is not None: - optimize_ops.append(table_optimize_op) - params_grads.append(table_param_and_grad) + with program_guard(program, startup_program): + params_grads = self.backward(loss, startup_program, + parameter_list, no_grad_set) + optimize_ops = self.apply_gradients(params_grads) return optimize_ops, params_grads diff --git a/python/paddle/fluid/tests/test_data_feeder.py b/python/paddle/fluid/tests/test_data_feeder.py index 01de564aa438e5f14a5c578f7bbbfb475155ca55..16a33fd3ab3c794494687ba39278e327560686ec 100644 --- a/python/paddle/fluid/tests/test_data_feeder.py +++ b/python/paddle/fluid/tests/test_data_feeder.py @@ -30,6 +30,12 @@ class TestDataFeeder(unittest.TestCase): self.assertEqual(result['image'].recursive_sequence_lengths(), 
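With minimize() now split into backward() plus apply_gradients(), user code can inspect or rewrite the (param, grad) pairs between the two halves. A hedged sketch of that workflow on the default main program; the gradient scaling step is only an example of what might be inserted there:

import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
pred = fluid.layers.fc(input=x, size=1)
loss = fluid.layers.mean(fluid.layers.square_error_cost(input=pred, label=y))

opt = fluid.optimizer.SGD(learning_rate=0.01)
params_grads = opt.backward(loss)                    # first half of minimize()

# Arbitrary per-gradient processing between the two halves.
scaled = [(p, fluid.layers.scale(g, scale=0.5)) for p, g in params_grads]

opt.apply_gradients(scaled)                          # second half of minimize()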
[]) self.assertEqual(result['label'].recursive_sequence_lengths(), []) + try: + result = feeder.feed([([0] * 783, [9]), ([1] * 783, [1])]) + self.assertTrue(False) + except ValueError: + self.assertTrue(True) + def test_lod_level_1_converter(self): # lod_level = 1 # each sentence has a different number of words diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt index ec8b19c7ba07a9e57a32277ff3fc34b0ea25a819..e86af8b7ed94028ac077e4267645b8af0a1a5dfe 100644 --- a/python/paddle/fluid/tests/unittests/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt @@ -107,7 +107,7 @@ if(WITH_DISTRIBUTE) endif() py_test_modules(test_parallel_executor_crf MODULES test_parallel_executor_crf SERIAL) py_test_modules(test_parallel_executor_fetch_feed MODULES test_parallel_executor_fetch_feed SERIAL) -set_tests_properties(test_parallel_executor_fetch_feed PROPERTIES TIMEOUT 150) +set_tests_properties(test_parallel_executor_fetch_feed PROPERTIES TIMEOUT 450) py_test_modules(test_parallel_executor_transformer MODULES test_parallel_executor_transformer SERIAL) if(NOT APPLE) py_test_modules(test_image_classification_resnet MODULES test_image_classification_resnet SERIAL) diff --git a/python/paddle/fluid/tests/unittests/dist_ctr.py b/python/paddle/fluid/tests/unittests/dist_ctr.py index 65969824338a5c354415cac8a34bd3863716bef4..fd09d47258fdfbf6d4a285df7d53c81f7489f39e 100644 --- a/python/paddle/fluid/tests/unittests/dist_ctr.py +++ b/python/paddle/fluid/tests/unittests/dist_ctr.py @@ -31,6 +31,7 @@ fluid.default_main_program().random_seed = 1 class TestDistCTR2x2(TestDistRunnerBase): def get_model(self, batch_size=2): + dnn_input_dim, lr_input_dim = dist_ctr_reader.load_data_meta() """ network definition """ dnn_data = fluid.layers.data( @@ -97,7 +98,14 @@ class TestDistCTR2x2(TestDistRunnerBase): inference_program = paddle.fluid.default_main_program().clone() - sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.0001) + regularization = None + use_l2_decay = bool(os.getenv('USE_L2_DECAY', 0)) + if use_l2_decay: + regularization = fluid.regularizer.L2DecayRegularizer( + regularization_coeff=1e-1) + + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.0001, + regularization=regularization) sgd_optimizer.minimize(avg_cost) dataset = dist_ctr_reader.Dataset() diff --git a/python/paddle/fluid/tests/unittests/dist_se_resnext.py b/python/paddle/fluid/tests/unittests/dist_se_resnext.py index 5da370570680e9f10a22ad882e3346e6381dfe63..c3d84dba0ae27db992bb999291625c2975f7faa9 100644 --- a/python/paddle/fluid/tests/unittests/dist_se_resnext.py +++ b/python/paddle/fluid/tests/unittests/dist_se_resnext.py @@ -235,7 +235,6 @@ class DistSeResneXt2x2(TestDistRunnerBase): bd = [step * e for e in epochs] base_lr = 0.1 - lr = [] lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)] optimizer = fluid.optimizer.Momentum( diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_int8_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_int8_mkldnn_op.py index def188bfa632b5b1bb6b2621091d0526ffa345dc..5ad376cb08e488e85be6369a91d4e81031e9e9db 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_int8_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_int8_mkldnn_op.py @@ -25,6 +25,15 @@ from test_conv2d_op import conv2d_forward_naive, TestConv2dOp def conv2d_forward_refer(input, filter, group, conv_param): out, in_n, out_h, out_w, out_c = conv2d_forward_naive(input, filter, group, conv_param) + size = [in_n, out_c, 
out_h, out_w] + return format_reorder(out, size) + + +def format_reorder(out, size): + in_n = size[0] + out_h = size[2] + out_w = size[3] + out_c = size[1] out_tmp = np.zeros((in_n, out_h, out_w, out_c)) for n in range(in_n): for i in range(out_h): @@ -48,6 +57,7 @@ class TestConv2dInt8Op(TestConv2dOp): self.init_dilation() self.init_test_case() self.init_fuse_relu() + self.init_fuse_residual() self.init_data_type() conv2d_param = { @@ -79,11 +89,24 @@ class TestConv2dInt8Op(TestConv2dOp): np.round((input_shift) * self.scale_in).astype(np.int32), filter_int, self.groups, conv2d_param).astype(np.float32) * scale_output_shift - if self.fuse_relu: - output = np.maximum(np.round(output1 - output2), - 0).astype(self.dsttype) + if self.fuse_residual: + input_residual = np.random.randint( + -5, 5, self.input_residual_size).astype(self.srctype) + output_tmp = np.round(output1 - output2 + format_reorder( + input_residual, self.input_residual_size).astype( + self.srctype) * (self.scale_out / self.scale_in_eltwise + )) + if self.fuse_relu: + output = np.maximum(output_tmp, 0).astype(self.dsttype) + else: + output = output_tmp.astype(self.dsttype) else: - output = np.round(output1 - output2).astype(self.dsttype) + if self.fuse_relu: + output = np.maximum(np.round(output1 - output2), + 0).astype(self.dsttype) + else: + output = np.round(output1 - output2).astype(self.dsttype) + else: filter_int = np.round(filter * self.scale_weights[0]).astype(np.int32) @@ -92,21 +115,35 @@ class TestConv2dInt8Op(TestConv2dOp): output1 = conv2d_forward_refer( input.astype(np.int32), filter_int, self.groups, conv2d_param).astype(np.float32) - if self.fuse_relu: - output = np.maximum( - np.round(output1 * (self.scale_out / ( - self.scale_in * self.scale_weights[0]))), - 0).astype(self.dsttype) + if self.fuse_residual: + input_residual = np.random.randint( + 0, 10, self.input_residual_size).astype(self.srctype) + output_tmp = np.round(output1 * (self.scale_out / ( + self.scale_in * self.scale_weights[0])) + format_reorder( + input_residual, self.input_residual_size).astype( + np.int32) * (self.scale_out / self.scale_in_eltwise + )) + output_tmp2 = np.round(output1 * ( + self.scale_out / (self.scale_in * self.scale_weights[0]))) + if self.fuse_relu: + output = np.maximum(output_tmp, 0).astype(self.dsttype) + else: + output = output_tmp.astype(self.dsttype) else: - output = np.round(output1 * (self.scale_out / ( - self.scale_in * - self.scale_weights[0]))).astype(self.dsttype) + if self.fuse_relu: + output = np.maximum(output_tmp2, 0).astype(self.dsttype) + else: + output = output_tmp2.astype(self.dsttype) self.inputs = { 'Input': OpTest.np_dtype_to_fluid_dtype(input.astype(self.srctype)), 'Filter': OpTest.np_dtype_to_fluid_dtype(filter) } + if self.fuse_residual: + self.inputs['ResidualData'] = OpTest.np_dtype_to_fluid_dtype( + input_residual) + self.attrs = { 'strides': self.stride, 'paddings': self.pad, @@ -119,7 +156,9 @@ class TestConv2dInt8Op(TestConv2dOp): 'Scale_in': self.scale_in, 'Scale_out': self.scale_out, 'Scale_weights': self.scale_weights, - 'fuse_relu': self.fuse_relu + 'Scale_in_eltwise': self.scale_in_eltwise, + 'fuse_relu': self.fuse_relu, + 'fuse_residual_connection': self.fuse_residual } self.outputs = {'Output': output} @@ -137,11 +176,14 @@ class TestConv2dInt8Op(TestConv2dOp): def init_test_case(self): TestConv2dOp.init_test_case(self) + self.input_size = [1, 1, 5, 5] # NCHW f_c = self.input_size[1] // self.groups - self.filter_size = [1, f_c, 3, 3] + self.input_residual_size = [1, 2, 3, 3] + 
self.filter_size = [2, f_c, 3, 3] self.scale_in = 1.0 self.scale_out = 0.5 self.scale_weights = [10.0] + self.scale_in_eltwise = 0.6 def init_data_type(self): self.srctype = np.uint8 @@ -150,8 +192,11 @@ class TestConv2dInt8Op(TestConv2dOp): def init_fuse_relu(self): self.fuse_relu = True + def init_fuse_residual(self): + self.fuse_residual = True + -#--------------------test conv2d u8 in and u8 out-------------------- +#--------------------test conv2d u8 in and u8 out with residual fuse-------------------- class TestConv2d(TestConv2dInt8Op): @@ -159,18 +204,21 @@ class TestConv2d(TestConv2dInt8Op): self.pad = [0, 0] self.stride = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW + self.input_residual_size = [2, 6, 3, 3] assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [6, f_c, 3, 3] self.scale_in = 1.0 self.scale_out = 0.5 self.scale_weights = [10.0] + self.scale_in_eltwise = 0.6 class TestWithPad(TestConv2d): def init_test_case(self): TestConv2d.init_test_case(self) self.pad = [1, 1] + self.input_residual_size = [2, 6, 5, 5] class TestWithGroup(TestConv2d): @@ -183,12 +231,14 @@ class TestWithStride(TestConv2dInt8Op): self.pad = [1, 1] self.stride = [2, 2] self.input_size = [2, 3, 6, 6] + self.input_residual_size = [2, 6, 3, 3] assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [6, f_c, 3, 3] self.scale_in = 1.0 self.scale_out = 0.8 self.scale_weights = [10.0] + self.scale_in_eltwise = 0.5 class TestWith1x1(TestConv2dInt8Op): @@ -196,12 +246,14 @@ class TestWith1x1(TestConv2dInt8Op): self.pad = [0, 0] self.stride = [1, 1] self.input_size = [1, 3, 5, 5] + self.input_residual_size = [1, 6, 5, 5] assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [6, f_c, 1, 1] self.scale_in = 1.0 self.scale_out = 0.5 self.scale_weights = [12.0] + self.scale_in_eltwise = 0.5 class TestWithInput1x1Filter1x1(TestConv2dInt8Op): @@ -209,24 +261,29 @@ class TestWithInput1x1Filter1x1(TestConv2dInt8Op): self.pad = [0, 0] self.stride = [1, 1] self.input_size = [2, 3, 1, 1] + self.input_residual_size = [2, 6, 1, 1] assert np.mod(self.input_size[1], self.groups) == 0 f_c = self.input_size[1] // self.groups self.filter_size = [6, f_c, 1, 1] self.scale_in = 1.0 self.scale_out = 0.5 self.scale_weights = [10.0] + self.scale_in_eltwise = 0.8 def init_group(self): self.groups = 3 -def init_data_type_with_fusion(self, input_dt, fuse_relu): +def init_data_type_with_fusion(self, input_dt, fuse_relu, fuse_residual): self.srctype = input_dt self.dsttype = np.uint8 if fuse_relu else np.int8 def init_fuse_relu(self): self.fuse_relu = fuse_relu + def init_fuse_residual(self): + self.fuse_residual = fuse_residual + def create_test_int8_class(parent): @@ -234,29 +291,68 @@ def create_test_int8_class(parent): class TestS8U8Case(parent): def init_data_type(self): - init_data_type_with_fusion(self, np.int8, True) + init_data_type_with_fusion(self, np.int8, True, False) #--------------------test conv2d s8 in and s8 out-------------------- class TestS8S8Case(parent): def init_data_type(self): - init_data_type_with_fusion(self, np.int8, False) + init_data_type_with_fusion(self, np.int8, False, False) #--------------------test conv2d u8 in and s8 out-------------------- class TestU8S8Case(parent): def init_data_type(self): - init_data_type_with_fusion(self, np.uint8, False) + init_data_type_with_fusion(self, np.uint8, False, False) + + #--------------------test 
conv2d u8 in and u8 out without residual fuse-------------------- + + class TestU8U8Case(parent): + def init_data_type(self): + init_data_type_with_fusion(self, np.uint8, True, False) - cls_name_s8u8 = "{0}_relu_{1}".format(parent.__name__, "1") - cls_name_s8s8 = "{0}_relu_{1}".format(parent.__name__, "0") - cls_name_u8s8 = "{0}_relu_{1}".format(parent.__name__, "0") + #--------------------test conv2d s8 in and u8 out with residual fuse-------------------- + + class TestS8U8ResCase(parent): + def init_data_type(self): + init_data_type_with_fusion(self, np.int8, True, True) + + #--------------------test conv2d s8 in and s8 out with residual fuse-------------------- + + class TestS8S8ResCase(parent): + def init_data_type(self): + init_data_type_with_fusion(self, np.int8, False, True) + + #--------------------test conv2d u8 in and s8 out with residual fuse-------------------- + + class TestU8S8ResCase(parent): + def init_data_type(self): + init_data_type_with_fusion(self, np.uint8, False, True) + + cls_name_s8u8 = "{0}_relu_{1}_residual_0".format(parent.__name__, "1") + cls_name_s8s8 = "{0}_relu_{1}_residual_0".format(parent.__name__, "0") + cls_name_u8s8 = "{0}_relu_{1}_residual_0".format(parent.__name__, "0") + cls_name_u8u8 = "{0}_relu_{1}_residual_0".format(parent.__name__, "1") + cls_name_s8u8_re_1 = "{0}_relu_{1}_residual_{2}".format(parent.__name__, + "1", "1") + cls_name_s8s8_re_1 = "{0}_relu_{1}_residual_{2}".format(parent.__name__, + "0", "1") + cls_name_u8s8_re_1 = "{0}_relu_{1}_residual_{2}".format(parent.__name__, + "0", "1") TestS8U8Case.__name__ = cls_name_s8u8 TestS8S8Case.__name__ = cls_name_s8s8 TestU8S8Case.__name__ = cls_name_u8s8 + TestU8U8Case.__name__ = cls_name_u8u8 + TestS8U8ResCase.__name__ = cls_name_s8u8_re_1 + TestS8S8ResCase.__name__ = cls_name_s8s8_re_1 + TestU8S8ResCase.__name__ = cls_name_u8s8_re_1 globals()[cls_name_s8u8] = TestS8U8Case globals()[cls_name_s8s8] = TestS8S8Case globals()[cls_name_u8s8] = TestU8S8Case + globals()[cls_name_u8u8] = TestU8U8Case + globals()[cls_name_s8u8_re_1] = TestS8U8ResCase + globals()[cls_name_s8s8_re_1] = TestS8S8ResCase + globals()[cls_name_u8s8_re_1] = TestU8S8ResCase create_test_int8_class(TestConv2dInt8Op) diff --git a/python/paddle/fluid/tests/unittests/test_dist_ctr.py b/python/paddle/fluid/tests/unittests/test_dist_ctr.py index b2d979729bc9b2546375cb657f78abe0d8c2dcc7..cc11764d55952741a64676752692eda5cdcc71c0 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_ctr.py +++ b/python/paddle/fluid/tests/unittests/test_dist_ctr.py @@ -18,7 +18,6 @@ import unittest from test_dist_base import TestDistBase -# FIXME(tangwei): sum op can not handle when inputs is empty. 
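# A minimal sketch of the dynamic test-class pattern used by
# create_test_int8_class above: subclasses are generated, renamed, and
# registered through globals() so unittest discovery picks them up. The names
# below (BaseCase, create_relu_case) are hypothetical, not from the patch;
# the generated class names must stay unique or a later globals() assignment
# silently replaces an earlier test case.
import unittest


class BaseCase(unittest.TestCase):
    use_relu = False

    def test_flag_is_boolean(self):
        self.assertIn(self.use_relu, (True, False))


def create_relu_case(parent):
    class ReluCase(parent):
        use_relu = True

    cls_name = "{0}_relu_1".format(parent.__name__)
    ReluCase.__name__ = cls_name
    globals()[cls_name] = ReluCase  # unique key, so nothing is overwritten


create_relu_case(BaseCase)

if __name__ == '__main__':
    unittest.main()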
class TestDistCTR2x2(TestDistBase): def _setup_config(self): self._sync_mode = True @@ -28,5 +27,19 @@ class TestDistCTR2x2(TestDistBase): self.check_with_place("dist_ctr.py", delta=1e-7, check_error_log=False) +class TestDistCTRWithL2Decay2x2(TestDistBase): + def _setup_config(self): + self._sync_mode = True + self._enforce_place = "CPU" + + def test_dist_ctr(self): + need_envs = {"USE_L2_DECAY": "1"} + self.check_with_place( + "dist_ctr.py", + delta=1e-7, + check_error_log=False, + need_envs=need_envs) + + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py index 81b0b667814e851e8bd47ae1a3b0bf00a9a73ecd..bc3c422f2f68b79b2d938e25625093b2ce8977bb 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py @@ -22,6 +22,7 @@ import unittest import paddle import paddle.fluid.core as core import paddle.fluid as fluid +from paddle.fluid import compiler def train(network, use_cuda, use_parallel_executor, batch_size=32, pass_num=2): @@ -57,19 +58,19 @@ def train(network, use_cuda, use_parallel_executor, batch_size=32, pass_num=2): exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) + train_cp = compiler.CompiledProgram(fluid.default_main_program()) if use_parallel_executor: - train_exe = fluid.ParallelExecutor( - use_cuda=use_cuda, loss_name=cost.name) + train_cp = train_cp.with_data_parallel(loss_name=cost.name) fetch_list = [cost.name] else: - train_exe = exe fetch_list = [cost] for pass_id in six.moves.xrange(pass_num): batch_id = 0 for data in reader(): - train_exe.run(feed=data, - fetch_list=fetch_list if batch_id % 4 == 0 else []) + exe.run(train_cp, + feed=data, + fetch_list=fetch_list if batch_id % 4 == 0 else []) batch_id += 1 if batch_id > 16: break diff --git a/python/paddle/fluid/tests/unittests/test_fusion_seqpool_concat_op.py b/python/paddle/fluid/tests/unittests/test_fusion_seqpool_concat_op.py new file mode 100644 index 0000000000000000000000000000000000000000..8a6837dae2c800ba7059f77978aa7bd3c2f50136 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_fusion_seqpool_concat_op.py @@ -0,0 +1,118 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
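# A minimal sketch of the compiler.CompiledProgram convention that the changes
# above (for example test_eager_deletion_dynamic_rnn_base.py) adopt in place of
# fluid.ParallelExecutor: compile the main program once, then run it through the
# plain Executor. The tiny network and the names x/y/pred are hypothetical.
import os

import numpy as np
import paddle.fluid as fluid
from paddle.fluid import compiler

os.environ['CPU_NUM'] = '2'  # the tests in this diff set CPU_NUM explicitly too

x = fluid.layers.data(name='x', shape=[4], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
pred = fluid.layers.fc(input=x, size=1)
loss = fluid.layers.mean(fluid.layers.square_error_cost(input=pred, label=y))
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())

# Data-parallel compilation replaces the old ParallelExecutor construction.
train_cp = compiler.CompiledProgram(
    fluid.default_main_program()).with_data_parallel(loss_name=loss.name)
feed = {
    'x': np.random.rand(8, 4).astype('float32'),
    'y': np.random.rand(8, 1).astype('float32'),
}
loss_val, = exe.run(train_cp, feed=feed, fetch_list=[loss.name])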
+ +from __future__ import print_function + +import unittest +import numpy as np +from op_test import OpTest +from test_reorder_lod_tensor import convert_to_offset +from test_seq_pool import compute_seqpool_sum, compute_seqpool_avg, compute_seqpool_sqrt + + +class TestFusionSeqPoolConcatOp(OpTest): + def setUp(self): + self.w = 11 + self.lods = [[[2, 3, 5]], [[1, 5, 2]]] + self.set_conf() + self.set_pooltype() + self.op_type = 'fusion_seqpool_concat' + self.axis = 1 + bs = len(self.lods[0][0]) + inputs = [] + outs = [] + i = 0 + for lod in self.lods: + assert bs == len(lod[0]), 'All lod size should be equal' + x = np.random.uniform(0.1, 1, + [sum(lod[0]), self.w]).astype('float32') + offset = convert_to_offset(lod) + out = np.zeros((bs, self.w)).astype('float32') + if self.pooltype == "SUM": + compute_seqpool_sum(x, offset, out) + elif self.pooltype == "AVERAGE": + compute_seqpool_avg(x, offset, out) + elif self.pooltype == "SQRT": + compute_seqpool_sqrt(x, offset, out) + else: + raise Exception("Unsupported pool type!") + inputs.append(('x_{0}'.format(i), (x, lod))) + outs.append(out) + i = i + 1 + + self.inputs = {'X': inputs} + self.outputs = {'Out': np.concatenate(outs, axis=self.axis)} + self.attrs = { + 'pooltype': self.pooltype, + 'axis': self.axis, + } + + def set_pooltype(self): + self.pooltype = "SUM" + + def set_conf(self): + pass + + def test_check_output(self): + self.check_output() + + +class TestFusionSeqPoolConcatOpCase1(TestFusionSeqPoolConcatOp): + def set_conf(self): + self.lods = [[[1]]] + + +class TestFusionSeqPoolConcatOpCase2(TestFusionSeqPoolConcatOp): + def set_conf(self): + self.lods = [[[1]], [[1]], [[1]]] + + +class TestFusionSeqPoolConcatOpCase3(TestFusionSeqPoolConcatOp): + def set_conf(self): + self.lods = [[[1, 3, 4, 6]]] + self.w = 10 + + +class TestFusionSeqPoolConcatOpCase4(TestFusionSeqPoolConcatOp): + def set_conf(self): + self.lods = [[[2, 13, 4]], [[1, 1, 1]], [[5, 3, 1]], [[9, 10, 3]]] + self.w = 3 + + +## test avg pool and sqrt +def create_test_avg_sqrt_class(parent): + class TestSeqPoolAvgCase(parent): + def set_pooltype(self): + self.pooltype = "AVERAGE" + + class TestSeqPoolSqrtCase(parent): + def set_pooltype(self): + self.pooltype = "SQRT" + + cls_name_avg = "{0}_{1}".format(parent.__name__, "avg") + cls_name_sqrt = "{0}_{1}".format(parent.__name__, "sqrt") + TestSeqPoolAvgCase.__name__ = cls_name_avg + TestSeqPoolSqrtCase.__name__ = cls_name_sqrt + globals()[cls_name_avg] = TestSeqPoolAvgCase + globals()[cls_name_sqrt] = TestSeqPoolSqrtCase + + +create_test_avg_sqrt_class(TestFusionSeqPoolConcatOp) +create_test_avg_sqrt_class(TestFusionSeqPoolConcatOpCase1) +create_test_avg_sqrt_class(TestFusionSeqPoolConcatOpCase2) +create_test_avg_sqrt_class(TestFusionSeqPoolConcatOpCase3) +create_test_avg_sqrt_class(TestFusionSeqPoolConcatOpCase4) + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_imperative.py b/python/paddle/fluid/tests/unittests/test_imperative.py index 1dc13ec74e8da1f13d447950b3c7822bbbecb2a7..86baff3c589d7b8a14938886b3e2104b0beb1cc9 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative.py +++ b/python/paddle/fluid/tests/unittests/test_imperative.py @@ -15,6 +15,7 @@ import contextlib import unittest import numpy as np +import sys import paddle.fluid as fluid from paddle.fluid import core @@ -22,7 +23,7 @@ from paddle.fluid.imperative.nn import FC from test_imperative_base import new_program_scope -class MyLayer(fluid.imperative.PyLayer): +class 
MyLayer(fluid.imperative.Layer): def __init__(self): super(MyLayer, self).__init__() @@ -34,7 +35,35 @@ class MyLayer(fluid.imperative.PyLayer): return [x] -class MLP(fluid.imperative.PyLayer): +class MyPyLayer(fluid.imperative.PyLayer): + def __init__(self): + super(MyPyLayer, self).__init__() + + @staticmethod + def forward(inputs): + sys.stderr.write('before forward\n') + ret = np.tanh(inputs[0]) + sys.stderr.write('after forward: %s\n' % ret) + tensor = core.LoDTensor() + tensor.set(ret, core.CPUPlace()) + return tuple([tensor]) + + @staticmethod + def backward(inputs): + sys.stderr.write('calling into backward: %s\n' % str(inputs)) + inp, out, dout = inputs + inp = np.array(inp) + out = np.array(out) + dout = np.array(dout) + sys.stderr.write('calling into backward: %s, %s, %s\n' % + (inp, out, dout)) + ret = np.array(dout) * (1 - np.square(np.array(out))) + tensor = core.LoDTensor() + tensor.set(ret, core.CPUPlace()) + return tuple([tensor]) + + +class MLP(fluid.imperative.Layer): def __init__(self): super(MLP, self).__init__() self._fc1 = FC(3, @@ -56,9 +85,77 @@ class TestImperative(unittest.TestCase): with fluid.imperative.guard(): cl = core.Layer() cl.forward([]) - l = fluid.imperative.PyLayer() + l = fluid.imperative.Layer() self.assertRaises(NotImplementedError, l.forward, []) + def test_pylayer_func_id(self): + + with fluid.imperative.guard(): + + class PyLayer1(fluid.imperative.PyLayer): + def __init__(self): + super(PyLayer1, self).__init__() + + @staticmethod + def forward(input): + return input + + @staticmethod + def backward(input): + return input + + class PyLayer2(fluid.imperative.PyLayer): + def __init__(self): + super(PyLayer2, self).__init__() + + @staticmethod + def forward(input): + return input + + @staticmethod + def backward(input): + return input + + py_layer_1 = PyLayer1() + py_layer_2 = PyLayer2() + py_layer_1(fluid.imperative.base.to_variable(np.ones([2, 2]))) + py_layer_2(fluid.imperative.base.to_variable(np.ones([2, 2]))) + id = py_layer_1.forward_id + self.assertGreater(id, 0) + self.assertEqual(py_layer_1.backward_id, id + 1) + self.assertEqual(py_layer_2.forward_id, id + 2) + self.assertEqual(py_layer_2.backward_id, id + 3) + py_layer_1(fluid.imperative.base.to_variable(np.ones([2, 2]))) + self.assertEqual(py_layer_1.forward_id, id) + + def test_pylayer(self): + np_inp = np.ones([2, 2], np.float32) + with fluid.imperative.guard(): + my_py_layer = MyPyLayer() + var_inp = fluid.imperative.base.to_variable(np_inp) + outs = my_py_layer(var_inp) + dy_out = np.sum(outs[0]._numpy()) + outs[0]._backward() + dy_grad = var_inp._gradient() + + with new_program_scope(): + inp = fluid.layers.data( + name="inp", shape=[2, 2], append_batch_size=False) + # TODO(panyx0718): Paddle doesn't diff against data `inp`. + x1 = inp * 1 + # TODO(panyx0718): If reduce_sum is skipped, the result is wrong. 
+ x = fluid.layers.reduce_sum(fluid.layers.tanh(x1)) + param_grads = fluid.backward.append_backward( + x, parameter_list=[x1.name])[0] + exe = fluid.Executor(fluid.CPUPlace()) + + static_out, static_grad = exe.run( + feed={inp.name: np_inp}, + fetch_list=[x.name, param_grads[1].name]) + + self.assertTrue(np.allclose(dy_out, static_out)) + self.assertTrue(np.allclose(dy_grad, static_grad)) + def test_layer_in_out(self): np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32) with fluid.imperative.guard(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py index 5d97edf8768d8d2cf1ba7f826fa4d588c30f2aee..63eeae4b712c2064309b664b91d5f0347b67817d 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py @@ -26,7 +26,7 @@ from paddle.fluid.imperative.base import to_variable from test_imperative_base import new_program_scope -class SimpleImgConvPool(fluid.imperative.PyLayer): +class SimpleImgConvPool(fluid.imperative.Layer): def __init__(self, num_channels, num_filters, @@ -72,7 +72,7 @@ class SimpleImgConvPool(fluid.imperative.PyLayer): return x -class MNIST(fluid.imperative.PyLayer): +class MNIST(fluid.imperative.Layer): def __init__(self, param_attr=None, bias_attr=None): super(MNIST, self).__init__() @@ -105,7 +105,6 @@ class TestImperativeMnist(unittest.TestCase): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed - # mnist = Conv2D(1, 20, 5) mnist = MNIST() sgd = SGDOptimizer(learning_rate=1e-3) train_reader = paddle.batch( @@ -126,16 +125,17 @@ class TestImperativeMnist(unittest.TestCase): label._stop_gradient = True cost = mnist(img) - loss = fluid.layers.reduce_mean(cost) - dy_out = loss._numpy() + loss = fluid.layers.cross_entropy(cost, label) + avg_loss = fluid.layers.mean(loss) + dy_out = avg_loss._numpy() if batch_id == 0: for param in fluid.default_main_program().global_block( ).all_parameters(): dy_param_init_value[param.name] = param._numpy() - loss._backward() - sgd.minimize(loss) + avg_loss._backward() + sgd.minimize(avg_loss) dy_param_value = {} for param in fluid.default_main_program().global_block( ).all_parameters(): @@ -147,7 +147,6 @@ class TestImperativeMnist(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace()) - # mnist = Conv2D(1, 20, 5) mnist = MNIST() sgd = SGDOptimizer(learning_rate=1e-3) train_reader = paddle.batch( @@ -157,8 +156,9 @@ class TestImperativeMnist(unittest.TestCase): name='pixel', shape=[1, 28, 28], dtype='float32') label = fluid.layers.data(name='label', shape=[1], dtype='int64') cost = mnist(img) - loss = fluid.layers.reduce_mean(cost) - sgd.minimize(loss) + loss = fluid.layers.cross_entropy(cost, label) + avg_loss = fluid.layers.mean(loss) + sgd.minimize(avg_loss) # initialize params and fetch them static_param_init_value = {} @@ -182,7 +182,7 @@ class TestImperativeMnist(unittest.TestCase): y_data = np.array([x[1] for x in data]).astype('int64').reshape( [128, 1]) - fetch_list = [loss.name] + fetch_list = [avg_loss.name] fetch_list.extend(static_param_name_list) out = exe.run(fluid.default_main_program(), feed={"pixel": x_data, diff --git a/python/paddle/fluid/tests/unittests/test_ir_graph.py b/python/paddle/fluid/tests/unittests/test_ir_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..ba6e4a8b2effade67821f5da9c2bbf7849a8cf79 --- /dev/null +++ 
b/python/paddle/fluid/tests/unittests/test_ir_graph.py @@ -0,0 +1,146 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import unittest +import six +from paddle import fluid + + +class TestIRGraph(unittest.TestCase): + """ + TODO(fc500110): `resolve_hazard` api will be tested when it can be used. + """ + + def test_nodes(self): + graph = build_graph() + self.assertTrue( + {node.name() + for node in graph.nodes()} == {"x1", "x2", "out", "sum"}) + + def test_has_set_get(self): + graph = build_graph() + for attr_name in ["int", "float", "string"]: + self.assertFalse(graph.has(attr_name)) + graph.set("int", 1) + graph.set("float", 0.5) + graph.set("string", "string") + for attr_name in ["int", "float", "string"]: + self.assertTrue(graph.has(attr_name)) + + self.assertTrue(graph.get_int("int") == 1) + self.assertTrue(graph.get_float("float") == 0.5) + self.assertTrue(graph.get_string("string") == "string") + + def test_erase(self): + graph = build_graph() + graph.set("test", 0) + self.assertTrue(graph.has("test")) + graph.erase("test") + self.assertFalse(graph.has("test")) + + def test_create_var_node(self): + prog = fluid.core.ProgramDesc() + block = prog.block(0) + shape = [10, 20] + x1 = block.var(six.b("x1")) + x1.set_type(fluid.core.VarDesc.VarType.LOD_TENSOR) + x1.set_shape(shape) + graph = fluid.core.Graph(prog) + node = graph.create_var_node(x1) + self.assertTrue(node.node_type() == fluid.core.Node.Type.Variable) + + def test_create_op_node(self): + prog = fluid.core.ProgramDesc() + block = prog.block(0) + sum_op_desc = block.append_op() + graph = fluid.core.Graph(prog) + node = graph.create_op_node(sum_op_desc) + self.assertTrue(node.node_type() == fluid.core.Node.Type.Operation) + + def test_create_control_dep_var(self): + graph = build_graph() + name = "__control_var@{}".format(len(graph.nodes())) + node = graph.create_control_dep_var() + self.assertTrue(node.name() == name) + + def test_create_empty_node(self): + prog = fluid.core.ProgramDesc() + graph = fluid.core.Graph(prog) + n1 = graph.create_empty_node('x', fluid.core.Node.Type.Operation) + self.assertTrue(n1.name() == 'x') + n2 = graph.create_empty_node('y', fluid.core.Node.Type.Variable) + self.assertTrue(n2.name() == 'y') + + def test_release_nodes(self): + graph = build_graph() + nodes = graph.release_nodes() + self.assertTrue(len(graph.nodes()) == 0) + self.assertTrue({node.name() + for node in nodes} == {"x1", "x2", "out", "sum"}) + + def test_remove_node(self): + graph = build_graph() + nodes = graph.nodes() + for node in nodes: + if node.name() == "sum": + break + self.assertTrue({node.name() + for node in nodes} == {"x1", "x2", "out", "sum"}) + nodes.remove(node) + self.assertTrue({node.name() for node in nodes} == {"x1", "x2", "out"}) + + def test_retrieve_node(self): + graph = build_graph() + nodes = [] + for i in range(len(graph.nodes())): + nodes.append(graph.retrieve_node(i)) + + for node in nodes: + self.assertTrue(node in graph.nodes()) 
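# A minimal sketch of the graph attribute API exercised by test_has_set_get
# above; the attribute names used here are arbitrary.
from paddle import fluid

prog = fluid.core.ProgramDesc()
graph = fluid.core.Graph(prog)
graph.set("num_iters", 10)  # stored as an int attribute
graph.set("scale", 0.5)     # stored as a float attribute
assert graph.has("num_iters") and graph.has("scale")
assert graph.get_int("num_iters") == 10
assert graph.get_float("scale") == 0.5
graph.erase("scale")
assert not graph.has("scale")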
+ + def resolve_hazard(self): + pass + + +def build_graph(): + prog = fluid.core.ProgramDesc() + block = prog.block(0) + + shape = [10, 20] + + # prepare input/output + x1 = block.var(six.b("x1")) + x1.set_type(fluid.core.VarDesc.VarType.LOD_TENSOR) + x1.set_shape(shape) + x2 = block.var(six.b("x2")) + x2.set_type(fluid.core.VarDesc.VarType.LOD_TENSOR) + x2.set_shape(shape) + + out = block.var(six.b("out")) + out.set_type(fluid.core.VarDesc.VarType.LOD_TENSOR) + + sum_op_desc = block.append_op() + sum_op_desc.set_type("sum") + sum_op_desc.set_input("X", ["x1", "x2"]) + sum_op_desc.set_output("Out", ["out"]) + + sum_op_desc.check_attrs() + sum_op_desc.infer_shape(block) + graph = fluid.core.Graph(prog) + return graph + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_operator_desc.py b/python/paddle/fluid/tests/unittests/test_operator_desc.py index 4153394c1da776d0a41e1415a09fa7d6f4b14d6d..37b9a9188ab44df81029ae6d9925ae21c1929cff 100644 --- a/python/paddle/fluid/tests/unittests/test_operator_desc.py +++ b/python/paddle/fluid/tests/unittests/test_operator_desc.py @@ -69,7 +69,7 @@ class TestOperator(unittest.TestCase): set(mul_op.attr_names), set([ "x_num_col_dims", "y_num_col_dims", "op_role", "op_role_var", - "op_namescope" + "op_namescope", "op_callstack" ])) self.assertEqual(mul_op.has_attr("x_num_col_dims"), True) self.assertEqual(mul_op.attr_type("x_num_col_dims"), core.AttrType.INT) diff --git a/python/paddle/fluid/tests/unittests/test_optimizer.py b/python/paddle/fluid/tests/unittests/test_optimizer.py index 4374d198f2f869afab5fb76fdcb43e3c445f7689..34c9b7e006950f1c10fb265ce903b1e836281de7 100644 --- a/python/paddle/fluid/tests/unittests/test_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_optimizer.py @@ -61,6 +61,48 @@ class TestOptimizer(unittest.TestCase): self.assertEqual([op.type for op in opts], ["sgd"]) +class TestOptimizerBackwardApplygrad(unittest.TestCase): + def test_sgd_optimizer(self): + def check_sgd_optimizer(optimizer_attr): + init_program = framework.Program() + program = framework.Program() + block = program.global_block() + mul_x = block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="mul.x", + optimize_attr=optimizer_attr) + mul_y = block.create_var( + dtype="float32", shape=[10, 8], lod_level=0, name="mul.y") + mul_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out") + mean_out = block.create_var( + dtype="float32", shape=[1], lod_level=0, name="mean.out") + block.append_op( + type="mul", + inputs={"X": mul_x, + "Y": mul_y}, + outputs={"Out": mul_out}, + attrs={"x_num_col_dims": 1}) + block.append_op( + type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}) + sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.01) + with framework.program_guard(program, init_program): + p_g = sgd_optimizer.backward(mean_out) + opts = sgd_optimizer.apply_gradients(p_g) + return opts + + opts = check_sgd_optimizer({'learning_rate': 1.1}) + self.assertEqual(len(opts), 3) + self.assertEqual([op.type for op in opts], + ["fill_constant", "elementwise_mul", "sgd"]) + + opts = check_sgd_optimizer({'learning_rate': 1.0}) + self.assertEqual(len(opts), 1) + self.assertEqual([op.type for op in opts], ["sgd"]) + + class TestMomentumOptimizer(unittest.TestCase): class MockMomentum(optimizer.MomentumOptimizer): def get_accumulators(self): @@ -99,8 +141,8 @@ class TestMomentumOptimizer(unittest.TestCase): params_grads = append_backward(mean_out) 
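# A minimal sketch of the backward()/apply_gradients() split covered by
# TestOptimizerBackwardApplygrad above: the (param, grad) pairs returned by
# backward() can be inspected or rewritten before apply_gradients() appends the
# update operators. The small network below is hypothetical.
import paddle.fluid as fluid

main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
    x = fluid.layers.data(name='x', shape=[4], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    pred = fluid.layers.fc(input=x, size=1)
    loss = fluid.layers.mean(
        fluid.layers.square_error_cost(input=pred, label=y))

    sgd = fluid.optimizer.SGD(learning_rate=0.01)
    params_grads = sgd.backward(loss)
    # params_grads could be filtered or rescaled here.
    optimize_ops = sgd.apply_gradients(params_grads)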
self.assertEqual(len(params_grads), 1) self.assertEqual(len(momentum_optimizer.get_accumulators()), 0) - opts = momentum_optimizer._create_optimization_pass( - params_grads, mul_out, init_program) + with framework.program_guard(program, init_program): + opts = momentum_optimizer.apply_gradients(params_grads) self.assertEqual(len(opts), 3) sgd_op = opts[-1] self.assertEqual([op.type for op in opts], @@ -153,8 +195,8 @@ class TestMomentumOptimizer(unittest.TestCase): params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(momentum_optimizer.get_accumulators()), 0) - opts = momentum_optimizer._create_optimization_pass( - params_grads, mul_out, init_program) + with framework.program_guard(program, init_program): + opts = momentum_optimizer.apply_gradients(params_grads) self.assertEqual(len(opts), 3) sgd_op = opts[-1] self.assertEqual([op.type for op in opts], @@ -216,8 +258,8 @@ class TestAdagradOptimizer(unittest.TestCase): params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(adagrad_optimizer.get_accumulators()), 0) - opts = adagrad_optimizer._create_optimization_pass( - params_grads, mul_out, init_program) + with framework.program_guard(program, init_program): + opts = adagrad_optimizer.apply_gradients(params_grads) self.assertEqual(len(opts), 3) self.assertEqual([op.type for op in opts], ["fill_constant", "elementwise_mul", "adagrad"]) @@ -280,8 +322,8 @@ class TestAdamOptimizer(unittest.TestCase): params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(adam_optimizer.get_accumulators()), 0) - opts = adam_optimizer._create_optimization_pass(params_grads, mul_out, - init_program) + with framework.program_guard(program, init_program): + opts = adam_optimizer.apply_gradients(params_grads) self.assertEqual(len(opts), 5) self.assertEqual( [op.type for op in opts], @@ -347,8 +389,8 @@ class TestAdamaxOptimizer(unittest.TestCase): params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(adamax_optimizer.get_accumulators()), 0) - opts = adamax_optimizer._create_optimization_pass(params_grads, mul_out, - init_program) + with framework.program_guard(program, init_program): + opts = adamax_optimizer.apply_gradients(params_grads) self.assertEqual(len(opts), 4) self.assertEqual( [op.type for op in opts], @@ -411,8 +453,8 @@ class TestDecayedAdagradOptimizer(unittest.TestCase): params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(decayed_adagrad_optimizer.get_accumulators()), 0) - opts = decayed_adagrad_optimizer._create_optimization_pass( - params_grads, mul_out, init_program) + with framework.program_guard(program, init_program): + opts = decayed_adagrad_optimizer.apply_gradients(params_grads) self.assertEqual(len(opts), 3) self.assertEqual( [op.type for op in opts], @@ -477,8 +519,8 @@ class TestFtrlOptimizer(unittest.TestCase): params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(ftrl_optimizer.get_accumulators()), 0) - opts = ftrl_optimizer._create_optimization_pass(params_grads, mul_out, - init_program) + with framework.program_guard(program, init_program): + opts = ftrl_optimizer.apply_gradients(params_grads) self.assertEqual(len(opts), 3) self.assertEqual([op.type for op in opts], ["fill_constant", "elementwise_mul", "ftrl"]) diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py 
b/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py index 1c6cfce0c2b772fa78fa08fa1bfb383c1e4f7939..ba63213a410b8b2579b6842c5a6ecd720c7957b3 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py @@ -16,6 +16,7 @@ from __future__ import print_function import paddle.dataset.conll05 as conll05 import paddle.fluid as fluid +from paddle.fluid import compiler import paddle.fluid.core as core import unittest import paddle @@ -157,10 +158,8 @@ class TestCRFModel(unittest.TestCase): exe = fluid.Executor(place) exe.run(startup) - pe = fluid.ParallelExecutor( - use_cuda=use_cuda, - loss_name=avg_cost.name, - build_strategy=build_strategy) + train_cp = compiler.CompiledProgram(main).with_data_parallel( + loss_name=avg_cost.name, build_strategy=build_strategy) feeder = fluid.DataFeeder( feed_list=[ @@ -172,8 +171,9 @@ class TestCRFModel(unittest.TestCase): data = train_data() for i in range(10): cur_batch = next(data) - print(pe.run(feed=feeder.feed(cur_batch), - fetch_list=[avg_cost.name])[0]) + print(exe.run(train_cp, + feed=feeder.feed(cur_batch), + fetch_list=[avg_cost.name])[0]) def _new_build_strategy(self, use_reduce=False): build_strategy = fluid.BuildStrategy() diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_dry_run.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_dry_run.py index 18d95c94ad36316b7149eb5412260b40a57ac002..17f8f5a0b4f753aabe8af3f97c2018cd2cf54dc1 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_dry_run.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_dry_run.py @@ -13,6 +13,7 @@ # limitations under the License. import paddle.fluid as fluid +from paddle.fluid import compiler import unittest import logging import six @@ -36,21 +37,18 @@ class TestBase(unittest.TestCase): with fluid.program_guard(main_prog, startup_prog): with fluid.scope_guard(scope): loss = network_func() - fluid.Executor( - fluid.CUDAPlace(0) - if use_gpu else fluid.CPUPlace()).run(startup_prog) + exe = fluid.Executor( + fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()) + exe.run(startup_prog) for _ in six.moves.xrange(iter): exe_strategy = fluid.ExecutionStrategy() exe_strategy._dry_run = True exe_strategy.use_experimental_executor = use_experimental_executor - pe = fluid.ParallelExecutor( - use_cuda=use_gpu, - loss_name=loss.name, - main_program=main_prog, - exec_strategy=exe_strategy) + train_cp = compiler.CompiledProgram(main_prog).with_data_parallel( + loss_name=loss.name, exec_strategy=exe_strategy) for _ in six.moves.xrange(iter_per_pe): - pe.run([]) + exe.run(train_cp) class TestMNISTDryRun(TestBase): diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py index a49c5d9b43ae1bffa7cb57764db497f68030b151..ee0941f19838355180edb5771f1e85292a64de59 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py @@ -14,13 +14,12 @@ from __future__ import print_function -import paddle.dataset.flowers as flowers import math import paddle.fluid as fluid +from paddle.fluid import compiler import paddle.fluid.core as core import unittest import numpy as np -import paddle import os @@ -38,114 +37,112 @@ def Lenet(data, class_dim): return fc2 -class TestFetchOp(unittest.TestCase): - def parallel_exe(self, train_inputs, 
seed, use_cuda): - main = fluid.Program() +class TestFetchAndFeed(unittest.TestCase): + def parallel_exe(self, use_cuda, run_parallel_exe, seed=1): + main_program = fluid.Program() startup = fluid.Program() startup.random_seed = seed - with fluid.program_guard(main, startup): + with fluid.program_guard(main_program, startup): data = fluid.layers.data( name='image', shape=[3, 224, 224], dtype='float32') label = fluid.layers.data(name='label', shape=[1], dtype='int64') out = Lenet(data, class_dim=102) loss = fluid.layers.cross_entropy(input=out, label=label) loss = fluid.layers.mean(loss) - opt = fluid.optimizer.Momentum( learning_rate=0.1, momentum=0.9, regularization=fluid.regularizer.L2Decay(1e-4)) - opt.minimize(loss) - # TODO(zcd): I found that onece the memory optimizer is open, - # parallel_exe doesn't fetch some variable, such as conv2d_0.b_0@GRAD, - # conv2d_1.b_0@GRAD. Those variables should not be pruned. - # fluid.memory_optimize(main) - - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(startup) - - feeder = fluid.DataFeeder(place=place, feed_list=[data, label]) - pe = fluid.ParallelExecutor( - use_cuda=use_cuda, loss_name=loss.name, main_program=main) - - fetch_list = [] - all_vars = main.global_block().vars - for k, v in all_vars.items(): - if 'tmp' not in k and k[0] is not '_' or v.persistable: - fetch_list.append(k) - - for data in train_inputs: - ret = pe.run(fetch_list, - feed=feeder.feed(data), - return_numpy=True) - for i in range(len(fetch_list)): - assert not math.isnan(np.sum(ret[i])) and \ - not math.isinf(np.sum(ret[i])) - - @unittest.skip(reason="CI timeout") - def test_fetch_op(self): - tst_reader = paddle.batch(flowers.test(use_xmap=False), batch_size=16) - tst_reader_iter = tst_reader() - - iters = 3 - train_inputs = [] - for i in range(iters): - train_inputs.append(next(tst_reader_iter)) + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(startup) - os.environ['CPU_NUM'] = str(4) - if core.is_compiled_with_cuda(): - self.parallel_exe(train_inputs, seed=1, use_cuda=True) - self.parallel_exe(train_inputs, seed=1, use_cuda=False) + train_cp = compiler.CompiledProgram(main_program).with_data_parallel( + loss_name=loss.name) + + run_parallel_exe(train_cp, exe, use_cuda, data, label, loss) + + def run_parallel_exe_with_fetch(self, compiled_program, exe, use_cuda, data, + label, loss): + def get_data(batch_size=8): + np.random.seed(5) + while True: + img = np.random.random( + size=[batch_size, 3, 224, 224]).astype(np.float32) + l = (np.random.random(size=[batch_size, 1]) * + 10).astype(np.int64) + yield img, l + + # TODO(zcd): I found that onece the memory optimizer is open, + # parallel_exe doesn't fetch some variable, such as conv2d_0.b_0@GRAD, + # conv2d_1.b_0@GRAD. Those variables should not be pruned. 
+ # fluid.memory_optimize(main) + fetch_list = [] + all_vars = compiled_program._program.global_block().vars + + for k, v in all_vars.items(): + if ('tmp' not in k) and ( + k[0] is not '_' or v.persistable + ) and v.type == core.VarDesc.VarType.LOD_TENSOR: + fetch_list.append(k) + + for batch_id, img_label in enumerate(get_data()): + img, l = img_label + train_inputs = {data.name: img, label.name: l} + ret = exe.run(compiled_program, + fetch_list=fetch_list, + feed=train_inputs, + return_numpy=True) + for i in range(len(fetch_list)): + assert not math.isnan(np.sum(ret[i])) and \ + not math.isinf(np.sum(ret[i])) + if batch_id == 2: + break - -class TestFeedParallel(unittest.TestCase): - def parallel_exe(self, use_cuda, seed): - main = fluid.Program() - startup = fluid.Program() - startup.random_seed = seed - with fluid.scope_guard(fluid.core.Scope()): - with fluid.program_guard(main, startup): - data = fluid.layers.data( - name='image', shape=[3, 224, 224], dtype='float32') - label = fluid.layers.data( - name='label', shape=[1], dtype='int64') - out = Lenet(data, class_dim=102) - loss = fluid.layers.cross_entropy(input=out, label=label) - loss = fluid.layers.mean(loss) - opt = fluid.optimizer.Momentum( - learning_rate=0.1, - momentum=0.9, - regularization=fluid.regularizer.L2Decay(1e-4)) - - opt.minimize(loss) + def run_parallel_exe_with_feed(self, compiled_program, exe, use_cuda, data, + label, loss): + def get_data(batch_size=8): + np.random.seed(5) + while True: + train_data = [] + for _ in range(batch_size): + img = np.random.random( + size=[1, 3, 224, 224]).astype(np.float32) + label = (np.random.random(size=[1, 1]) * + 10).astype(np.int64) + train_data.append([img, label]) + yield train_data place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() feeder = fluid.DataFeeder(place=place, feed_list=[data, label]) - reader = feeder.decorate_reader( - paddle.batch( - flowers.train(), batch_size=16), multi_devices=True) - - exe = fluid.Executor(place) - exe.run(startup) - - pe = fluid.ParallelExecutor( - use_cuda=use_cuda, loss_name=loss.name, main_program=main) + reader = feeder.decorate_reader(get_data, multi_devices=True) for batch_id, data in enumerate(reader()): - loss_np = pe.run(feed=data, fetch_list=[loss.name])[0] + loss_np = exe.run(compiled_program, + feed=data, + fetch_list=[loss.name])[0] print(batch_id, loss_np) if batch_id == 2: break - @unittest.skip(reason="CI timeout") - def test_feed_op(self): + def test_fetch(self): + os.environ['CPU_NUM'] = str(4) + if core.is_compiled_with_cuda(): + self.parallel_exe( + use_cuda=True, + run_parallel_exe=self.run_parallel_exe_with_fetch) + self.parallel_exe( + use_cuda=False, run_parallel_exe=self.run_parallel_exe_with_fetch) + + def test_feed(self): os.environ['CPU_NUM'] = str(4) if core.is_compiled_with_cuda(): - self.parallel_exe(use_cuda=True, seed=1) - self.parallel_exe(use_cuda=False, seed=1) + self.parallel_exe( + use_cuda=True, run_parallel_exe=self.run_parallel_exe_with_feed) + self.parallel_exe( + use_cuda=False, run_parallel_exe=self.run_parallel_exe_with_feed) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py index 9768f7db26c76b1f6fcffa24fd2ea3c0abd17aeb..cb1f5fdaee8253bbb3df3063ecca9859682f8bb0 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py @@ -74,7 +74,11 @@ class 
TestMNIST(TestParallelExecutorBase): label = np.ones(shape=[32, 1], dtype='int64') return img, label - def _compare_reduce_and_allreduce(self, model, use_cuda): + def _compare_reduce_and_allreduce(self, + model, + use_cuda, + delta1=1e-6, + delta2=1e-4): if use_cuda and not core.is_compiled_with_cuda(): return @@ -95,9 +99,9 @@ class TestMNIST(TestParallelExecutorBase): use_reduce=True) for loss in zip(all_reduce_first_loss, reduce_first_loss): - self.assertAlmostEqual(loss[0], loss[1], delta=1e-6) + self.assertAlmostEqual(loss[0], loss[1], delta=delta1) for loss in zip(all_reduce_last_loss, reduce_last_loss): - self.assertAlmostEqual(loss[0], loss[1], delta=1e-4) + self.assertAlmostEqual(loss[0], loss[1], delta=delta2) # simple_fc def check_simple_fc_convergence(self, use_cuda, use_reduce=False): @@ -174,8 +178,9 @@ class TestMNIST(TestParallelExecutorBase): self.check_batchnorm_fc_convergence(use_cuda, use_fast_executor) def test_batchnorm_fc_with_new_strategy(self): - # FIXME(zcd): close this test temporally. - # self._compare_reduce_and_allreduce(fc_with_batchnorm, True) + # NOTE: the computation result of nccl_reduce is non-deterministic, + # related issue: https://github.com/NVIDIA/nccl/issues/157 + self._compare_reduce_and_allreduce(fc_with_batchnorm, True, 1e-5, 1e-2) self._compare_reduce_and_allreduce(fc_with_batchnorm, False) diff --git a/python/paddle/fluid/tests/unittests/test_pass_builder.py b/python/paddle/fluid/tests/unittests/test_pass_builder.py index 5a3ec8ff0180281babeaa006133b3ff9dc6d8083..8c9e489e02839e25cfabe14c16bfd91a908bd734 100644 --- a/python/paddle/fluid/tests/unittests/test_pass_builder.py +++ b/python/paddle/fluid/tests/unittests/test_pass_builder.py @@ -16,6 +16,7 @@ from __future__ import print_function import paddle.fluid as fluid import paddle.fluid.core as core +from paddle.fluid import compiler import numpy as np import unittest import os @@ -61,22 +62,21 @@ class TestPassBuilder(unittest.TestCase): exe.run(startup) feed_dict = {'image': image, 'label': label} - train_exe = fluid.ParallelExecutor( - use_cuda=use_cuda, + train_cp = compiler.CompiledProgram(main).with_data_parallel( + loss_name=loss.name, build_strategy=build_strategy) + test_cp = compiler.CompiledProgram(test_program).with_data_parallel( loss_name=loss.name, - main_program=main, - build_strategy=build_strategy) - - test_exe = fluid.ParallelExecutor( - use_cuda=use_cuda, - main_program=test_program, - share_vars_from=train_exe, - build_strategy=build_strategy) + build_strategy=build_strategy, + share_vars_from=train_cp) for i in range(5): - test_loss, = test_exe.run([loss.name], feed=feed_dict) - - train_loss, = train_exe.run([loss.name], feed=feed_dict) + _ = exe.run(train_cp, fetch_list=[loss.name], feed=feed_dict) + test_loss, = exe.run(test_cp, + fetch_list=[loss.name], + feed=feed_dict) + train_loss = exe.run(train_cp, + fetch_list=[loss.name], + feed=feed_dict) avg_test_loss_val = np.array(test_loss).mean() if math.isnan(float(avg_test_loss_val)): diff --git a/python/paddle/fluid/tests/unittests/test_py_func_op.py b/python/paddle/fluid/tests/unittests/test_py_func_op.py index 655378f7f8c18f5936643f6c178d0e6b3dd8cac8..18207373acae45678a68d84bdf05776f5cffca43 100644 --- a/python/paddle/fluid/tests/unittests/test_py_func_op.py +++ b/python/paddle/fluid/tests/unittests/test_py_func_op.py @@ -14,6 +14,7 @@ import os import paddle.fluid as fluid +from paddle.fluid import compiler import paddle import unittest import six @@ -140,9 +141,10 @@ def test_main(use_cuda, use_py_func_op, 
use_parallel_executor): exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) + + train_cp = compiler.CompiledProgram(fluid.default_main_program()) if use_parallel_executor: - exe = fluid.ParallelExecutor( - use_cuda=use_cuda, loss_name=loss.name) + train_cp = train_cp.with_data_parallel(loss_name=loss.name) fetch_list = [loss.name] else: fetch_list = [loss] @@ -150,9 +152,10 @@ def test_main(use_cuda, use_py_func_op, use_parallel_executor): ret = [] for epoch_id in six.moves.range(2): for d in r(): - L, = exe.run(feed=feeder.feed(d), fetch_list=fetch_list) + L, = exe.run(train_cp, + feed=feeder.feed(d), + fetch_list=fetch_list) ret.append(L) - return np.array(ret) diff --git a/python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py b/python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py index 559386545e7304aab50043a64f7a89f48d7bbea9..a3701f0808b98b80b62866ffe1250d065361025c 100644 --- a/python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py +++ b/python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py @@ -16,6 +16,7 @@ from __future__ import print_function import unittest import paddle.fluid as fluid +from paddle.fluid import compiler import paddle.fluid.core as core import numpy as np import threading @@ -188,18 +189,18 @@ class TestPyReaderUsingExecutor(unittest.TestCase): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - startup_exe = fluid.Executor(place) - startup_exe.run(startup_program) + exe = fluid.Executor(place) + exe.run(startup_program) + train_cp = compiler.CompiledProgram(main_program) if use_parallel_executor: - main_exe = fluid.ParallelExecutor(use_cuda, loss_name=loss.name) + train_cp = train_cp.with_data_parallel(loss_name=loss.name) if use_cuda: self.batch_size_times = core.get_cuda_device_count() else: self.batch_size_times = int( os.environ.get('CPU_NUM', multiprocessing.cpu_count())) else: - main_exe = startup_exe self.batch_size_times = 1 reader = self.tensor_reader(use_decorate_paddle_reader) @@ -214,13 +215,17 @@ class TestPyReaderUsingExecutor(unittest.TestCase): self.outputs = [] for _ in range(self.iterations): - fetches = main_exe.run(fetch_list=[in_data.name, label.name]) + fetches = exe.run(train_cp, + fetch_list=[in_data.name, label.name]) fetches = [as_numpy(fetch) for fetch in fetches] self.outputs.append(fetches) feed_queue.close() self.validate() - if not use_decorate_paddle_reader: + if use_decorate_paddle_reader: + py_reader.exited = True + py_reader.thread.join() + else: thread.join() def validate(self): diff --git a/python/paddle/fluid/tests/unittests/test_reader_reset.py b/python/paddle/fluid/tests/unittests/test_reader_reset.py index 7eeffa1039a1e14a8883c4a78305d253a4518b26..da89ccb961c0af99aea117218eb429a5599c2bd2 100644 --- a/python/paddle/fluid/tests/unittests/test_reader_reset.py +++ b/python/paddle/fluid/tests/unittests/test_reader_reset.py @@ -15,6 +15,7 @@ from __future__ import print_function import os import paddle.fluid as fluid +from paddle.fluid import compiler import paddle import numpy as np import unittest @@ -74,37 +75,21 @@ class TestReaderReset(unittest.TestCase): exe = fluid.Executor(place) exe.run(startup_prog) - build_strategy = fluid.BuildStrategy() - exec_strategy = fluid.ExecutionStrategy() - parallel_exe = fluid.ParallelExecutor( - use_cuda=self.use_cuda, - main_program=main_prog, - build_strategy=build_strategy, - exec_strategy=exec_strategy) - - data_appeared = [False] * self.total_ins_num + train_cp = 
compiler.CompiledProgram(main_prog).with_data_parallel() pass_count = 0 while (True): try: - data_val, label_val = parallel_exe.run(fetch_list, - return_numpy=True) + data_val, label_val = exe.run(train_cp, + fetch_list=fetch_list, + return_numpy=True) ins_num = data_val.shape[0] broadcasted_label = np.ones((ins_num, ) + tuple( self.ins_shape)) * label_val.reshape((ins_num, 1)) self.assertEqual(data_val.all(), broadcasted_label.all()) - for l in label_val: - self.assertFalse(data_appeared[l[0]]) - data_appeared[l[0]] = True except fluid.core.EOFException: pass_count += 1 - if with_double_buffer: - data_appeared = data_appeared[:-parallel_exe.device_count * - self.batch_size] - for i in data_appeared: - self.assertTrue(i) if pass_count < self.test_pass_num: - data_appeared = [False] * self.total_ins_num data_reader_handle.reset() else: break diff --git a/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py b/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py index 28c8c4699adbc108c05e4a500815752e2ec24c61..a7fd271ae7dc554813e8c5f18487add8eff0a2b5 100644 --- a/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py @@ -22,6 +22,14 @@ import numpy import functools +def convert_to_offset(lod): + offset = [[0] for i in lod] + for i, level in enumerate(lod): + for seq_len in level: + offset[i].append(offset[i][-1] + seq_len) + return offset + + class TestReorderLoDTensor(unittest.TestCase): num_seq = 5 # [name, shape, lod_level] pair indicating data info of source and target @@ -91,13 +99,6 @@ class TestReorderLoDTensor(unittest.TestCase): self.inputs[desc[0]] = tensor def reorder(self): - def convert_to_offset(lod): - offset_lod = [[0] for i in lod] - for i, level in enumerate(lod): - for seq_len in level: - offset_lod[i].append(offset_lod[i][-1] + seq_len) - return offset_lod - level = 0 # compute the rank_table according to ref_lod ref_lod = self.data[self.data_desc[1][0]][1][level] diff --git a/python/paddle/fluid/tests/unittests/test_seq_pool.py b/python/paddle/fluid/tests/unittests/test_seq_pool.py index a80ad5b079891efe1b0e1222b3c2455d4891d5f5..176265428c83c7758eabf86b5b703363b6ee3919 100644 --- a/python/paddle/fluid/tests/unittests/test_seq_pool.py +++ b/python/paddle/fluid/tests/unittests/test_seq_pool.py @@ -17,33 +17,43 @@ from __future__ import print_function import unittest import numpy as np from op_test import OpTest +from test_reorder_lod_tensor import convert_to_offset -class TestSeqAvgPool(OpTest): - def convert_to_offset(self, lod): - offset = [[0] for i in lod] - for i, level in enumerate(lod): - for seq_len in level: - offset[i].append(offset[i][-1] + seq_len) - return offset +def compute_seqpool_sum(x, offset, out): + for i in range(len(offset[0]) - 1): + sub_x = x[offset[0][i]:offset[0][i + 1], :] + out[i] = sub_x.sum(axis=0) + + +def compute_seqpool_avg(x, offset, out): + for i in range(len(offset[0]) - 1): + sub_x = x[offset[0][i]:offset[0][i + 1], :] + out[i] = sub_x.mean(axis=0) + +def compute_seqpool_sqrt(x, offset, out): + for i in range(len(offset[0]) - 1): + sub_x = x[offset[0][i]:offset[0][i + 1], :] + seq_len = offset[0][i + 1] - offset[0][i] + out[i] = sub_x.sum(axis=0) / np.sqrt(seq_len) + + +class TestSeqAvgPool(OpTest): def set_data(self): self.op_type = 'sequence_pool' # one level, batch size is 4 x = np.random.uniform(0.1, 1, [11, 23]).astype('float32') lod = [[11]] self.inputs = {'X': (x, lod)} - offset = self.convert_to_offset(lod) - + offset = 
convert_to_offset(lod) out = np.zeros((len(lod[0]), 23)).astype('float32') self.outputs = {'Out': out} return x, offset, out def compute(self, x, offset, out): self.attrs = {'pooltype': "AVERAGE"} - for i in range(len(offset[0]) - 1): - sub_x = x[offset[0][i]:offset[0][i + 1], :] - out[i] = sub_x.mean(axis=0) + compute_seqpool_avg(x, offset, out) def setUp(self): x, offset, out = self.set_data() @@ -62,9 +72,7 @@ class TestSeqAvgPool(OpTest): class TestSeqSumPool(TestSeqAvgPool): def compute(self, x, offset, out): self.attrs = {'pooltype': "SUM"} - for i in range(len(offset[0]) - 1): - sub_x = x[offset[0][i]:offset[0][i + 1], :] - out[i] = sub_x.sum(axis=0) + compute_seqpool_sum(x, offset, out) class TestSeqMaxPool(TestSeqAvgPool): @@ -72,7 +80,7 @@ class TestSeqMaxPool(TestSeqAvgPool): self.op_type = 'sequence_pool' x = np.random.uniform(0.1, 1, [13, 23]).astype('float32') lod = [[13]] - offset = self.convert_to_offset(lod) + offset = convert_to_offset(lod) for i in range(len(offset[0]) - 1): l = offset[0][i + 1] - offset[0][i] x[offset[0][i] + np.random.randint(l), :] += 2.0 @@ -93,10 +101,7 @@ class TestSeqMaxPool(TestSeqAvgPool): class TestSeqSqrtPool(TestSeqAvgPool): def compute(self, x, offset, out): self.attrs = {'pooltype': "SQRT"} - for i in range(len(offset[0]) - 1): - sub_x = x[offset[0][i]:offset[0][i + 1], :] - seq_len = offset[0][i + 1] - offset[0][i] - out[i] = sub_x.sum(axis=0) / np.sqrt(seq_len) + compute_seqpool_sqrt(x, offset, out) class TestSeqLastPool(TestSeqAvgPool): @@ -122,7 +127,7 @@ class TestSeqAvgPool2D(TestSeqAvgPool): x = np.random.uniform(0.1, 1, [13, 3, 17]).astype('float32') lod = [[4, 1, 3, 5]] self.inputs = {'X': (x, lod)} - offset = self.convert_to_offset(lod) + offset = convert_to_offset(lod) out = np.zeros((4, 3, 17)).astype('float32') self.outputs = {'Out': out} @@ -167,7 +172,7 @@ class TestSeqMaxPool2D(TestSeqAvgPool2D): x = np.random.uniform(0.1, 1, [13, 3, 11]).astype('float32') lod = [[4, 1, 3, 5]] self.inputs = {'X': (x, lod)} - offset = self.convert_to_offset(lod) + offset = convert_to_offset(lod) for i in range(len(offset[0]) - 1): l = offset[0][i + 1] - offset[0][i] x[offset[0][i] + np.random.randint(l), :] += 1.0 diff --git a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py index 37ee880970cf7f6f235e7c43697b2b7872bed38b..b0494f114c5f7f4449e87ec67b97924fe77cd8c9 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py @@ -28,6 +28,7 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): def initParams(self): self.numeric_stable_mode = False + self.dtype = np.float64 def setUp(self): self.initParams() @@ -36,19 +37,19 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): class_num = 37 logits = np.random.uniform(0.1, 1.0, - [batch_size, class_num]).astype("float64") + [batch_size, class_num]).astype(self.dtype) softmax = np.apply_along_axis(stable_softmax, 1, logits) labels = np.random.randint(0, class_num, [batch_size, 1], dtype="int64") cross_entropy = np.asmatrix( [[-np.log(softmax[i][labels[i][0]])] for i in range(softmax.shape[0])], - dtype="float64") + dtype=self.dtype) self.inputs = {"Logits": logits, "Label": labels} self.outputs = { - "Softmax": softmax.astype("float64"), - "Loss": cross_entropy.astype("float64") + "Softmax": softmax.astype(self.dtype), + "Loss": cross_entropy.astype(self.dtype) } self.attrs = {"numeric_stable_mode": 
diff --git a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
index 37ee880970cf7f6f235e7c43697b2b7872bed38b..b0494f114c5f7f4449e87ec67b97924fe77cd8c9 100644
--- a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
+++ b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
@@ -28,6 +28,7 @@ class TestSoftmaxWithCrossEntropyOp(OpTest):
 
     def initParams(self):
         self.numeric_stable_mode = False
+        self.dtype = np.float64
 
     def setUp(self):
         self.initParams()
@@ -36,19 +37,19 @@ class TestSoftmaxWithCrossEntropyOp(OpTest):
         class_num = 37
 
         logits = np.random.uniform(0.1, 1.0,
-                                   [batch_size, class_num]).astype("float64")
+                                   [batch_size, class_num]).astype(self.dtype)
         softmax = np.apply_along_axis(stable_softmax, 1, logits)
         labels = np.random.randint(0, class_num, [batch_size, 1], dtype="int64")
 
         cross_entropy = np.asmatrix(
             [[-np.log(softmax[i][labels[i][0]])]
              for i in range(softmax.shape[0])],
-            dtype="float64")
+            dtype=self.dtype)
 
         self.inputs = {"Logits": logits, "Label": labels}
         self.outputs = {
-            "Softmax": softmax.astype("float64"),
-            "Loss": cross_entropy.astype("float64")
+            "Softmax": softmax.astype(self.dtype),
+            "Loss": cross_entropy.astype(self.dtype)
         }
         self.attrs = {"numeric_stable_mode": self.numeric_stable_mode}
 
@@ -56,7 +57,7 @@ class TestSoftmaxWithCrossEntropyOp(OpTest):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(["Logits"], "Loss")
+        self.check_grad(["Logits"], "Loss", max_relative_error=0.05)
 
 
 class TestSoftmaxWithCrossEntropyOpNoCudnn(TestSoftmaxWithCrossEntropyOp):
@@ -64,6 +65,55 @@ class TestSoftmaxWithCrossEntropyOpNoCudnn(TestSoftmaxWithCrossEntropyOp):
         self.numeric_stable_mode = True
 
 
+class TestSoftmaxWithCrossEntropyOpFp16(TestSoftmaxWithCrossEntropyOp):
+    def initParams(self):
+        self.numeric_stable_mode = False
+        self.dtype = np.float16
+
+    def setUp(self):
+        self.initParams()
+        self.op_type = "softmax_with_cross_entropy"
+        batch_size = 41
+        class_num = 37
+
+        # NOTE: numpy float16 has very low precision, so use float32 for the numpy reference check.
+        logits = np.random.uniform(0.1, 1.0,
+                                   [batch_size, class_num]).astype(np.float32)
+        softmax = np.apply_along_axis(stable_softmax, 1, logits)
+        labels = np.random.randint(0, class_num, [batch_size, 1], dtype="int64")
+
+        cross_entropy = np.asmatrix(
+            [[-np.log(softmax[i][labels[i][0]])]
+             for i in range(softmax.shape[0])],
+            dtype=np.float32)
+
+        self.inputs = {
+            "Logits": logits.astype(self.dtype).view(np.uint16),
+            "Label": labels
+        }
+        self.outputs = {
+            "Softmax": softmax.astype(self.dtype),
+            "Loss": cross_entropy.astype(self.dtype)
+        }
+        self.attrs = {"numeric_stable_mode": self.numeric_stable_mode}
+
+    def test_check_output(self):
+        self.check_output(atol=1e-2)
+
+    def test_check_grad(self):
+        self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
+
+
+class TestSoftmaxWithCrossEntropyOpNoCudnnFp16(
+        TestSoftmaxWithCrossEntropyOpFp16):
+    def initParams(self):
+        self.numeric_stable_mode = True
+        self.dtype = np.float16
+
+    def test_check_grad(self):
+        self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
+
+
 class TestSoftmaxWithCrossEntropyOp2(OpTest):
     """
     Test softmax with cross entropy operator with soft labels.
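The new FP16 cases keep the float32 numpy reference and only reinterpret the half-precision logits' bits as uint16 when feeding the test harness, as the patch does with .view(np.uint16). A minimal sketch of that round trip (shapes and values are illustrative):

    import numpy as np

    logits = np.random.uniform(0.1, 1.0, [2, 3]).astype(np.float32)
    as_fp16 = logits.astype(np.float16)   # what the operator actually computes on
    as_bits = as_fp16.view(np.uint16)     # bit pattern handed to the test harness
    assert np.array_equal(as_bits.view(np.float16), as_fp16)  # no information lost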
+ """ + + def setUp(self): + self.op_type = "teacher_student_sigmoid_loss" + batch_size = 16 + num_classes = 1 + self.inputs = { + 'X': logit( + np.random.uniform(0, 1, (batch_size, num_classes)) + .astype("float32")), + 'Label': np.random.uniform(0, 2, (batch_size, num_classes)) + .astype("float32") + } + outs = [] + for index, label in enumerate(self.inputs["Label"]): + x = self.inputs["X"][index] + if label < -1.0: + outs.append(max(x, 0.0) + log(1.0 + exp(-abs(x)))) + elif label < 0.0: + outs.append(max(x, 0.0) - x + log(1.0 + exp(-abs(x)))) + elif label < 1.0: + outs.append(max(x, 0.0) + log(1.0 + exp(-abs(x))) + \ + max(x, 0.0) - x * label + log(1.0 + exp(-abs(x)))) + else: + outs.append(max(x, 0.0) - x + log(1.0 + exp(-abs(x))) + \ + max(x, 0.0) - x * (label - 1.0) + log(1.0 + exp(-abs(x)))) + self.outputs = {'Y': np.array(outs)} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(["X"], "Y", numeric_grad_delta=0.005) diff --git a/python/paddle/fluid/tests/unittests/test_weight_decay.py b/python/paddle/fluid/tests/unittests/test_weight_decay.py index f37d2bfb2e86b452cf7fd05c3e5871de2e33d629..e5e7e76737177f7f4aaae7d7e28e9e5166b96de5 100644 --- a/python/paddle/fluid/tests/unittests/test_weight_decay.py +++ b/python/paddle/fluid/tests/unittests/test_weight_decay.py @@ -22,6 +22,7 @@ import paddle import paddle.fluid.core as core import paddle.fluid as fluid +from paddle.fluid import compiler def get_places(): @@ -111,17 +112,17 @@ class TestWeightDecay(unittest.TestCase): if use_reduce else fluid.BuildStrategy.ReduceStrategy.AllReduce build_strategy.memory_optimize = use_ir_memory_optimize - parallel_exe = fluid.ParallelExecutor( - use_cuda, + train_cp = compiler.CompiledProgram(fluid.default_main_program( + )).with_data_parallel( loss_name=loss.name, exec_strategy=exec_strategy, build_strategy=build_strategy) loss_set = [] for data in self.train_data: - out = parallel_exe.run(feed=feeder.feed(data), - fetch_list=[loss.name]) - print("loss %s" % (np.average(out))) + out = exe.run(train_cp, + feed=feeder.feed(data), + fetch_list=[loss.name]) loss_set.append(np.average(out)) return loss_set diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py index 07343b4051e0f44996d1d4617e2cbd1a0d22ce3e..ea5a4cf7cdb3ef91a02bb88d9b859da1ecd1ed0b 100644 --- a/python/paddle/fluid/transpiler/distribute_transpiler.py +++ b/python/paddle/fluid/transpiler/distribute_transpiler.py @@ -752,12 +752,6 @@ class DistributeTranspiler(object): elif op not in lr_ops: self._append_pserver_non_opt_ops(block, op) - def __op_have_grad_input__(op): - for varname in op.input_arg_names: - if varname.find("@GRAD") >= 0: - return varname - return "" - def __clone_lr_op_sub_block__(op, program, lr_block): if not op.has_attr('sub_block'): return @@ -808,7 +802,7 @@ class DistributeTranspiler(object): merged_var = None for _, op in enumerate(self.optimize_ops): # find the origin grad var before clipping/L2Decay, - # merged_var should be the input var name of L2Decaybuil + # merged_var should be the input var name of L2Decay grad_varname_for_block = op.attr(OP_ROLE_VAR_ATTR_NAME)[1] if op.attr(OP_ROLE_VAR_ATTR_NAME)[ 0] == optimize_target_param_name: @@ -1684,7 +1678,16 @@ class DistributeTranspiler(object): if self.config.enable_dc_asgd: new_inputs[key] = dc else: - new_inputs[key] = merged_var + # Note!! 
diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py
index 07343b4051e0f44996d1d4617e2cbd1a0d22ce3e..ea5a4cf7cdb3ef91a02bb88d9b859da1ecd1ed0b 100644
--- a/python/paddle/fluid/transpiler/distribute_transpiler.py
+++ b/python/paddle/fluid/transpiler/distribute_transpiler.py
@@ -752,12 +752,6 @@ class DistributeTranspiler(object):
             elif op not in lr_ops:
                 self._append_pserver_non_opt_ops(block, op)
 
-        def __op_have_grad_input__(op):
-            for varname in op.input_arg_names:
-                if varname.find("@GRAD") >= 0:
-                    return varname
-            return ""
-
         def __clone_lr_op_sub_block__(op, program, lr_block):
             if not op.has_attr('sub_block'):
                 return
@@ -808,7 +802,7 @@ class DistributeTranspiler(object):
         merged_var = None
         for _, op in enumerate(self.optimize_ops):
             # find the origin grad var before clipping/L2Decay,
-            # merged_var should be the input var name of L2Decaybuil
+            # merged_var should be the input var name of L2Decay
             grad_varname_for_block = op.attr(OP_ROLE_VAR_ATTR_NAME)[1]
             if op.attr(OP_ROLE_VAR_ATTR_NAME)[
                     0] == optimize_target_param_name:
@@ -1684,7 +1678,16 @@ class DistributeTranspiler(object):
                 if self.config.enable_dc_asgd:
                     new_inputs[key] = dc
                 else:
-                    new_inputs[key] = merged_var
+                    # NOTE: this handles L2Decay on a sparse gradient, which creates a new
+                    # tensor for the decayed gradient instead of modifying the original one in place.
+                    origin_grad_name = opt_op.input(key)[0]
+                    if core.kNewGradSuffix(
+                    ) in origin_grad_name and pserver_block.has_var(
+                            origin_grad_name):
+                        new_grad = pserver_block.var(origin_grad_name)
+                        new_inputs[key] = new_grad
+                    else:
+                        new_inputs[key] = merged_var
             elif key == "Param":
                 param_block = _get_param_block(opt_op)
                 if not param_block: